# Shared imports for the parsers below. Every parser also relies on the
# project's `validate` helper module and on an `errors` collection
# (a module-level list here, `self.errors` in the class-based parsers)
# that are defined elsewhere in the project.
import collections
import csv
import operator  # used by the parsers that sort their input rows
import os


def getOrders(path, columns):
    with open(path) as file:
        prevRow = list()
        parsedRows = list()
        reader = csv.reader(file)
        next(reader)  # skip header row
        for row in reader:
            if len(row) < 2 or (prevRow and row[0] == prevRow[0]):
                continue  # skip if < 2 cols or same order as prev row
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            newRow['companyCode'] = 112  # marvellous
            newRow['merchantID'] = 40
            newRow['completeOrderReference'] = validate.clean(row[1])
            newRow['shortOrderReference'] = validate.clean(row[1])
            newRow['fullName'] = validate.clean(row[6]) + ' ' + validate.clean(row[7])
            newRow["originFile"] = os.path.basename(path)
            newRow['phoneNumber'] = validate.phone(row[23])
            newRow['address1'] = validate.clean(row[8])
            newRow['address2'] = validate.clean(row[9])
            newRow['town'] = validate.clean(row[10])
            newRow['packingSlip'] = 1
            newRow['country'] = validate.country(validate.clean(row[13]))
            if not newRow['country']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate country: ' + row[13]
                errors.append(msg)
                continue
            newRow['region'] = validate.region(validate.clean(row[11]), newRow['country'])
            if not newRow['region']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate region: ' + row[11]
                errors.append(msg)
                continue
            newRow['postCode'] = validate.postCode(validate.clean(row[12]), newRow['country'])
            if not newRow['postCode']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate post code: ' + row[12]
                errors.append(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, NMR order parser added a column")
                quit()
            prevRow = row
        print("\nImported " + str(len(parsedRows)) + " orders from NMR file '" + os.path.basename(path) + "'")
        return parsedRows
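# The parsers in this file all follow the same row-building pattern, sketched
# here in isolation as a minimal example (the function name, `columns`, and
# `values` are placeholders, not part of the project): build an OrderedDict
# keyed by the expected output columns, fill in whatever fields the source
# file provides, and only keep the row if no unexpected key crept in.
def _buildRowSketch(columns, values):
    row = collections.OrderedDict.fromkeys(columns)
    row.update(values)  # fill known fields; an unknown key would grow the dict
    if len(columns) != len(row):
        raise ValueError('parser added a column: ' + str(set(row) - set(columns)))
    return list(row.values())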
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)
        parsedRows = list()
        prevRow = list()
        next(reader)  # skip first row
        for row in reader:
            if len(row) < 2:
                continue  # skip if < 2 columns
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            newRow['merchantID'] = 39
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow['lineNumber'] = 1
            newRow['itemSKU'] = validate.clean(row[15])
            newRow['itemQuantity'] = 1
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Ncrowd item parser added a column")
                quit()
            prevRow = row
        print("Imported " + str(len(parsedRows)) + " item rows from Ncrowd file '" + os.path.basename(path) + "'")
        return parsedRows
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)
        parsedRows = list()
        prevRow = list()
        next(reader)  # skip first row
        for row in reader:
            if len(row) < 2:
                continue  # skip if < 2 columns
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            newRow['merchantID'] = 36
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow['lineNumber'] = 1
            newRow['itemTitle'] = validate.clean(row[23])
            newRow['itemSKU'] = validate.clean(row[3])
            newRow['itemQuantity'] = validate.clean(row[4])
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Groupon item parser added a column")
                quit()
            prevRow = row
        print("Imported " + str(len(parsedRows)) + " item rows from Groupon file '" + os.path.basename(path) + "'")
        return parsedRows
def getPackages(path, columns):
    parsedRows = list()
    with open(path) as file:
        reader = csv.reader(file)
        next(reader)  # skip header
        # read the whole file into memory
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            newRow['merchantID'] = 36
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow["bulk"] = 1
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow['weight'] = validate.clean(row[34])
            newRow['note'] = validate.clean(row[4]) + '-' + validate.clean(row[3])  # qty-sku
            # save the package row in completedLines
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Groupon shipping allocator added a column")
                quit()
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(path) + "'")
    return parsedRows
def getOrders(path, columns):
    with open(path) as file:
        reader = csv.reader(file)  # create a CSV reader object
        parsedRows = list()  # create a list to hold the new rows
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or row[13] == "":
                continue  # skip if < 2 cols or no sku
            newRow["companyCode"] = 113
            newRow["completeOrderReference"] = validate.clean(row[0])
            newRow["shortOrderReference"] = validate.clean(row[0])
            newRow["originFile"] = os.path.basename(path)
            newRow["merchantID"] = 38
            newRow["fullName"] = validate.clean(row[11] + " " + row[12])
            newRow["phoneNumber"] = "".join([char for char in row[18] if str.isdigit(char)])
            newRow["address1"] = validate.clean(row[13])
            newRow["address2"] = validate.clean(row[14])
            newRow["town"] = validate.clean(row[15])
            newRow["packingSlip"] = 0
            newRow["country"] = validate.country(validate.clean(row[18]))
            if not newRow["country"]:
                msg = newRow["completeOrderReference"] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += "Could not validate country: " + row[18] + "\n"
                errors.append(msg)
                continue
            newRow["region"] = validate.region(validate.clean(row[16]), newRow["country"])
            if not newRow["region"]:
                msg = newRow["completeOrderReference"] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += "Could not validate region: " + row[16] + "\n"
                errors.append(msg)
                continue
            newRow["postCode"] = validate.postCode(validate.clean(row[17]), newRow["country"])
            if not newRow["postCode"]:
                msg = newRow["completeOrderReference"] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += "Could not validate post code: " + row[17] + "\n"
                errors.append(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops StackSocial order parser added a column")
                quit()
        print("\nImported " + str(len(parsedRows)) + " orders from StackSocial file '" + os.path.basename(path) + "'")
        return parsedRows
def getItems(self, columns):
    with open(self.file) as file:
        reader = csv.reader(file)
        parsedRows = list()
        prevRow = list()
        next(reader)  # skip first row
        for row in reader:
            if len(row) < 2:
                continue  # skip if < 2 columns
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if prevRow and row[0] == prevRow[0]:
                lineNumber += 1
            else:
                lineNumber = 1
            market = row[1][:2]
            newRow['merchantID'] = self.getMarketParam(market, 'merchantID')
            if not newRow['merchantID']:
                continue
            newRow['shortOrderReference'] = validate.shortenPossibleAmazon(row[0])
            newRow['lineNumber'] = lineNumber
            newRow['itemTitle'] = validate.clean(row[2])
            newRow['itemSKU'] = validate.clean(row[10].split('-')[-1])  # grab after the last -
            newRow['itemAttribKey'] = 'Full SKU'
            newRow['itemAttribVal'] = validate.clean(row[10])  # save entire sku
            newRow['itemQuantity'] = validate.clean(row[11])
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Marvellous item parser added a column")
                quit()
            prevRow = row
        print("Imported " + str(len(parsedRows)) + " item rows from Marvellous file '" + os.path.basename(self.file) + "'")
        return parsedRows
def getPackages(path, columns):
    lines = list()  # this list will hold the whole file
    parsedRows = list()
    with open(path) as file:
        reader = csv.reader(file)
        next(reader)  # skip header
        # read the whole file into memory
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            # FIGURE OUT WHAT TO DO WITH THIS ORDER
            newRow['merchantID'] = 39
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow["bulk"] = 0
            # save the package row in completedLines
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Ncrowd shipping allocator added a column")
                quit()
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(path) + "'")
    return parsedRows
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)
        parsedRows = list()
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or row[13] == "":
                continue  # skip if < 2 cols or no sku
            newRow["shortOrderReference"] = validate.clean(row[0])
            newRow["merchantID"] = 38
            newRow["lineNumber"] = 0
            skuCol = 20
            while skuCol < len(row) and row[skuCol]:
                newRow["lineNumber"] += 1
                newRow["itemSKU"] = row[skuCol]
                newRow["itemQuantity"] = row[skuCol + 1]
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops StackSocial item parser added a column")
                    quit()
                skuCol += 2
        print("Imported " + str(len(parsedRows)) + " item lines from StackSocial file '" + os.path.basename(path) + "'")
        return parsedRows
def getPackages(path, columns):
    lines = list()  # this list will hold the whole file
    parsedRows = list()
    with open(path) as file:
        reader = csv.reader(file)
        next(reader)  # skip header
        # read the whole file into memory
        for row in reader:
            if len(row) > 1:
                lines.append(row)
    orderStart = 0
    orderEnd = 0
    while orderEnd < len(lines):
        orderEnd += 1
        # while the current line has the same order number as the starting line
        while orderEnd < len(lines) and lines[orderEnd][3] == lines[orderStart][3]:
            orderEnd += 1
        # grab the slice of the file that contains the next order
        currentOrder = lines[orderStart:orderEnd]
        # create a new ordered dictionary to hold the row info
        newRow = collections.OrderedDict.fromkeys(columns)
        # FIGURE OUT WHAT TO DO WITH THIS ORDER
        itemCount = sum([int(row[12]) for row in currentOrder])
        newRow['merchantID'] = 44
        newRow['shortOrderReference'] = validate.clean(currentOrder[0][3])
        if itemCount == 1:
            newRow['bulk'] = 1
            newRow['carrier'] = 26
            newRow['serviceClass'] = 12
            newRow['weight'] = float(4 / 16)
            newRow['note'] = '1-' + currentOrder[0][14]
        else:
            newRow["bulk"] = 0
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow['weight'] = float(4 / 16)
        # save the package row in completedLines
        if len(columns) == len(newRow):
            parsedRows.append(list(newRow.values()))
        else:
            print("Oops, Sweetjack shipping allocator added a column")
            quit()
        orderStart = orderEnd  # move on to the next order
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(path) + "'")
    return parsedRows
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)
        parsedRows = list()
        prevRow = list()
        next(reader)  # skip first row
        data = sorted(reader, key=operator.itemgetter(0))
        for row in data:
            if len(row) < 2:
                continue  # skip if < 2 columns
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if prevRow and row[0] == prevRow[0]:
                # if this is another row of the same item
                if row[36] == prevRow[36]:
                    # grab the last row and increment the qty
                    parsedRows[-1][5] = int(parsedRows[-1][5]) + 1
                    continue  # ignore this line
                # else, make a new line
                lineNumber += 1
            else:
                lineNumber = 1
            newRow['merchantID'] = 42
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow['lineNumber'] = lineNumber
            newRow['itemTitle'] = validate.clean(row[3])
            newRow['itemSKU'] = validate.clean(row[36])
            newRow['itemQuantity'] = validate.clean(row[12])
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Restaurant item parser added a column")
                quit()
            prevRow = row
        print("Imported " + str(len(parsedRows)) + " item rows from Restaurant file '" + os.path.basename(path) + "'")
        return parsedRows
def getOrders(path, columns):
    with open(path) as file:
        prevRow = list()
        parsedRows = list()
        reader = csv.reader(file)
        next(reader)  # skip header row
        for row in reader:
            if len(row) < 2 or (prevRow and row[0] == prevRow[0]):
                continue  # skip if < 2 cols or same order as prev row
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            newRow['companyCode'] = 112  # marvellous
            newRow['merchantID'] = 46
            newRow['completeOrderReference'] = validate.clean(row[0])
            newRow['shortOrderReference'] = validate.clean(row[0].split('_')[-1])
            newRow['fullName'] = validate.clean(row[5]) + ' ' + validate.clean(row[6])
            newRow["originFile"] = os.path.basename(path)
            newRow['address1'] = validate.clean(row[7])
            newRow['town'] = validate.clean(row[6])
            newRow['packingSlip'] = 1
            newRow['country'] = 'US'
            if not newRow['country']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate country: ' + row[9]
                errors.append(msg)
                continue
            newRow['region'] = validate.region(validate.clean(row[9]), newRow['country'])
            if not newRow['region']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate region: ' + row[9]
                errors.append(msg)
                continue
            newRow['postCode'] = validate.postCode(validate.clean(row[10]), newRow['country'])
            if not newRow['postCode']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate post code: ' + row[10]
                errors.append(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, LTM order parser added a column")
                quit()
            prevRow = row
        print("\nImported " + str(len(parsedRows)) + " orders from Lightake file '" + os.path.basename(path) + "'")
        return parsedRows
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)
        parsedRows = list()
        prevRow = list()
        next(reader)  # skip first row
        for row in reader:
            if len(row) < 2:
                continue  # skip if < 2 columns
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if prevRow and row[0] == prevRow[0]:
                lineNumber += 1
            else:
                lineNumber = 1
            newRow['merchantID'] = 46
            newRow['shortOrderReference'] = validate.clean(row[0].split('_')[-1])
            newRow['lineNumber'] = lineNumber
            newRow['itemTitle'] = validate.clean(row[2])
            newRow['itemSKU'] = row[2].split('(')[-1][:-1]  # extract sku from () in title
            newRow['itemQuantity'] = validate.clean(row[4])
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Half Off Deals item parser added a column")
                quit()
            prevRow = row
        print("Imported " + str(len(parsedRows)) + " item rows from Lightake file '" + os.path.basename(path) + "'")
        return parsedRows
def getPackages(path, columns):
    with open(path) as file:
        reader = csv.reader(file)
        parsedRows = list()
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or row[20] == '':
                continue  # skip if < 2 cols or no sku
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow['merchantID'] = 38
            newRow['returnCompany'] = validate.clean(row[4])
            newRow['returnAdd1'] = validate.clean(row[5])
            newRow['returnAdd2'] = validate.clean(row[6])
            newRow['returnCity'] = validate.clean(row[7])
            newRow['returnState'] = validate.clean(row[8])
            newRow['returnZip'] = validate.clean(row[9])
            newRow['bulk'] = 1
            newRow['packageNumber'] = 1
            newRow['carrier'] = 26
            newRow['serviceClass'] = 12
            itemNotes = list()
            skuCol = 20
            while skuCol < len(row) and row[skuCol]:
                itemNotes.append(row[skuCol + 1] + '-' + row[skuCol])  # add sku and quantity to note
                skuCol += 2
            newRow['note'] = ','.join(itemNotes)
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops StackSocial package parser added a column")
                quit()
        print("Imported " + str(len(parsedRows)) + " packages from StackSocial file '" + os.path.basename(path) + "'")
        return parsedRows
def getOrders(path, columns):
    with open(path) as file:
        reader = csv.reader(file, delimiter='\t')  # create a CSV reader object
        parsedRows = list()  # create a list to hold the new rows
        prevOrderNum = ''
        next(reader)  # skip header row
        for row in reader:
            # if more than 2 cols and order number exists and new order
            if len(row) > 2 and row[0].strip() and row[0] != prevOrderNum:
                # create a new ordered dictionary to hold the row info
                newRow = collections.OrderedDict.fromkeys(columns)
                # map info from input file row to new row dict
                order_number = validate.clean(row[0]).replace(' ', '')
                newRow["completeOrderReference"] = order_number
                newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
                newRow["originFile"] = os.path.basename(path)
                newRow["companyCode"] = 97
                newRow["merchantID"] = 10
                newRow["fullName"] = validate.clean(row[5])
                newRow["phoneNumber"] = "".join([char for char in row[6] if str.isdigit(char)])
                newRow["emailAddress"] = row[4].strip()
                newRow["address1"] = validate.clean(row[17])
                newRow["address2"] = validate.clean(row[18])
                newRow["address3"] = validate.clean(row[19])
                newRow["town"] = row[20].strip()
                newRow['country'] = validate.country(validate.clean(row[23]))
                if not newRow['country']:
                    msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                    msg += 'Could not validate country: ' + row[23] + '\n'
                    errors.append(msg)
                    continue
                newRow["region"] = validate.region(validate.clean(row[21]), newRow['country'])
                if not newRow["region"]:
                    msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                    msg += 'Could not validate state: ' + row[21] + '\n'
                    errors.append(msg)
                    continue
                newRow['postCode'] = validate.postCode(validate.clean(row[22]), newRow['country'])
                if not newRow['postCode']:
                    msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                    msg += 'Could not validate post code: ' + row[22] + '\n'
                    errors.append(msg)
                    continue
                newRow["packingSlip"] = 1
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops, DSOL Amazon order parser added a column")
                    quit()
            # save the previous order number
            prevOrderNum = row[0]
        print("\nImported " + str(len(parsedRows)) + " orders from Dance Shoes Online file '" + os.path.basename(path) + "'")
        return parsedRows
def getPackages(path, columns):
    mensSkus = ["A330101", "A350501"]
    lines = list()  # this list will hold the whole file
    completedLines = list()
    with open(path) as file:
        reader = csv.reader(file, delimiter='\t')
        next(reader)  # skip header row
        for row in reader:
            # read the whole file into memory so that I can index it and
            # iterate over parts of it multiple times
            if len(row) > 2 and row[10]:  # if > 2 cols and sku exists
                lines.append(row)
    orderStart = 0
    orderEnd = 0
    while orderEnd < len(lines):
        orderEnd += 1
        # while the current line has the same order number as the starting line
        while orderEnd < len(lines) and lines[orderEnd][0] == lines[orderStart][0]:
            orderEnd += 1  # increment the orderEnd counter
        # grab the slice of the file that contains the next order
        currentOrder = lines[orderStart:orderEnd]
        # create a new ordered dictionary to hold the row info
        newRow = collections.OrderedDict.fromkeys(columns)
        # FIGURE OUT WHAT TO DO WITH THIS ORDER
        order_number = validate.clean(currentOrder[0][0]).replace(' ', '')
        newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
        country = validate.country(validate.clean(currentOrder[0][7]))
        newRow["merchantID"] = 10
        newRow["returnCompany"] = "DanceShoesOnline.com"
        newRow["returnAdd1"] = "8900 Rosehill Rd"
        newRow["returnAdd2"] = "Unit B."
        newRow["returnCity"] = "Lenexa"
        newRow["returnState"] = "KS"
        newRow["returnZip"] = "66214-1656"
        newRow["bulk"] = 0
        itemCount = sum(int(row[9]) for row in currentOrder)
        # orders with 1 item
        if itemCount == 1:
            line = currentOrder[0]
            sku = line[7][:-1].strip()
            womens = sku not in mensSkus
            attribString = line[8][line[8].find("("):].replace("(", "").replace(")", "")
            attributes = attribString.split(",")
            size = 0
            if attributes[0].strip():
                size = float(attributes[0].strip())
            if size and size < 9 and womens:
                newRow["packageNumber"] = 1
                newRow["carrier"] = 26
                newRow["serviceClass"] = 10
                newRow["length"] = 10.25
                newRow["width"] = 7
                newRow["height"] = 3.5
                if size and size < 7:
                    newRow["weight"] = 1
                else:
                    newRow["weight"] = 1.2
                newRow['note'] = 'Small box'
            # 1 women's 9 and above or 1 men's
            else:
                newRow["packageNumber"] = 1
                newRow["carrier"] = 26
                newRow["serviceClass"] = 10
                newRow["length"] = 12.25
                newRow["width"] = 7.25
                newRow["height"] = 4.25
                newRow["weight"] = 1.2
                newRow['note'] = 'Large box'
        # orders with 2 items
        elif itemCount == 2:
            # assume we will be able to ship combo
            # and then try to prove this false
            canShipCombo = True
            for line in currentOrder:
                sku = line[10][:-1].strip()
                womens = sku not in mensSkus
                attribString = line[8][line[8].find("("):].replace("(", "").replace(")", "")
                attributes = attribString.split(",")
                size = 0
                if attributes[0].strip():
                    size = float(attributes[0].strip())
                if not size or not womens or not size < 9:
                    canShipCombo = False
            if canShipCombo:
                newRow["packageNumber"] = 1
                newRow["carrier"] = 26
                newRow["serviceClass"] = 10
                newRow["length"] = 14
                newRow["width"] = 10.25
                newRow["height"] = 3.5
                newRow["weight"] = 2.2
                newRow['note'] = 'Double box'
            else:
                # create a generic USPS package
                newRow['packageNumber'] = 1
                newRow['carrier'] = 26
                newRow['serviceClass'] = 10
                newRow['note'] = 'Dim add'
        # orders with more than 2 items
        else:
            if country == 'PR':
                # create a generic USPS package
                newRow['packageNumber'] = 1
                newRow['carrier'] = 26
                newRow['serviceClass'] = 10
                newRow['note'] = 'Dim add'
            else:
                orderStart = orderEnd  # move on to the next order
                continue  # don't create a package
        # save the package row in completedLines
        if len(columns) == len(newRow):
            completedLines.append(list(newRow.values()))
        else:
            print("Oops, DSOL Amazon shipping allocator added a column")
            quit()
        orderStart = orderEnd  # move on to the next order
    print("Created " + str(len(completedLines)) + " packages from '" + os.path.basename(path) + "'")
    return completedLines
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file, delimiter='\t')  # create a CSV reader object
        parsedRows = list()  # create a list to hold the new rows
        orderLine = 0
        prevOrderNum = ''
        next(reader)  # skip header row
        for row in reader:
            overstock = False
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or not row[10].strip():
                continue  # skip row if < 2 cols or no sku
            if row[0] == prevOrderNum:
                orderLine += 1  # this is another line of the same order
            else:
                orderLine = 1  # reset line number
            # map info from input file row to new row dict
            order_number = validate.clean(row[0]).replace(' ', '')
            newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
            newRow["merchantID"] = 10
            newRow["lineNumber"] = orderLine
            newRow["itemSKU"] = row[7][:-1].strip()
            newRow["itemTitle"] = validate.clean(row[8])
            newRow["itemQuantity"] = row[9].strip()
            newRow['itemAttribKey'] = 'AmazonPostfix'
            newRow['itemAttribVal'] = row[7][-1:]
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL Amazon item parser added a column")
                quit()
            newRow['itemAttribKey'] = 'order-item-id'
            newRow['itemAttribVal'] = row[1]
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL Amazon item parser added a column")
                quit()
            # parse attribs
            if "(" in row[8] and ")" in row[8]:
                attribString = row[8][row[8].find("("):].replace("(", "").replace(")", "")
                attributes = attribString.split(",")
                if attributes[0].strip():
                    newRow["itemAttribKey"] = "size"
                    newRow["itemAttribVal"] = attributes[0].strip()
                    if len(columns) == len(newRow):
                        parsedRows.append(list(newRow.values()))
                    else:
                        print("Oops, DSOL Amazon item parser added a column")
                        quit()
                if len(attributes) > 1 and attributes[1].strip():
                    newRow["itemAttribKey"] = "color"
                    newRow["itemAttribVal"] = attributes[1].strip()
                    if len(columns) == len(newRow):
                        parsedRows.append(list(newRow.values()))
                    else:
                        print("Oops, DSOL Amazon item parser added a column")
                        quit()
            # write out row with width
            newRow["itemAttribKey"] = "width"
            newRow["itemAttribVal"] = row[14].strip()
            prevOrderNum = row[0]  # keep reference in case next row needs it
        print("Imported " + str(len(parsedRows)) + " item rows from Dance Shoes Online file '" + os.path.basename(path) + "'")
        return parsedRows
def getOrders(self, columns):
    with open(self.file) as file:
        prevRow = list()
        parsedRows = list()
        reader = csv.reader(file)
        next(reader)  # skip header row
        for row in reader:
            if len(row) < 2 or (prevRow and row[0] == prevRow[0]):
                continue  # skip if < 2 cols or same order as prev row
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            # set company and market
            self.companyCode = row[2]
            self.marketId = row[0][:2]
            self.market = row[0][3:]
            newRow['marketId'] = self.market  # save market id for confirmations
            newRow['companyCode'] = self.companyCode
            newRow['merchantID'] = self.getMarketParam('merchantID')
            if not newRow['merchantID']:
                continue
            newRow['completeOrderReference'] = validate.clean(row[1])
            newRow['shortOrderReference'] = validate.shortenPossibleAmazon(row[1])
            newRow['fullName'] = validate.clean(row[3])
            newRow["originFile"] = os.path.basename(self.file)
            newRow['phoneNumber'] = validate.phone(row[4])
            newRow['address1'] = validate.clean(row[5])
            newRow['address2'] = validate.clean(row[6])
            newRow['town'] = validate.clean(row[7])
            newRow['packingSlip'] = 1
            newRow['country'] = validate.country(validate.clean(row[10]))
            if not newRow['country']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(self.file) + "' was skipped.\n"
                msg += 'Could not validate country: ' + row[10]
                self.errors.add(msg)
                continue
            newRow['region'] = validate.region(validate.clean(row[8]), newRow['country'])
            if not newRow['region']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(self.file) + "' was skipped.\n"
                msg += 'Could not validate region: ' + row[8]
                self.errors.add(msg)
                continue
            newRow['postCode'] = validate.postCode(validate.clean(row[9]), newRow['country'])
            if not newRow['postCode']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(self.file) + "' was skipped.\n"
                msg += 'Could not validate post code: ' + row[9]
                self.errors.add(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, standard order parser added a column")
                quit()
            prevRow = row
        print("\nImported " + str(len(parsedRows)) + " orders from " + self.market + " file '" + os.path.basename(self.file) + "'")
        return parsedRows
def getOrders(path, columns):
    with open(path) as file:
        prevRow = list()
        parsedRows = list()
        reader = csv.reader(file)
        next(reader)  # skip header row
        for row in reader:
            if len(row) < 2 or not row[0]:
                continue  # skip if < 2 cols or no order number
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            company = os.path.split(os.path.dirname(path))[1]
            if company.upper() == 'BETAFRESH':
                newRow['companyCode'] = 113
            elif company.upper() == 'MARVELLOUS':
                newRow['companyCode'] = 112
            else:
                print('Unknown company: ' + company)
                quit()
            newRow['merchantID'] = 36
            newRow['completeOrderReference'] = validate.clean(row[0])
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow["originFile"] = os.path.basename(path)
            newRow['fullName'] = validate.clean(row[6])
            newRow['phoneNumber'] = validate.phone(row[40])
            newRow['address1'] = validate.clean(row[7])
            newRow['address2'] = validate.clean(row[8])
            newRow['town'] = validate.clean(row[9])
            newRow['packingSlip'] = 1
            newRow['country'] = validate.country(validate.clean(row[12]))
            if not newRow['country']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate country: ' + row[12]
                errors.append(msg)
                continue
            newRow['region'] = validate.region(validate.clean(row[10]), newRow['country'])
            if not newRow['region']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate region: ' + row[10]
                errors.append(msg)
                continue
            newRow['postCode'] = validate.postCode(validate.clean(row[11]), newRow['country'])
            if not newRow['postCode']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate post code: ' + row[11]
                errors.append(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Groupon order parser added a column")
                quit()
        print("\nImported " + str(len(parsedRows)) + " orders from Groupon file '" + os.path.basename(path) + "'")
        return parsedRows
def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)  # create a CSV reader object
        parsedRows = list()  # create a list to hold the new rows
        orderLine = 0
        next(reader)  # skip header row
        for row in reader:
            overstock = False
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or not row[10].strip():
                continue  # skip row if < 2 cols or no sku
            if row[0]:  # this line has an order number
                # we've found a new order
                orderLine = 1  # reset line number to 1
            else:  # this line has no order number
                # this is another line of the same order
                orderLine += 1  # increment the line number
                row[0] = prevRef  # use the order reference from the previous line
            # map info from input file row to new row dict
            order_number = validate.clean(row[0]).replace(' ', '')
            if '&' in order_number:
                newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number.split('&')[0])
            else:
                newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
            newRow["merchantID"] = 10
            newRow["lineNumber"] = orderLine
            if row[10][-3:] == "-OS":
                newRow["itemSKU"] = row[10][:-3].strip()
                overstock = True
            else:
                newRow["itemSKU"] = row[10].strip()
            newRow["itemQuantity"] = row[11].strip()
            # write out row with size
            newRow["itemAttribKey"] = "size"
            newRow["itemAttribVal"] = row[12].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL item parser added a column")
                quit()
            # write out row with heel
            newRow["itemAttribKey"] = "heel"
            newRow["itemAttribVal"] = row[13].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL item parser added a column")
                quit()
            # write out row with width
            newRow["itemAttribKey"] = "width"
            newRow["itemAttribVal"] = row[14].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL item parser added a column")
                quit()
            # write out row with bin/us code
            if row[15].strip():
                newRow['itemAttribKey'] = 'bin'
                newRow['itemAttribVal'] = validate.clean(row[15]).strip()
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops, DSOL item parser added a column")
                    quit()
            # add overstock attrib
            if overstock:
                newRow["itemAttribKey"] = "overstock"
                newRow["itemAttribVal"] = "true"
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops, DSOL item parser added a column")
                    quit()
            prevRef = row[0]  # keep reference in case next row needs it
        print("Imported " + str(len(parsedRows)) + " item rows from Dance Shoes Online file '" + os.path.basename(path) + "'")
        return parsedRows
def getOrders(path, columns):
    with open(path) as file:
        reader = csv.reader(file)  # create a CSV reader object
        parsedRows = list()  # create a list to hold the new rows
        next(reader)  # skip header row
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or not row[10].strip():
                continue  # skip row if < 2 cols or no sku
            if row[0]:  # this line has an order number
                # map info from input file row to new row dict
                order_number = validate.clean(row[0]).replace(' ', '')
                if '&' in order_number:
                    newRow["completeOrderReference"] = order_number.replace('&', '/')
                    newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number.split('&')[0])
                else:
                    newRow["completeOrderReference"] = order_number
                    newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
                newRow["originFile"] = os.path.basename(path)
                newRow["companyCode"] = 97
                newRow["merchantID"] = 10
                newRow["fullName"] = row[1].strip() + " " + row[2].strip()
                newRow["phoneNumber"] = "".join([char for char in row[8] if str.isdigit(char)])
                newRow["emailAddress"] = row[9].strip()
                newRow["address1"] = row[3].strip()
                newRow["town"] = row[4].strip()
                newRow['country'] = validate.country(validate.clean(row[7]))
                if not newRow['country']:
                    msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                    msg += 'Could not validate country: ' + row[7] + '\n'
                    errors.append(msg)
                    continue
                newRow["region"] = validate.region(validate.clean(row[5]), newRow['country'])
                if not newRow["region"]:
                    msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                    msg += 'Could not validate state: ' + row[5] + '\n'
                    errors.append(msg)
                    continue
                newRow['postCode'] = validate.postCode(validate.clean(row[6]), newRow['country'])
                if not newRow['postCode']:
                    msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                    msg += 'Could not validate post code: ' + row[6] + '\n'
                    errors.append(msg)
                    continue
                newRow["packingSlip"] = 1
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops, DSOL order parser added a column")
                    quit()
        print("\nImported " + str(len(parsedRows)) + " orders from Dance Shoes Online file '" + os.path.basename(path) + "'")
        return parsedRows
def getPackages(path, columns):
    lines = list()  # this list will hold the whole file
    parsedRows = list()
    with open(path) as file:
        reader = csv.reader(file)
        next(reader)  # skip header
        # read the whole file into memory
        for row in reader:
            if len(row) > 1:
                lines.append(row)
    orderStart = 0
    orderEnd = 0
    while orderEnd < len(lines):
        orderEnd += 1
        # while the current line has the same order number as the starting line
        while orderEnd < len(lines) and lines[orderEnd][0] == lines[orderStart][0]:
            orderEnd += 1
        # grab the slice of the file that contains the next order
        currentOrder = lines[orderStart:orderEnd]
        # create a new ordered dictionary to hold the row info
        newRow = collections.OrderedDict.fromkeys(columns)
        # FIGURE OUT WHAT TO DO WITH THIS ORDER
        newRow['merchantID'] = 46
        newRow['shortOrderReference'] = validate.clean(currentOrder[0][0].split('_')[-1])
        newRow['returnCompany'] = 'Half Off Deals'
        newRow['returnAdd1'] = '8900 Rosehill Rd'
        newRow['returnAdd2'] = 'Unit B Dock'
        newRow['returnCity'] = 'Lenexa'
        newRow['returnState'] = 'KS'
        newRow['returnZip'] = '66215'
        itemCount = sum(int(row[4]) if row[4] else 0 for row in currentOrder)
        if itemCount == 1:
            line = currentOrder[0]
            sku = line[2].split('(')[-1][:-1]
            qty = line[4]
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow['weight'] = float(15 / 16)
            newRow['note'] = qty + '-' + sku
            newRow["bulk"] = 1
        else:
            # create a default package
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow["bulk"] = 0
        # save the package row in completedLines
        if len(columns) == len(newRow):
            parsedRows.append(list(newRow.values()))
        else:
            print("Oops, Half Off Deals shipping allocator added a column")
            quit()
        orderStart = orderEnd  # move on to the next order
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(path) + "'")
    return parsedRows
def getPackages(self, columns):
    lines = list()  # this list will hold the whole file
    parsedRows = list()
    with open(self.file) as file:
        reader = csv.reader(file)
        next(reader)  # skip header
        # read the whole file into memory
        for row in reader:
            if len(row) > 1:
                lines.append(row)
    orderStart = 0
    orderEnd = 0
    while orderEnd < len(lines):
        orderEnd += 1
        # while the current line has the same order number as the starting line
        while orderEnd < len(lines) and lines[orderEnd][0] == lines[orderStart][0]:
            orderEnd += 1
        # grab the slice of the file that contains the next order
        currentOrder = lines[orderStart:orderEnd]
        # create a new ordered dictionary to hold the row info
        newRow = collections.OrderedDict.fromkeys(columns)
        # FIGURE OUT WHAT TO DO WITH THIS ORDER
        newRow['merchantID'] = self.getMarketParam('merchantID', job="Packages")
        if not newRow['merchantID']:
            orderStart = orderEnd
            continue
        newRow['shortOrderReference'] = validate.shortenPossibleAmazon(currentOrder[0][1])
        newRow['returnCompany'] = self.getMarketParam('returnCompany', job="Packages")
        if not newRow['returnCompany']:
            orderStart = orderEnd
            continue
        newRow['returnCompany2'] = self.getMarketParam('returnCompany2', required=False)
        newRow['returnAdd1'] = self.getMarketParam('returnAdd1', job="Packages")
        if not newRow['returnAdd1']:
            orderStart = orderEnd
            continue
        newRow['returnAdd2'] = self.getMarketParam('returnAdd2', required=False)
        newRow['returnCity'] = self.getMarketParam('returnCity', job="Packages")
        if not newRow['returnCity']:
            orderStart = orderEnd
            continue
        newRow['returnState'] = self.getMarketParam('returnState', job="Packages")
        if not newRow['returnState']:
            orderStart = orderEnd
            continue
        newRow['returnZip'] = self.getMarketParam('returnZip', job="Packages")
        if not newRow['returnZip']:
            orderStart = orderEnd
            continue
        itemCount = sum(int(row[13]) if row[13] else 0 for row in currentOrder)
        if itemCount == 1:
            line = currentOrder[0]
            sku = validate.clean(line[11].split('-')[-1])
            qty = line[13]
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow['weight'] = float(15 / 16)
            newRow['note'] = qty + '-' + sku
            newRow["bulk"] = 1
        else:
            # create a default package
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow["bulk"] = 0
        # save the package row in completedLines
        if len(columns) == len(newRow):
            parsedRows.append(list(newRow.values()))
        else:
            print("Oops, " + self.market + " shipping allocator added a column")
            quit()
        orderStart = orderEnd  # move on to the next order
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(self.file) + "'")
    return parsedRows
def getOrders(path, columns):
    with open(path) as file:
        prevRow = list()
        parsedRows = list()
        reader = csv.reader(file)
        next(reader)  # skip header row
        data = sorted(reader, key=operator.itemgetter(0))
        for row in data:
            if len(row) < 2 or (prevRow and row[0] == prevRow[0]):
                continue  # skip if < 2 cols or same order as prev row
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            newRow['companyCode'] = 112  # marvellous
            newRow['merchantID'] = 42
            newRow['completeOrderReference'] = validate.clean(row[0])
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow["originFile"] = os.path.basename(path)
            newRow['fullName'] = validate.clean(row[17])
            newRow['phoneNumber'] = validate.phone(row[7])
            newRow['address1'] = validate.clean(row[18])
            newRow['address2'] = validate.clean(row[19])
            newRow['town'] = validate.clean(row[20])
            newRow['packingSlip'] = 1
            newRow['country'] = validate.country(validate.clean(row[23]))
            if not newRow['country']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate country: ' + row[23]
                errors.append(msg)
                continue
            newRow['region'] = validate.region(validate.clean(row[21]), newRow['country'])
            if not newRow['region']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate region: ' + row[21]
                errors.append(msg)
                continue
            newRow['postCode'] = validate.postCode(validate.clean(row[22]), newRow['country'])
            if not newRow['postCode']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate post code: ' + row[22]
                errors.append(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Restaurant order parser added a column")
                quit()
            prevRow = row
        print("\nImported " + str(len(parsedRows)) + " orders from Restaurant file '" + os.path.basename(path) + "'")
        return parsedRows
def getPackages(path, columns):
    lines = list()
    parsedRows = list()
    with open(path) as file:
        reader = csv.reader(file)
        next(reader)  # skip header row
        data = sorted(reader, key=operator.itemgetter(0))
        # read the whole file into memory so that I can index it and
        # iterate over parts of it multiple times
        for row in data:
            if len(row) > 2 and row[10]:  # if > 2 cols and sku exists
                lines.append(row)
    orderStart = 0
    orderEnd = 0
    while orderEnd < len(lines):
        orderEnd += 1
        # advance orderEnd until the order number changes
        while orderEnd < len(lines) and lines[orderStart][0] == lines[orderEnd][0]:
            orderEnd += 1
        # grab the slice of the file that contains the next order
        currentOrder = lines[orderStart:orderEnd]
        # create a new ordered dictionary to hold the row info
        newRow = collections.OrderedDict.fromkeys(columns)
        # figure out what to do with this order:
        # collect the SKUs into a set to see whether the order contains a single unique SKU
        skus = set()
        for line in currentOrder:
            skus.add(line[36])
        newRow['merchantID'] = 42
        newRow['shortOrderReference'] = validate.clean(currentOrder[0][0])
        newRow["carrier"] = 26
        newRow['serviceClass'] = 12
        if len(skus) == 1:
            # single-SKU order: flag it as bulk and note the total quantity and SKU
            sku = skus.pop()
            qty = sum(int(row[12]) for row in currentOrder)
            newRow["bulk"] = 1
            newRow['note'] = str(qty) + '-' + sku
        else:
            # mixed-SKU order: create a default, non-bulk package
            newRow["bulk"] = 0
        # save the package row in parsedRows
        if len(columns) == len(newRow):
            parsedRows.append(list(newRow.values()))
        else:
            print("Oops, Restaurant shipping allocator added a column")
            quit()
        orderStart = orderEnd  # move on to the next order
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(path) + "'")
    return parsedRows

def getPackages(self, columns):
    lines = list()  # this list will hold the whole file
    parsedRows = list()
    with open(self.file) as file:
        reader = csv.reader(file)
        next(reader)  # skip header
        # read the whole file into memory
        for row in reader:
            if len(row) > 1:
                lines.append(row)
    orderStart = 0
    orderEnd = 0
    while orderEnd < len(lines):
        orderEnd += 1
        # while the current line has the same order number as the starting line
        while orderEnd < len(lines) and lines[orderEnd][0] == lines[orderStart][0]:
            orderEnd += 1
        # grab the slice of the file that contains the next order
        currentOrder = lines[orderStart:orderEnd]
        # create a new ordered dictionary to hold the row info
        newRow = collections.OrderedDict.fromkeys(columns)
        # figure out what to do with this order
        newRow['merchantID'] = self.getMarketParam('merchantID', job="Packages")
        if not newRow['merchantID']:
            orderStart = orderEnd
            continue
        newRow['shortOrderReference'] = validate.shortenPossibleAmazon(currentOrder[0][1])
        newRow['returnCompany'] = self.getMarketParam('returnCompany', job="Packages")
        if not newRow['returnCompany']:
            orderStart = orderEnd
            continue
        newRow['returnCompany2'] = self.getMarketParam('returnCompany2', required=False)
        newRow['returnAdd1'] = self.getMarketParam('returnAdd1', job="Packages")
        if not newRow['returnAdd1']:
            orderStart = orderEnd
            continue
        newRow['returnAdd2'] = self.getMarketParam('returnAdd2', required=False)
        newRow['returnCity'] = self.getMarketParam('returnCity', job="Packages")
        if not newRow['returnCity']:
            orderStart = orderEnd
            continue
        newRow['returnState'] = self.getMarketParam('returnState', job="Packages")
        if not newRow['returnState']:
            orderStart = orderEnd
            continue
        newRow['returnZip'] = self.getMarketParam('returnZip', job="Packages")
        if not newRow['returnZip']:
            orderStart = orderEnd
            continue
        itemCount = sum(int(row[13]) if row[13] else 0 for row in currentOrder)
        if itemCount == 1:
            # single-item order: ship as a bulk package noted with quantity and SKU
            line = currentOrder[0]
            sku = validate.clean(line[11].split('-')[-1])
            qty = line[13]
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow['weight'] = float(15 / 16)
            newRow['note'] = qty + '-' + sku
            newRow["bulk"] = 1
        else:
            # create a default package
            newRow["carrier"] = 26
            newRow['serviceClass'] = 12
            newRow["bulk"] = 0
        # save the package row in parsedRows
        if len(columns) == len(newRow):
            parsedRows.append(list(newRow.values()))
        else:
            print("Oops, " + self.market + " shipping allocator added a column")
            quit()
        orderStart = orderEnd  # move on to the next order
    print("Created " + str(len(parsedRows)) + " packages from '" + os.path.basename(self.file) + "'")
    return parsedRows

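# NOTE: a minimal sketch, not part of the importer. It shows how the
# orderStart/orderEnd grouping loop used by the getPackages parsers above
# could equivalently be written with itertools.groupby, assuming the rows
# are already sorted so that lines of the same order (column 0) are adjacent.
import itertools


def groupRowsByOrder(lines):
    # yield one list of rows per consecutive run of equal order numbers
    for _, rows in itertools.groupby(lines, key=lambda row: row[0]):
        yield list(rows)


# usage sketch: for currentOrder in groupRowsByOrder(lines): build one package row per order
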
def getOrders(path, columns):
    with open(path) as file:
        parsedRows = list()
        reader = csv.reader(file)
        next(reader)  # skip header row
        for row in reader:
            if len(row) < 2 or not row[0]:
                continue  # skip if < 2 cols or no order number
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            # pick the company code from the name of the folder the file came from
            company = os.path.split(os.path.dirname(path))[1]
            if company.upper() == 'BETAFRESH':
                newRow['companyCode'] = 113
            elif company.upper() == 'MARVELLOUS':
                newRow['companyCode'] = 112
            else:
                print('Unknown company: ' + company)
                quit()
            newRow['merchantID'] = 36
            newRow['completeOrderReference'] = validate.clean(row[0])
            newRow['shortOrderReference'] = validate.clean(row[0])
            newRow["originFile"] = os.path.basename(path)
            newRow['fullName'] = validate.clean(row[6])
            newRow['phoneNumber'] = validate.phone(row[40])
            newRow['address1'] = validate.clean(row[7])
            newRow['address2'] = validate.clean(row[8])
            newRow['town'] = validate.clean(row[9])
            newRow['packingSlip'] = 1
            newRow['country'] = validate.country(validate.clean(row[12]))
            if not newRow['country']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate country: ' + row[12]
                errors.append(msg)
                continue
            newRow['region'] = validate.region(validate.clean(row[10]), newRow['country'])
            if not newRow['region']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate region: ' + row[10]
                errors.append(msg)
                continue
            newRow['postCode'] = validate.postCode(validate.clean(row[11]), newRow['country'])
            if not newRow['postCode']:
                msg = newRow['completeOrderReference'] + " from file '" + os.path.basename(path) + "' was skipped.\n"
                msg += 'Could not validate post code: ' + row[11]
                errors.append(msg)
                continue
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, Groupon order parser added a column")
                quit()
    print("\nImported " + str(len(parsedRows)) + " orders from Groupon file '" + os.path.basename(path) + "'")
    return parsedRows

def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file, delimiter='\t')  # create a CSV reader object for the tab-delimited file
        parsedRows = list()  # create a list to hold the new rows
        orderLine = 0
        prevOrderNum = ''
        next(reader)  # skip header row
        for row in reader:
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or not row[10].strip():
                continue  # skip row if < 2 cols or no sku
            if row[0] == prevOrderNum:
                orderLine += 1  # this is another line of the same order
            else:
                orderLine = 1  # reset line number
            # map info from input file row to new row dict
            order_number = validate.clean(row[0]).replace(' ', '')
            newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
            newRow["merchantID"] = 10
            newRow["lineNumber"] = orderLine
            newRow["itemSKU"] = row[7][:-1].strip()
            newRow["itemTitle"] = validate.clean(row[8])
            newRow["itemQuantity"] = row[9].strip()
            # write out row with the Amazon postfix attribute
            newRow['itemAttribKey'] = 'AmazonPostfix'
            newRow['itemAttribVal'] = row[7][-1:]
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL Amazon item parser added a column")
                quit()
            # write out row with the order-item-id attribute
            newRow['itemAttribKey'] = 'order-item-id'
            newRow['itemAttribVal'] = row[1]
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL Amazon item parser added a column")
                quit()
            # parse size/color attribs from the parenthesised part of the title
            if "(" in row[8] and ")" in row[8]:
                attribString = row[8][row[8].find("("):].replace("(", "").replace(")", "")
                attributes = attribString.split(",")
                if attributes[0].strip():
                    newRow["itemAttribKey"] = "size"
                    newRow["itemAttribVal"] = attributes[0].strip()
                    if len(columns) == len(newRow):
                        parsedRows.append(list(newRow.values()))
                    else:
                        print("Oops, DSOL Amazon item parser added a column")
                        quit()
                if len(attributes) > 1 and attributes[1].strip():
                    newRow["itemAttribKey"] = "color"
                    newRow["itemAttribVal"] = attributes[1].strip()
                    if len(columns) == len(newRow):
                        parsedRows.append(list(newRow.values()))
                    else:
                        print("Oops, DSOL Amazon item parser added a column")
                        quit()
            # write out row with width (assumed to be saved like the other attribute rows)
            newRow["itemAttribKey"] = "width"
            newRow["itemAttribVal"] = row[14].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL Amazon item parser added a column")
                quit()
            prevOrderNum = row[0]  # keep reference in case next row needs it
    print("Imported " + str(len(parsedRows)) + " item rows from Dance Shoes Online file '" + os.path.basename(path) + "'")
    return parsedRows

def getItems(path, columns):
    with open(path) as file:
        reader = csv.reader(file)  # create a CSV reader object
        parsedRows = list()  # create a list to hold the new rows
        orderLine = 0
        prevRef = ''  # order reference carried over from the previous line
        next(reader)  # skip header row
        for row in reader:
            overstock = False
            # create a new ordered dictionary to hold the row info
            newRow = collections.OrderedDict.fromkeys(columns)
            if len(row) < 2 or not row[10].strip():
                continue  # skip row if < 2 cols or no sku
            if row[0]:
                # this line has an order number, so we've found a new order
                orderLine = 1  # reset line number to 1
            else:
                # this line has no order number, so it is another line of the same order
                orderLine += 1  # increment the line number
                row[0] = prevRef  # use the order reference from the previous line
            # map info from input file row to new row dict
            order_number = validate.clean(row[0]).replace(' ', '')
            if '&' in order_number:
                newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number.split('&')[0])
            else:
                newRow["shortOrderReference"] = validate.shortenPossibleAmazon(order_number)
            newRow["merchantID"] = 10
            newRow["lineNumber"] = orderLine
            if row[10][-3:] == "-OS":
                newRow["itemSKU"] = row[10][:-3].strip()
                overstock = True
            else:
                newRow["itemSKU"] = row[10].strip()
            newRow["itemQuantity"] = row[11].strip()
            # write out row with size
            newRow["itemAttribKey"] = "size"
            newRow["itemAttribVal"] = row[12].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL item parser added a column")
                quit()
            # write out row with heel
            newRow["itemAttribKey"] = "heel"
            newRow["itemAttribVal"] = row[13].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL item parser added a column")
                quit()
            # write out row with width
            newRow["itemAttribKey"] = "width"
            newRow["itemAttribVal"] = row[14].strip()
            if len(columns) == len(newRow):
                parsedRows.append(list(newRow.values()))
            else:
                print("Oops, DSOL item parser added a column")
                quit()
            # write out row with bin/us code
            if row[15].strip():
                newRow['itemAttribKey'] = 'bin'
                newRow['itemAttribVal'] = validate.clean(row[15]).strip()
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops, DSOL item parser added a column")
                    quit()
            # add overstock attrib
            if overstock:
                newRow["itemAttribKey"] = "overstock"
                newRow["itemAttribVal"] = "true"
                if len(columns) == len(newRow):
                    parsedRows.append(list(newRow.values()))
                else:
                    print("Oops, DSOL item parser added a column")
                    quit()
            prevRef = row[0]  # keep reference in case next row needs it
    print("Imported " + str(len(parsedRows)) + " item rows from Dance Shoes Online file '" + os.path.basename(path) + "'")
    return parsedRows

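# NOTE: a minimal usage sketch only. The column list and file paths below are
# hypothetical; the real driver, the `validate` helpers, and the shared
# `errors` list live elsewhere in this project. It shows how one of the
# module-level getOrders parsers above (here the Groupon-style one, which
# reads the company name from the parent folder) might be called and its
# parsed rows written out with the csv module. The columns list must contain
# every field the parser fills in, or the "added a column" check will trip.
import csv

if __name__ == '__main__':
    orderColumns = ['companyCode', 'merchantID', 'completeOrderReference',
                    'shortOrderReference', 'originFile', 'fullName',
                    'phoneNumber', 'address1', 'address2', 'town',
                    'packingSlip', 'country', 'region', 'postCode']  # hypothetical schema
    rows = getOrders('MARVELLOUS/orders.csv', orderColumns)  # hypothetical path
    with open('orders_import.csv', 'w', newline='') as outFile:
        writer = csv.writer(outFile)
        writer.writerow(orderColumns)  # header row
        writer.writerows(rows)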