Ejemplo n.º 1
0
 def _prepare_filepaths(self):
     '''Builds absolute paths of the output files (one dir above this
     script dir) and stores each as an instance attribute.'''
     output_dir = get_output_dir()
     lp_output_dir = get_output_dir(client_file=False)
     date_stamp = datetime.today().strftime("%Y.%m.%d %H.%M")
     # Timestamped export files go to the client-facing output dir.
     timestamped = (
         ('same_buyers_filename', f'{self.sales_channel}-Same Buyer {date_stamp}.txt'),
         ('replacement_filename', f'{self.sales_channel}-Replacement Orders {date_stamp}.txt'),
         ('etonas_filename', f'{self.sales_channel}-Etonas {date_stamp}.xlsx'),
         ('nlpost_filename', f'{self.sales_channel}-NLPost {date_stamp}.xlsx'),
         ('dpost_filename', f'{self.sales_channel}-DPost {date_stamp}.csv'),
         ('dpdups_filename', f'{self.sales_channel}-DPDUPS {date_stamp}.xlsx'),
     )
     for attr_name, file_name in timestamped:
         setattr(self, attr_name, os.path.join(output_dir, file_name))
     # LP files carry no timestamp and live in the non-client output dir.
     self.lp_filename = os.path.join(lp_output_dir, f'{self.sales_channel}-LP.csv')
     self.lp_tracked_filename = os.path.join(lp_output_dir, f'{self.sales_channel}-LP-Tracked.csv')
Ejemplo n.º 2
0
 def __init__(self):
     '''Resolves the rates JSON path, refreshes the rates file when an
     update is required, then loads the rates into memory.'''
     # Rates JSON is kept in the internal (non-client) output dir.
     self.json_path = os.path.join(get_output_dir(client_file=False),
                                   RATES_JSON)
     self.rates_file = self.__get_rates_file()
     # Replace the cached file with a fresh download when stale —
     # presumably __requires_update checks file age/validity; TODO confirm.
     if self.__requires_update():
         self.rates_file = self.__download_fresh_rates()
     self.rates = self._get_rates()
Ejemplo n.º 3
0
 def __get_db_paths(self):
     '''Resolves the database file path and both backup paths inside the
     internal (non-client) output dir.'''
     base_dir = get_output_dir(client_file=False)
     join = os.path.join
     self.db_path = join(base_dir, DATABASE_NAME)
     self.db_backup_b4_path = join(base_dir, BACKUP_DB_BEFORE_NAME)
     self.db_backup_after_path = join(base_dir, BACKUP_DB_AFTER_NAME)
Ejemplo n.º 4
0
 def __init__(self, proxy_keys:dict):
     '''Opens the pricing workbook and caches both pricing worksheets
     together with their last used row/column limits.'''
     self.proxy_keys = proxy_keys
     workbook_path = os.path.join(get_output_dir(client_file=False), PRICING_WB)
     # data_only=True: read cached cell values rather than formulas.
     self.wb = openpyxl.load_workbook(workbook_path, data_only=True)
     self.ws_tracked = self.wb['PrTracked']
     self.ws_tracked_limits = get_last_used_row_col(self.ws_tracked)
     self.ws_untracked = self.wb['PrUntracked']
     self.ws_untracked_limits = get_last_used_row_col(self.ws_untracked)
Ejemplo n.º 5
0
 def __init__(self, config: dict):
     '''Copies worksheet-reading settings from config onto the instance
     and resolves the workbook's absolute path.'''
     # Required config keys; a missing key raises KeyError, as before.
     for key in ('wb_name', 'ws_name', 'start_row',
                 'check_integrity', 'alert_for_duplicates'):
         setattr(self, key, config[key])
     self.wb_path = os.path.join(get_output_dir(client_file=False),
                                 self.wb_name)
Ejemplo n.º 6
0
 def _parse_weights_wb(self) -> dict:
     '''returns weights data as dict from reading excel workbook'''
     # Weights workbook lives in the internal (non-client) output dir.
     weight_wb_path = os.path.join(get_output_dir(client_file=False),
                                   WB_NAME)
     ws = self._get_weight_ws(weight_wb_path)
     ws_limits = get_last_used_row_col(ws)
     weight_data = self._get_ws_data(ws, ws_limits)
     # NOTE(review): self.wb is never assigned in this method —
     # presumably _get_weight_ws opens the workbook and stores it on
     # self; confirm before refactoring this close() away.
     self.wb.close()
     return weight_data
Ejemplo n.º 7
0
 def export_unmapped_skus(self):
     '''exports unmatched (weight or mapping) skus list to txt file

     Writes one numbered, comma-separated line per unmatched sku sublist
     and logs the destination path; logs a skip notice when all skus
     were matched.'''
     # Guard clause: nothing to export — avoids computing the path/date.
     if not self.no_matching_skus:
         logging.info(
             'All skus were matched, skipping export of self.no_matching_skus'
         )
         return
     date_stamp = datetime.today().strftime("%Y.%m.%d %H.%M")
     txt_path = os.path.join(get_output_dir(),
                             f'Not matching SKUs {date_stamp}.txt')
     # Explicit encoding: output no longer depends on the platform default.
     with open(txt_path, 'w', encoding='utf-8') as f:
         for i, sku_sublist in enumerate(self.no_matching_skus, start=1):
             # BUGFIX: separator was ' ,' (space before comma), which
             # emitted lines like "a ,b ,c"; use the intended ', '.
             text_line = ', '.join(sku_sublist)
             f.write(f'{i}. {text_line}\n')
     logging.info(
         f'{len(self.no_matching_skus)} skus without complete weight data or amazon mapping were written to txt file: {txt_path}'
     )
Ejemplo n.º 8
0
# Sales channel whose export file is parsed on this run.
SALES_CHANNEL = 'AmazonCOM'
SKIP_ETONAS_FLAG = False
# Expected number of command-line arguments — presumably validated
# against sys.argv by the entry point; confirm against caller.
EXPECTED_SYS_ARGS = 4
# Status strings reported back to the calling VBA macro.
VBA_ERROR_ALERT = 'ERROR_CALL_DADDY'
VBA_KEYERROR_ALERT = 'ERROR_IN_SOURCE_HEADERS'
VBA_OK = 'EXPORTED_SUCCESSFULLY'

# Hard-coded source-file locations for local development runs.
if is_windows_machine():
    # ORDERS_SOURCE_FILE = r'C:\Coding\Ebay\Working\Backups\Etsy\EtsySoldOrders2022-1 24.csv'
    # ORDERS_SOURCE_FILE = r'C:\Coding\Ebay\Working\Backups\Amazon exports\EU 2022.02.23.txt'
    ORDERS_SOURCE_FILE = r'C:\Coding\Ebay\Working\Backups\Amazon exports\COM 2022.03.10.txt'
else:
    ORDERS_SOURCE_FILE = r'/home/devyo/Coding/Git/Amazon Orders Parser/Amazon exports/Collected exports/run4.txt'

# Logging config:
# Appends UTF-8 records to loading_orders.log in the internal output dir.
log_path = os.path.join(get_output_dir(client_file=False),
                        'loading_orders.log')
logging.basicConfig(handlers=[logging.FileHandler(log_path, 'a', 'utf-8')],
                    level=logging.INFO)


def get_cleaned_orders(source_file: str, sales_channel: str,
                       proxy_keys: dict) -> list:
    '''returns cleaned orders (as cleaned in clean_orders func) from source_file arg path'''
    # Etsy exports are comma-separated; all other channels use tabs.
    if sales_channel == 'Etsy':
        delimiter = ','
    else:
        delimiter = '\t'
    raw_orders = get_raw_orders(source_file, delimiter)
    return clean_orders(raw_orders, sales_channel, proxy_keys)


def get_raw_orders(source_file: str, delimiter: str) -> list: