# coding: utf-8 import logging import unittest import pandas as pd from cfg.paths import LOG_FILENAME from src.tradeflow.TradeflowDC import TradeflowDC from src.tradeflow.TradeflowIL import TradeflowIL from src.utils.logging import setup_loggers logger = logging.getLogger(__name__) setup_loggers('test_tradeflow', log_filepath=LOG_FILENAME, level=logging.INFO) class TestTradeflow(unittest.TestCase): def test_offtake_setter(self): offtake = pd.Series([10, 8, 9, 10, 10, 11, 9, 9, 8, 9, 14, 10]) ic_r = pd.Series([0.5] * offtake.size) ic_s = pd.Series([1.0] * offtake.size) tradeflow = TradeflowDC() tradeflow.offtake = offtake.copy() tradeflow.ic_r = ic_r.copy() tradeflow.ic_s = ic_s.copy() tradeflow.update_from_offtake() new_offtake = pd.Series([10, 8, 9, 10, 11, 11, 9, 9, 8, 9, 14, 10]) tradeflow.offtake = new_offtake.copy() tradeflow.update_from_offtake()
RUN_DATETIME from cfg.paths import LOG_FILENAME from run import get_cycle_date from src.data_wrangling import F_DI_TRADEFLOW, F_IL_OFFTAKE, F_IL_SELLIN, F_IL_OFFTAKE_CHANNEL, F_IL_MAP_SELLIN_EIB from src.data_wrangling.Data import Data from src.data_wrangling.RawMasterIL import RawMasterIL from src.export_results.ExportFeatureImportance import ExportFeatureImportance from src.forecaster.ForecasterIL import ForecasterIL from src.postprocessing.postprocessing import PostProcessing from src.scenario import F_RESULTS, F_DATABASE, F_TABLE, F_CONNECT, V_IL, F_FEATURE_IMPORTANCE from src.scenario.Scenario import Scenario from src.utils.logging import setup_loggers from src.utils.misc import run_cmd logger = logging.getLogger(__name__) setup_loggers('demand_forecast_il', log_filepath=LOG_FILENAME) DATA_FROM_CACHE = False FORECASTS_FROM_CACHE = False POSTPROC_FROM_CACHE = False SCENARIO_FROM_CACHE = False SAVE_TO_FS = True SAVE_TO_IMPALA = True # PATH_CSV_RESULTS = DIR_DATA # todo Zhaoxia: set a path to store output as CSV def run_il(date_start: int, horizon: int) -> None: """ Defines the flow to calculate IL forecasts based on the data available in Impala. :return:
# coding: utf-8
# Tests for the post-processing additional-granularity split.
import logging
import os
import unittest

import pandas as pd
import yaml

from cfg.paths import CFG_ADDL_GRAN, DIR_CACHE
from cfg.paths import LOG_FILENAME
from src.postprocessing.postprocessing import PostProcessing
from src.utils.logging import setup_loggers

logger = logging.getLogger(__name__)
setup_loggers('test_postprocessing', log_filepath=LOG_FILENAME, level=logging.INFO)


class AddGranTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load the history and forecast fixtures referenced by the
        additional-granularity YAML config once for the whole test class.
        """
        with open(CFG_ADDL_GRAN) as f:
            # yaml.safe_load instead of yaml.load: calling yaml.load without
            # an explicit Loader is deprecated (PyYAML >= 5.1) and the full
            # loader can construct arbitrary Python objects from the input;
            # this config needs only plain scalars/maps/lists.
            cfg = yaml.safe_load(f)
        # History exports are semicolon-separated; low_memory=False disables
        # chunked parsing so wide mixed-dtype files infer dtypes consistently.
        cls.df_hist_di = pd.read_csv(cfg['di']['history'], sep=';', low_memory=False)
        cls.df_hist_il = pd.read_csv(cfg['il']['history'], sep=';', low_memory=False)
        cls.df_hist_dc = pd.read_csv(cfg['dc']['offtake_history'], sep=';', low_memory=False)
        cls.df_hist_il_sellin = pd.read_csv(cfg['il']['sellin_history'], sep=';', low_memory=False)
        cls.df_hist_eib = pd.read_csv(cfg['il']['eib_history'], sep=';', low_memory=False)
        # Forecast files use the default comma separator.
        cls.df_fcst_il_all = pd.read_csv(cfg['il']['forecast'])
        cls.df_fcst_dc = pd.read_csv(cfg['dc']['forecast'])
import logging
import os
import unittest
from datetime import date

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

from cfg.paths import LOG_FILENAME, DIR_TEST_DATA
from src.deagg.Deagg import Deagg
from src.scenario import *
from src.utils.logging import setup_loggers

logger = logging.getLogger(__name__)
setup_loggers('test_deagg', log_filepath=LOG_FILENAME, level=logging.INFO)

# Anchor the dummy forecast months to the date the test is run.
today = date.today()


class TestDeagg(unittest.TestCase):
    def test_deagg(self):
        """Build a dummy forecast frame spanning four consecutive months
        (25 rows per month) to exercise the de-aggregation.

        NOTE(review): the fixture literal is truncated in this view — the
        remainder of the DataFrame columns and the assertions are not
        visible here.
        """
        # 4 months x 25 rows; column names come from src.scenario constants.
        dummy_forecasts = pd.DataFrame({
            F_DN_MEA_DAT: [today] * 25 + [today + relativedelta(months=1)] * 25
                          + [today + relativedelta(months=2)] * 25
                          + [today + relativedelta(months=3)] * 25,
            F_DN_LV2_UMB_BRD_COD: ['DC'] * 100,
            F_DN_LV3_PDT_BRD_COD: ['AP'] * 50 + ['NC'] * 50,
            F_DN_LV5_PDT_SFM_COD: ['1'] * 25 + ['2'] * 25 + ['1'] * 25 + ['2'] * 25,
# NOTE(review): this chunk appears truncated at the top — `logging`, `os`
# and `unittest` are used below but their imports are not visible here.
import pandas as pd
import yaml

from cfg.paths import DIR_CFG, DIR_CACHE
from cfg.paths import LOG_FILENAME
from src.data_wrangling.RawMasterDC import SELECTED_SKUS as SELECTED_SKU_DC
from src.data_wrangling.RawMasterIL import EXCLUDED_SKUS as EXCLUDED_SKUS_IL
from src.scenario import *
from src.scenario import F_DN_OFT_VAL, F_DN_MEA_DAT, F_CONNECT, F_RESULTS, F_DATABASE, F_TABLE, \
    F_DN_FRC_USR_NAM_DSC, F_DN_FRC_CRE_DAT, F_DN_FRC_MDF_DAT, F_DN_DATE_FMT
from src.scenario.Scenario import Scenario
from src.utils.impala_utils import ImpalaUtils
from src.utils.logging import setup_loggers

logger = logging.getLogger(__name__)
setup_loggers('test_data_pipeline', log_filepath=LOG_FILENAME, level=logging.INFO)

# Component keys used when comparing pipeline stages.
COMP_SELLOUT = 'comp_sellout'
COMP_SELLIN = 'comp_sellin'
COMP_OFFTAKE = 'comp_offtake'


class TestDataPipeline(unittest.TestCase):
    # @unittest.skip('Skipped')
    def test_check_il_raw_to_postproc(self):
        """Cross-check the IL raw-master input against the post-processed
        output cached by run.demand_forecast_il.
        """
        # load data obtained from run.demand_forecast_il
        data_il_raw_master: pd.DataFrame = pd.read_pickle(os.path.join(DIR_CACHE, 'raw_master_il.pkl'))
        data_il_output_postproc: pd.DataFrame = pd.read_pickle(os.path.join(DIR_CACHE, 'il_actual_split.pkl'))
        # NOTE(review): named "first" forecast month but computed with .max()
        # over the post-processed dates — presumably the month after the last
        # actual; confirm the intent against the rest of this test.
        first_forecast_month = data_il_output_postproc[F_AF_DATE].max()