Ejemplo n.º 1
0
class DIAGEOAUToolBox:
    """Session-level KPI tool box for the DIAGEO AU project.

    Wires the shared DIAGEO generator together with the legacy (Common)
    and new (CommonV2) result writers and drives the global KPI flows.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(self.data_provider, self.output, self.common)

    def main_calculation(self):
        """Run all KPI calculations for the session and commit the results."""
        # SOS out-of-the-box KPIs
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # SOS split per scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global grouping-assortment KPIs
        grouping_results = self.diageo_generator.diageo_global_grouping_assortment_calculation()
        self.commonV2.save_json_to_new_tables(grouping_results)

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 2
0
 def __init__(self, data_provider, output):
     """Collect session data, DB access and template sheets for the KPI run.

     :param data_provider: project data provider holding the session's data.
     :param output: output object forwarded to the calculation helpers.
     """
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     # DB connection shared with the general tool box below
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.tools = CCBRGENERALToolBox(self.data_provider,
                                     self.output,
                                     rds_conn=self.rds_conn)
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.kpi_results_queries = []
     self.survey = Survey(self.data_provider, self.output)
     self.kpi_results_new_tables_queries = []
     self.New_kpi_static_data = self.get_new_kpi_static_data()
     self.session_id = self.data_provider.session_id
     # session prices fetched through PsDataProvider.get_price_union
     self.prices_per_session = PsDataProvider(
         self.data_provider, self.output).get_price_union(self.session_id)
     self.common_db = Common(self.data_provider)
     # project template sheets; empty cells normalised to ""
     self.count_sheet = pd.read_excel(PATH, Const.COUNT).fillna("")
     self.group_count_sheet = pd.read_excel(PATH,
                                            Const.GROUP_COUNT).fillna("")
     self.survey_sheet = pd.read_excel(PATH, Const.SURVEY).fillna("")
Ejemplo n.º 3
0
 def __init__(self, data_provider, output):
     """Gather session/store data and build the DIAGEO calculation helpers.

     :param data_provider: project data provider holding the session's data.
     :param output: output object forwarded to the calculation helpers.
     """
     self.k_engine = BaseCalculationsScript(data_provider, output)
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
     self.store_info = self.data_provider[Data.STORE_INFO]
     # channel comes from the store's 'store_type' column, upper-cased when set
     self.store_channel = self.store_info['store_type'].values[0]
     if self.store_channel:
         self.store_channel = self.store_channel.upper()
     # store type taken from 'additional_attribute_1' (project convention)
     self.store_type = self.store_info['additional_attribute_1'].values[0]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.match_display_in_scene = self.get_match_display()
     self.set_templates_data = {}
     self.kpi_static_data = self.get_kpi_static_data()
     # per-level score accumulators; LEVEL1-3 are class-level constants
     self.scores = {self.LEVEL1: {},
                    self.LEVEL2: {},
                    self.LEVEL3: {}}
     self.kpi_results_queries = []
     self.output = output
     self.common = Common(self.data_provider)
     self.commonV2 = CommonV2(self.data_provider)
     self.tools = DIAGEOToolBox(self.data_provider, output,
                                match_display_in_scene=self.match_display_in_scene)  # replace the old one
     self.diageo_generator = DIAGEOGenerator(self.data_provider, self.output, self.common)
Ejemplo n.º 4
0
 def __init__(self, data_provider, output):
     """Load session data, resolve the store type from the DB and parse templates.

     :param data_provider: project data provider holding the session's data.
     :param output: output object forwarded to the calculation helpers.
     """
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
     # store type is read from the DB (attr3 query) instead of the data provider:
     # self.store_type = self.data_provider[Data.STORE_INFO]['store_type'].iloc[0]
     query_store_type = CCZAQueries.get_attr3(self.session_uid)
     store_type = pd.read_sql_query(query_store_type, self.rds_conn.db)
     self.store_type = store_type[Const.ATTR3].iloc[0]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.tools = GENERALToolBox(self.data_provider, self.output, rds_conn=self.rds_conn)
     self.survey_response = self.data_provider[Data.SURVEY_RESPONSES]
     self.tool_box_for_flow = ToolBox
     # one parsed data-frame per template sheet, keyed by sheet name;
     # the header row index per sheet comes from Const.sheet_names_and_rows
     self.kpi_sheets = {}
     for name in Const.sheet_names_and_rows:
         self.kpi_sheets[name] = parse_template(TEMPLATE_PATH, sheet_name=name,
                                                lower_headers_row_index=Const.sheet_names_and_rows[name])
     self.common = Common(self.data_provider, Const.RED_SCORE)
     self.survey_handler = Survey(self.data_provider, self.output, self.kpi_sheets[Const.SURVEY_QUESTIONS])
     self.kpi_static_data = self.common.kpi_static_data
     self.kpi_results_queries = []
     self.common_v2 = CommonV2(self.data_provider)
     self.own_manuf_fk = self.get_own_manufacturer_fk()
     # scif rows flagged relevant for SOS (RLV_SOS_SC == 1)
     self.scif_match_react = self.scif[self.scif[ScifConsts.RLV_SOS_SC] == 1]
Ejemplo n.º 5
0
class DIAGEOUGToolBox:
    """Session-level KPI tool box for the DIAGEO UG project.

    Holds the session data provider, both result writers and the shared
    DIAGEO generator, and drives the global KPI flows.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(
            self.data_provider, self.output, self.common)

    def main_calculation(self):
        """Run the session's KPI calculations and commit the results."""
        # SOS out-of-the-box KPIs
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # SOS split per scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment KPIs (v2)
        assortment_v2 = self.diageo_generator.diageo_global_assortment_function_v2()
        self.commonV2.save_json_to_new_tables(assortment_v2)

        # Global assortment KPIs (v3) - consumed by the new mobile reports
        assortment_v3 = self.diageo_generator.diageo_global_assortment_function_v3()
        self.commonV2.save_json_to_new_tables(assortment_v3)

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 6
0
 def __init__(self, data_provider, output):
     """Cache session data and build the CUBAU calculation helpers.

     :param data_provider: project data provider holding the session's data.
     :param output: output object forwarded to the calculation helpers.
     """
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.tools = CUBAUCUBAUGENERALToolBox(self.data_provider,
                                           self.output,
                                           rds_conn=self.rds_conn)
     self.common = Common(self.data_provider)
     self.common_sos = SOS(self.data_provider, self.output)
     self.kpi_results_queries = []
     self.k_engine = BaseCalculationsGroup(data_provider, output)
     self.store_type = data_provider.store_type
     # scene matches enriched with direction info - see the helper for details
     self.matches_with_direction = self.get_match_product_in_scene_with_direction(
     )
Ejemplo n.º 7
0
 def __init__(self, data_provider, output):
     """Cache session/store data and build the DIAGEO helpers (menu enabled).

     :param data_provider: project data provider holding the session's data.
     :param output: output object forwarded to the calculation helpers.
     """
     self.output = output
     self.data_provider = data_provider
     self.common = Common(self.data_provider)
     self.common_v2 = CommonV2(self.data_provider)
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_info = self.data_provider[Data.STORE_INFO]
     # channel comes from the store's 'store_type' column, upper-cased when set
     self.store_channel = self.store_info['store_type'].values[0]
     if self.store_channel:
         self.store_channel = self.store_channel.upper()
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.kpi_static_data = self.common.get_kpi_static_data()
     self.kpi_results_queries = []
     self.set_templates_data = {}
     self.match_display_in_scene = self.get_match_display()
     self.tools = DIAGEOToolBox(
         self.data_provider,
         output,
         match_display_in_scene=self.match_display_in_scene)
     # global generator with the menu calculation flow enabled
     self.global_gen = DIAGEOGenerator(self.data_provider,
                                       self.output,
                                       self.common,
                                       menu=True)
Ejemplo n.º 8
0
 def __init__(self, data_provider, output):
     """Store the providers and build the result writers plus the DIAGEO generator."""
     self.data_provider = data_provider
     self.output = output
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.common = Common(self.data_provider)
     self.commonV2 = CommonV2(self.data_provider)
     self.diageo_generator = DIAGEOGenerator(
         self.data_provider, self.output, self.common)
Ejemplo n.º 9
0
class DIAGEOIEToolBox:
    """Session-level KPI tool box for the DIAGEO IE project.

    Wires the legacy (Common) and new (CommonV2) result writers together
    with the shared DIAGEO generator (menu flow enabled) and runs the
    global KPI flows.
    """

    def __init__(self, data_provider, output):
        """Cache the providers and build the shared helpers.

        :param data_provider: project data provider for the current session.
        :param output: output object forwarded to the DIAGEO generator.
        """
        # Fix: the original assigned self.output twice; one assignment suffices.
        self.output = output
        self.data_provider = data_provider
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(self.data_provider,
                                                self.output,
                                                self.common,
                                                menu=True)

    def main_calculation(self):
        """Calculate the session's KPI results and commit them."""
        # SOS out-of-the-box KPIs, POSM included
        sos_res = self.diageo_generator.global_sos_calculation(
            include_posm=True)
        self.commonV2.save_json_to_new_tables(sos_res)

        # SOS split per scene type (with POSM)
        self.diageo_generator.sos_by_scene_type_with_posm(self.commonV2)

        # Global grouping-assortment KPIs
        assortment_res = self.diageo_generator.diageo_global_grouping_assortment_calculation(
        )
        self.commonV2.save_json_to_new_tables(assortment_res)

        # Global share-of-menu KPIs
        menus_res = self.diageo_generator.diageo_global_new_share_of_menu_function(
        )
        self.commonV2.save_json_to_new_tables(menus_res)

        # Global tap brand score, driven by the bundled Brand Score template
        template_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
            'Data', 'Brand Score.xlsx')
        res_dict = self.diageo_generator.diageo_global_tap_brand_score_function(
            template_path, save_to_tables=False, calculate_components=True)
        self.commonV2.save_json_to_new_tables(res_dict)

        # Global visible-to-consumer KPI over the session's non-empty SKU ean codes
        sku_list = filter(
            None, self.scif[self.scif['product_type'] ==
                            'SKU'].product_ean_code.tolist())
        res_dict = self.diageo_generator.diageo_global_visible_percentage(
            sku_list)
        if res_dict:
            self.commonV2.save_json_to_new_tables(res_dict)

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 10
0
 def __init__(self, data_provider, output):
     """Keep the providers and build the shared DIAGEO helpers."""
     self.data_provider = data_provider
     self.output = output
     self.project_name = self.data_provider.project_name
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.common = Common(self.data_provider)
     self.commonV2 = CommonV2(self.data_provider)
     self.diageo_generator = DIAGEOGenerator(
         self.data_provider, self.output, self.common)
     self.template_handler = TemplateHandler(self.project_name)
Ejemplo n.º 11
0
class DIAGEOPLToolBox:
    """Session-level KPI tool box for the DIAGEO PL project.

    Builds both result writers and the shared DIAGEO generator (menu flow
    enabled) and runs the global KPI flows for the session.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(
            self.data_provider, self.output, self.common, menu=True)

    def main_calculation(self):
        """Run every KPI flow for the session and commit the results."""
        # SOS out-of-the-box KPIs
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # SOS split per scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global grouping-assortment KPIs
        grouping_res = self.diageo_generator.diageo_global_grouping_assortment_calculation()
        self.commonV2.save_json_to_new_tables(grouping_res)

        # Global share-of-menu KPIs
        menu_res = self.diageo_generator.diageo_global_new_share_of_menu_function()
        self.commonV2.save_json_to_new_tables(menu_res)

        # Global visible-to-consumer KPI over the session's non-empty SKU ean codes
        sku_rows = self.scif[self.scif['product_type'] == 'SKU']
        sku_list = filter(None, sku_rows.product_ean_code.tolist())
        visibility_res = self.diageo_generator.diageo_global_visible_percentage(
            sku_list)
        if visibility_res:
            self.commonV2.save_json_to_new_tables(visibility_res)

        # Global secondary-displays KPI
        display_res = self.diageo_generator.diageo_global_secondary_display_secondary_function()
        if display_res:
            self.commonV2.write_to_db_result(fk=display_res['fk'],
                                             numerator_id=1,
                                             denominator_id=self.store_id,
                                             result=display_res['result'])

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 12
0
 def run_project_calculations(self):
     """Run all HEINZ global KPI calculations for the session and commit them.

     Times the whole run via self.timer and writes results through the
     HEINZGenerator into the new result tables.
     """
     self.timer.start()
     common = Common(self.data_provider)
     heinz = HEINZGenerator(self.data_provider, self.output, common)
     heinz.heinz_global_distribution_per_category()
     heinz.heinz_global_share_of_shelf_function()
     # price-adherence targets come from the bundled Config workbook.
     # NOTE(review): 'sheetname' is the pre-0.21 pandas keyword (replaced by
     # 'sheet_name') - confirm the pinned pandas version before upgrading.
     heinz.heinz_global_price_adherence(
         pd.read_excel(os.path.join(
             os.path.dirname(os.path.realpath(__file__)), 'Config',
             'Price Adherence Targets 3Ene2019.xlsx'),
                       sheetname="Price Adherence"))
     heinz.heinz_global_extra_spaces()
     common.commit_results_data_to_new_tables()
     self.timer.stop('KPIGenerator.run_project_calculations')
Ejemplo n.º 13
0
 def __init__(self, data_provider, output):
     """Cache session data, DB access and the shared DIAGEO helpers.

     :param data_provider: project data provider for the current session.
     :param output: output object forwarded to the DIAGEO generator.
     """
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.ps_data_provider = PsDataProvider(data_provider)
     self.store_id = self.data_provider[Data.STORE_FK]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.output = output
     self.common = Common(self.data_provider)
     self.commonV2 = CommonV2(self.data_provider)
     self.diageo_generator = DIAGEOGenerator(self.data_provider,
                                             self.output, self.common)
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.session_uid = self.data_provider.session_uid
Ejemplo n.º 14
0
class DIAGEOBRToolBox:
    """Session-level KPI tool box for the DIAGEO BR project.

    Builds both result writers and the shared DIAGEO generator (menu flow
    enabled) and runs the global KPI flows for the session.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(
            self.data_provider, self.output, self.common, menu=True)

    def main_calculation(self):
        """Run every KPI flow for the session and commit the results."""
        # SOS out-of-the-box KPIs
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # SOS split per scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment KPIs (v2)
        assortment_v2 = self.diageo_generator.diageo_global_assortment_function_v2()
        self.commonV2.save_json_to_new_tables(assortment_v2)

        # Global assortment KPIs (v3) - consumed by the new mobile reports
        assortment_v3 = self.diageo_generator.diageo_global_assortment_function_v3()
        self.commonV2.save_json_to_new_tables(assortment_v3)

        # Global secondary-displays KPI
        display_res = self.diageo_generator.diageo_global_secondary_display_secondary_function()
        if display_res:
            self.commonV2.write_to_db_result(fk=display_res['fk'],
                                             numerator_id=1,
                                             denominator_id=self.store_id,
                                             result=display_res['result'])

        # Global share-of-menu KPIs
        menu_res = self.diageo_generator.diageo_global_new_share_of_menu_function()
        self.commonV2.save_json_to_new_tables(menu_res)

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 15
0
    def __init__(self, data_provider, output):
        """Cache session data, result writers and the assortment helper.

        :param data_provider: project data provider for the current session.
        :param output: output object forwarded to the assortment helper.
        """
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.commonV2 = CommonV2(self.data_provider)
        self.common = Common(self.data_provider)
        self.assortment = Assortment(self.data_provider, self.output)

        # own-manufacturer pk taken from the session's parameter values
        self.own_manuf_fk = int(
            self.data_provider.own_manufacturer.param_value.values[0])
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.new_kpi_static_data = self.commonV2.get_new_kpi_static_data()
        self.all_products_include_deleted = self.data_provider[
            Data.ALL_PRODUCTS_INCLUDING_DELETED]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
Ejemplo n.º 16
0
class DIAGEOKEToolBox:
    """Session-level KPI tool box for the DIAGEO KE project.

    Builds both result writers and the shared DIAGEO generator and runs
    the global KPI flows for the session.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(
            self.data_provider, self.output, self.common)

    def main_calculation(self):
        """Run the session's KPI flows and commit the results."""
        # SOS out-of-the-box KPIs
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # SOS split per scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment KPIs (v2)
        assortment_v2 = self.diageo_generator.diageo_global_assortment_function_v2()
        self.commonV2.save_json_to_new_tables(assortment_v2)

        # Global assortment KPIs (v3) - consumed by the new mobile reports
        assortment_v3 = self.diageo_generator.diageo_global_assortment_function_v3()
        self.commonV2.save_json_to_new_tables(assortment_v3)

        # Global visible-to-consumer KPI over the session's non-empty SKU ean codes
        sku_rows = self.scif[self.scif['product_type'] == 'SKU']
        sku_list = filter(None, sku_rows.product_ean_code.tolist())
        visibility_res = self.diageo_generator.diageo_global_visible_percentage(
            sku_list)
        if visibility_res:
            self.commonV2.save_json_to_new_tables(visibility_res)

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 17
0
class DIAGEOARDIAGEOARToolBox:
    """Session-level KPI tool box for the DIAGEO AR project.

    Builds both result writers, the shared DIAGEO generator and a template
    handler, then runs the global KPI flows for the session.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        self.project_name = data_provider.project_name
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(
            self.data_provider, self.output, self.common)
        self.template_handler = TemplateHandler(self.project_name)

    def main_calculation(self):
        """Run the session's KPI flows and commit the results."""
        # Refresh the cached templates, timing the update
        refresh_templates = log_runtime('Updating templates')(
            self.template_handler.update_templates)
        refresh_templates()

        # SOS out-of-the-box KPIs
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # SOS split per scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment KPIs (v2)
        assortment_v2 = self.diageo_generator.diageo_global_assortment_function_v2()
        self.commonV2.save_json_to_new_tables(assortment_v2)

        # Global assortment KPIs (v3) - consumed by the new mobile reports
        assortment_v3 = self.diageo_generator.diageo_global_assortment_function_v3()
        self.commonV2.save_json_to_new_tables(assortment_v3)

        # Global relative-position KPIs, driven by the downloaded template
        rel_pos_template = self.template_handler.download_template(
            DiageoKpiNames.RELATIVE_POSITION)
        rel_pos_results = self.diageo_generator.diageo_global_relative_position_function(
            rel_pos_template, location_type='template_name')
        self.commonV2.save_json_to_new_tables(rel_pos_results)

        # Commit to the new tables first, then to the legacy ones
        self.commonV2.commit_results_data()
        self.common.commit_results_data()
Ejemplo n.º 18
0
 def __init__(self, data_provider, output):
     """Cache session data, parse the summary-KPI template and build helpers.

     :param data_provider: project data provider for the current session.
     :param output: output object forwarded to the CUBAU tool box.
     """
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.adjacency = Adjancency(self.data_provider)
     self.block = Block(self.data_provider)
     # template lives next to the module: <module dir>/Data/summary_kpis.xlsx
     self.template_name = 'summary_kpis.xlsx'
     self.TEMPLATE_PATH = os.path.join(
         os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
         'Data', self.template_name)
     self.template_data = parse_template(self.TEMPLATE_PATH, "KPIs")
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.tools = GENERALToolBox(self.data_provider)
     self.common = Common(self.data_provider)
     self.kpi_results_queries = []
     self.cub_tools = CUBAUCUBAUGENERALToolBox(self.data_provider,
                                               self.output)
     self.store_type = self.data_provider.store_type
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.session_uid = self.data_provider.session_uid
     self.visit_date = self.data_provider.visit_date
Ejemplo n.º 19
0
 def __init__(self, data_provider, output):
     """Collect session data, assortment, templates and DIAGEO helpers.

     :param data_provider: project data provider for the current session.
     :param output: output object forwarded to the calculation helpers.
     """
     self.data_provider = data_provider
     self.commonV2 = CommonV2(self.data_provider)
     self.rds_conn = PSProjectConnector(self.data_provider.project_name, DbUsers.CalculationEng)
     self.store_assortment = PSAssortmentDataProvider(self.data_provider).execute(policy_name=None)
     self.output = output
     self.kpi_static_data = self.get_kpi_static_data()
     self.store_id = self.data_provider[Data.STORE_FK]
     # NOTE(review): hard-coded manufacturer fk - presumably the project's own
     # manufacturer id; confirm against the DB before reusing elsewhere.
     self.own_manufacturer_fk = 12
     self.store_info = self.get_store_info(self.store_id)
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.all_templates = self.data_provider[Data.ALL_TEMPLATES]
     self.common = Common(self.data_provider)
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.diageo_generator = DIAGEOGenerator(self.data_provider, self.output, self.common)
     self.template_handler = TemplateHandler(self.data_provider.project_name)
     # build <module dir>/Data/Template.xlsx and load its 'KPI' sheet
     self.template_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'Data')
     self.template_path = os.path.join(self.template_path, "Template.xlsx")
     self.kpi_template_data = parse_template(self.template_path, sheet_name='KPI')
     # NOTE(review): this overwrites the store_info fetched via
     # get_store_info(...) above - the earlier value is discarded.
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.current_date = datetime.now()
Ejemplo n.º 20
0
class CCBRToolBox:
    """Session-level KPI calculator for the CCBR project.

    KPI definitions are read from the Excel template at PATH (sheets for
    count, group-count and survey KPIs); results are accumulated as raw
    INSERT queries and committed to the new KPI tables in one transaction.
    """

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.tools = CCBRGENERALToolBox(self.data_provider,
                                        self.output,
                                        rds_conn=self.rds_conn)
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.kpi_results_queries = []
        self.survey = Survey(self.data_provider, self.output)
        self.kpi_results_new_tables_queries = []
        self.New_kpi_static_data = self.get_new_kpi_static_data()
        self.session_id = self.data_provider.session_id
        self.prices_per_session = PsDataProvider(
            self.data_provider, self.output).get_price_union(self.session_id)
        self.common_db = Common(self.data_provider)
        # template sheets are read once; empty cells normalized to ""
        self.count_sheet = pd.read_excel(PATH, Const.COUNT).fillna("")
        self.group_count_sheet = pd.read_excel(PATH,
                                               Const.GROUP_COUNT).fillna("")
        self.survey_sheet = pd.read_excel(PATH, Const.SURVEY).fillna("")

    def main_calculation(self):
        """
        This function calculates the KPI results.
        Iterates every row of the KPIs sheet and dispatches each atomic KPI,
        then runs the availability/pricing KPIs and commits all results.
        """
        kpis_sheet = pd.read_excel(PATH, Const.KPIS).fillna("")
        for index, row in kpis_sheet.iterrows():
            self.handle_atomic(row)
        self.handle_simon_kpis()
        self.commit_results_data()

    def handle_simon_kpis(self):
        """
        activate the availability and pricing functions
        """
        active_products = self.all_products.loc[
            self.all_products["is_active"] > 0]
        self.calculate_availability(active_products)
        self.calculate_pricing(self.all_products)

    def calculate_availability(self, active_products):
        """
        calculates the availability for all products per session, used in the sovi and sovi vertical reports
        :param active_products: a df containing only active products
        """
        # only real SKUs and 'Other' products count towards availability
        active_products_sku_and_other = active_products[
            (active_products['product_type'] == 'SKU')
            | (active_products['product_type'] == 'Other')]
        active_products_pks = active_products_sku_and_other[
            'product_fk'].unique().tolist()
        filters = {'product_fk': active_products_pks}
        filtered_df = self.scif[self.tools.get_filter_condition(
            self.scif, **filters)]
        facing_filtered = filtered_df.loc[filtered_df['facings'] > 0][[
            'template_fk', 'product_fk', 'facings'
        ]]
        facing_filtered_pks = facing_filtered['product_fk'].unique().tolist()
        # one result row per (product, scene type); result = total facings
        for product in facing_filtered_pks:
            product_df = facing_filtered.loc[facing_filtered['product_fk'] ==
                                             product]
            product_template_fks = product_df['template_fk'].unique().tolist()
            for template_fk in product_template_fks:
                sum_facing = product_df.loc[product_df['template_fk'] ==
                                            template_fk]['facings'].sum()
                self.write_to_db_result_new_tables(fk=Const.AVAILABILITY_PK,
                                                   numerator_id=product,
                                                   score='1',
                                                   denominator_id=template_fk,
                                                   numerator_result='1',
                                                   result=sum_facing)

    def calculate_pricing(self, all_products):
        """
        inserts into the db the pricing of all active and inactive skus.
        used in preco and preco vertical reports
        :param all_products: df containing all products
        """
        only_sku_type_products = all_products.loc[all_products['product_type']
                                                  == 'SKU']
        all_products_fks_size = only_sku_type_products[['product_fk',
                                                        'size']].fillna("")
        product_fks_and_prices = self.prices_per_session
        merge_size_and_price = pd.merge(all_products_fks_size,
                                        product_fks_and_prices,
                                        how='left',
                                        on='product_fk')
        merge_size_and_price['value'] = merge_size_and_price['value'].fillna(
            '0')
        # itertuples yields (index, product_fk, size, value) positionally
        for row in merge_size_and_price.itertuples():
            product = row[1]  # row['product_fk']
            size = row[2]  # row['size']
            price = row[3]  # row['value']
            if size == '':
                size = 0
            if price > 0:
                self.write_to_db_result_new_tables(fk=Const.PRICING_PK,
                                                   numerator_id=product,
                                                   numerator_result=size,
                                                   result=price)

    def handle_atomic(self, row):
        """
        run the correct kpi for a specific row in the template
        :param row: a row from the template
        """
        atomic_name = row[Const.ENGLISH_KPI_NAME].strip()
        kpi_type = row[Const.KPI_TYPE].strip()
        # dispatch by the KPI type declared in the template row
        if kpi_type == Const.SURVEY:
            self.handle_survey_atomics(atomic_name)
        elif kpi_type == Const.COUNT:
            self.handle_count_atomics(atomic_name)
        elif kpi_type == Const.GROUP_COUNT:
            self.handle_group_count_atomics(atomic_name)

    def handle_survey_atomics(self, atomic_name):
        """
        handle survey questions
        :param atomic_name: the name of the kpi
        :return: only if the survey filters aren't satisfied
        """
        row = self.survey_sheet.loc[self.survey_sheet[Const.ENGLISH_KPI_NAME]
                                    == atomic_name]
        if row.empty:
            Log.warning("Dataframe is empty, wrong kpi name: " + atomic_name)
            return
        store_type_filter = self.store_info['store_type'].values[0].strip()
        store_type_template = row[Const.STORE_TYPE_TEMPLATE].values[0].strip()

        # if cell in template is not empty
        if store_type_template != "":
            store_types = store_type_template.split(",")
            store_types = [item.strip() for item in store_types]
            if store_type_filter not in store_types:
                return

        # find the answer to the survey in session
        question_id = row[Const.SURVEY_QUESTION_ID].values[0]
        question_answer_template = row[Const.TARGET_ANSWER].values[0]

        survey_result = self.survey.get_survey_answer(
            ('question_fk', question_id))
        if question_answer_template == Const.NUMERIC:
            # numeric questions: missing or non-numeric answers score 0
            if not survey_result:
                survey_result = 0
            if not isinstance(survey_result, (int, long, float)):
                Log.warning("question id " + str(question_id) +
                            " in template is not a number")
                survey_result = 0

        else:
            # textual questions: 1 when the expected answer matched, else -1
            answer = self.survey.check_survey_answer(
                ('question_fk', question_id), question_answer_template)
            survey_result = 1 if answer else -1

        try:
            atomic_pk = self.common_db.get_kpi_fk_by_kpi_name_new_tables(
                atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return

        self.write_to_db_result_new_tables(fk=atomic_pk,
                                           numerator_id=self.session_id,
                                           numerator_result=survey_result,
                                           result=survey_result)

    def handle_count_atomics(self, atomic_name):
        """
        handle count kpis, used in consolidada report
        :param atomic_name: the name of the kpi to calculate
        """
        sum_of_count = 0
        target = 0
        count_result = 0
        row = self.count_sheet.loc[self.count_sheet[Const.ENGLISH_KPI_NAME] ==
                                   atomic_name]
        if row.empty:
            Log.warning("Dataframe is empty, wrong kpi name: " + atomic_name)
            return

        try:
            atomic_pk = self.common_db.get_kpi_fk_by_kpi_name_new_tables(
                atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return
        # NOTE(review): each iteration overwrites the previous tuple, so only
        # the last matching template row is reported — presumably one row per
        # kpi name is expected; confirm against the template.
        for index, row in row.iterrows():
            sum_of_count, target, count_result = self.handle_count_row(row)
        # '+' (numeric) counts may return a dataframe; fall back to the scalar
        if not isinstance(sum_of_count, (int, float, long)):
            sum_of_count = count_result

        self.write_to_db_result_new_tables(fk=atomic_pk,
                                           numerator_id=self.session_id,
                                           numerator_result=sum_of_count,
                                           denominator_result=target,
                                           result=count_result)

    def handle_group_count_atomics(self, atomic_name):
        """
        handle group count kpis (different from count in or and and conditions), used in consolidada report
        :param atomic_name: the name of the kpi to calculate
        """
        rows = self.group_count_sheet.loc[self.group_count_sheet[
            Const.GROUP_KPI_NAME] == atomic_name]
        group_weight = 0
        group_result = 0
        # NOTE(review): group_target is never updated below, so the
        # denominator_result written to the DB is always 0 — confirm intended.
        group_target = 0
        group_sum_of_count = 0
        sum_of_count_df = pd.DataFrame()
        target_operator = ""
        if rows.empty:
            Log.warning("Dataframe is empty, wrong kpi name: " + atomic_name)
            return

        try:
            atomic_pk = self.common_db.get_kpi_fk_by_kpi_name_new_tables(
                atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return

        for index, row in rows.iterrows():
            target_operator = row[Const.TARGET_OPERATOR].strip()
            weight = row[Const.WEIGHT]
            sum_of_count, target, count_result = self.handle_count_row(row)
            if count_result >= 1:
                group_weight += weight
                if group_weight >= 100:
                    # use for getting numeric results instead of 1 and 0
                    if (target_operator == '+'):
                        sum_of_count_df = pd.concat(
                            [sum_of_count_df, sum_of_count])
                    else:
                        group_result = 1
                        break

            # conditional, if given -1000 kpi must fail
            elif count_result == -1000:
                group_result = 0
                break

        # use for getting numeric results instead of 1 and 0
        if (target_operator == '+'):
            if sum_of_count_df.empty:
                group_sum_of_count = 0
            else:
                # count distinct scenes across all accumulated rows
                group_sum_of_count = len(sum_of_count_df.groupby('scene_id'))
            group_result = group_sum_of_count

        self.write_to_db_result_new_tables(fk=atomic_pk,
                                           numerator_id=self.session_id,
                                           numerator_result=group_sum_of_count,
                                           denominator_result=group_target,
                                           result=group_result)

    def handle_count_row(self, row):
        """
        filters all params in a specific row and sends it to the correct count calculation
        :param row: one template row describing a count condition
        :return: (count_of_units, target, count_result)
        """
        count_type = row[Const.COUNT_TYPE].strip()
        target = row[Const.TARGET]
        target_operator = row[Const.TARGET_OPERATOR].strip()
        product_template = row[Const.PRODUCT]
        store_type_filter = self.store_info['store_type'].values[0]
        store_type_template = row[Const.STORE_TYPE_TEMPLATE]
        product_size = row[Const.PRODUCT_SIZE]
        product_size_operator = row[Const.PRODUCT_SIZE_OPERATOR].strip()
        product_measurement_unit = row[Const.MEASUREMENT_UNIT].strip()
        consider_few = row[Const.CONSIDER_FEW]
        multipack_template = row[Const.MULTIPACK].strip()
        multipack_df = None

        # filter store type
        if store_type_template != "":
            store_types = store_type_template.split(",")
            store_types = [item.strip() for item in store_types]
            if store_type_filter not in store_types:
                return 0, 0, 0

        filtered_df = self.scif.copy()

        # filter product
        if product_template != "":
            products_to_check = product_template.split(",")
            products_to_check = [item.strip() for item in products_to_check]
            filtered_df = filtered_df[filtered_df['product_name'].isin(
                products_to_check)]
            if filtered_df.empty:
                return 0, 0, 0

        # filter product size
        if product_size != "":
            # normalize the template target to milliliters
            if product_measurement_unit == 'l':
                product_size *= 1000

            ml_df = filtered_df[filtered_df['size_unit'] == 'ml']
            l_df = filtered_df[filtered_df['size_unit'] == 'l']

            if multipack_template != "":
                multipack_df = filtered_df[filtered_df['MPACK'] == 'Y']
            # convert liter rows to ml so all sizes compare in the same unit
            temp_df = l_df.copy()
            temp_df['size'] = l_df['size'].apply((lambda x: x * 1000))
            filtered_df = pd.concat([temp_df, ml_df])

            if product_size_operator == '<':
                filtered_df = filtered_df[filtered_df['size'] < product_size]
            elif product_size_operator == '<=':
                filtered_df = filtered_df[filtered_df['size'] <= product_size]
            elif product_size_operator == '>':
                filtered_df = filtered_df[filtered_df['size'] > product_size]
            elif product_size_operator == '>=':
                filtered_df = filtered_df[filtered_df['size'] >= product_size]
            elif product_size_operator == '=':
                filtered_df = filtered_df[filtered_df['size'] == product_size]

            # multipack conditions is an or between product size and MPACK
            if multipack_template != "":
                filtered_df = pd.concat([filtered_df,
                                         multipack_df]).drop_duplicates()

        filters = self.get_filters_from_row(row)
        count_of_units = 0
        if count_type == Const.SCENE:
            count_of_units = self.count_of_scenes(filtered_df, filters,
                                                  target_operator, target)
        elif count_type == Const.FACING:
            count_of_units = self.count_of_facings(filtered_df, filters,
                                                   consider_few, target)
        elif count_type == Const.SCENE_SOS:
            count_of_units = self.count_of_sos(filtered_df, filters)
        else:
            Log.warning("Couldn't find a correct COUNT variable in template")

        if target_operator == '<=':
            count_result = 1 if (target <= count_of_units) else 0

        # use for getting numeric results instead of 1 and 0
        elif target_operator == '+':
            if isinstance(count_of_units, (int, float, long)):
                count_result = count_of_units
            else:
                # count_of_scenes returned a dataframe; report its row count
                count_result = len(count_of_units)
        else:
            count_result = 1 if (target >= count_of_units) else 0
        return count_of_units, target, count_result

    def get_filters_from_row(self, row):
        """
        handle filters appearing in scif
        :param row: row containing all filters
        :return: a dictionary of the filters
        """
        filters = dict(row)

        # no need to be accounted for, fields that aren't in scif
        for field in Const.DELETE_FIELDS:
            if field in filters:
                del filters[field]

        if Const.WEIGHT in filters.keys():
            del filters[Const.WEIGHT]
        if Const.GROUP_KPI_NAME in filters.keys():
            del filters[Const.GROUP_KPI_NAME]

        # an exclude value becomes a (value, EXCLUDE_FILTER) tuple understood
        # by the general toolbox's filter condition
        exclude_manufacturer = filters[Const.EXCLUDE_MANUFACTURER].strip()
        if exclude_manufacturer != "":
            filters[Const.MANUFACTURER] = (exclude_manufacturer,
                                           Const.EXCLUDE_FILTER)
            del filters[Const.EXCLUDE_MANUFACTURER]

        exclude_category = filters[Const.EXCLUDE_CATEGORY].strip()
        if exclude_category != "":
            filters[Const.CATEGORY] = (exclude_category, Const.EXCLUDE_FILTER)
            del filters[Const.EXCLUDE_CATEGORY]

        # filter all the empty cells
        # (this file targets Python 2 — see the `long` checks above — where
        # .keys() returns a list copy, so deleting while iterating is safe)
        for key in filters.keys():
            if (filters[key] == ""):
                del filters[key]
            elif isinstance(filters[key], tuple):
                filters[key] = (filters[key][0].split(","), filters[key][1])
            else:
                filters[key] = filters[key].split(",")
                filters[key] = [item.strip() for item in filters[key]]

        return self.create_filters_according_to_scif(filters)

    def create_filters_according_to_scif(self, filters):
        """
        adjusting the template names to scif names
        :param filters: only the scif filters in the template shape
        :return: the filters dictionary
        """
        # NOTE(review): MULTIPACK maps to 'MPAK' here while handle_count_row
        # filters on an 'MPACK' column — one of the spellings is likely a
        # typo; confirm against the scif schema before changing either.
        convert_from_scif = {
            Const.TEMPLATE_GROUP: 'template_group',
            Const.TEMPLATE_NAME: 'template_name',
            Const.BRAND: 'brand_name',
            Const.CATEGORY: 'category',
            Const.MANUFACTURER: 'manufacturer_name',
            Const.PRODUCT_TYPE: 'product_type',
            Const.MULTIPACK: 'MPAK'
        }
        for key in filters.keys():
            filters[convert_from_scif[key]] = filters.pop(key)
        return filters

    def count_of_scenes(self, filtered_df, filters, target_operator, target):
        """
        calculate the count of scene types
        :param filtered_df: the first filtered (no scif filters) dataframe
        :param filters: the scif filters
        :param target_operator: the operation to do, + for returning a dataframe (used in group count)
        :param target: the target
        :return: dataframe for group counts +, number of scenes for all other functions
        """
        scene_data = filtered_df[self.tools.get_filter_condition(
            filtered_df, **filters)]
        if target_operator == '+':

            # filter by scene_id and by template_name (scene type)
            scene_types_groupby = scene_data.groupby(
                ['template_name', 'scene_id'])['facings'].sum().reset_index()
            number_of_scenes = scene_types_groupby[
                scene_types_groupby['facings'] >= target]
        else:
            number_of_scenes = len(scene_data['scene_id'].unique())
        return number_of_scenes

    def count_of_sos(self, filtered_df, filters):
        """
        calculating the share of shelf
        :param filtered_df: the first filtered (no scif filters) dataframe
        :param filters: the scif filters
        :return: the number of different scenes answering the condition (hard coded 50%)
        """
        scene_data = filtered_df[self.tools.get_filter_condition(
            filtered_df, **filters)]
        # rename to avoid a column clash with the all-products 'facings' below
        scene_data = scene_data.rename(columns={"facings": "facings_nom"})

        # filter by scene_id and by template_name (scene type)
        scene_types_groupby = scene_data.groupby(['template_name', 'scene_id'
                                                  ])['facings_nom'].sum()
        all_products_groupby = self.scif.groupby(['template_name', 'scene_id'
                                                  ])['facings'].sum()
        merge_result = pd.concat((scene_types_groupby, all_products_groupby),
                                 axis=1,
                                 join='inner').reset_index()
        return len(merge_result[
            merge_result['facings_nom'] >= merge_result['facings'] * 0.5])

    def count_of_facings(self, filtered_df, filters, consider_few, target):
        '''
        calculate the count of facings
        :param filtered_df: the first filtered (no scif filters) dataframe
        :param filters: the scif filters
        :param consider_few: in case there is a need to consider more than one brand
        :param target: the target to pass
        :return: total number of facings (0 when the consider_few gate fails)
        '''
        facing_data = filtered_df[self.tools.get_filter_condition(
            filtered_df, **filters)]
        if consider_few != "":
            # at least `consider_few` brands must each reach the target
            facing_data_groupby = facing_data.groupby(['brand_name'
                                                       ])['facings'].sum()
            if len(facing_data_groupby[
                    facing_data_groupby >= target]) >= consider_few:
                number_of_facings = facing_data['facings'].sum()
            else:
                number_of_facings = 0
        else:
            number_of_facings = facing_data['facings'].sum()
        return number_of_facings

    def get_new_kpi_static_data(self):
        """
        This function extracts the static new KPI data (new tables) and saves it into one global data frame.
        The data is taken from static.kpi_level_2.
        """
        query = CCBRQueries.get_new_kpi_data()
        kpi_static_data = pd.read_sql_query(query, self.rds_conn.db)
        return kpi_static_data

    def write_to_db_result_new_tables(self,
                                      fk,
                                      numerator_id,
                                      numerator_result,
                                      result,
                                      denominator_id=None,
                                      denominator_result=None,
                                      score=None):
        """
        This function creates the result data frame of new tables KPI,
        and appends the insert SQL query into the queries' list, later to be written to the DB.
        """
        table = KPI_NEW_TABLE
        attributes = self.create_attributes_dict_new_tables(
            fk, numerator_id, numerator_result, denominator_id,
            denominator_result, result, score)
        query = insert(attributes, table)
        self.kpi_results_new_tables_queries.append(query)

    def create_attributes_dict_new_tables(self, kpi_fk, numerator_id,
                                          numerator_result, denominator_id,
                                          denominator_result, result, score):
        """
        This function creates a data frame with all attributes needed for saving in KPI results new tables.
        """
        attributes = pd.DataFrame(
            [(kpi_fk, self.session_id, numerator_id, numerator_result,
              denominator_id, denominator_result, result, score)],
            columns=[
                'kpi_level_2_fk', 'session_fk', 'numerator_id',
                'numerator_result', 'denominator_id', 'denominator_result',
                'result', 'score'
            ])
        return attributes.to_dict()

    @log_runtime('Saving to DB')
    def commit_results_data(self):
        """
        This function writes all KPI results to the DB, and commits the changes.
        Existing results for the session are deleted first, so reruns replace
        rather than duplicate rows.
        """
        insert_queries = self.merge_insert_queries(
            self.kpi_results_new_tables_queries)
        # reconnect to get a fresh connection before the write transaction
        self.rds_conn.disconnect_rds()
        self.rds_conn.connect_rds()
        cur = self.rds_conn.db.cursor()
        delete_query = CCBRQueries.get_delete_session_results_query(
            self.session_uid, self.session_id)
        cur.execute(delete_query)
        for query in insert_queries:
            cur.execute(query)
        self.rds_conn.db.commit()
        self.rds_conn.disconnect_rds()

    @staticmethod
    def merge_insert_queries(insert_queries):
        """
        Group INSERT queries sharing the same prefix (everything before
        'VALUES ') and merge each group into one multi-row INSERT.
        :param insert_queries: list of single-row INSERT query strings
        :return: list of merged INSERT query strings
        """
        query_groups = {}
        for query in insert_queries:
            static_data, inserted_data = query.split('VALUES ')
            if static_data not in query_groups:
                query_groups[static_data] = []
            query_groups[static_data].append(inserted_data)
        merged_queries = []
        for group in query_groups:
            merged_queries.append('{0} VALUES {1}'.format(
                group, ',\n'.join(query_groups[group])))
        return merged_queries
Ejemplo n.º 21
0
class DIAGEOMXToolBox:
    """Session-level KPI calculator for the DIAGEO MX project.

    Runs the global DIAGEO KPI suite (OOTB SOS, SOS by scene type,
    assortment v2/v3, global SOS, touch point) unless survey question 98
    was answered in the session.
    """

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.ps_data_provider = PsDataProvider(data_provider)
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.output = output
        # old-tables and new-tables result writers are kept side by side
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(self.data_provider,
                                                self.output, self.common)
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.session_uid = self.data_provider.session_uid

    def main_calculation(self):
        """Calculate all session KPIs and commit results to old and new tables."""
        # skip the entire calculation when question 98 has a recorded answer
        if not self.exists_survey_resposnse():
            """ This function calculates the KPI results """
            # SOS Out Of The Box kpis
            self.diageo_generator.activate_ootb_kpis(self.commonV2)

            # sos by scene type
            self.diageo_generator.sos_by_scene_type(self.commonV2)

            # Global assortment kpis
            assortment_res_dict = self.diageo_generator.diageo_global_assortment_function_v2(
            )
            self.commonV2.save_json_to_new_tables(assortment_res_dict)

            # Global assortment kpis - v3 for NEW MOBILE REPORTS use
            assortment_res_dict_v3 = self.diageo_generator.diageo_global_assortment_function_v3(
            )
            self.commonV2.save_json_to_new_tables(assortment_res_dict_v3)

            # global SOS kpi
            res_dict = self.diageo_generator.diageo_global_share_of_shelf_function(
            )
            self.commonV2.save_json_to_new_tables(res_dict)

            # global touch point kpi
            template_path = os.path.join(
                os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                'Data', 'TOUCH POINT (template) Agosto 2020.xlsx')
            res_dict = self.diageo_generator.diageo_global_touch_point_function(
                template_path)
            self.commonV2.save_json_to_new_tables(res_dict)

            # committing to the old tables
            self.common.commit_results_data()  # commit to old tables
            # committing to new tables
            self.commonV2.commit_results_data()

    def exists_survey_resposnse(self):
        # Method is used to check in the db to if survey question_fk 98 is answered
        # If answered, the kpi should not run
        # NOTE(review): the method name typo ("resposnse") is kept — renaming
        # would break any external callers.
        # NOTE(review): the join compares selected_option_text with qa.pk,
        # which looks like it should be selected_option_fk; the joined table
        # is not used in the selected columns, so results are unaffected —
        # confirm before changing the query.

        query = """SELECT session_uid,sr.question_fk, sr.selected_option_fk
                    FROM probedata.survey_response sr
                    Left Join static.survey_question_answer qa on sr.selected_option_text = qa.pk
                    where sr.question_fk = 98 and session_uid = '{}';""".format(
            self.session_uid)
        cur = self.rds_conn.db.cursor()
        cur.execute(query)
        res = cur.fetchall()

        if res:
            relevant_response_df = pd.DataFrame(list(res))

            try:
                # selected_option_fk (third column) of the first returned row
                survey_response = relevant_response_df.iloc[0, 2]
            except IndexError:
                survey_response = None
        else:
            survey_response = None
        return survey_response
Ejemplo n.º 22
0
class DIAGEOBENELUX_SANDToolBox:
    """
    Session-level KPI calculator for the DIAGEO Benelux (sandbox) project.

    Combines the shared DIAGEO generator KPIs (OOTB SOS, SOS by scene type,
    grouping assortment, menu, secondary displays) with template-driven set
    calculations (SOS, survey, block-together, relative position, POSM,
    assortment) that are written to the legacy 3-level KPI tables through
    `write_to_db_result` / `commit_results_data`.
    """

    # Legacy result-table hierarchy: KPI set (1) -> KPI (2) -> atomic KPI (3).
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    ACTIVATION_STANDARD = 'Activation Standard'

    def __init__(self, data_provider, output):
        """Caches session/store data frames and builds the calculation helpers."""
        self.k_engine = BaseCalculationsScript(data_provider, output)
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_channel = self.store_info['store_type'].values[0]
        # Template CHANNEL values are compared upper-cased (see the
        # block-together / relative-position calculations below).
        if self.store_channel:
            self.store_channel = self.store_channel.upper()
        self.store_type = self.store_info['additional_attribute_1'].values[0]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.match_display_in_scene = self.get_match_display()
        self.set_templates_data = {}
        self.kpi_static_data = self.get_kpi_static_data()
        self.output = output
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.global_gen = DIAGEOGenerator(self.data_provider, self.output,
                                          self.common)
        self.tools = DIAGEOToolBox(
            self.data_provider,
            output,
            match_display_in_scene=self.match_display_in_scene
        )  # replace the old one
        self.diageo_generator = DIAGEOGenerator(self.data_provider,
                                                self.output,
                                                self.common,
                                                menu=True)
        # Per-level cache of written scores, keyed by set/kpi/atomic fk;
        # read back by calculate_activation_standard.
        self.scores = {self.LEVEL1: {}, self.LEVEL2: {}, self.LEVEL3: {}}
        self.kpi_results_queries = []

    def get_kpi_static_data(self):
        """
        This function extracts the static KPI data and saves it into one global data frame.
        The data is taken from static.kpi / static.atomic_kpi / static.kpi_set.
        """
        query = DIAGEOQueries.get_all_kpi_data()
        kpi_static_data = pd.read_sql_query(query, self.rds_conn.db)
        return kpi_static_data

    def get_match_display(self):
        """
        This function extracts the display matches data and saves it into one global data frame.
        The data is taken from probedata.match_display_in_scene.
        """
        query = DIAGEOQueries.get_match_display(self.session_uid)
        match_display = pd.read_sql_query(query, self.rds_conn.db)
        return match_display

    def main_calculation(self):
        """
        Runs the OOTB, SOS-by-scene-type, assortment, menu and secondary
        display KPIs, then commits the results to both the new and the old
        result tables.
        """
        # SOS Out Of The Box kpis
        self.diageo_generator.activate_ootb_kpis(self.commonV2)

        # sos by scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment kpis
        assortment_res = self.diageo_generator.diageo_global_grouping_assortment_calculation(
        )
        self.commonV2.save_json_to_new_tables(assortment_res)

        # Global Menu kpis
        menus_res = self.diageo_generator.diageo_global_new_share_of_menu_function(
        )
        self.commonV2.save_json_to_new_tables(menus_res)

        # Global Secondary Displays function
        res_json = self.diageo_generator.diageo_global_secondary_display_secondary_function(
        )
        if res_json:
            self.commonV2.write_to_db_result(fk=res_json['fk'],
                                             numerator_id=1,
                                             denominator_id=self.store_id,
                                             result=res_json['result'])
        # committing to new tables
        self.commonV2.commit_results_data()
        # committing to the old tables
        self.common.commit_results_data()

    #
    # def main_calculation(self, set_names):
    #     """
    #     This function calculates the KPI results.
    #     """
    #     # Global assortment kpis
    #     assortment_res_dict = self.diageo_generator.diageo_global_assortment_function_v2()
    #     self.commonV2.save_json_to_new_tables(assortment_res_dict)
    #
    #     # Global Menu kpis
    #     menus_res_dict = self.diageo_generator.diageo_global_share_of_menu_cocktail_function(
    #         cocktail_product_level=True)
    #     self.commonV2.save_json_to_new_tables(menus_res_dict)
    #
    #     for set_name in set_names:
    #         set_score = 0
    #
    #         # Global Secondary Displays
    #         if set_name in ('Secondary Displays', 'Secondary'):
    #             res_json = self.diageo_generator.diageo_global_secondary_display_secondary_function()
    #             if res_json:
    #                 self.commonV2.write_to_db_result(fk=res_json['fk'], numerator_id=1, denominator_id=self.store_id,
    #                                                  result=res_json['result'])
    #                 set_score = self.tools.calculate_number_of_scenes(location_type='Secondary')
    #                 self.save_level2_and_level3(set_name, set_name, set_score)
    #
    #         if set_score == 0:
    #             pass
    #         elif set_score is False:
    #             continue
    #
    #         set_fk = self.kpi_static_data[self.kpi_static_data['kpi_set_name'] == set_name]['kpi_set_fk'].values[0]
    #         self.write_to_db_result(set_fk, set_score, self.LEVEL1)
    #
    #     # committing to new tables
    #     self.commonV2.commit_results_data()
    #
    #     return

    def save_level2_and_level3(self, set_name, kpi_name, score):
        """
        Given KPI data and a score, this functions writes the score for both KPI level 2 and 3 in the DB.
        """
        kpi_data = self.kpi_static_data[
            (self.kpi_static_data['kpi_set_name'] == set_name)
            & (self.kpi_static_data['kpi_name'] == kpi_name)]
        kpi_fk = kpi_data['kpi_fk'].values[0]
        atomic_kpi_fk = kpi_data['atomic_kpi_fk'].values[0]
        self.write_to_db_result(kpi_fk, score, self.LEVEL2)
        self.write_to_db_result(atomic_kpi_fk, score, self.LEVEL3)

    def calculate_activation_standard(self):
        """
        This function calculates the Activation Standard KPI, and saves the result to the DB (for all 3 levels).
        """
        final_score = 0
        for params in self.tools.download_template(self.ACTIVATION_STANDARD):
            set_name = params.get(self.tools.ACTIVATION_SET_NAME)
            kpi_name = params.get(self.tools.ACTIVATION_KPI_NAME)
            target = float(params.get(self.tools.ACTIVATION_TARGET))
            # Template targets may be given as fractions (e.g. 0.8 -> 80).
            target = target * 100 if target < 1 else target
            score_type = params.get(self.tools.ACTIVATION_SCORE)
            weight = float(params.get(self.tools.ACTIVATION_WEIGHT))
            # A KPI-level row reads its score from LEVEL2; a set-level row
            # (no kpi_name) reads from LEVEL1. Both rely on self.scores being
            # populated by earlier write_to_db_result calls.
            if kpi_name:
                kpi_fk = self.kpi_static_data[
                    (self.kpi_static_data['kpi_set_name'] == set_name)
                    & (self.kpi_static_data['kpi_name'] == kpi_name
                       )]['kpi_fk'].values[0]
                score = self.scores[self.LEVEL2].get(kpi_fk, 0)
            else:
                set_fk = self.kpi_static_data[
                    self.kpi_static_data['kpi_set_name'] ==
                    set_name]['kpi_set_fk'].values[0]
                score = self.scores[self.LEVEL1].get(set_fk, 0)
            if score >= target:
                score = 100
            else:
                if score_type == 'PROPORTIONAL':
                    score = (score / float(target)) * 100
                else:
                    score = 0
            final_score += score * weight
            self.save_level2_and_level3(self.ACTIVATION_STANDARD, set_name,
                                        score)
        set_fk = self.kpi_static_data[
            self.kpi_static_data['kpi_set_name'] ==
            self.ACTIVATION_STANDARD]['kpi_set_fk'].values[0]
        self.write_to_db_result(set_fk, final_score, self.LEVEL1)

    def calculate_sos_sets(self, set_name):
        """
        This function calculates every SOS-typed KPI from the relevant sets, and returns the set final score.
        Returns False when no KPI in the template applies to this store.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            if params.get(self.store_type) == self.tools.RELEVANT_FOR_STORE:
                result = self.tools.calculate_share_of_shelf(
                    manufacturer=self.tools.DIAGEO,
                    include_empty=self.tools.EXCLUDE_EMPTY)
                score = 1 if result >= params.get(self.tools.TARGET) else 0
                scores.append(score)

                self.save_level2_and_level3(set_name,
                                            params.get(self.tools.KPI_NAME),
                                            score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def calculate_survey_sets(self, set_name):
        """
        This function calculates every Survey-Answer-typed KPI and returns the set final score.
        Numeric answers are compared against the target expression; textual
        answers must match one of the ';'-separated accepted values.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            survey_text = params.get(self.tools.SURVEY_QUESTION)
            answer_field = 'number_value' if params.get(
                self.tools.SURVEY_ANSWER_TYPE) == 'Numeric' else None
            answer_target = params.get(self.tools.SURVEY_ANSWER)
            survey_answer = self.tools.get_survey_answer(
                survey_text, answer_field)
            if answer_field == 'number_value':
                # A bare numeric target means exact equality.
                if str(answer_target).isdigit():
                    answer_target = '=={}'.format(answer_target)
                if not str(survey_answer).isdigit():
                    result = False
                else:
                    # NOTE(review): eval builds e.g. "5==3" from survey/template
                    # data; assumes both operands are purely numeric — confirm
                    # these inputs are trusted (eval on free text is injectable).
                    result = eval('{}{}'.format(survey_answer, answer_target))
            else:
                result = True if survey_answer in answer_target.split(
                    ';') else False
            score = 1 if result else 0
            scores.append(score)

            self.save_level2_and_level3(set_name,
                                        params.get(self.tools.KPI_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def validate_survey(self, params):
        """
        This function checks whether the Survey-typed KPI should be run, based on the Execution Condition Field.
        The condition format is 'question=answer1;answer2;...'.
        """
        survey_validated = False
        condition = params.get(self.tools.SURVEY_CONDITION)
        if not condition:
            survey_validated = True
        else:
            question, answers = condition.split('=')
            survey_answer = self.tools.get_survey_answer(question)
            if survey_answer in answers.split(';'):
                survey_validated = True
        return survey_validated

    def calculate_block_together_sets(self, set_name):
        """
        This function calculates every block-together-typed KPI from the relevant sets, and returns the set final score.
        Returns False when no KPI in the template matches this store's channel.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            if self.store_channel == params.get(self.tools.CHANNEL,
                                                '').upper():
                filters = {'template_name': params.get(self.tools.LOCATION)}
                # Sub-brand filter takes precedence over brand filter.
                if params.get(self.tools.SUB_BRAND_NAME):
                    filters['sub_brand_name'] = params.get(
                        self.tools.SUB_BRAND_NAME)
                else:
                    filters['brand_name'] = params.get(self.tools.BRAND_NAME)
                result = self.tools.calculate_block_together(**filters)
                score = 1 if result else 0
                scores.append(score)

                self.save_level2_and_level3(set_name,
                                            params.get(self.tools.KPI_NAME),
                                            score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def calculate_relative_position_sets(self, set_name):
        """
        This function calculates every relative-position-typed KPI from the relevant sets, and returns the set final score.
        Returns False when no KPI in the template matches this store's channel.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            if self.store_channel == params.get(self.tools.CHANNEL,
                                                '').upper():
                tested_filters = {
                    'product_ean_code': params.get(self.tools.TESTED)
                }
                anchor_filters = {
                    'product_ean_code': params.get(self.tools.ANCHOR)
                }
                # Allowed distance (in facings) from the anchor per direction.
                direction_data = {
                    'top':
                    self._get_direction_for_relative_position(
                        params.get(self.tools.TOP_DISTANCE)),
                    'bottom':
                    self._get_direction_for_relative_position(
                        params.get(self.tools.BOTTOM_DISTANCE)),
                    'left':
                    self._get_direction_for_relative_position(
                        params.get(self.tools.LEFT_DISTANCE)),
                    'right':
                    self._get_direction_for_relative_position(
                        params.get(self.tools.RIGHT_DISTANCE))
                }
                general_filters = {
                    'template_name': params.get(self.tools.LOCATION)
                }
                result = self.tools.calculate_relative_position(
                    tested_filters, anchor_filters, direction_data,
                    **general_filters)
                score = 1 if result else 0
                scores.append(score)

                self.save_level2_and_level3(set_name,
                                            params.get(self.tools.KPI_NAME),
                                            score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def _get_direction_for_relative_position(self, value):
        """
        This function converts direction data from the template (as string) to a number.
        'Unlimited' maps to 1000; missing/non-numeric maps to 0.
        """
        if value == self.tools.UNLIMITED_DISTANCE:
            value = 1000
        elif not value or not str(value).isdigit():
            value = 0
        else:
            value = int(value)
        return value

    def calculate_posm_sets(self, set_name):
        """
        This function calculates every POSM-typed KPI from the relevant sets, and returns the set final score.
        Skipped entirely when the store has no channel; returns False when no
        KPI in the template is relevant for this store.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            if self.store_channel is None:
                break

            kpi_res = self.tools.calculate_posm(
                display_name=params.get(self.tools.DISPLAY_NAME))
            score = 1 if kpi_res > 0 else 0
            if params.get(self.store_type) == self.tools.RELEVANT_FOR_STORE:
                scores.append(score)

            # Results are saved when the display was found OR the KPI is
            # relevant for this store type (so relevant misses are recorded).
            if score == 1 or params.get(
                    self.store_type) == self.tools.RELEVANT_FOR_STORE:
                self.save_level2_and_level3(
                    set_name, params.get(self.tools.DISPLAY_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def calculate_assortment_sets(self, set_name):
        """
        This function calculates every Assortment-typed KPI from the relevant sets, and returns the set final score.
        A numeric store-type target means "at least N of the listed products";
        otherwise a target of 1 product is assumed.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            target = str(params.get(self.store_type, ''))
            if target.isdigit() or target.capitalize() in (
                    self.tools.RELEVANT_FOR_STORE,
                    self.tools.OR_OTHER_PRODUCTS):
                products = str(
                    params.get(self.tools.PRODUCT_EAN_CODE,
                               params.get(self.tools.PRODUCT_EAN_CODE2,
                                          ''))).replace(',', ' ').split()
                target = 1 if not target.isdigit() else int(target)
                kpi_name = params.get(self.tools.GROUP_NAME,
                                      params.get(self.tools.PRODUCT_NAME))
                kpi_static_data = self.kpi_static_data[
                    (self.kpi_static_data['kpi_set_name'] == set_name)
                    & (self.kpi_static_data['kpi_name'] == kpi_name)]
                if len(products) > 1:
                    # Group KPI: score each product atomically, pass if the
                    # number of present products meets the target.
                    result = 0
                    for product in products:
                        product_score = self.tools.calculate_assortment(
                            product_ean_code=product)
                        result += product_score
                        product_name = \
                        self.all_products[self.all_products['product_ean_code'] == product]['product_name'].values[0]
                        try:
                            atomic_fk = \
                            kpi_static_data[kpi_static_data['atomic_kpi_name'] == product_name]['atomic_kpi_fk'].values[
                                0]
                        except Exception as e:
                            Log.info(
                                'Product {} is not defined in the DB'.format(
                                    product_name))
                            continue
                        self.write_to_db_result(atomic_fk,
                                                product_score,
                                                level=self.LEVEL3)
                    score = 1 if result >= target else 0
                else:
                    result = self.tools.calculate_assortment(
                        product_ean_code=products)
                    atomic_fk = kpi_static_data['atomic_kpi_fk'].values[0]
                    score = 1 if result >= target else 0
                    self.write_to_db_result(atomic_fk,
                                            score,
                                            level=self.LEVEL3)

                scores.append(score)
                kpi_fk = kpi_static_data['kpi_fk'].values[0]
                self.write_to_db_result(kpi_fk, score, level=self.LEVEL2)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def write_to_db_result(self, fk, score, level):
        """
        This function takes the result of every KPI (atomic KPI/KPI/KPI set),
        and appends the insert SQL query into the queries' list, later to be written to the DB.
        It also caches the score in self.scores for calculate_activation_standard.
        """
        attributes = self.create_attributes_dict(fk, score, level)
        if level == self.LEVEL1:
            table = KPS_RESULT
        elif level == self.LEVEL2:
            table = KPK_RESULT
        elif level == self.LEVEL3:
            table = KPI_RESULT
        else:
            return
        self.scores[level][fk] = score
        query = insert(attributes, table)
        self.kpi_results_queries.append(query)

    def create_attributes_dict(self, fk, score, level):
        """
        This function creates a data frame with all attributes needed for saving in KPI results tables.

        """
        score = round(score, 2)
        if level == self.LEVEL1:
            kpi_set_name = self.kpi_static_data[
                self.kpi_static_data['kpi_set_fk'] ==
                fk]['kpi_set_name'].values[0]
            score_type = '%' if kpi_set_name in self.tools.KPI_SETS_WITH_PERCENT_AS_SCORE else ''
            attributes = pd.DataFrame(
                [(kpi_set_name, self.session_uid, self.store_id,
                  self.visit_date.isoformat(), format(score,
                                                      '.2f'), score_type, fk)],
                columns=[
                    'kps_name', 'session_uid', 'store_fk', 'visit_date',
                    'score_1', 'score_2', 'kpi_set_fk'
                ])

        elif level == self.LEVEL2:
            kpi_name = self.kpi_static_data[self.kpi_static_data['kpi_fk'] ==
                                            fk]['kpi_name'].values[0].replace(
                                                "'", "\\'")
            attributes = pd.DataFrame(
                [(self.session_uid, self.store_id, self.visit_date.isoformat(),
                  fk, kpi_name, score)],
                columns=[
                    'session_uid', 'store_fk', 'visit_date', 'kpi_fk',
                    'kpk_name', 'score'
                ])
        elif level == self.LEVEL3:
            data = self.kpi_static_data[self.kpi_static_data['atomic_kpi_fk']
                                        == fk]
            atomic_kpi_name = data['atomic_kpi_name'].values[0].replace(
                "'", "\\'")
            kpi_fk = data['kpi_fk'].values[0]
            kpi_set_name = self.kpi_static_data[
                self.kpi_static_data['atomic_kpi_fk'] ==
                fk]['kpi_set_name'].values[0]
            attributes = pd.DataFrame([
                (atomic_kpi_name, self.session_uid,
                 kpi_set_name, self.store_id, self.visit_date.isoformat(),
                 datetime.utcnow().isoformat(), score, kpi_fk, fk, None, None)
            ],
                                      columns=[
                                          'display_text', 'session_uid',
                                          'kps_name', 'store_fk', 'visit_date',
                                          'calculation_time', 'score',
                                          'kpi_fk', 'atomic_kpi_fk',
                                          'threshold', 'result'
                                      ])
        else:
            attributes = pd.DataFrame()
        return attributes.to_dict()

    @log_runtime('Saving to DB')
    def commit_results_data(self):
        """
        This function writes all KPI results to the DB, and commits the changes.
        Existing results for the session are deleted first so the run is
        idempotent.
        """
        cur = self.rds_conn.db.cursor()
        delete_queries = DIAGEOQueries.get_delete_session_results_query_old_tables(
            self.session_uid)
        for query in delete_queries:
            cur.execute(query)
        for query in self.kpi_results_queries:
            cur.execute(query)
        self.rds_conn.db.commit()
Ejemplo n.º 23
0
class CCUSLiveDemoToolBox:
    def __init__(self, data_provider, output):
        """Caches session data and builds the reporting/assortment helpers."""
        self.output = output
        self.data_provider = data_provider
        self.project_name = data_provider.project_name

        # Reporting helpers (new + legacy tables) and the assortment engine.
        self.commonV2 = CommonV2(self.data_provider)
        self.common = Common(self.data_provider)
        self.assortment = Assortment(self.data_provider, self.output)

        own_manuf_value = self.data_provider.own_manufacturer.param_value.values[0]
        self.own_manuf_fk = int(own_manuf_value)
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.new_kpi_static_data = self.commonV2.get_new_kpi_static_data()
        self.all_products_include_deleted = \
            self.data_provider[Data.ALL_PRODUCTS_INCLUDING_DELETED]
        self.visit_date = self.data_provider[Data.VISIT_DATE]

    def main_calculation(self):
        """Run the assortment KPIs and commit the results to the new tables."""
        Log.info('In function main_calculation reg-data provider ')
        # Results come from the local assortment calculation (not the shared
        # DIAGEO generator used by other projects).
        results = self.assortment_calc()
        self.commonV2.save_json_to_new_tables(results)
        self.commonV2.commit_results_data()

    def get_oos_action_excluded(self, lvl3_result):
        excluded_from_oos = self.commonV2.get_oos_exclude_values()
        if excluded_from_oos.empty:
            return
        products_excluded = excluded_from_oos.product_fk.unique()
        lvl3_result.loc[((lvl3_result['in_store'] == 0) &
                         (lvl3_result['product_fk'].isin(products_excluded))),
                        'in_store'] = 1

    def assortment_calc(self):
        """
            This function calculates the KPI results.
            """

        dict_list = []
        diageo_fk = self.own_manuf_fk
        store_fk = self.store_info[Consts.STORE_FK].iloc[0]
        lvl3_result = self.assortment.calculate_lvl3_assortment()

        if lvl3_result.empty:
            return dict_list

        # remove live session kpis from regular kpi calculation
        live_session_kpis = list(self.new_kpi_static_data[
            self.new_kpi_static_data['live_session_relevance'] == 1]['pk'])

        self.get_oos_action_excluded(lvl3_result)
        lvl3_result = lvl3_result[~lvl3_result.kpi_fk_lvl2.
                                  isin(live_session_kpis)]
        if lvl3_result.empty:
            return dict_list

        dist_kpi_fk = \
        self.new_kpi_static_data[(self.new_kpi_static_data['client_name'] == Consts.DISTRIBUTION_KPI) |
                                 (self.new_kpi_static_data['type'] == Consts.DISTRIBUTION_KPI)][
            Consts.PK].values[0]
        kpis = self.new_kpi_static_data[self.new_kpi_static_data['type'].isin(
            Consts.LMPA_KPIS)][Consts.PK]
        if kpis.empty:
            lvl3_result = pd.merge(lvl3_result,
                                   self.all_products_include_deleted[[
                                       Consts.PRODUCT_FK, 'category_fk'
                                   ]],
                                   on=[Consts.PRODUCT_FK])
        else:
            lvl3_result = pd.merge(
                lvl3_result[~(lvl3_result['kpi_fk_lvl2'].isin(kpis.values))],
                self.all_products_include_deleted[[
                    Consts.PRODUCT_FK, 'category_fk'
                ]],
                on=[Consts.PRODUCT_FK])

        if not lvl3_result.empty:
            for result in lvl3_result.itertuples():
                score = 2 if result.in_store == 1 else 1
                kpi_name = \
                    self.new_kpi_static_data[self.new_kpi_static_data[Consts.PK] == result.kpi_fk_lvl3][
                        'type'].values[
                        0] + Consts.CATEGORY_LEVEL_KPI
                kpi_fk_by_manu = \
                    self.new_kpi_static_data[self.new_kpi_static_data['type'] == kpi_name][Consts.PK].values[0]

                identifier_level_2 = self.commonV2.get_dictionary(
                    category_fk=result.category_fk, kpi_fk=result.kpi_fk_lvl2)

                # by category
                dict_list.append(
                    self.build_dictionary_for_db_insert_v2(
                        fk=kpi_fk_by_manu,
                        numerator_id=result.product_fk,
                        numerator_result=result.facings,
                        result=score,
                        score=score,
                        denominator_id=result.category_fk,
                        denominator_result=1,
                        identifier_parent=identifier_level_2,
                        should_enter=True))

            dist_results = lvl3_result[lvl3_result['kpi_fk_lvl3'] ==
                                       dist_kpi_fk]
            for res in dist_results.itertuples():
                kpi_fk = \
                    self.new_kpi_static_data[
                        self.new_kpi_static_data['type'] == Consts.OOS_SKU_KPI + Consts.CATEGORY_LEVEL_KPI][
                        Consts.PK].values[0]
                parent_kpi_fk = \
                    self.new_kpi_static_data[
                        self.new_kpi_static_data['type'] == Consts.OOS_KPI + Consts.CATEGORY_LEVEL_KPI][
                        Consts.PK].values[0]
                num_res = res.facings
                if not res.in_store:
                    is_oos = 1
                    identifier_level_2 = self.commonV2.get_dictionary(
                        category_fk=res.category_fk, kpi_fk=parent_kpi_fk)
                    dict_list.append(
                        self.build_dictionary_for_db_insert_v2(
                            fk=kpi_fk,
                            numerator_id=res.product_fk,
                            numerator_result=num_res,
                            result=is_oos,
                            score=is_oos,
                            denominator_id=res.category_fk,
                            denominator_result=1,
                            identifier_parent=identifier_level_2))

            lvl2_result = self.assortment.calculate_lvl2_assortment_v2(
                lvl3_result)
            for result in lvl2_result.itertuples():
                kpi_name = self.new_kpi_static_data[
                    self.new_kpi_static_data[Consts.PK] == result.
                    kpi_fk_lvl2]['type'].values[0] + Consts.CATEGORY_LEVEL_KPI
                identifier_level_2 = self.commonV2.get_dictionary(
                    category_fk=result.category_fk, kpi_fk=result.kpi_fk_lvl2)
                identifier_level_1 = self.commonV2.get_dictionary(
                    kpi_fk=result.kpi_fk_lvl2)
                kpi_fk_by_cat = \
                    self.new_kpi_static_data[self.new_kpi_static_data['type'] == kpi_name][Consts.PK].values[0]
                denominator_res = result.total
                if not pd.isnull(result.target) and not pd.isnull(
                        result.group_target_date
                ) and result.group_target_date.date() <= self.visit_date:
                    denominator_res = result.target
                res = np.divide(float(result.passes),
                                float(denominator_res)) * 100
                if res >= 100:
                    score = 100
                else:
                    score = 0
                res = 100 if res > 100 else res

                # Manufacturer level KPI
                dict_list.append(
                    self.build_dictionary_for_db_insert_v2(
                        fk=kpi_fk_by_cat,
                        numerator_id=result.category_fk,
                        numerator_result=result.passes,
                        result=res,
                        score=score,
                        denominator_id=diageo_fk,
                        denominator_result=denominator_res,
                        identifier_result=identifier_level_2,
                        identifier_parent=identifier_level_1,
                        should_enter=True))

            dist_fk = self.new_kpi_static_data[self.new_kpi_static_data['type']
                                               == Consts.DISTRIBUTION_LVL2][
                                                   Consts.PK].values[0]
            dist_results = lvl2_result[lvl2_result['kpi_fk_lvl2'] == dist_fk]
            for res in dist_results.itertuples():
                kpi_fk = \
                    self.new_kpi_static_data[
                        self.new_kpi_static_data['type'] == Consts.OOS_KPI + Consts.CATEGORY_LEVEL_KPI][
                        Consts.PK].values[0]
                result = np.divide(float(res.total - res.passes),
                                   float(res.total)) * 100
                identifier_level_1 = self.commonV2.get_dictionary(
                    kpi_fk=kpi_fk)
                if result >= 100:
                    score = 0
                else:
                    score = 100
                identifier_level_2 = self.commonV2.get_dictionary(
                    category_fk=res.category_fk, kpi_fk=kpi_fk)
                num_res = res.total - res.passes
                dict_list.append(
                    self.build_dictionary_for_db_insert_v2(
                        fk=kpi_fk,
                        numerator_id=res.category_fk,
                        numerator_result=num_res,
                        result=result,
                        score=score,
                        denominator_id=diageo_fk,
                        denominator_result=res.total,
                        identifier_result=identifier_level_2,
                        identifier_parent=identifier_level_1,
                        should_enter=True))

            if not lvl2_result.empty:
                lvl1_result = self.assortment.calculate_lvl1_assortment_v2(
                    lvl2_result)
                for result in lvl1_result.itertuples():
                    kpi_level_1_name = \
                        self.new_kpi_static_data[self.new_kpi_static_data[Consts.PK] == result.kpi_fk_lvl2][
                            'type'].values[
                            0] + Consts.MANUFACTURER_LEVEL_KPI
                    kpi_level_1 = \
                        self.new_kpi_static_data[self.new_kpi_static_data['type'] == kpi_level_1_name][
                            Consts.PK].values[0]
                    identifier_level_1 = self.commonV2.get_dictionary(
                        kpi_fk=result.kpi_fk_lvl2)
                    denominator_res = result.total
                    res = np.divide(float(result.passes),
                                    float(denominator_res)) * 100
                    if res >= 100:
                        score = 100
                    else:
                        score = 0
                    dict_list.append(
                        self.build_dictionary_for_db_insert_v2(
                            fk=kpi_level_1,
                            numerator_id=diageo_fk,
                            numerator_result=result.passes,
                            result=res,
                            score=score,
                            denominator_id=store_fk,
                            denominator_result=denominator_res,
                            identifier_result=identifier_level_1,
                            should_enter=False))
                dist_fk = self.new_kpi_static_data[
                    self.new_kpi_static_data['type'] ==
                    Consts.DISTRIBUTION_LVL2][Consts.PK].values[0]
                dist_results = lvl1_result[lvl1_result['kpi_fk_lvl2'] ==
                                           dist_fk]
                for res in dist_results.itertuples():
                    kpi_fk = self.new_kpi_static_data[
                        self.new_kpi_static_data['type'] == Consts.OOS_KPI +
                        Consts.MANUFACTURER_LEVEL_KPI][Consts.PK].values[0]
                    result = np.divide(float(res.total - res.passes),
                                       float(res.total)) * 100
                    if result >= 100:
                        score = 100
                    else:
                        score = 0
                    kpi_ident = self.new_kpi_static_data[
                        self.new_kpi_static_data['type'] == Consts.OOS_KPI +
                        Consts.CATEGORY_LEVEL_KPI][Consts.PK].values[0]
                    identifier_level_1 = self.commonV2.get_dictionary(
                        kpi_fk=kpi_ident)
                    dict_list.append(
                        self.build_dictionary_for_db_insert_v2(
                            fk=kpi_fk,
                            numerator_id=diageo_fk,
                            numerator_result=res.passes,
                            result=result,
                            score=score,
                            denominator_id=store_fk,
                            denominator_result=res.total,
                            identifier_result=identifier_level_1))
        return dict_list

    def build_dictionary_for_db_insert_v2(
            self,
            fk=None,
            kpi_name=None,
            numerator_id=0,
            numerator_result=0,
            result=0,
            denominator_id=0,
            denominator_result=0,
            score=0,
            denominator_result_after_actions=None,
            context_id=None,
            target=None,
            identifier_parent=None,
            identifier_result=None,
            should_enter=None):
        """Build the keyword dict for a new-tables KPI result insert.

        Either ``fk`` or ``kpi_name`` must be supplied; when only
        ``kpi_name`` is given, the fk is resolved through the common KPI
        static data. Optional fields are added only when provided.

        Returns the dict, or None when neither ``fk`` nor ``kpi_name`` is
        given, or when the fk lookup by name fails (IndexError).
        """
        try:
            insert_params = dict()
            # Explicit None check so a legitimate fk of 0 is honoured
            # (plain truthiness previously misrouted fk=0 to name lookup).
            if fk is None:
                if not kpi_name:
                    # Nothing to identify the KPI by -- caller gets None.
                    return
                insert_params[
                    'fk'] = self.common.get_kpi_fk_by_kpi_name_new_tables(
                        kpi_name)
            else:
                insert_params['fk'] = fk
            insert_params['numerator_id'] = numerator_id
            insert_params['numerator_result'] = numerator_result
            insert_params['denominator_id'] = denominator_id
            insert_params['denominator_result'] = denominator_result
            insert_params['result'] = result
            insert_params['score'] = score
            # 'is not None' throughout: falsy-but-meaningful values
            # (0, False, empty dict) were previously dropped silently.
            if target is not None:
                insert_params['target'] = target
            if denominator_result_after_actions is not None:
                insert_params[
                    'denominator_result_after_actions'] = denominator_result_after_actions
            if context_id is not None:
                insert_params['context_id'] = context_id
            if identifier_parent is not None:
                insert_params['identifier_parent'] = identifier_parent
                # Results with a parent enter the hierarchy by default...
                insert_params['should_enter'] = True
            if identifier_result is not None:
                insert_params['identifier_result'] = identifier_result
            # ...but an explicit should_enter (including False) wins.
            if should_enter is not None:
                insert_params['should_enter'] = should_enter
            return insert_params
        except IndexError:
            Log.error('error in build_dictionary_for_db_insert_v2')
            return None
Ejemplo n.º 24
0
 def run_project_calculations(self):
     """Entry point: run the COOLERSCREENS USK KPI generator, timing the run."""
     self.timer.start()
     generator = COOLERSCREENSUSKGenerator(
         self.data_provider, self.output, Common(self.data_provider))
     generator.main_function()
     self.timer.stop('KPIGenerator.run_project_calculations')
Ejemplo n.º 25
0
class CUBAUCUBAUToolBox:
    """Facings share-of-shelf (SOS) KPI toolbox for the CUB AU project.

    SOS is calculated per policy ('TAP', 'CUB' or 'TRAX') with product-type
    and scene-template exclusions that depend on whether the store is
    On Premise or Off Premise.
    """
    # Legacy static-KPI hierarchy levels.
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3
    # Own-manufacturer identity (name in product data / its pk).
    OwnMan = 'CUB'
    OwnManFK = 1
    SOS = ['TAP', 'CUB']
    # Store types that drive SOS policy selection.
    ON_PREMISE = 'On Premise'
    OFF_PREMISE = 'Off Premise'
    # Pallet scene templates with special facing-count rules (see
    # calculate_sos_by_policy and update_custom_scif).
    TOP_PALLET = 'Cool Room Stacks Top'
    FRONT_PALLET = 'Cool Room Stacks Front'
    PRODUCT_TYPES_TO_EXCLUDE = ['POS', 'Irrelevant']

    def __init__(self, data_provider, output):
        # Session/store context and data frames pulled from the provider.
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.tools = CUBAUCUBAUGENERALToolBox(self.data_provider,
                                              self.output,
                                              rds_conn=self.rds_conn)
        self.common = Common(self.data_provider)
        self.common_sos = SOS(self.data_provider, self.output)
        self.kpi_results_queries = []
        self.k_engine = BaseCalculationsGroup(data_provider, output)
        self.store_type = data_provider.store_type
        # Scene matches enriched with per-product image direction (DB query).
        self.matches_with_direction = self.get_match_product_in_scene_with_direction(
        )

    def main_calculation(self):
        """
        This function calculates the KPI results.

        Runs the own-manufacturer-out-of-store KPI plus three
        *-out-of-category SOS KPI families for every SOS policy that
        applies to the store type.
        """
        # NOTE(review): if store_type is neither ON_PREMISE nor OFF_PREMISE,
        # sos_types is never bound and the loop below raises NameError --
        # confirm store types are restricted upstream.
        if self.store_type == self.ON_PREMISE:
            sos_types = ['TAP', 'TRAX']
        elif self.store_type == self.OFF_PREMISE:
            sos_types = ['CUB', 'TRAX']
        self.update_custom_scif()

        for sos in sos_types:
            exclude_scene = self.get_exclude_scene(sos)
            product_type = self.get_product_filter(sos)
            self.own_manufacturer_out_store(sos, product_type, exclude_scene)
            # Manufacturer out of category: store-wide, then per template.
            self.all_param_out_category('manufacturer_fk',
                                        'category_fk',
                                        sos,
                                        product_type,
                                        exclude_scene,
                                        context=None)
            self.all_param_out_category('manufacturer_fk',
                                        'category_fk',
                                        sos,
                                        product_type,
                                        exclude_scene,
                                        context='template_fk')
            # Brand out of category, per scene template.
            self.all_param_out_category('brand_fk',
                                        'category_fk',
                                        sos,
                                        product_type,
                                        exclude_scene,
                                        context='template_fk')
        return

    def get_product_filter(self, sos):
        # TAP share counts only POS items; other policies pass a
        # (values, 0) tuple -- presumably an exclusion flag understood by
        # SOS.get_filter_condition; verify against that helper.
        if sos == 'TAP':
            product_type = 'POS'
        else:
            product_type = (['POS', 'Irrelevant'], 0)
        return product_type

    def get_exclude_scene(self, sos):
        # On Premise stores exclude no scenes; Off Premise excludes the
        # pallet template that the given policy must not count.
        if self.store_type == self.ON_PREMISE:
            exclude_scene = None
        else:
            if sos == 'CUB':
                exclude_scene = ['Cool Room Stacks Front']
            else:
                exclude_scene = ['Cool Room Stacks Top']
        return exclude_scene

    def own_manufacturer_out_store(self,
                                   sos_type,
                                   product_type,
                                   exclude_scene=None):
        # Resolve the pk of '<SOS>_SHARE_MANUFACTURER_OUT_OF_STORE'.
        kpi_type = '{}_SHARE_MANUFACTURER_OUT_OF_STORE'.format(sos_type)
        kpi_fk = self.common.New_kpi_static_data[
            self.common.New_kpi_static_data['type'] ==
            kpi_type]['pk'].values[0]
        # Numerator: own manufacturer; denominator: the whole store minus
        # the excluded scenes / product types.
        subset_filters = {'manufacturer_name': self.OwnMan}
        pop_filters = {
            'template_display_name': (exclude_scene, 0),
            'product_type': product_type
        }
        self.calculate_sos(sos_type, kpi_fk, self.OwnManFK, self.store_id,
                           subset_filters, pop_filters)
        return

    def all_param_out_category(self,
                               param_1,
                               param_2,
                               sos_type,
                               product_type,
                               exclude_scene=None,
                               context=None):
        # Compute '<param_1> share out of <param_2>' SOS. Without a context
        # column this is own-manufacturer only; with one (e.g. template_fk)
        # every (param_1, param_2, context) combination gets its own result.
        if not context:  # only for own manufacturer out of category
            df = self.scif.loc[:, [param_1, param_2]]
            relevant_df = df.drop_duplicates(subset=param_2)
            kpi_type = '{}_SHARE_OWN_{}_OUT_OF_CATEGORY'.format(
                sos_type, (param_1.replace('_fk', '')).upper())
        else:
            df = self.scif.loc[:, [param_1, param_2, context]]
            kpi_type = '{}_SHARE_{}_OUT_OF_CATEGORY'.format(
                sos_type, (param_1.replace('_fk', '')).upper())
            relevant_df = df.drop_duplicates()
        kpi_fk = self.common.New_kpi_static_data[
            self.common.New_kpi_static_data['type'] ==
            kpi_type]['pk'].values[0]
        for i, row in relevant_df.iterrows():
            if not context:
                pop_filters = {
                    'template_display_name': (exclude_scene, 0),
                    'product_type': product_type,
                    param_2: row[param_2]
                }
                subset_filters = {param_1: self.OwnManFK}
            else:
                pop_filters = {
                    'template_display_name': (exclude_scene, 0),
                    'product_type': product_type,
                    param_2: row[param_2],
                    context: row[context]
                }
                subset_filters = {param_1: row[param_1]}
            if context:
                self.calculate_sos(sos_type, kpi_fk, row[param_1],
                                   row[param_2], subset_filters, pop_filters,
                                   row[context])
            else:
                self.calculate_sos(sos_type, kpi_fk, self.OwnManFK,
                                   row[param_2], subset_filters, pop_filters)
        return

    def calculate_sos(self,
                      sos_type,
                      kpi_fk,
                      numerator_fk,
                      denominator_fk,
                      subset_filters,
                      pop_filters,
                      context=None):
        # Build numerator/denominator boolean filters over scene matches
        # joined with scif, then delegate to the per-policy calculation.
        # NOTE(review): calculate_sos_by_policy always returns None, so on
        # success the initial ratio of 0 is overwritten with None; callers
        # appear to ignore this return value.
        ratio = 0
        df = self.match_product_in_scene.merge(self.scif,
                                               how='left',
                                               on=['scene_fk', 'product_fk'])
        # Drop scif's aggregate facings column; counting is done per match row.
        df = df.drop(['facings'], axis=1)
        #  denominator
        pop_filter = self.common_sos.get_filter_condition(df, **pop_filters)
        #  numerator
        subset_filter = self.common_sos.get_filter_condition(
            df, **subset_filters)
        try:
            ratio = self.calculate_sos_by_policy(sos_type,
                                                 kpi_fk,
                                                 numerator_fk,
                                                 denominator_fk,
                                                 subset_filter=subset_filter,
                                                 pop_filter=pop_filter,
                                                 context=context)
        except Exception as e:
            # NOTE(review): e.message is Python 2 only; under Python 3 this
            # handler would itself raise AttributeError.
            Log.debug(
                'calculate_sos_facing_by_scene can not be calculated for scene {}'
                .format(e.message))
        return ratio

    def calculate_sos_by_policy(self,
                                sos_type,
                                kpi_fk,
                                numerator_fk,
                                denominator_fk,
                                subset_filter,
                                pop_filter,
                                context=None):
        """Count facings for numerator/denominator and write the SOS ratio.

        For the 'CUB' policy, top-pallet scenes count only facings whose
        image direction is 'Top', while all other scenes count only the
        first stacking layer; other policies count every match row.
        """
        matches = self.matches_with_direction.merge(
            self.scif, how='left', on=['scene_fk', 'product_fk'])
        den_df = matches[pop_filter]
        nom_df = den_df[subset_filter]
        if sos_type == 'CUB':
            # IN Top pallet scene type we should include only top facings:
            denominator = den_df.loc[(den_df['template_display_name'] == self.TOP_PALLET) &
                                     (den_df['image_direction'] == 'Top')]['product_fk'].count() + \
                          den_df.loc[~(den_df['template_display_name'] == self.TOP_PALLET) & (den_df['stacking_layer'] == 1)]['product_fk'].count()
            numerator = nom_df.loc[(nom_df['template_display_name'] == self.TOP_PALLET) &
                                   (nom_df['image_direction'] == 'Top')]['product_fk'].count() + \
                          nom_df.loc[~(nom_df['template_display_name'] == self.TOP_PALLET) & (nom_df['stacking_layer'] == 1)]['product_fk'].count()
        else:
            denominator = den_df['product_fk'].count()
            numerator = nom_df['product_fk'].count()
        if denominator != 0:
            # Only write a result when the population is non-empty.
            ratio = (numerator / float(denominator))
            self.common.write_to_db_result_new_tables(kpi_fk,
                                                      numerator_fk,
                                                      numerator,
                                                      round(ratio, 2),
                                                      denominator_fk,
                                                      denominator,
                                                      round(ratio, 2),
                                                      context_id=context)
        return

    def update_custom_scif(self):  # only for 'CUB Share' and 'TRAX Share'
        # Persist per-product facing counts into the custom scif table:
        # in_assortment_OSA carries the CUB-share count, oos_osa the
        # TRAX-share count -- NOTE(review): these columns look repurposed
        # from their assortment/OOS names; confirm downstream consumers.
        scenes = self.scif['scene_id'].unique().tolist()
        for scene in scenes:
            template_name = self.scif.loc[self.scif['scene_fk'] == scene][
                'template_display_name'].values[0]
            products = self.scif[(self.scif['scene_id'] == scene) & (
                ~self.scif['product_type'].isin(self.PRODUCT_TYPES_TO_EXCLUDE)
            )]['product_fk'].unique().tolist()
            for product in products:
                facings_cub_share = None
                facings_trax_share = None
                if template_name == self.FRONT_PALLET:
                    # Front pallets count toward TRAX share only.
                    facings_trax_share = self.scif[
                        (self.scif['scene_id'] == scene)
                        & (self.scif['product_fk'] == product
                           )]['facings'].values[0]
                elif template_name == self.TOP_PALLET:
                    if self.store_type == self.OFF_PREMISE:
                        # Top pallets: CUB share counts 'Top' direction only.
                        facings_cub_share = self.matches_with_direction[
                            (self.matches_with_direction['scene_fk'] == scene)
                            & (self.matches_with_direction['product_fk'] ==
                               product) &
                            (self.matches_with_direction['image_direction']
                             == 'Top')]['product_fk'].count()
                else:
                    if self.store_type == self.OFF_PREMISE:
                        # Regular scenes: CUB share ignores stacked facings.
                        facings_cub_share = self.scif[
                            (self.scif['scene_id'] == scene)
                            & (self.scif['product_fk'] == product
                               )]['facings_ign_stack'].values[0]
                    facings_trax_share = self.scif[
                        (self.scif['scene_id'] == scene)
                        & (self.scif['product_fk'] == product
                           )]['facings'].values[0]
                self.common.get_custom_scif_query(
                    scene_fk=scene,
                    product_fk=product,
                    in_assortment_OSA=facings_cub_share,
                    oos_osa=facings_trax_share,
                    mha_in_assortment=None,
                    mha_oos=None,
                    length_mm_custom=None)
        self.common.commit_custom_scif()
        return

    def get_match_product_in_scene_with_direction(self):
        # Join scene matches with the per-match image direction fetched by
        # a dedicated DB query (own connection, closed by GC).
        query = CUBAUCUBAUQueries.get_facings_by_direction(self.session_uid)
        local_con = PSProjectConnector(self.project_name,
                                       DbUsers.CalculationEng)
        products_per_direction = pd.read_sql_query(query, local_con.db)
        matches_with_direction = self.match_product_in_scene.merge(
            products_per_direction,
            left_on='scene_match_fk',
            right_on='match_product_fk')
        return matches_with_direction
Ejemplo n.º 26
0
class Summary:
    """Summary KPI calculations for CUB AU: adjacency, competitor adjacency
    and blocking KPIs, driven by the 'summary_kpis.xlsx' template."""

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.adjacency = Adjancency(self.data_provider)
        self.block = Block(self.data_provider)
        self.template_name = 'summary_kpis.xlsx'
        # Template ships in the project's Data directory, one level up.
        self.TEMPLATE_PATH = os.path.join(
            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
            'Data', self.template_name)
        self.template_data = parse_template(self.TEMPLATE_PATH, "KPIs")
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.tools = GENERALToolBox(self.data_provider)
        self.common = Common(self.data_provider)
        self.kpi_results_queries = []
        self.cub_tools = CUBAUCUBAUGENERALToolBox(self.data_provider,
                                                  self.output)
        self.store_type = self.data_provider.store_type
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.session_uid = self.data_provider.session_uid
        self.visit_date = self.data_provider.visit_date

    def main_calculation(self):
        """
        This function calculates the KPI results.
        """
        self.calc_adjacency()
        self.calc_block()
        self.calc_competitor_adjacency()
        return

    def calc_adjacency(self):
        """Score ADJACENCY template rows: 100 (with the adjacent products as
        the result) when any adjacency is found, otherwise 0."""
        kpi_data = self.template_data.loc[self.template_data['KPI Type'] ==
                                          ADJACENCY]
        for index, row in kpi_data.iterrows():
            adjacent_products = self.calculate_relative_adjacency(row)
            if adjacent_products:
                self.write_to_db(row, 100, adjacent_products)
            else:
                self.write_to_db(row, 0, adjacent_products)
        return

    def calc_competitor_adjacency(self):
        """Score COMPETITOR_ADJACENCY rows, optionally gated on the store's
        additional_attribute_1; the DB result is a 1/0 flag rather than the
        adjacent-product list."""
        kpi_data = self.template_data.loc[self.template_data['KPI Type'] ==
                                          COMPETITOR_ADJACENCY]
        for index, row in kpi_data.iterrows():
            kpi_filters = {
                'template_display_name': row['display name'].split(",")
            }
            if row['Store additional attribute 1']:
                # Skip rows that target other store attributes.
                allowed = row['Store additional attribute 1'].split(",")
                if self.store_info['additional_attribute_1'].values[
                        0] not in allowed:
                    continue
            competitor_products = self.calculate_relative_adjacency(
                row, **kpi_filters)
            if competitor_products:
                self.write_to_db(row, 100, 1)
            else:
                self.write_to_db(row, 0, 0)
        return

    def calc_block(self):
        """Score BLOCK rows via the block-together calculation; writes an
        explanatory result (and a null score) when no relevant scenes or
        products exist."""
        kpi_data = self.template_data.loc[self.template_data['KPI Type'] ==
                                          BLOCK]
        for index, row in kpi_data.iterrows():
            result = None
            kpi_filters = {}
            if row['store type']:
                if not row['store type'] == self.store_type:
                    continue
            entity_type = row['Block Entity Type']
            entity_value = row['Block Entity Value'].split(",")
            kpi_filters[entity_type] = entity_value
            kpi_filters['template_display_name'] = row['display name'].strip(
            ).split(",")
            threshold = row['Target']
            if threshold:
                threshold = float(threshold)
            # TODO: switch to self.block.calculate_block_together once its
            # bug is fixed (open bug to Ilan).
            block_result = self.cub_tools.calculate_block_together(
                minimum_block_ratio=threshold, **kpi_filters)
            filters, relevant_scenes = self.cub_tools.separate_location_filters_from_product_filters(
                **kpi_filters)
            score = 100 if block_result else 0
            if len(relevant_scenes) == 0:
                result = 'No relevant products or scenes'
                score = None
            self.write_to_db(row, score, result)
        return

    def calculate_relative_adjacency(self, kpi_data, **general_filters):
        """Return the adjacent entity values for a template row as a
        comma-separated string, or None when there is no adjacency (empty
        row, empty scif, or no neighbours found).

        Bug fix: the original returned ``str(total_result)``, so a missing
        result came back as the truthy string ``'None'`` and the adjacency
        KPIs always scored 100 in calc_adjacency / calc_competitor_adjacency.
        """
        adjacencies = []
        if kpi_data.empty:
            return None
        # Look one position away in every direction.
        direction_data = {
            'top': (1, 1),
            'bottom': (1, 1),
            'left': (1, 1),
            'right': (1, 1)
        }
        entity_type = kpi_data['Block Entity Type']
        entity_value = kpi_data['Block Entity Value'].split(",")
        tested_filters = {entity_type: entity_value}
        entity_to_return = kpi_data['Anchor Entity to return']
        if kpi_data['Anchor Entity Type'] != '':
            anchor_filters = {
                kpi_data['Anchor Entity Type']: kpi_data['Anchor Entity Value']
            }
        else:
            anchor_filters = None
        if not self.scif.empty:
            adjacencies = self.cub_tools.calculate_adjacency_relativeness(
                tested_filters, direction_data, entity_to_return,
                anchor_filters, **general_filters)
        if not adjacencies:
            return None
        total_result = 0
        for neighbor in list(set(adjacencies)):
            if str(neighbor) == 'nan':
                continue
            # Escape single quotes for the SQL insert downstream.
            escaped = neighbor.replace("'", "\\'").encode('utf-8')
            if not total_result:
                total_result = escaped
            else:
                total_result = total_result + ', ' + escaped
        return str(total_result)

    def write_to_db(self, kpi_data, score, result=None):
        """Write a summary KPI score to the old tables at levels 2 and 3."""
        kpi_name = kpi_data['KPI']
        kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name, 2)
        atomic_kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name, 3)
        self.common.write_to_db_result(kpi_fk, 2, score)
        kwargs = self.common.create_basic_dict(atomic_kpi_fk)
        kwargs['display_text'] = kpi_name
        kwargs['result'] = result
        self.common.write_to_db_result(fk=kpi_fk,
                                       level=3,
                                       score=score,
                                       **kwargs)
Ejemplo n.º 27
0
class DIAGEOTWToolBox:
    """Session-level KPI calculations for the DIAGEO TW project.

    Runs both the new-framework KPIs (SOS out-of-the-box hierarchy, global
    assortment v2/v3, share of menu — written through ``CommonV2``) and the
    legacy template-driven KPI sets (SOS / survey / block-together / relative
    position / POSM / assortment — written through the old static-KPI tables).
    """

    SOS_SETS = 'SOS_SETS'
    # Legacy KPI hierarchy levels: 1 = kpi_set, 2 = kpi, 3 = atomic_kpi.
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    ACTIVATION_STANDARD = 'Activation Standard'

    def __init__(self, data_provider, output):
        self.k_engine = BaseCalculationsScript(data_provider, output)
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_channel = self.store_info['store_type'].values[0]
        if self.store_channel:
            self.store_channel = self.store_channel.upper()
        # additional_attribute_1 carries the store type the templates key on.
        self.store_type = self.store_info['additional_attribute_1'].values[0]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.match_display_in_scene = self.get_match_display()
        # Cache of downloaded set templates, keyed by set name.
        # NOTE(review): never populated here — the download call in
        # main_calculation is commented out; see calculate_sos_sets.
        self.set_templates_data = {}
        self.kpi_static_data = self.get_kpi_static_data()
        # Per-level score cache, consumed by calculate_activation_standard.
        self.scores = {self.LEVEL1: {},
                       self.LEVEL2: {},
                       self.LEVEL3: {}}
        self.kpi_results_queries = []
        self.output = output
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.tools = DIAGEOToolBox(self.data_provider, output,
                                   match_display_in_scene=self.match_display_in_scene)  # replace the old one
        self.diageo_generator = DIAGEOGenerator(self.data_provider, self.output, self.common)

    def get_kpi_static_data(self):
        """
        This function extracts the static KPI data and saves it into one global data frame.
        The data is taken from static.kpi / static.atomic_kpi / static.kpi_set.
        """
        query = DIAGEOQueries.get_all_kpi_data()
        kpi_static_data = pd.read_sql_query(query, self.rds_conn.db)
        return kpi_static_data

    def get_match_display(self):
        """
        This function extracts the display matches data and saves it into one global data frame.
        The data is taken from probedata.match_display_in_scene.
        """
        query = DIAGEOQueries.get_match_display(self.session_uid)
        match_display = pd.read_sql_query(query, self.rds_conn.db)
        return match_display

    def main_calculation(self, set_names):
        """
        This function calculates the KPI results.

        Runs the new-framework KPIs unconditionally, then iterates over
        ``set_names`` and calculates each legacy set that is still enabled.
        """

        # SOS Out Of The Box kpis
        self.activate_ootb_kpis()

        # sos by scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment kpis
        assortment_res_dict = self.diageo_generator.diageo_global_assortment_function_v2()
        self.commonV2.save_json_to_new_tables(assortment_res_dict)

        # Global assortment kpis - v3 for NEW MOBILE REPORTS use
        assortment_res_dict_v3 = self.diageo_generator.diageo_global_assortment_function_v3()
        self.commonV2.save_json_to_new_tables(assortment_res_dict_v3)

        # Global menu calculation
        menus_res_dict = self.diageo_generator.diageo_global_new_share_of_menu_function()
        self.commonV2.save_json_to_new_tables(menus_res_dict)

        for set_name in set_names:
            # if set_name not in self.tools.KPI_SETS_WITHOUT_A_TEMPLATE and set_name not in self.set_templates_data.keys():
            #     self.set_templates_data[set_name] = self.tools.download_template(set_name)

            # if set_name in ('Local MPA', 'MPA', 'New Products',):
            #     set_score = self.calculate_assortment_sets(set_name)
            # if set_name in ('Relative Position',):
            #     set_score = self.calculate_relative_position_sets(set_name)
            # elif set_name in ('Brand Blocking',):
            #     set_score = self.calculate_block_together_sets(set_name)
            if set_name in ('SOS',):
                set_score = self.calculate_sos_sets(set_name)
                if set_score:
                    fk = self.common.get_kpi_fk_by_kpi_name_new_tables(self.SOS_SETS)
                    self.commonV2.write_to_db_result(fk=fk, numerator_id=1, denominator_id=self.store_id,
                                                     result=set_score)
            # elif set_name == 'Visible to Customer':
            #     visible_filters = {self.tools.VISIBILITY_PRODUCTS_FIELD: 'Y'}
            #     filters = {"manufacturer_name": "Diageo"}
            #     set_score = self.tools.calculate_visible_percentage(visible_filters=visible_filters, **filters)
            #     self.save_level2_and_level3(set_name, set_name, set_score)
            # elif set_name == 'Secondary':
            #     set_score = self.tools.calculate_number_of_scenes(location_type='Secondary')
            #     self.save_level2_and_level3(set_name, set_name, set_score)
            # elif set_name == 'Survey Questions':
            #     set_score = self.calculate_survey_sets(set_name)
            # elif set_name in ('POSM',):
            #     set_score = self.calculate_posm_sets(set_name)
            else:
                continue

            # False means the set could not be calculated (e.g. no relevant
            # template rows) and must be skipped; a genuine 0 score is still
            # written. The original checked `set_score == 0` first, but
            # False == 0 in Python, so the False case was never skipped.
            if set_score is False:
                continue

            set_fk = self.kpi_static_data[self.kpi_static_data['kpi_set_name'] == set_name]['kpi_set_fk'].values[0]
            self.write_to_db_result(set_fk, set_score, self.LEVEL1)
        # commiting to new tables
        self.commonV2.commit_results_data()

    def save_level2_and_level3(self, set_name, kpi_name, score):
        """
        Given KPI data and a score, this functions writes the score for both KPI level 2 and 3 in the DB.
        """
        kpi_data = self.kpi_static_data[(self.kpi_static_data['kpi_set_name'] == set_name) &
                                        (self.kpi_static_data['kpi_name'] == kpi_name)]
        kpi_fk = kpi_data['kpi_fk'].values[0]
        atomic_kpi_fk = kpi_data['atomic_kpi_fk'].values[0]
        self.write_to_db_result(kpi_fk, score, self.LEVEL2)
        self.write_to_db_result(atomic_kpi_fk, score, self.LEVEL3)

    def calculate_activation_standard(self):
        """
        This function calculates the Activation Standard KPI, and saves the result to the DB (for all 3 levels).
        """
        final_score = 0
        for params in self.tools.download_template(self.ACTIVATION_STANDARD):
            set_name = params.get(self.tools.ACTIVATION_SET_NAME)
            kpi_name = params.get(self.tools.ACTIVATION_KPI_NAME)
            target = float(params.get(self.tools.ACTIVATION_TARGET))
            # Targets may be given as a ratio (e.g. 0.8) - normalize to percent.
            target = target * 100 if target < 1 else target
            score_type = params.get(self.tools.ACTIVATION_SCORE)
            weight = float(params.get(self.tools.ACTIVATION_WEIGHT))
            if kpi_name:
                # KPI-level row: look up the previously calculated level-2 score.
                kpi_fk = self.kpi_static_data[(self.kpi_static_data['kpi_set_name'] == set_name) &
                                              (self.kpi_static_data['kpi_name'] == kpi_name)]['kpi_fk'].values[0]
                score = self.scores[self.LEVEL2].get(kpi_fk, 0)
            else:
                # Set-level row: look up the previously calculated level-1 score.
                set_fk = self.kpi_static_data[self.kpi_static_data['kpi_set_name'] == set_name]['kpi_set_fk'].values[0]
                score = self.scores[self.LEVEL1].get(set_fk, 0)
            if score >= target:
                score = 100
            else:
                if score_type == 'PROPORTIONAL':
                    score = (score / float(target)) * 100
                else:
                    score = 0
            final_score += score * weight
            self.save_level2_and_level3(self.ACTIVATION_STANDARD, set_name, score)
        set_fk = self.kpi_static_data[self.kpi_static_data['kpi_set_name'] ==
                                      self.ACTIVATION_STANDARD]['kpi_set_fk'].values[0]
        self.write_to_db_result(set_fk, final_score, self.LEVEL1)

    def calculate_sos_sets(self, set_name):
        """
        This function calculates every SOS-typed KPI from the relevant sets, and returns the set final score.

        Returns False when there are no relevant template rows for this store.
        """
        scores = []
        # Template download is currently disabled in main_calculation, so the
        # key may be missing; degrade to an empty template (-> returns False)
        # instead of raising KeyError.
        for params in self.set_templates_data.get(set_name, []):
            if params.get(self.store_type) == self.tools.RELEVANT_FOR_STORE:
                result = self.tools.calculate_share_of_shelf(manufacturer=self.tools.DIAGEO,
                                                             include_empty=self.tools.EXCLUDE_EMPTY)
                score = 1 if result >= params.get(self.tools.TARGET) else 0
                scores.append(score)

                self.save_level2_and_level3(set_name, params.get(self.tools.KPI_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def calculate_survey_sets(self, set_name):
        """
        This function calculates every Survey-Answer-typed KPI and returns the set final score.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            survey_text = params.get(self.tools.SURVEY_QUESTION)
            answer_field = 'number_value' if params.get(self.tools.SURVEY_ANSWER_TYPE) == 'Numeric' else None
            answer_target = params.get(self.tools.SURVEY_ANSWER)
            survey_answer = self.tools.get_survey_answer(survey_text, answer_field)
            if answer_field == 'number_value':
                # A bare numeric target means "equals"; otherwise the template
                # supplies its own comparison operator (e.g. ">=3").
                if str(answer_target).isdigit():
                    answer_target = '=={}'.format(answer_target)
                if not str(survey_answer).isdigit():
                    result = False
                else:
                    # SECURITY NOTE: eval on template-provided text; the inputs
                    # are digit-checked above, but consider a safe comparator
                    # parser if templates are not fully trusted.
                    result = eval('{}{}'.format(survey_answer, answer_target))
            else:
                result = True if survey_answer in answer_target.split(';') else False
            score = 1 if result else 0
            scores.append(score)

            self.save_level2_and_level3(set_name, params.get(self.tools.KPI_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def validate_survey(self, params):
        """
        This function checks whether the Survey-typed KPI should be run, based on the Execution Condition Field.
        """
        survey_validated = False
        condition = params.get(self.tools.SURVEY_CONDITION)
        if not condition:
            survey_validated = True
        else:
            # Condition format: "<question>=<answer1>;<answer2>;..."
            question, answers = condition.split('=')
            survey_answer = self.tools.get_survey_answer(question)
            if survey_answer in answers.split(';'):
                survey_validated = True
        return survey_validated

    def calculate_block_together_sets(self, set_name):
        """
        This function calculates every block-together-typed KPI from the relevant sets, and returns the set final score.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            if self.store_channel == params.get(self.tools.CHANNEL, '').upper():
                filters = {'template_name': params.get(self.tools.LOCATION)}
                # Prefer the sub-brand filter when the template provides one.
                if params.get(self.tools.SUB_BRAND_NAME):
                    filters['sub_brand_name'] = params.get(self.tools.SUB_BRAND_NAME)
                else:
                    filters['brand_name'] = params.get(self.tools.BRAND_NAME)
                result = self.tools.calculate_block_together(**filters)
                score = 1 if result else 0
                scores.append(score)

                self.save_level2_and_level3(set_name, params.get(self.tools.KPI_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def calculate_relative_position_sets(self, set_name):
        """
        This function calculates every relative-position-typed KPI from the relevant sets, and returns the set final score.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            if self.store_channel == params.get(self.tools.CHANNEL, '').upper():
                tested_filters = {'product_ean_code': params.get(self.tools.TESTED)}
                anchor_filters = {'product_ean_code': params.get(self.tools.ANCHOR)}
                direction_data = {'top': self._get_direction_for_relative_position(params.get(self.tools.TOP_DISTANCE)),
                                  'bottom': self._get_direction_for_relative_position(
                                      params.get(self.tools.BOTTOM_DISTANCE)),
                                  'left': self._get_direction_for_relative_position(
                                      params.get(self.tools.LEFT_DISTANCE)),
                                  'right': self._get_direction_for_relative_position(
                                      params.get(self.tools.RIGHT_DISTANCE))}
                general_filters = {'template_name': params.get(self.tools.LOCATION)}
                result = self.tools.calculate_relative_position(tested_filters, anchor_filters, direction_data,
                                                                **general_filters)
                score = 1 if result else 0
                scores.append(score)

                self.save_level2_and_level3(set_name, params.get(self.tools.KPI_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def _get_direction_for_relative_position(self, value):
        """
        This function converts direction data from the template (as string) to a number.
        """
        if value == self.tools.UNLIMITED_DISTANCE:
            # "Unlimited" is modeled as a distance no shelf will ever exceed.
            value = 1000
        elif not value or not str(value).isdigit():
            value = 0
        else:
            value = int(value)
        return value

    def calculate_posm_sets(self, set_name):
        """
        This function calculates every POSM-typed KPI from the relevant sets, and returns the set final score.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            # Without a store channel none of the POSM KPIs can be evaluated.
            if self.store_channel is None:
                break

            kpi_res = self.tools.calculate_posm(display_name=params.get(self.tools.DISPLAY_NAME))
            score = 1 if kpi_res > 0 else 0
            if params.get(self.store_type) == self.tools.RELEVANT_FOR_STORE:
                scores.append(score)

            # Passing displays are reported even when not relevant for the store.
            if score == 1 or params.get(self.store_type) == self.tools.RELEVANT_FOR_STORE:
                self.save_level2_and_level3(set_name, params.get(self.tools.DISPLAY_NAME), score)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def calculate_assortment_sets(self, set_name):
        """
        This function calculates every Assortment-typed KPI from the relevant sets, and returns the set final score.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            target = str(params.get(self.store_type, ''))
            if target.isdigit() or target.capitalize() in (self.tools.RELEVANT_FOR_STORE, self.tools.OR_OTHER_PRODUCTS):
                products = str(params.get(self.tools.PRODUCT_EAN_CODE,
                                          params.get(self.tools.PRODUCT_EAN_CODE2, ''))).replace(',', ' ').split()
                target = 1 if not target.isdigit() else int(target)
                kpi_name = params.get(self.tools.GROUP_NAME, params.get(self.tools.PRODUCT_NAME))
                kpi_static_data = self.kpi_static_data[(self.kpi_static_data['kpi_set_name'] == set_name) &
                                                       (self.kpi_static_data['kpi_name'] == kpi_name)]
                if len(products) > 1:
                    # Product group: sum the per-product assortment and compare
                    # the total against the target.
                    result = 0
                    for product in products:
                        product_score = self.tools.calculate_assortment(product_ean_code=product)
                        result += product_score
                        product_name = \
                            self.all_products[self.all_products['product_ean_code'] == product]['product_name'].values[
                                0]
                        try:
                            atomic_fk = \
                                kpi_static_data[kpi_static_data['atomic_kpi_name'] == product_name][
                                    'atomic_kpi_fk'].values[
                                    0]
                        except Exception as e:
                            Log.info('Product {} is not defined in the DB'.format(product_name))
                            continue
                        self.write_to_db_result(atomic_fk, product_score, level=self.LEVEL3)
                    score = 1 if result >= target else 0
                else:
                    result = self.tools.calculate_assortment(product_ean_code=products)
                    atomic_fk = kpi_static_data['atomic_kpi_fk'].values[0]
                    score = 1 if result >= target else 0
                    self.write_to_db_result(atomic_fk, score, level=self.LEVEL3)

                scores.append(score)
                kpi_fk = kpi_static_data['kpi_fk'].values[0]
                self.write_to_db_result(kpi_fk, score, level=self.LEVEL2)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score

    def write_to_db_result(self, fk, score, level):
        """
        This function builds the result attributes of a KPI (atomic KPI/KPI/KPI set),
        and appends the insert SQL query into the queries' list, later to be written to the DB.
        """
        attributes = self.create_attributes_dict(fk, score, level)
        if level == self.LEVEL1:
            table = KPS_RESULT
        elif level == self.LEVEL2:
            table = KPK_RESULT
        elif level == self.LEVEL3:
            table = KPI_RESULT
        else:
            # Unknown level: nothing to write.
            return
        # Cache the score for calculate_activation_standard.
        self.scores[level][fk] = score
        query = insert(attributes, table)
        self.kpi_results_queries.append(query)

    def activate_ootb_kpis(self):
        """Calculates the out-of-the-box SOS hierarchy (store -> category ->
        sub-category -> manufacturer -> brand) and saves it via save_hierarchy."""
        # FACINGS_SOS_MANUFACTURER_IN_WHOLE_STORE - level 1
        sos_store_fk = self.commonV2.get_kpi_fk_by_kpi_name('SOS OUT OF STORE')
        sos_store = ManufacturerFacingsSOSInWholeStore(data_provider=self.data_provider,
                                                       kpi_definition_fk=sos_store_fk).calculate()
        # FACINGS_SOS_CATEGORY_IN_WHOLE_STORE - level 2
        sos_cat_out_of_store_fk = self.commonV2.get_kpi_fk_by_kpi_name('SOS CATEGORY OUT OF STORE')
        sos_cat_out_of_store = self.calculate_sos_of_cat_of_out_of_store_new(sos_cat_out_of_store_fk)

        # FACINGS_SOS_SUB_CATEGORY_OUT_OF_CATEGORY - level 3
        sos_sub_cat_out_of_cat_fk = self.commonV2.get_kpi_fk_by_kpi_name('SOS SUB CATEGORY OUT OF CATEGORY')
        sos_sub_cat_out_of_cat = SubCategoryFacingsSOSPerCategory(data_provider=self.data_provider,
                                                                  kpi_definition_fk=sos_sub_cat_out_of_cat_fk).calculate()

        # FACINGS_SOS_MANUFACTURER_OUT_OF_SUB_CATEGORY - level 4
        sos_man_out_of_sub_cat_fk = self.commonV2.get_kpi_fk_by_kpi_name('SOS MANUFACTURER OUT OF SUB CATEGORY')
        sos_man_out_of_sub_cat = ManufacturerFacingsSOSPerSubCategoryInStore(
            data_provider=self.data_provider, kpi_definition_fk=sos_man_out_of_sub_cat_fk).calculate()

        # FACINGS_SOS_BRAND_OUT_OF_SUB_CATEGORY_IN_WHOLE_STORE - level 5
        sos_brand_out_of_sub_cat_fk = self.commonV2.get_kpi_fk_by_kpi_name('SOS BRAND OUT OF MANUFACTURER')
        sos_brand_out_of_sub_cat = self.calculate_sos_of_brand_out_of_manufacturer_in_sub_cat(
            sos_brand_out_of_sub_cat_fk)

        # Savings results in Hierarchy
        self.save_hierarchy(sos_store, sos_cat_out_of_store, sos_sub_cat_out_of_cat, sos_man_out_of_sub_cat,
                            sos_brand_out_of_sub_cat)

    def calculate_sos_of_brand_out_of_manufacturer_in_sub_cat(self, kpi_fk):
        """Facings SOS of each brand out of its manufacturer, per sub-category.

        Returns a list of result dictionaries ready for save_hierarchy (level 5).
        """
        res_list = []
        res_dict = dict()
        # Get rid of Irrelevant and Empty types and keep only facings > 0.
        # The comparison is parenthesized on purpose: `&` binds tighter than
        # `>` in Python, so the unparenthesized form computed
        # `(mask & facings) > 0` and filtered on the low bit of the facings.
        filtered_scif = self.scif[
            ~self.scif['product_type'].isin(['Irrelevant', 'Empty']) &
            (self.scif['facings_ign_stack'] > 0)]

        # Filter by each Sub Category and Manufacturer
        sub_cat_fk_list = filtered_scif['sub_category_fk'].unique().tolist()
        for sub_cat in sub_cat_fk_list:
            filtered_scif_by_sub_cat = filtered_scif[filtered_scif['sub_category_fk'] == sub_cat]
            list_of_relevant_manufacturers = filtered_scif_by_sub_cat['manufacturer_fk'].unique().tolist()
            for manu_fk in list_of_relevant_manufacturers:
                filtered_scif_by_sub_cat_and_manufacturer = filtered_scif_by_sub_cat[
                    filtered_scif_by_sub_cat['manufacturer_fk'] == manu_fk]
                denominator_result = filtered_scif_by_sub_cat_and_manufacturer['facings_ign_stack'].sum()

                # Calculate results per Brand
                list_of_relevant_brands = filtered_scif_by_sub_cat_and_manufacturer['brand_fk'].unique().tolist()
                for brand_fk in list_of_relevant_brands:
                    filtered_scif_by_brand = filtered_scif_by_sub_cat_and_manufacturer[
                        filtered_scif_by_sub_cat_and_manufacturer['brand_fk'] == brand_fk]
                    facings_brand_results = filtered_scif_by_brand['facings_ign_stack'].sum()
                    # float() guards against integer truncation under Python 2.
                    result_for_brand = facings_brand_results / float(denominator_result)

                    # Preparing the results' dictionary
                    res_dict['kpi_definition_fk'] = kpi_fk
                    res_dict['numerator_id'] = brand_fk
                    res_dict['numerator_result'] = facings_brand_results
                    res_dict['denominator_id'] = int(sub_cat)
                    res_dict['denominator_result'] = denominator_result
                    res_dict['identifier_result'] = (int(brand_fk), int(sub_cat), int(manu_fk))
                    res_dict['identifier_parent'] = int(manu_fk), (int(sub_cat))
                    res_dict['result'] = result_for_brand
                    res_dict['score'] = result_for_brand
                    res_list.append(res_dict.copy())
        return res_list

    def calculate_sos_of_cat_of_out_of_store_new(self, kpi_fk):
        """Facings SOS of each category out of the whole store.

        Returns a list of result dictionaries ready for save_hierarchy (level 2).
        """
        res_list = []
        res_dict = dict()
        # Get rid of Irrelevant and Empty types and keep only facings
        # (ignoring stacking) > 0. Parenthesized for the same operator
        # precedence reason as in the brand-level calculation above.
        filtered_scif = self.scif[
            ~self.scif['product_type'].isin(['Irrelevant', 'Empty']) &
            (self.scif['facings_ign_stack'] > 0)]
        denominator_result = filtered_scif['facings_ign_stack'].sum()
        categories_fk_list = filtered_scif['category_fk'].unique().tolist()

        # Calculate result per category (using facings_ign_stack!)
        for category_fk in categories_fk_list:
            filtered_scif_by_category = filtered_scif[filtered_scif['category_fk'] == category_fk]
            facings_category_result = filtered_scif_by_category['facings_ign_stack'].sum()
            # float() guards against integer truncation under Python 2.
            result_for_category = facings_category_result / float(denominator_result)

            # Preparing the results' dictionary
            res_dict['kpi_definition_fk'] = kpi_fk
            res_dict['numerator_id'] = category_fk
            res_dict['numerator_result'] = facings_category_result
            res_dict['denominator_id'] = self.store_id
            res_dict['denominator_result'] = denominator_result
            res_dict['result'] = result_for_category
            res_dict['score'] = result_for_category
            res_list.append(res_dict.copy())
        return res_list

    def save_hierarchy(self, level_1, level_2, level_3, level_4, level_5):
        """Writes the five SOS levels to the new tables, chaining each level to
        its parent via identifier_result/identifier_parent.

        Levels 1/3/4 hold calculator result objects (read via `.to_dict`);
        levels 2/5 hold plain dictionaries built by the local calculators.
        """
        for i in level_1:
            res = i.to_dict
            kpi_identifier = "level_1"
            self.commonV2.write_to_db_result(fk=res['kpi_definition_fk'], numerator_id=res['numerator_id'],
                                             denominator_id=res['denominator_id'],
                                             numerator_result=res['numerator_result'],
                                             denominator_result=res['denominator_result'], result=res['result'],
                                             score=res['result'],
                                             identifier_result=kpi_identifier, should_enter=False)

        for res in level_2:
            kpi_identifier = "level_2_" + str(int(res['numerator_id']))
            parent_identifier = "level_1"
            self.commonV2.write_to_db_result(fk=res['kpi_definition_fk'], numerator_id=res['numerator_id'],
                                             denominator_id=res['denominator_id'],
                                             numerator_result=res['numerator_result'],
                                             denominator_result=res['denominator_result'], result=res['result'],
                                             score=res['result'],
                                             identifier_result=kpi_identifier,
                                             identifier_parent=parent_identifier, should_enter=True)

        for i in level_3:
            res = i.to_dict
            kpi_identifier = str(int(res['numerator_id']))
            parent_identifier = "level_2_" + str(int(res['denominator_id']))
            self.commonV2.write_to_db_result(fk=res['kpi_definition_fk'], numerator_id=res['numerator_id'],
                                             denominator_id=res['denominator_id'],
                                             numerator_result=res['numerator_result'],
                                             denominator_result=res['denominator_result'], result=res['result'],
                                             score=res['result'],
                                             identifier_result=kpi_identifier,
                                             identifier_parent=parent_identifier, should_enter=True)

        for i in level_4:
            res = i.to_dict
            kpi_identifier = "level_4_" + str((int(res['numerator_id']), int(res['denominator_id'])))
            parent_identifier = str(int(res['denominator_id']))
            self.commonV2.write_to_db_result(fk=res['kpi_definition_fk'], numerator_id=res['numerator_id'],
                                             denominator_id=res['denominator_id'],
                                             numerator_result=res['numerator_result'],
                                             denominator_result=res['denominator_result'], result=res['result'],
                                             score=res['result'],
                                             identifier_result=kpi_identifier,
                                             identifier_parent=parent_identifier, should_enter=True)

        for res in level_5:
            kpi_identifier = "level_5_" + str(res['identifier_result'])
            parent_identifier = "level_4_" + str(res['identifier_parent'])
            self.commonV2.write_to_db_result(fk=res['kpi_definition_fk'], numerator_id=res['numerator_id'],
                                             denominator_id=res['denominator_id'],
                                             numerator_result=res['numerator_result'],
                                             denominator_result=res['denominator_result'], result=res['result'],
                                             score=res['result'],
                                             identifier_result=kpi_identifier, identifier_parent=parent_identifier,
                                             should_enter=True)

    def create_attributes_dict(self, fk, score, level):
        """
        This function creates a data frame with all attributes needed for saving in KPI results tables,
        and returns it as a dictionary. An unknown level yields an empty dictionary.
        """
        score = round(score, 2)
        if level == self.LEVEL1:
            kpi_set_name = self.kpi_static_data[self.kpi_static_data['kpi_set_fk'] == fk]['kpi_set_name'].values[0]
            score_type = '%' if kpi_set_name in self.tools.KPI_SETS_WITH_PERCENT_AS_SCORE else ''
            attributes = pd.DataFrame([(kpi_set_name, self.session_uid, self.store_id, self.visit_date.isoformat(),
                                        format(score, '.2f'), score_type, fk)],
                                      columns=['kps_name', 'session_uid', 'store_fk', 'visit_date', 'score_1',
                                               'score_2', 'kpi_set_fk'])

        elif level == self.LEVEL2:
            # Escape single quotes so the generated INSERT statement stays valid.
            kpi_name = self.kpi_static_data[self.kpi_static_data['kpi_fk'] == fk]['kpi_name'].values[0].replace("'",
                                                                                                                "\\'")
            attributes = pd.DataFrame([(self.session_uid, self.store_id, self.visit_date.isoformat(),
                                        fk, kpi_name, score)],
                                      columns=['session_uid', 'store_fk', 'visit_date', 'kpi_fk', 'kpk_name', 'score'])
        elif level == self.LEVEL3:
            data = self.kpi_static_data[self.kpi_static_data['atomic_kpi_fk'] == fk]
            atomic_kpi_name = data['atomic_kpi_name'].values[0].replace("'", "\\'")
            kpi_fk = data['kpi_fk'].values[0]
            kpi_set_name = self.kpi_static_data[self.kpi_static_data['atomic_kpi_fk'] == fk]['kpi_set_name'].values[0]
            attributes = pd.DataFrame([(atomic_kpi_name, self.session_uid, kpi_set_name, self.store_id,
                                        self.visit_date.isoformat(), datetime.utcnow().isoformat(),
                                        score, kpi_fk, fk, None, None)],
                                      columns=['display_text', 'session_uid', 'kps_name', 'store_fk', 'visit_date',
                                               'calculation_time', 'score', 'kpi_fk', 'atomic_kpi_fk', 'threshold',
                                               'result'])
        else:
            attributes = pd.DataFrame()
        return attributes.to_dict()

    @log_runtime('Saving to DB')
    def commit_results_data(self):
        """
        This function writes all KPI results to the DB, and commits the changes.
        Existing results for this session are deleted first to keep reruns idempotent.
        """
        cur = self.rds_conn.db.cursor()
        delete_queries = DIAGEOQueries.get_delete_session_results_query_old_tables(self.session_uid)
        for query in delete_queries:
            cur.execute(query)
        for query in self.kpi_results_queries:
            cur.execute(query)
        self.rds_conn.db.commit()
Ejemplo n.º 28
0
class CCZAToolBox:
    """Session-level KPI calculator for the CCZA project.

    Calculates the facings SOS hierarchy (own manufacturer out of store ->
    category out of store -> manufacturer out of category -> brand out of
    manufacturer) and the template-driven 'Red Score' KPI tree (availability,
    SOS, survey, scene-count, planogram and flow atomics), writing results
    through both the legacy Common writer and the CommonV2 writer.
    """

    def __init__(self, data_provider, output):
        """Cache session data frames, open the DB connection and load the KPI template sheets."""
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        # self.store_type = self.data_provider[Data.STORE_INFO]['store_type'].iloc[0]
        # store type is read from the session's attr3 via a dedicated query
        # rather than from the store-info frame (see commented line above)
        query_store_type = CCZAQueries.get_attr3(self.session_uid)
        store_type = pd.read_sql_query(query_store_type, self.rds_conn.db)
        self.store_type = store_type[Const.ATTR3].iloc[0]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.tools = GENERALToolBox(self.data_provider, self.output, rds_conn=self.rds_conn)
        self.survey_response = self.data_provider[Data.SURVEY_RESPONSES]
        self.tool_box_for_flow = ToolBox
        # create data-frames from template
        self.kpi_sheets = {}
        for name in Const.sheet_names_and_rows:
            self.kpi_sheets[name] = parse_template(TEMPLATE_PATH, sheet_name=name,
                                                   lower_headers_row_index=Const.sheet_names_and_rows[name])
        self.common = Common(self.data_provider, Const.RED_SCORE)
        self.survey_handler = Survey(self.data_provider, self.output, self.kpi_sheets[Const.SURVEY_QUESTIONS])
        self.kpi_static_data = self.common.kpi_static_data
        self.kpi_results_queries = []
        self.common_v2 = CommonV2(self.data_provider)
        self.own_manuf_fk = self.get_own_manufacturer_fk()
        # scif rows that participate in SOS calculations (rlv_sos_sc flag == 1)
        self.scif_match_react = self.scif[self.scif[ScifConsts.RLV_SOS_SC] == 1]

    def get_own_manufacturer_fk(self):
        """Return the project's own-manufacturer fk from the data provider as an int."""
        own_manufacturer_fk = self.data_provider.own_manufacturer.param_value.values[0]
        return int(float(own_manufacturer_fk))

    def sos_main_calculation(self):
        """Run the full SOS hierarchy: store level, then category, manufacturer and brand levels."""
        store_sos_ident_par, store_facings = self.calculate_own_manufacturer_out_of_store()
        category_df = self.calculate_sos_category_out_of_store(store_sos_ident_par, store_facings)
        if not category_df.empty:
            manufacturer_cat_df = self.calculate_sos_manufacturer_out_of_category(category_df)
            self.calculate_sos_brand_out_of_manufacturer(manufacturer_cat_df)

    def calculate_own_manufacturer_out_of_store(self):
        """Write own-manufacturer facings SOS out of the whole store.

        :return: tuple - (identifier dict of the store-level result, total store facings).
        """
        manuf_out_of_store_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.SOS_OWN_MANUF_OUT_OF_STORE)
        store_sos_ident_par = self.common_v2.get_dictionary(kpi_fk=manuf_out_of_store_fk)
        num_result = self.scif_match_react[self.scif_match_react[ScifConsts.MANUFACTURER_FK] == self.own_manuf_fk] \
                                                                [ScifConsts.FACINGS_IGN_STACK].sum()
        denom_result = float(self.scif_match_react[ScifConsts.FACINGS_IGN_STACK].sum())
        sos_result = num_result / denom_result if denom_result else 0
        self.common_v2.write_to_db_result(fk=manuf_out_of_store_fk, numerator_id=self.own_manuf_fk,
                                          denominator_id=self.store_id, numerator_result=num_result,
                                          denominator_result=denom_result, score=sos_result * 100, result=sos_result,
                                          identifier_result=store_sos_ident_par, should_enter=True)
        return store_sos_ident_par, denom_result

    def calculate_sos_brand_out_of_manufacturer(self, manuf_df):
        """Write brand facings SOS out of its manufacturer, per category.

        :param manuf_df: DataFrame - manufacturer/category facings with parent identifiers
            (the output of calculate_sos_manufacturer_out_of_category).
        """
        kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.SOS_BRAND_OUT_CAT)
        brand_man_cat_df = self.scif_match_react.groupby([ScifConsts.CATEGORY_FK, ScifConsts.MANUFACTURER_FK,
                                                          ScifConsts.BRAND_FK],
                                                         as_index=False).agg({ScifConsts.FACINGS_IGN_STACK: np.sum})
        brand_man_cat_df = brand_man_cat_df.merge(manuf_df, on=[ScifConsts.CATEGORY_FK, ScifConsts.MANUFACTURER_FK],
                                                  how='left')
        brand_man_cat_df['sos'] = brand_man_cat_df[ScifConsts.FACINGS_IGN_STACK] / brand_man_cat_df['man_cat_facings']
        for i, row in brand_man_cat_df.iterrows():
            self.common_v2.write_to_db_result(fk=kpi_fk, numerator_id=row[ScifConsts.BRAND_FK],
                                              denominator_id=row[ScifConsts.MANUFACTURER_FK],
                                              numerator_result=row[ScifConsts.FACINGS_IGN_STACK],
                                              denominator_result=row['man_cat_facings'], result=row['sos'],
                                              context_id=row[ScifConsts.CATEGORY_FK],
                                              score=row['sos'] * 100, identifier_parent=row['manuf_id_parent'],
                                              should_enter=True)

    def calculate_sos_manufacturer_out_of_category(self, cat_df):
        """Write manufacturer facings SOS out of its category.

        :param cat_df: DataFrame - category facings with parent identifiers
            (the output of calculate_sos_category_out_of_store).
        :return: DataFrame - manufacturer/category facings plus identifiers for the brand level.
        """
        kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.SOS_MANUF_OUT_OF_CAT)
        manuf_in_cat_df = self.scif_match_react.groupby([ScifConsts.CATEGORY_FK, ScifConsts.MANUFACTURER_FK],
                                                        as_index=False).agg({ScifConsts.FACINGS_IGN_STACK: np.sum})
        manuf_in_cat_df = manuf_in_cat_df.merge(cat_df, on=ScifConsts.CATEGORY_FK, how='left')
        manuf_in_cat_df['sos'] = manuf_in_cat_df[ScifConsts.FACINGS_IGN_STACK] / manuf_in_cat_df['category_facings']
        manuf_in_cat_df['id_result'] = manuf_in_cat_df.apply(self.build_identifier_parent_man_in_cat,
                                                             axis=1, args=(kpi_fk,))
        for i, row in manuf_in_cat_df.iterrows():
            self.common_v2.write_to_db_result(fk=kpi_fk, numerator_id=row[ScifConsts.MANUFACTURER_FK],
                                              denominator_id=row[ScifConsts.CATEGORY_FK],
                                              numerator_result=row[ScifConsts.FACINGS_IGN_STACK],
                                              denominator_result=row['category_facings'], result=row['sos'],
                                              score=row['sos'] * 100, identifier_parent=row['cat_id_parent'],
                                              identifier_result=row['id_result'],
                                              should_enter=True)
        manuf_in_cat_df.rename(columns={ScifConsts.FACINGS_IGN_STACK: 'man_cat_facings',
                                        'id_result': 'manuf_id_parent'}, inplace=True)
        manuf_in_cat_df = manuf_in_cat_df[[ScifConsts.MANUFACTURER_FK, ScifConsts.CATEGORY_FK,
                                           'man_cat_facings', 'manuf_id_parent']]
        return manuf_in_cat_df

    @staticmethod
    def build_identifier_parent_man_in_cat(row, kpi_fk):
        """Build the identifier dict linking a manufacturer-in-category result to its brand children."""
        id_result_dict = {Const.KPI_FK: kpi_fk, ScifConsts.CATEGORY_FK: row[ScifConsts.CATEGORY_FK],
                          ScifConsts.MANUFACTURER_FK: row[ScifConsts.MANUFACTURER_FK]}
        return id_result_dict

    def calculate_sos_category_out_of_store(self, store_sos_ident_par, store_facings):
        """Write category facings SOS out of the store.

        :param store_sos_ident_par: dict - identifier of the store-level parent result.
        :param store_facings: float - total store facings (SOS denominator).
        :return: DataFrame - category facings plus identifiers for the manufacturer level.
        """
        kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.SOS_CAT_OUT_OF_STORE)
        cat_df = self.scif_match_react.groupby([ScifConsts.CATEGORY_FK],
                                               as_index=False).agg({ScifConsts.FACINGS_IGN_STACK: np.sum})
        cat_df['sos'] = cat_df[ScifConsts.FACINGS_IGN_STACK] / store_facings
        cat_df['id_result'] = cat_df[ScifConsts.CATEGORY_FK].apply(lambda x: {ScifConsts.CATEGORY_FK: x,
                                                                              Const.KPI_FK: kpi_fk})
        for i, row in cat_df.iterrows():
            self.common_v2.write_to_db_result(fk=kpi_fk, numerator_id=row[ScifConsts.CATEGORY_FK],
                                              denominator_id=self.store_id,
                                              numerator_result=row[ScifConsts.FACINGS_IGN_STACK],
                                              denominator_result=store_facings, result=row['sos'],
                                              score=row['sos'] * 100, identifier_parent=store_sos_ident_par,
                                              identifier_result=row['id_result'], should_enter=True)
        cat_df.rename(columns={ScifConsts.FACINGS_IGN_STACK: 'category_facings', 'id_result': 'cat_id_parent'},
                      inplace=True)
        cat_df = cat_df[[ScifConsts.CATEGORY_FK, 'category_facings', 'cat_id_parent']]
        return cat_df

    def main_calculation_red_score(self):
        """Calculate the template-driven Red Score KPI set.

        The set name is taken from the last row of the KPIS sheet; each kpi row's
        weighted score is accumulated and the set score is written to both the
        legacy tables and the new tables.
        """
        set_score = 0
        try:
            set_name = self.kpi_sheets[Const.KPIS].iloc[len(self.kpi_sheets[Const.KPIS]) - 1][
                Const.KPI_NAME]
            kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(set_name)
            set_identifier_res = self.common_v2.get_dictionary(kpi_fk=kpi_fk)
            if self.store_type in self.kpi_sheets[Const.KPIS].keys().tolist():
                for i in xrange(len(self.kpi_sheets[Const.KPIS]) - 1):
                    params = self.kpi_sheets[Const.KPIS].iloc[i]
                    percent = self.get_percent(params[self.store_type])
                    # zero-weighted kpis for this store type are skipped entirely
                    if percent == 0:
                        continue
                    kpi_score = self.main_calculation_lvl_2(identifier_parent=set_identifier_res, params=params)
                    set_score += kpi_score * percent
            else:
                Log.warning('The store-type "{}" is not recognized in the template'.format(self.store_type))
                return
            kpi_names = {Const.column_name1: set_name}
            set_fk = self.get_kpi_fk_by_kpi_path(self.common.LEVEL1, kpi_names)
            if set_fk:
                try:
                    self.common.write_to_db_result(score=set_score, level=self.common.LEVEL1, fk=set_fk)
                except Exception as exception:
                    Log.error('Exception in the set {} writing to DB: {}'.format(set_name, exception.message))
            self.common_v2.write_to_db_result(fk=kpi_fk, numerator_id=self.own_manuf_fk,
                                              denominator_id=self.store_id, score=set_score,
                                              result=set_score, identifier_result=set_identifier_res,
                                              should_enter=True)
        except Exception as exception:
            Log.error('Exception in the kpi-set calculating: {}'.format(exception.message))
            pass

    def main_calculation_lvl_2(self, identifier_parent, *args, **kwargs):
        """
            :param identifier_parent: dict - identifier of the parent (set-level) result.
            :param kwargs: 'params' - kpi line (level 2) from the template.
            the function gets the kpi (level 2) row, and calculates its children.
            :return: float - weighted score of the kpi.
        """
        kpi_params = kwargs['params']
        kpi_name = kpi_params[Const.KPI_NAME]
        kpi_score = 0.0
        set_name = kpi_params[Const.KPI_GROUP]
        kpi_type = kpi_params[Const.KPI_TYPE]
        target = kpi_params[Const.TARGET]
        kpi_fk_lvl_2 = self.common_v2.get_kpi_fk_by_kpi_type(kpi_name)
        lvl_2_identifier_par = self.common_v2.get_dictionary(kpi_fk=kpi_fk_lvl_2)
        if kpi_name != Const.FLOW:
            for i in range(len(self.kpi_sheets[target])):
                if kpi_params[Const.WEIGHT_SHEET].strip():
                    # weights live in a separate sheet: match the atomic line by
                    # kpi name + atomic name, then attach the target line to it
                    target_series = self.kpi_sheets[target].iloc[i]
                    weight_sheet = self.kpi_sheets[kpi_params[Const.WEIGHT_SHEET]]
                    atomic_params = weight_sheet[(weight_sheet[Const.KPI_NAME] == target_series[Const.KPI_NAME]) &
                                                 (weight_sheet[Const.ATOMIC_NAME] == target_series[Const.ATOMIC_NAME])
                                                 ].iloc[0]
                    atomic_params[Const.targets_line] = target_series
                    # atomic_params = self.kpi_sheets[kpi_params[Const.WEIGHT_SHEET]].iloc[i]
                    # atomic_params[Const.targets_line] = self.kpi_sheets[target].iloc[i]
                else:
                    atomic_params = self.kpi_sheets[target].iloc[i]
                percent = self.get_percent(atomic_params[self.store_type])
                if percent == 0.0 or atomic_params[Const.KPI_NAME] != kpi_name:
                    continue
                atomic_params[Const.type] = kpi_type
                atomic_score = self.calculate_atomic(atomic_params, set_name, lvl_2_identifier_par)
                if atomic_score is None:
                    atomic_score = 0.0
                    Log.error('The calculated score is not good.')
                kpi_score += atomic_score * percent
        else:
            # FLOW is a single-atomic kpi: its parameters come from the first target row
            atomic_row = self.kpi_sheets[target].iloc[0]
            atomic_params = atomic_row.to_dict()
            atomic_params.update({Const.ATOMIC_NAME: kpi_name, Const.KPI_NAME: kpi_name, Const.type: kpi_name})
            kpi_score = self.calculate_atomic(atomic_params, set_name, lvl_2_identifier_par)
        kpi_names = {Const.column_name1: set_name, Const.column_name2: kpi_name}
        kpi_fk = self.get_kpi_fk_by_kpi_path(self.common.LEVEL2, kpi_names)
        if kpi_fk:
            try:
                self.common.write_to_db_result(score=kpi_score, level=self.common.LEVEL2, fk=kpi_fk)
            except Exception as e:
                Log.error('Exception in the kpi {} writing to DB: {}'.format(kpi_name, e.message))
        self.common_v2.write_to_db_result(fk=kpi_fk_lvl_2, numerator_id=self.own_manuf_fk, denominator_id=self.store_id,
                                          score=kpi_score, result=kpi_score, identifier_parent=identifier_parent,
                                          identifier_result=lvl_2_identifier_par, should_enter=True)
        return kpi_score

    def calculate_atomic(self, atomic_params, set_name, lvl_2_identifier_parent):
        """
            :param atomic_params: dict - atomic kpi line from the template
            :param set_name: str - name of the set, for DB.
            :param lvl_2_identifier_parent: dict - identifier of the parent kpi (level 2) result.
            the function gets the atomic (level 3) row, and calculates it by the options (availability/SOS/flow/survey).
            :return: float - score of the atomic kpi.
        """
        atomic_name = atomic_params[Const.ATOMIC_NAME]
        kpi_name = atomic_params[Const.KPI_NAME]
        atomic_type = atomic_params[Const.type]
        if atomic_type == Const.AVAILABILITY:
            atomic_score = self.calculate_availability(atomic_params)
        elif atomic_type == Const.SOS_FACINGS:
            atomic_score = self.calculate_sos(atomic_params)
        elif atomic_type == Const.SURVEY_QUESTION:
            if Const.KPI_TYPE in atomic_params.keys() and atomic_params[Const.KPI_TYPE] != Const.SURVEY:
                atomic_score = self.calculate_survey_with_types(atomic_params)
            else:
                atomic_score = self.calculate_survey_with_codes(atomic_params)
        elif atomic_type == Const.FLOW:
            atomic_score = self.calculate_flow(atomic_params)
        else:
            atomic_score = 0.0
            Log.error('The type "{}" is unknown'.format(atomic_type))
        kpi_names = {Const.column_name1: set_name, Const.column_name2: kpi_name, Const.column_name3: atomic_name}
        atomic_fk = self.get_kpi_fk_by_kpi_path(self.common.LEVEL3, kpi_names)
        if atomic_fk:
            try:
                self.common.write_to_db_result(score=atomic_score, level=self.common.LEVEL3, fk=atomic_fk)
            except Exception as e:
                Log.error('Exception in the atomic-kpi {} writing to DB: {}'.format(atomic_name, e.message))
        kpi_fk_lvl_3 = self.common_v2.get_kpi_fk_by_kpi_type(atomic_name) if atomic_name != Const.FLOW \
            else self.common_v2.get_kpi_fk_by_kpi_type(Const.FLOW_LVL_3)
        self.common_v2.write_to_db_result(fk=kpi_fk_lvl_3, numerator_id=self.own_manuf_fk, denominator_id=self.store_id,
                                          result=atomic_score, score=atomic_score,
                                          identifier_parent=lvl_2_identifier_parent, should_enter=True)
        return atomic_score

    @kpi_runtime()
    def calculate_availability(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template
            :return: 100 if this product is available, 0 otherwise.
        """
        availability = self.calculate_all_availability(atomic_params[Const.ENTITY_TYPE],
                                                       atomic_params[Const.ENTITY_VAL],
                                                       atomic_params[Const.ENTITY_TYPE2],
                                                       atomic_params[Const.ENTITY_VAL2],
                                                       atomic_params[Const.IN_NOT_IN],
                                                       Converters.convert_type(atomic_params[Const.TYPE_FILTER]),
                                                       atomic_params[Const.VALUE_FILTER])
        return 100.0 * (availability > 0)

    def calculate_all_availability(self, type1, value1, type2, value2, in_or_not, filter_type, filter_value):
        """
            :param type1: str - mandatory entity type of the main filter.
            :param value1: str - comma-separated values for type1.
            :param type2: str - optional second entity type (may be empty).
            :param value2: str - comma-separated values for type2.
            :param in_or_not: str - 'In' / 'Not in' modifier for the extra filter (may be empty).
            :param filter_type: the extra filter's entity type, applied only with in_or_not.
            :param filter_value: the extra filter's value.
            builds the filter dict and delegates to the general tool box.
            :return: float - availability result (0.0 when the main filter is missing).
        """
        if not type1 or not value1:
            Log.warning('There is no type and value in the atomic availability')
            return 0.0
        type1 = Converters.convert_type(type1)
        value1 = value1.split(',')
        value1 = map(lambda x: x.strip(), value1)
        type2 = Converters.convert_type(type2)
        value2 = value2
        filters = {type1: value1}
        if type2 and value2:
            value2 = value2.split(',')
            value2 = map(lambda x: x.strip(), value2)
            filters[type2] = value2
        if in_or_not:
            filters = self.update_filters(filters, in_or_not, filter_type, filter_value)
        return self.tools.calculate_availability(**filters)

    @kpi_runtime()
    def calculate_survey_with_types(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template.
            checks the kind of the survey and sends it to the match function
            :return: float - score.
        """
        atomic_score = 0.0
        accepted_answer = atomic_params[Const.ACCEPTED_ANSWER_RESULT]
        atomic_type = atomic_params[Const.KPI_TYPE]
        if not accepted_answer:
            # availability-style survey defaults to a threshold of 1 facing
            if atomic_type == Const.AVAILABILITY:
                accepted_answer = 1
            else:
                Log.warning('There is no accepted answer to {} in the template'.format(
                    atomic_params[Const.ATOMIC_NAME]))
                return atomic_score
        if atomic_type == Const.AVAILABILITY:
            availability = self.calculate_all_availability(atomic_params[Const.ENTITY_TYPE],
                                                           atomic_params[Const.ENTITY_VAL],
                                                           atomic_params[Const.ENTITY_TYPE2],
                                                           atomic_params[Const.ENTITY_VAL2],
                                                           atomic_params[Const.IN_NOT_IN],
                                                           atomic_params[Const.TYPE_FILTER],
                                                           atomic_params[Const.VALUE_FILTER])
            atomic_score = 100.0 * (availability >= float(accepted_answer))
        elif atomic_type == Const.SCENE_COUNT:
            count = self.calculate_scene_count(atomic_params)
            atomic_score = 100.0 * (count >= float(accepted_answer))
        elif atomic_type == Const.PLANOGRAM:
            # atomic_score = self.calculate_planogram(atomic_params)
            atomic_score = self.calculate_planogram_new(atomic_params)
        else:
            Log.warning('The type "{}" is not recognized'.format(atomic_type))
        return atomic_score

    @kpi_runtime()
    def calculate_survey_with_codes(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template.
            checks survey question.
            :return: float - score.
        """
        atomic_score = 0.0
        try:
            code = str(int(float(atomic_params[Const.SURVEY_Q_CODE])))
        except ValueError:
            Log.warning('The atomic "{}" has no survey code'.format(atomic_params[Const.ATOMIC_NAME]))
            return atomic_score
        survey_text = atomic_params[Const.ATOMIC_NAME]
        if Const.targets_line in atomic_params.keys():
            # numeric target flow: compare the store-type target to the answer value
            try:
                accepted_answer = float(atomic_params[Const.targets_line][self.store_type])
            except ValueError:
                Log.warning('The atomic "{}" has no target'.format(atomic_params[Const.ATOMIC_NAME]))
                return atomic_score
            survey_data = self.tools.survey_response[self.tools.survey_response['code'].isin([code])]
            if survey_data.empty:
                # fall back to matching the survey by its question text
                Log.warning('Survey with {} = {} does not exist'.format('code', code))
                survey_data = self.tools.survey_response[
                    self.tools.survey_response['question_text'].isin([survey_text])]
                if survey_data.empty:
                    return atomic_score
            survey_answer = survey_data['number_value'].iloc[0]
            check_survey = accepted_answer >= survey_answer
        else:
            # textual accepted-answer flow: delegate matching to the general tool box
            survey_code_couple = ('code', code)
            accepted_answer = atomic_params[Const.ACCEPTED_ANSWER_RESULT]
            check_survey = self.tools.check_survey_answer(survey_code_couple, accepted_answer)
            if check_survey is None:
                check_survey = self.tools.check_survey_answer(survey_text, accepted_answer)
                if check_survey is None:
                    return atomic_score
        atomic_score = 100.0 * check_survey
        return atomic_score

    def calculate_planogram(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template
            :return: 100 if there is planogram, 0 otherwise.

            NOTE(review): superseded by calculate_planogram_new - the call to this
            method is commented out in calculate_survey_with_types.
        """
        type_name = Converters.convert_type(atomic_params[Const.ENTITY_TYPE])
        values = atomic_params[Const.ENTITY_VAL].split(',')
        values = map(lambda x: x.strip(), values)
        wanted_answer = float(atomic_params[Const.ACCEPTED_ANSWER_RESULT])
        filtered_scenes = self.scif[self.scif[type_name].isin(values)]['scene_id'].unique()
        count = 0
        for scene_id in filtered_scenes:
            query = CCZAQueries.getPlanogramByTemplateName(scene_id)
            planogram = pd.read_sql_query(query, self.rds_conn.db)
            if 1 in planogram['match_compliance_status'].unique().tolist():
                count += 1
            if count >= wanted_answer:
                return 100.0
        return 0.0

    def calculate_planogram_new(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template
            :return: 100 if at least the required number of filtered scenes have ALL of
                their tags correctly positioned (compliance status 3), 0 otherwise.
        """
        type_name = Converters.convert_type(atomic_params[Const.ENTITY_TYPE])
        values = map(lambda x: x.strip(), atomic_params[Const.ENTITY_VAL].split(','))
        wanted_answer = float(atomic_params[Const.ACCEPTED_ANSWER_RESULT])
        filtered_scenes = self.scif[self.scif[type_name].isin(values)][ScifConsts.SCENE_FK].unique()
        scenes_passing = 0
        for scene in filtered_scenes:
            # tags in the scene whose compliance status is anything other than 3
            incor_tags = self.match_product_in_scene[(self.match_product_in_scene[ScifConsts.SCENE_FK]
                                                      == scene) &
                                                     (~(self.match_product_in_scene[MatchesConsts.COMPLIANCE_STATUS_FK]
                                                      == 3))]
            if len(incor_tags) == 0:
                scenes_passing += 1
        score = 100 if scenes_passing >= wanted_answer else 0
        return score

    def calculate_scene_count(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template
            :return: int - amount of scenes.
        """
        filters = {Converters.convert_type(
            atomic_params[Const.ENTITY_TYPE]): map(lambda x: x.strip(), atomic_params[Const.ENTITY_VAL].split(','))}
        scene_count = self.tools.calculate_number_of_scenes(**filters)
        return scene_count

    @kpi_runtime()
    def calculate_sos(self, atomic_params):
        """
            :param atomic_params: dict - atomic kpi line from the template
            :return: the percent of SOS (if it's binary - 100 if more than target, otherwise 0).
        """
        numerator_type = atomic_params[Const.ENTITY_TYPE_NUMERATOR]
        numerator_value = atomic_params[Const.NUMERATOR]
        denominator_type = atomic_params[Const.ENTITY_TYPE_DENOMINATOR]
        denominator_value = atomic_params[Const.DENOMINATOR]
        in_or_not = atomic_params[Const.IN_NOT_IN]
        filter_type = Converters.convert_type(atomic_params[Const.TYPE_FILTER])
        filter_value = atomic_params[Const.VALUE_FILTER]
        denominator_filters = self.get_default_filters(denominator_type, denominator_value)
        numerator_filters = self.get_default_filters(numerator_type, numerator_value)
        if in_or_not:
            # the include/exclude modifier applies to both sides of the ratio
            numerator_filters = self.update_filters(numerator_filters, in_or_not, filter_type, filter_value)
            denominator_filters = self.update_filters(denominator_filters, in_or_not, filter_type, filter_value)
        atomic_score = self.tools.calculate_share_of_shelf(
            sos_filters=numerator_filters, **denominator_filters) * 100
        if atomic_params[Const.SCORE] == Const.BINARY:
            try:
                return 100 * (atomic_score >= float(atomic_params[Const.targets_line][
                                                        self.store_type]))
            except ValueError:
                Log.warning('The target for {} is bad in store {}'.format(
                    atomic_params[Const.ATOMIC_NAME], self.store_type))
                return 0.0
        elif atomic_params[Const.SCORE] != Const.NUMERIC:
            Log.error('The score is not numeric and not binary.')
        return atomic_score

    def update_filters(self, filters, in_or_not, filter_type, filter_value):
        """
            :param filters: the source filters as dict.
            :param in_or_not: boolean if it should include or not include
            :param filter_type: str
            :param filter_value: str
            adds one more include/exclude condition to an existing filter dict.
            :return: dict - the updated filter.
        """
        filter_type = Converters.convert_type(filter_type)
        if "Not" in in_or_not:
            # exclusion is expressed as an allow-list of every other value in scif
            list_of_negative = list(self.scif[~(self.scif[filter_type] == filter_value)][filter_type].unique())
            filters[filter_type] = list_of_negative
        elif "In" in in_or_not:
            filters[filter_type] = filter_value
        else:
            Log.warning('The value in "In/Not In" in the template should be "Not in", "In" or empty')
        return filters

    @kpi_runtime()
    def calculate_flow(self, atomic_params):
        """
            checking if the shelf is sorted like the brands list.
            :param atomic_params: dict - atomic kpi line from the template.
            :return: 100 if it's fine, 0 otherwise.
        """
        population_entity_type = Converters.convert_type(atomic_params[Const.ENTITY_TYPE])
        progression_list = map(lambda x: x.strip(), atomic_params[Const.ENTITY_VAL].split(','))
        location_entity_type = Converters.convert_type(atomic_params[Const.TYPE_FILTER])
        location_values = map(lambda x: x.strip(), atomic_params[Const.VALUE_FILTER].split(','))
        is_in_param = False if atomic_params[Const.IN_NOT_IN] == u'Not in' else True
        filter_loc_param = self.scif[location_entity_type].isin(location_values) if is_in_param else\
            ~self.scif[location_entity_type].isin(location_values)

        # only tagged products from the progression list, within the requested location
        filtered_scif = self.scif[
            filter_loc_param &
            (self.scif['tagged'] >= 1) &
            (self.scif[population_entity_type].isin(progression_list))]

        join_on = ['scene_fk', 'product_fk']
        match_product_join_scif = pd.merge(filtered_scif, self.match_product_in_scene, on=join_on, how='left',
                                           suffixes=('_x', '_matches'))
        progression_field = 'brand_name'
        group_column = 'scene_fk'

        # the flow passes if the progression holds either across shelves or within them
        progression_cross_shelves_true = (self.tool_box_for_flow.progression(
            df=match_product_join_scif, progression_list=progression_list, progression_field=progression_field,
            at_least_one=False, left_to_right=False, cross_bays=True, cross_shelves=True,
            include_stacking=False, group_by=group_column))
        progression_cross_shelves_false = (self.tool_box_for_flow.progression(
            df=match_product_join_scif, progression_list=progression_list, progression_field=progression_field,
            at_least_one=False, left_to_right=False, cross_bays=True, cross_shelves=False,
            include_stacking=False, group_by=group_column))

        return 100.0 * (progression_cross_shelves_true or
                        progression_cross_shelves_false)

    def get_kpi_fk_by_kpi_path(self, kpi_level, kpi_names):
        """
        :param kpi_level: int - kpi level.
        :param kpi_names: dict - all the path from the set.
        :return: int - the fk of this kpi in the DB (None if the path is not found).
        """
        for name in kpi_names:
            assert isinstance(kpi_names[name], (unicode, basestring)), "name is not a string: %r" % kpi_names[name]
        try:
            if kpi_level == self.common.LEVEL1:
                return self.kpi_static_data[self.kpi_static_data[Const.column_name1] == kpi_names[Const.column_name1]][
                    Const.column_key1].values[0]
            elif kpi_level == self.common.LEVEL2:
                return self.kpi_static_data[
                    (self.kpi_static_data[Const.column_name1] == kpi_names[Const.column_name1]) &
                    (self.kpi_static_data[Const.column_name2] == kpi_names[Const.column_name2])][
                    Const.column_key2].values[0]
            elif kpi_level == self.common.LEVEL3:
                # level 3 compares utf8-encoded bytes to tolerate non-ascii kpi names
                return self.kpi_static_data[
                    (self.kpi_static_data[Const.column_name1].str.encode('utf8') == kpi_names[Const.column_name1].encode('utf8')) &
                    (self.kpi_static_data[Const.column_name2].str.encode('utf8') == kpi_names[Const.column_name2].encode('utf8')) &
                    (self.kpi_static_data[Const.column_name3].str.encode('utf8') == kpi_names[Const.column_name3].encode('utf8'))][
                    Const.column_key3].values[0]
            else:
                raise ValueError, 'invalid level'
        except IndexError:
            Log.info('Kpi path: {}, is not equal to any kpi name in static table'.format(kpi_names))
            return None

    @staticmethod
    def get_default_filters(type_name, value_name):
        """
            :param type_name: string that contains list of types
            :param value_name: string that contains list of values in the same length
            :return: filter as dict.
        """
        if ',' in type_name:
            # pair each type with its positional value
            types = type_name.split(',')
            types = map(lambda x: x.strip(), types)
            values = value_name.split(',')
            values = map(lambda x: x.strip(), values)
            filters = {}
            if len(types) != len(values):
                Log.warning('there are {} types and {} values, should be the same amount'.format(
                    len(types), len(values)))
            else:
                for i in xrange(len(types)):
                    filters[Converters.convert_type(types[i])] = values[i]
        else:
            filters = {Converters.convert_type(type_name): map(lambda x: x.strip(), value_name.split(','))}
        return filters

    @staticmethod
    def get_percent(num):
        """
        :param num: a cell from the template, can be float in percent, int or string for 0
        :return: the number divided by 100
        """
        try:
            answer = float(num)
            ans = answer if answer < 1 else answer / 100.0
            # NOTE(review): after the division, ans is expected in [0, 1]; the
            # `ans > 100` guard can only trip for template values above 10,000 -
            # confirm the intended valid range.
            if ans < 0 or ans > 100:
                Log.error('The weight is {} in the template, not possible.'.format(ans))
                return 0.0
            return ans
        except ValueError:
            # non-numeric template cells (e.g. empty strings) count as zero weight
            return 0.0
Ejemplo n.º 29
0
class PS3SandToolBox:
    """DIAGEO PS3 sandbox toolbox: delegates the session KPI work to the global DIAGEO generator."""

    def __init__(self, data_provider, output):
        # Keep handles to the session data and both result writers (legacy + new tables).
        self.output = output
        self.data_provider = data_provider
        self.project_name = data_provider.project_name
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(self.data_provider, self.output, self.common, menu=True)
        self.template_handler = TemplateHandler(self.project_name)

    def main_calculation(self):
        """
        This function calculates the KPI results.
        """
        # SOS Out Of The Box kpis
        self.diageo_generator.activate_ootb_kpis(self.commonV2)
        # sos by scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        log_runtime('Updating templates')
        self.template_handler.update_templates()

        # Global assortment kpis
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_grouping_assortment_calculation())

        # Global Menu kpis
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_new_share_of_menu_function())

        # Global Secondary Displays function
        display_result = self.diageo_generator.diageo_global_secondary_display_secondary_function()
        if display_result:
            self.commonV2.write_to_db_result(fk=display_result['fk'], numerator_id=1,
                                             denominator_id=self.store_id,
                                             result=display_result['result'])

        # Brand Blocking Global function
        blocking_template = self.template_handler.download_template(DiageoKpiNames.BRAND_BLOCKING)
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_block_together(
                kpi_name=DiageoKpiNames.BRAND_BLOCKING,
                set_templates_data=blocking_template))

        # Global Relative Position function
        position_template = self.template_handler.download_template(DiageoKpiNames.RELATIVE_POSITION)
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_relative_position_function(
                position_template, location_type='template_name'))

        # Global Vertical Shelf Placement function
        vertical_template = self.template_handler.download_template(DiageoKpiNames.VERTICAL_SHELF_PLACEMENT)
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_vertical_placement(vertical_template))

        # committing to the new tables
        self.commonV2.commit_results_data()
        # committing to the old tables
        self.common.commit_results_data()
Ejemplo n.º 30
0
class DIAGEOUKToolBox:
    """DIAGEO UK toolbox: runs the global DIAGEO KPI suite for a session and commits results."""

    def __init__(self, data_provider, output):
        # Session data handles plus both result writers (legacy + new tables).
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.output = output
        self.common = Common(self.data_provider)
        self.commonV2 = CommonV2(self.data_provider)
        self.diageo_generator = DIAGEOGenerator(self.data_provider, self.output, self.common, menu=True)
        self.template_handler = TemplateHandler(self.project_name)

    def main_calculation(self):
        """
        This function calculates the KPI results.
        """
        log_runtime('Updating templates')
        self.template_handler.update_templates()

        # SOS Out Of The Box kpis
        self.diageo_generator.activate_ootb_kpis(self.commonV2)
        # sos by scene type
        self.diageo_generator.sos_by_scene_type(self.commonV2)

        # Global assortment kpis
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_grouping_assortment_calculation())

        # Global Menu kpis
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_new_share_of_menu_function())

        # Global Equipment score kpis (only 'ON - DRAUGHT TAPS' scenes are relevant)
        equipment_scenes = self.get_equipment_score_relevant_scenes()
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_equipment_score(
                save_scene_level=False, scene_list=equipment_scenes))

        # Global Relative Position function
        position_template = self.template_handler.download_template(DiageoKpiNames.RELATIVE_POSITION)
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_relative_position_function(
                position_template, location_type='template_name'))

        # Global Secondary Displays function
        display_result = self.diageo_generator.diageo_global_secondary_display_secondary_function()
        if display_result:
            self.commonV2.write_to_db_result(fk=display_result['fk'],
                                             numerator_id=1,
                                             denominator_id=self.store_id,
                                             result=display_result['result'])

        # Global Visible to Consumer function: EANs of SKU-type products, empties dropped
        sku_rows = self.scif[self.scif['product_type'] == 'SKU']
        sku_list = [ean for ean in sku_rows.product_ean_code.tolist() if ean]
        self.commonV2.save_json_to_new_tables(
            self.diageo_generator.diageo_global_visible_percentage(sku_list))

        # committing to new tables
        self.commonV2.commit_results_data()
        # committing to the old tables
        self.common.commit_results_data()

    def get_equipment_score_relevant_scenes(self):
        """Return the scene fks of 'ON - DRAUGHT TAPS' scenes; empty list when scif has no rows."""
        generator_scif = self.diageo_generator.scif
        if generator_scif.empty:
            return []
        draught_mask = generator_scif['template_name'] == 'ON - DRAUGHT TAPS'
        return generator_scif[draught_mask]['scene_fk'].unique().tolist()