Example #1
    def __init__(self, data_provider, output, common):
        GlobalSessionToolBox.__init__(self, data_provider, output, common)
        self.ps_data_provider = PsDataProvider(data_provider)
        self.own_manufacturer = int(self.data_provider.own_manufacturer.param_value.values[0])
        self.all_templates = self.data_provider[Data.ALL_TEMPLATES]
        self.project_templates = {}
        self.parse_template()
        self.store_type = self.store_info['store_type'].iloc[0]
        self.survey = Survey(self.data_provider, output, ps_data_provider=self.ps_data_provider, common=self.common)
        self.att2 = self.store_info['additional_attribute_2'].iloc[0]
        self.results_df = pd.DataFrame(columns=['kpi_name', 'kpi_fk', 'numerator_id', 'numerator_result',
                                                'denominator_id', 'denominator_result', 'result', 'score',
                                                'identifier_result', 'identifier_parent', 'should_enter'])

        self.products = self.data_provider[Data.PRODUCTS]
        scif = self.scif[['brand_fk', 'facings', 'product_type']].groupby(by='brand_fk').sum()
        self.mpis = self.matches \
            .merge(self.products, on='product_fk', suffixes=['', '_p']) \
            .merge(self.scene_info, on='scene_fk', suffixes=['', '_s']) \
            .merge(self.all_templates[['template_fk', TEMPLATE_GROUP]], on='template_fk') \
            .merge(scif, on='brand_fk')[COLUMNS]
        self.mpis['store_fk'] = self.store_id

        self.calculations = {
            COMBO: self.calculate_combo,
            POSM_AVAILABILITY: self.calculate_posm_availability,
            SCORING: self.calculate_scoring,
            SHARE_OF_EMPTY: self.calculate_share_of_empty,
            SOS: self.calculate_sos,
            SURVEY: self.calculate_survey,
        }
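
A minimal standalone sketch (toy frames with made-up values, not the project's real data providers) of the merge pipeline Example #1 uses to build self.mpis:

import pandas as pd

# toy stand-ins for matches, products, scene_info and the templates table
matches = pd.DataFrame({'product_fk': [1, 2], 'scene_fk': [10, 10], 'facings': [3, 5]})
products = pd.DataFrame({'product_fk': [1, 2], 'brand_fk': [100, 200]})
scene_info = pd.DataFrame({'scene_fk': [10], 'template_fk': [7]})
all_templates = pd.DataFrame({'template_fk': [7], 'template_group': ['Cooler']})

# same pattern: chain merges on the shared keys, suffixing any duplicated columns
mpis = (matches
        .merge(products, on='product_fk', suffixes=['', '_p'])
        .merge(scene_info, on='scene_fk', suffixes=['', '_s'])
        .merge(all_templates, on='template_fk'))
mpis['store_fk'] = 42  # hypothetical store id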
Example #2
 def __init__(self, data_provider, output, common_db):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
     self.store_info = self.ps_data_provider.get_ps_store_info(
         self.data_provider[Data.STORE_INFO])
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.scif = self.scif[self.scif['product_type'] != "Irrelevant"]
     self.result_values = self.ps_data_provider.get_result_values()
     self.templates = self.read_templates()
     self.common_db = common_db
     self.survey = Survey(self.data_provider, output=self.output, ps_data_provider=self.ps_data_provider,
                          common=self.common_db)
     self.manufacturer_fk = Const.MANUFACTURER_FK
     self.region = self.store_info['region_name'].iloc[0]
     self.store_type = self.store_info['store_type'].iloc[0]
     self.retailer = self.store_info['retailer_name'].iloc[0]
     self.branch = self.store_info['branch_name'].iloc[0]
     self.additional_attribute_4 = self.store_info['additional_attribute_4'].iloc[0]
     self.additional_attribute_7 = self.store_info['additional_attribute_7'].iloc[0]
     self.body_armor_delivered = self.get_body_armor_delivery_status()
     self.convert_base_size_and_multi_pack()
Example #3
 def __init__(self, data_provider, output):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.common = Common(self.data_provider)
     self.old_common = oldCommon(self.data_provider)
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.session_fk = self.data_provider.session_id
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.survey = Survey(self.data_provider)
     self.block = Block(self.data_provider)
     self.general_toolbox = GENERALToolBox(self.data_provider)
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.template_path = self.get_relevant_template()
     self.gap_data = self.get_gap_data()
     self.kpi_weights = parse_template(self.template_path,
                                       Consts.KPI_WEIGHT,
                                       lower_headers_row_index=0)
     self.template_data = self.parse_template_data()
     self.kpis_gaps = list()
     self.passed_availability = list()
     self.kpi_static_data = self.old_common.get_kpi_static_data()
     self.own_manufacturer_fk = int(
         self.data_provider.own_manufacturer.param_value.values[0])
     self.parser = Parser
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
Example #4
 def __init__(self, data_provider, output):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.united_scenes = self.get_united_scenes() # we don't need to check scenes without United products
     self.survey = Survey(self.data_provider, self.output)
     self.sos = SOS(self.data_provider, self.output)
     self.templates = {}
     self.common_db = Common(self.data_provider, CMA_COMPLIANCE)
     self.region = self.store_info['region_name'].iloc[0]
     self.store_type = self.store_info['store_type'].iloc[0]
     self.program = self.store_info['additional_attribute_14'].iloc[0]
     self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
     if self.store_type in STORE_TYPES:
         self.store_type = STORE_TYPES[self.store_type]
     self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
     self.kpi_static_data = self.common_db.get_kpi_static_data()
     self.total_score = 0
     for sheet in Const.SHEETS_CMA:
         self.templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheetname=sheet).fillna('')
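
Several of these examples load every template sheet into a dict up front; a minimal sketch of that pattern with a hypothetical path and sheet list. Note that the sheetname= keyword used in the examples belongs to older pandas; pandas 0.21+ renamed it to sheet_name=, while passing the sheet name positionally (as Example #6 does) works in both:

import pandas as pd

TEMPLATE_PATH = 'cma_template.xlsx'     # hypothetical
SHEETS_CMA = ['KPIs', 'SOS', 'Survey']  # hypothetical sheet list

templates = {}
for sheet in SHEETS_CMA:
    templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheet).fillna('')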
Example #5
 def __init__(self, data_provider, output, calculation_type):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.scif = self.scif[self.scif['product_type'] != "Irrelevant"]
     self.united_scenes = self.get_united_scenes(
     )  # we don't need to check scenes without United products
     self.survey = Survey(self.data_provider, self.output)
     self.templates = {}
     self.calculation_type = calculation_type
     if self.calculation_type == Const.SOVI:
         self.TEMPLATE_PATH = TEMPLATE_PATH
         self.RED_SCORE = Const.RED_SCORE
         self.RED_SCORE_INTEG = Const.RED_SCORE_INTEG
         for sheet in Const.SHEETS:
             self.templates[sheet] = pd.read_excel(
                 self.TEMPLATE_PATH, sheetname=sheet).fillna('')
         self.converters = self.templates[Const.CONVERTERS]
     else:
         self.TEMPLATE_PATH = SURVEY_TEMPLATE_PATH
         self.RED_SCORE = Const.MANUAL_RED_SCORE
         self.RED_SCORE_INTEG = Const.MANUAL_RED_SCORE_INTEG
         for sheet in Const.SHEETS_MANUAL:
             self.templates[sheet] = pd.read_excel(
                 self.TEMPLATE_PATH, sheetname=sheet).fillna('')
     self.common_db_integ = Common(self.data_provider, self.RED_SCORE_INTEG)
     self.kpi_static_data_integ = self.common_db_integ.get_kpi_static_data()
     self.common_db = Common(self.data_provider, self.RED_SCORE)
     self.region = self.store_info['region_name'].iloc[0]
     self.store_type = self.store_info['store_type'].iloc[0]
     if self.store_type in STORE_TYPES:
         self.store_type = STORE_TYPES[self.store_type]
     self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
     self.kpi_static_data = self.common_db.get_kpi_static_data()
     main_template = self.templates[Const.KPIS]
     self.templates[Const.KPIS] = main_template[
         (main_template[Const.REGION] == self.region)
         & (main_template[Const.STORE_TYPE] == self.store_type)]
     self.scene_calculator = CCBOTTLERSUSSceneRedToolBox(
         data_provider, output, self.templates, self)
     self.scenes_results = pd.DataFrame(columns=Const.COLUMNS_OF_SCENE)
     self.session_results = pd.DataFrame(columns=Const.COLUMNS_OF_SESSION)
     self.all_results = pd.DataFrame(columns=Const.COLUMNS_OF_SCENE)
     self.used_scenes = []
     self.red_score = 0
     self.weight_factor = self.get_weight_factor()
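
Example #5 trims the KPIs sheet up front so every later lookup only sees rows for this store's region and store type; a toy sketch of that filter with hypothetical column names and values:

import pandas as pd

kpis = pd.DataFrame({
    'KPI Name': ['Cooler Purity', 'Front of Store', 'Back of Store'],
    'Region': ['NORTHEAST', 'NORTHEAST', 'WEST'],
    'Store Type': ['CR&LT', 'CR&LT', 'Drug'],
})

region, store_type = 'NORTHEAST', 'CR&LT'
relevant_kpis = kpis[(kpis['Region'] == region) & (kpis['Store Type'] == store_type)]
# only the two NORTHEAST / CR&LT rows survive; everything downstream iterates this subset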
Example #6
    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.session_id = self.data_provider.session_id
        self.products = self.data_provider[Data.PRODUCTS]
        self.common_v2 = Common_V2(self.data_provider)
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.tools = GENERALToolBox(self.data_provider)
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.survey = Survey(self.data_provider, self.output)
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common_v2.get_kpi_static_data()
        self.kpi_results_queries = []
        self.kpi_results_new_tables_queries = []
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.oos_policies = self.get_policies()
        self.result_dict = {}
        self.hierarchy_dict = {}
        self.sos_target_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                              Const.SOS_TARGET).fillna("")
        self.survey_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                          Const.SURVEY).fillna("")
        self.survey_combo_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                                Const.SURVEY_COMBO).fillna("")
        self.oos_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                       Const.OOS_KPI).fillna("")

        try:
            self.store_type_filter = self.store_info['store_type'].values[
                0].strip()
        except:
            Log.warning(
                "There is no store type in the db for store_fk: {}".format(
                    str(self.store_id)))
        try:
            self.region_name_filter = self.store_info['region_name'].values[
                0].strip()
            self.region_fk = self.store_info['region_fk'].values[0]
        except:
            Log.warning("There is no region in the db for store_fk: {}".format(
                str(self.store_id)))
        try:
            self.att6_filter = self.store_info[
                'additional_attribute_6'].values[0].strip()
        except:
            Log.warning(
                "There is no additional attribute 6 in the db for store_fk: {}"
                .format(str(self.store_id)))
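
Example #6 repeats the same try/except for every store_info attribute; a hypothetical helper (not part of the original toolbox) that folds that guarded lookup into one place, written in the same Python 2 style as the examples:

def get_store_attribute(store_info, column, default=None):
    """Return store_info[column] for the first row (stripped when it is text),
    or default when the column is missing, empty, or None."""
    try:
        value = store_info[column].values[0]
        return value.strip() if isinstance(value, basestring) else value
    except (KeyError, IndexError, AttributeError):
        return default

# usage with the column names from Example #6:
# self.store_type_filter = get_store_attribute(self.store_info, 'store_type')
# self.region_name_filter = get_store_attribute(self.store_info, 'region_name')
# self.att6_filter = get_store_attribute(self.store_info, 'additional_attribute_6')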
Example #7
 def __init__(self, data_provider, output, templates, store_attr):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.survey = Survey(self.data_provider, self.output)
     self.templates = templates
     if Const.CONVERTERS in self.templates:
         self.converters = self.templates[Const.CONVERTERS]
         self.exclusion_sheet = self.templates[Const.SKU_EXCLUSION]
     self.store_attr = store_attr
Example #8
 def __init__(self, data_provider, output, common_db2):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.manufacturer_fk = 1
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.united_scenes = self.get_united_scenes(
     )  # we don't need to check scenes without United products
     self.survey = Survey(self.data_provider, self.output)
     self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
     self.sos = SOS(self.data_provider, self.output)
     self.templates = {}
     self.common_db = Common(self.data_provider, SUB_PROJECT)
     self.common_db2 = common_db2
     self.result_values = self.ps_data_provider.get_result_values()
     self.region = self.store_info['region_name'].iloc[0]
     self.store_type = self.store_info['store_type'].iloc[0]
     self.program = self.store_info['additional_attribute_14'].iloc[0]
     self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
     if self.store_type in STORE_TYPES:
         self.store_type = STORE_TYPES[self.store_type]
     self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
     self.kpi_static_data = self.common_db.get_kpi_static_data()
     self.ignore_stacking = False
     self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
     self.total_score = 0
     self.total_count = 0
     for sheet in Const.SHEETS_CMA:
         self.templates[sheet] = pd.read_excel(TEMPLATE_PATH,
                                               sheetname=sheet).fillna('')
     self.tools = Shared(self.data_provider, self.output)
Example #9
 def __init__(self, data_provider, output, common):
     GlobalSessionToolBox.__init__(self, data_provider, output, common)
     self.ps_data_provider = PsDataProvider(data_provider)
     self.own_manufacturer = int(
         self.data_provider.own_manufacturer.param_value.values[0])
     self.templates = {}
     self.parse_template()
     self.store_type = self.store_info['store_type'].iloc[0]
     self.survey = Survey(self.data_provider,
                          output,
                          ps_data_provider=self.ps_data_provider,
                          common=self.common)
     self.att2 = self.store_info['additional_attribute_2'].iloc[0]
     self.products = self.data_provider[Data.PRODUCTS]
     self.survey = self.data_provider.survey_responses
     self.results_df = pd.DataFrame(columns=[
         'kpi_name', 'kpi_fk', 'numerator_id', 'numerator_result',
         'denominator_id', 'denominator_result', 'result', 'score',
         'identifier_result', 'identifier_parent', 'should_enter'
     ])
Example #10
 def __init__(self, data_provider, output, common_db2):
     self.output = output
     self.data_provider = data_provider
     self.common_db = Common(self.data_provider, SUB_PROJECT)
     self.common_db2 = common_db2
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.scif = self.scif[~(self.scif['product_type'] == 'Irrelevant')]
     self.sw_scenes = self.get_relevant_scenes(
     )  # we don't need to check scenes without United products
     self.survey = Survey(self.data_provider, self.output)
     self.sos = SOS(self.data_provider, self.output)
     self.results = self.data_provider[Data.SCENE_KPI_RESULTS]
     self.region = self.store_info['region_name'].iloc[0]
     self.store_type = self.store_info['store_type'].iloc[0]
     self.program = self.store_info['additional_attribute_3'].iloc[0]
     self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
     if self.store_type in STORE_TYPES:
         self.store_type = STORE_TYPES[self.store_type]
     self.store_attr = self.store_info['additional_attribute_3'].iloc[0]
     # self.kpi_static_data = self.common_db.get_kpi_static_data()
     self.ignore_stacking = False
     self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
     self.sub_scores = defaultdict(int)
     self.sub_totals = defaultdict(int)
     self.templates = self.get_template()
     self.hierarchy = self.templates[Const.KPIS].set_index(
         Const.KPI_NAME)[Const.PARENT].to_dict()
     self.templates = self.get_relevant_template(self.templates)
     self.children = self.templates[Const.KPIS][Const.KPI_NAME]
     self.tools = Shared(self.data_provider, self.output)
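
Example #10 derives a KPI hierarchy from the KPIs sheet; a toy sketch (hypothetical KPI names) of what that parent dict looks like and how it can be walked:

import pandas as pd

kpis = pd.DataFrame({
    'KPI Name': ['RED Score', 'Cooler', 'Cooler Purity'],
    'Parent':   ['', 'RED Score', 'Cooler'],
})

hierarchy = kpis.set_index('KPI Name')['Parent'].to_dict()
# {'RED Score': '', 'Cooler': 'RED Score', 'Cooler Purity': 'Cooler'}

def path_to_root(kpi):
    # walk the parent chain until a KPI with an empty parent is reached
    chain = [kpi]
    while hierarchy.get(kpi):
        kpi = hierarchy[kpi]
        chain.append(kpi)
    return chain

# path_to_root('Cooler Purity') -> ['Cooler Purity', 'Cooler', 'RED Score']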
Example #11
 def __init__(self, data_provider, output, common_v2):
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.scif = self.scif[~(self.scif['product_type'] == 'Irrelevant')]
     self.sw_scenes = self.get_sw_scenes(
     )  # we don't need to check scenes without United products
     self.survey = Survey(self.data_provider, self.output)
     self.sos = SOS(self.data_provider, self.output)
     self.templates = {}
     self.common_db = Common(self.data_provider, CMA_COMPLIANCE)
     self.common_db2 = common_v2
     self.common_scene = CommonV2(self.data_provider)
     self.region = self.store_info['region_name'].iloc[0]
     self.store_type = self.store_info['store_type'].iloc[0]
     self.program = self.store_info['additional_attribute_3'].iloc[0]
     self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
     if self.store_type in STORE_TYPES:
         self.store_type = STORE_TYPES[self.store_type]
     self.store_attr = self.store_info['additional_attribute_3'].iloc[0]
     self.kpi_static_data = self.common_db.get_kpi_static_data()
     self.total_score = 0
     self.sub_scores = defaultdict(int)
     self.sub_totals = defaultdict(int)
     self.ignore_stacking = False
     self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
     for sheet in Const.SHEETS_CMA:
         self.templates[sheet] = pd.read_excel(TEMPLATE_PATH,
                                               sheetname=sheet).fillna('')
Example #12
class CCBOTTLERSUSREDToolBox:
    def __init__(self, data_provider, output, calculation_type):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.scif = self.scif[self.scif['product_type'] != "Irrelevant"]
        self.united_scenes = self.get_united_scenes(
        )  # we don't need to check scenes without United products
        self.survey = Survey(self.data_provider, self.output)
        self.templates = {}
        self.calculation_type = calculation_type
        if self.calculation_type == Const.SOVI:
            self.TEMPLATE_PATH = TEMPLATE_PATH
            self.RED_SCORE = Const.RED_SCORE
            self.RED_SCORE_INTEG = Const.RED_SCORE_INTEG
            for sheet in Const.SHEETS:
                self.templates[sheet] = pd.read_excel(
                    self.TEMPLATE_PATH, sheetname=sheet).fillna('')
            self.converters = self.templates[Const.CONVERTERS]
        else:
            self.TEMPLATE_PATH = SURVEY_TEMPLATE_PATH
            self.RED_SCORE = Const.MANUAL_RED_SCORE
            self.RED_SCORE_INTEG = Const.MANUAL_RED_SCORE_INTEG
            for sheet in Const.SHEETS_MANUAL:
                self.templates[sheet] = pd.read_excel(
                    self.TEMPLATE_PATH, sheetname=sheet).fillna('')
        self.common_db_integ = Common(self.data_provider, self.RED_SCORE_INTEG)
        self.kpi_static_data_integ = self.common_db_integ.get_kpi_static_data()
        self.common_db = Common(self.data_provider, self.RED_SCORE)
        self.region = self.store_info['region_name'].iloc[0]
        self.store_type = self.store_info['store_type'].iloc[0]
        if self.store_type in STORE_TYPES:
            self.store_type = STORE_TYPES[self.store_type]
        self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
        self.kpi_static_data = self.common_db.get_kpi_static_data()
        main_template = self.templates[Const.KPIS]
        self.templates[Const.KPIS] = main_template[
            (main_template[Const.REGION] == self.region)
            & (main_template[Const.STORE_TYPE] == self.store_type)]
        self.scene_calculator = CCBOTTLERSUSSceneRedToolBox(
            data_provider, output, self.templates, self)
        self.scenes_results = pd.DataFrame(columns=Const.COLUMNS_OF_SCENE)
        self.session_results = pd.DataFrame(columns=Const.COLUMNS_OF_SESSION)
        self.all_results = pd.DataFrame(columns=Const.COLUMNS_OF_SCENE)
        self.used_scenes = []
        self.red_score = 0
        self.weight_factor = self.get_weight_factor()

    # main functions:

    def main_calculation(self, *args, **kwargs):
        """
            Collects all the scene results from the scene KPI calculator, then calculates every session-level KPI,
            and finally calls the result-choosing logic to pick a scene for every KPI and write the results to the DB.
        """
        main_template = self.templates[Const.KPIS]
        if self.calculation_type == Const.SOVI:
            self.scenes_results = self.scene_calculator.main_calculation()
            session_template = main_template[main_template[Const.SESSION_LEVEL]
                                             == Const.V]
            for i, main_line in session_template.iterrows():
                self.calculate_main_kpi(main_line)
        else:
            for i, main_line in main_template.iterrows():
                self.calculate_manual_kpi(main_line)
        self.choose_and_write_results()

    def calculate_main_kpi(self, main_line):
        """
        Takes a line from the main sheet, routes it to the matching calculation function, and checks all of the
        KPI lines with the same name in the relevant sheet.
        :param main_line: series from the template of the main_sheet.
        """
        kpi_name = main_line[Const.KPI_NAME]
        kpi_type = main_line[Const.SHEET]
        relevant_scif = self.scif[
            (self.scif['scene_id'].isin(self.united_scenes))
            & (self.scif['product_type'] != 'Empty')]
        scene_types = self.does_exist(main_line, Const.SCENE_TYPE)
        if scene_types:
            relevant_scif = relevant_scif[relevant_scif['template_name'].isin(
                scene_types)]
        scene_groups = self.does_exist(main_line, Const.SCENE_TYPE_GROUP)
        if scene_groups:
            relevant_scif = relevant_scif[relevant_scif['template_group'].isin(
                scene_groups)]
        if kpi_type == Const.SCENE_AVAILABILITY:
            result = False if relevant_scif.empty else True
        else:
            isnt_dp = True if self.store_attr != Const.DP and main_line[
                Const.STORE_ATTRIBUTE] == Const.DP else False
            relevant_template = self.templates[kpi_type]
            relevant_template = relevant_template[relevant_template[
                Const.KPI_NAME] == kpi_name]
            target = len(relevant_template) if main_line[Const.GROUP_TARGET] == Const.ALL \
                else main_line[Const.GROUP_TARGET]
            if main_line[Const.SAME_PACK] == Const.V:
                result = self.calculate_availability_with_same_pack(
                    relevant_template, relevant_scif, isnt_dp, target)
            else:
                function = self.get_kpi_function(kpi_type)
                passed_counter = 0
                for i, kpi_line in relevant_template.iterrows():
                    answer = function(kpi_line, relevant_scif, isnt_dp)
                    if answer:
                        passed_counter += 1
                result = passed_counter >= target
        self.write_to_session_level(kpi_name=kpi_name, result=result)

    def calculate_manual_kpi(self, main_line):
        """
        Takes a line from the main sheet and checks all of the survey lines with the same name in the survey sheet.
        :param main_line: series from the template of the main_sheet.
        """
        kpi_name = main_line[Const.KPI_NAME]
        relevant_template = self.templates[Const.SURVEY]
        relevant_template = relevant_template[relevant_template[Const.KPI_NAME]
                                              == kpi_name]
        target = len(relevant_template) if main_line[Const.GROUP_TARGET] == Const.ALL \
            else main_line[Const.GROUP_TARGET]
        passed_counter = 0
        for i, kpi_line in relevant_template.iterrows():
            answer = self.calculate_survey_specific(kpi_line)
            if answer:
                passed_counter += 1
        result = passed_counter >= target
        self.write_to_session_level(kpi_name=kpi_name, result=result)

    # write in DF:

    def write_to_session_level(self, kpi_name, result=0):
        """
        Writes a result in the DF
        :param kpi_name: string
        :param result: boolean
        """
        result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result * 1}
        self.session_results = self.session_results.append(result_dict,
                                                           ignore_index=True)

    def write_to_all_levels(self,
                            kpi_name,
                            result,
                            display_text,
                            weight,
                            scene_fk=None,
                            reuse_scene=False):
        """
        Writes the final result to the "all results" DF, adds the score to the red score and writes the KPI to the DB
        :param kpi_name: str
        :param result: int
        :param display_text: str
        :param weight: int/float
        :param scene_fk: for the scene's kpi
        :param reuse_scene: whether this KPI may reuse scenes that were already used by other KPIs
        """
        score = self.get_score(weight) * (result > 0)
        self.red_score += score
        result_dict = {
            Const.KPI_NAME: kpi_name,
            Const.RESULT: result,
            Const.SCORE: score
        }
        if scene_fk:
            result_dict[Const.SCENE_FK] = scene_fk
            if not reuse_scene:
                self.used_scenes.append(scene_fk)
        self.all_results = self.all_results.append(result_dict,
                                                   ignore_index=True)
        self.write_to_db(kpi_name, score, display_text=display_text)

    # survey:

    def calculate_survey_specific(self,
                                  kpi_line,
                                  relevant_scif=None,
                                  isnt_dp=None):
        """
        checks whether the survey question received one of the accepted answers
        :param kpi_line: line from the survey sheet
        :param relevant_scif:
        :param isnt_dp:
        :return: True or False - if the question gets the needed answer
        """
        question = kpi_line[Const.Q_TEXT]
        if not question:
            question_id = kpi_line[Const.Q_ID]
            if question_id == "":
                Log.warning(
                    "The template has a survey question without ID or text")
                return False
            question = ('question_fk', int(question_id))
        answers = kpi_line[Const.ACCEPTED_ANSWER].split(',')
        min_answer = None if kpi_line[Const.REQUIRED_ANSWER] == '' else True
        for answer in answers:
            if self.survey.check_survey_answer(survey_text=question,
                                               target_answer=answer,
                                               min_required_answer=min_answer):
                return True
        return False

    # availability:

    def calculate_availability_with_same_pack(self, relevant_template,
                                              relevant_scif, isnt_dp, target):
        """
        checks that, for every (size, number of sub-packages) combination found in the filtered scif, at least
        the target number of availability lines pass on the scif restricted to that combination.
        :param relevant_template: all the match lines from the availability sheet.
        :param relevant_scif: filtered scif
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate
        DP lines
        :param target: how many lines should pass
        :return: boolean
        """
        relevant_scif = relevant_scif.fillna("NAN")
        # only items categorized as SSD should be evaluated in this calculation; see PROS-6342
        relevant_scif = relevant_scif[relevant_scif['att4'] == 'SSD']
        if relevant_scif.empty:
            return False
        sizes = relevant_scif['size'].tolist()
        sub_packages_nums = relevant_scif['number_of_sub_packages'].tolist()
        packages = set(zip(sizes, sub_packages_nums))
        for package in packages:
            passed_counter = 0
            filtered_scif = relevant_scif[
                (relevant_scif['size'] == package[0])
                & (relevant_scif['number_of_sub_packages'] == package[1])]
            for i, kpi_line in relevant_template.iterrows():
                answer = self.calculate_availability(kpi_line, filtered_scif,
                                                     isnt_dp)
                if answer:
                    passed_counter += 1
            if passed_counter < target:
                return False
        return True

    def calculate_availability(self, kpi_line, relevant_scif, isnt_dp):
        """
        checks if a single availability line passes the KPI: the scif filtered by the line's attributes must
        contain at least the target number of items with facings.
        :param relevant_scif: filtered scif
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate
        DP lines
        :param kpi_line: line from the availability sheet
        :return: boolean
        """
        if isnt_dp and kpi_line[Const.MANUFACTURER] in Const.DP_MANU:
            return True
        filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif)
        target = kpi_line[Const.TARGET]
        return filtered_scif[
            filtered_scif['facings'] > 0]['facings'].count() >= target

    def filter_scif_specific(self, relevant_scif, kpi_line, name_in_template,
                             name_in_scif):
        """
        filters scif by a single column of the template
        :param relevant_scif: the current filtered scif
        :param kpi_line: line from one sheet (availability for example)
        :param name_in_template: the column name in the template
        :param name_in_scif: the column name in SCIF
        :return:
        """
        values = self.does_exist(kpi_line, name_in_template)
        if values:
            if name_in_scif in Const.NUMERIC_VALUES_TYPES:
                values = [float(x) for x in values]
            return relevant_scif[relevant_scif[name_in_scif].isin(values)]
        return relevant_scif

    def filter_scif_availability(self, kpi_line, relevant_scif):
        """
        calls filter_scif_specific for every column in the template of availability
        :param kpi_line:
        :param relevant_scif:
        :return:
        """
        names_of_columns = {
            Const.MANUFACTURER: "manufacturer_name",
            Const.BRAND: "brand_name",
            Const.TRADEMARK: "att2",
            Const.SIZE: "size",
            Const.NUM_SUB_PACKAGES: "number_of_sub_packages",
            Const.PREMIUM_SSD: "Premium SSD",
            Const.INNOVATION_BRAND: "Innovation Brand",
        }
        for name in names_of_columns:
            relevant_scif = self.filter_scif_specific(relevant_scif, kpi_line,
                                                      name,
                                                      names_of_columns[name])
        return relevant_scif

    # SOS:

    def calculate_sos(self, kpi_line, relevant_scif, isnt_dp):
        """
        calculates SOS line in the relevant scif.
        :param kpi_line: line from SOS sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator.
        :return: boolean
        """
        kpi_name = kpi_line[Const.KPI_NAME]
        if kpi_line[Const.EXCLUSION_SHEET] == Const.V:
            exclusion_sheet = self.templates[Const.SKU_EXCLUSION]
            relevant_exclusions = exclusion_sheet[exclusion_sheet[
                Const.KPI_NAME] == kpi_name]
            for i, exc_line in relevant_exclusions.iterrows():
                relevant_scif = self.exclude_scif(exc_line, relevant_scif)
        relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"]
        den_type = kpi_line[Const.DEN_TYPES_1]
        den_value = kpi_line[Const.DEN_VALUES_1]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        if kpi_line[Const.SSD_STILL] != "":
            relevant_scif = self.filter_by_type_value(
                relevant_scif, Const.SSD_STILL, kpi_line[Const.SSD_STILL])
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1]
        num_scif = self.filter_by_type_value(relevant_scif, num_type,
                                             num_value)
        if isnt_dp:
            num_scif = num_scif[~(
                num_scif['manufacturer_name'].isin(Const.DP_MANU))]
        target = float(kpi_line[Const.TARGET]) / 100
        percentage = num_scif['facings'].sum() / relevant_scif['facings'].sum() if relevant_scif['facings'].sum() > 0 \
            else 0
        return percentage >= target
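
    # Worked example (hypothetical numbers): a target cell of 50 becomes 0.50; with 30
    # numerator facings out of 40 relevant facings the share is 0.75 >= 0.50, so the line
    # passes. If the relevant scif has no facings at all, the share is treated as 0 and
    # the line fails.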

    # SOS majority:

    def calculate_sos_maj(self, kpi_line, relevant_scif, isnt_dp):
        """
        calculates SOS majority line in the relevant scif. Filters the denominator and sends the line to the
        match function (majority or dominant)
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator (and the denominator of the dominant part).
        :return: boolean
        """
        kpi_name = kpi_line[Const.KPI_NAME]
        if kpi_line[Const.EXCLUSION_SHEET] == Const.V:
            exclusion_sheet = self.templates[Const.SKU_EXCLUSION]
            relevant_exclusions = exclusion_sheet[exclusion_sheet[
                Const.KPI_NAME] == kpi_name]
            for i, exc_line in relevant_exclusions.iterrows():
                relevant_scif = self.exclude_scif(exc_line, relevant_scif)
        relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"]
        den_type = kpi_line[Const.DEN_TYPES_1]
        den_value = kpi_line[Const.DEN_VALUES_1]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        den_type = kpi_line[Const.DEN_TYPES_2]
        den_value = kpi_line[Const.DEN_VALUES_2]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        if kpi_line[Const.MAJ_DOM] == Const.MAJOR:
            answer = self.calculate_majority_part(kpi_line, relevant_scif,
                                                  isnt_dp)
        elif kpi_line[Const.MAJ_DOM] == Const.DOMINANT:
            answer = self.calculate_dominant_part(kpi_line, relevant_scif,
                                                  isnt_dp)
        else:
            Log.warning("SOS majority does not know '{}' part".format(
                kpi_line[Const.MAJ_DOM]))
            answer = False
        return answer

    def calculate_majority_part(self, kpi_line, relevant_scif, isnt_dp):
        """
        filters the numerator and checks if the SOS is bigger than 50%.
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator.
        :return: boolean
        """
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1]
        num_scif = self.filter_by_type_value(relevant_scif, num_type,
                                             num_value)
        num_type = kpi_line[Const.NUM_TYPES_2]
        num_value = kpi_line[Const.NUM_VALUES_2]
        num_scif = self.filter_by_type_value(num_scif, num_type, num_value)
        if num_scif.empty:
            return None
        if isnt_dp:
            num_scif = num_scif[~(
                num_scif['manufacturer_name'].isin(Const.DP_MANU))]
        target = Const.MAJORITY_TARGET
        return num_scif['facings'].sum() / relevant_scif['facings'].sum(
        ) >= target

    def calculate_dominant_part(self, kpi_line, relevant_scif, isnt_dp):
        """
        filters the numerator and checks if the given value in the given type is the one with the most facings.
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out.
        :return: boolean
        """
        type_name = self.get_column_name(kpi_line[Const.NUM_TYPES_1],
                                         relevant_scif)
        values = str(kpi_line[Const.NUM_VALUES_1]).split(', ')
        if isnt_dp:
            relevant_scif = relevant_scif[~(
                relevant_scif['manufacturer_name'].isin(Const.DP_MANU))]
            if kpi_line[Const.ADD_IF_NOT_DP] != "":
                values_to_add = str(kpi_line[Const.ADD_IF_NOT_DP]).split(', ')
                values = values + values_to_add
        if type_name in Const.NUMERIC_VALUES_TYPES:
            values = [float(x) for x in values]
        max_facings, needed_one = 0, 0
        values_type = relevant_scif[type_name].unique().tolist()
        if None in values_type:
            values_type.remove(None)
            current_sum = relevant_scif[
                relevant_scif[type_name].isnull()]['facings'].sum()
            if current_sum > max_facings:
                max_facings = current_sum
        for value in values_type:
            current_sum = relevant_scif[relevant_scif[type_name] ==
                                        value]['facings'].sum()
            if current_sum > max_facings:
                max_facings = current_sum
            if value in values:
                needed_one += current_sum
        return needed_one >= max_facings
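
    # Worked example (hypothetical facings): for a brand-type line with values ['Coke'],
    # facings of Coke=10, Sprite=8, Fanta=3 give max_facings=10 and needed_one=10, so the
    # KPI passes (the chosen values tie or beat every other value). If Sprite had 12
    # facings, max_facings would be 12 while needed_one stays 10, and the KPI fails.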

    # helpers:

    def get_column_name(self, field_name, df):
        """
        checks what the real field name in the DataFrame is (if it exists in the DF or in the "converters" sheet).
        :param field_name: str
        :param df: scif/products
        :return: real column name (if exists)
        """
        if field_name in df.columns:
            return field_name
        if field_name.upper() in self.converters[
                Const.NAME_IN_TEMP].str.upper().tolist():
            field_name = self.converters[self.converters[
                Const.NAME_IN_TEMP].str.upper() == field_name.upper()][
                    Const.NAME_IN_DB].iloc[0]
            return field_name
        return None

    def filter_by_type_value(self, relevant_scif, type_name, value):
        """
        filters scif with the type and value
        :param relevant_scif: current filtered scif
        :param type_name: str (from the template)
        :param value: str
        :return: new scif
        """
        if type_name == "":
            return relevant_scif
        values = value.split(', ')
        new_type_name = self.get_column_name(type_name, relevant_scif)
        if not new_type_name:
            Log.warning("There is no field '{}'".format(type_name))
            return relevant_scif
        if new_type_name in Const.NUMERIC_VALUES_TYPES:
            values = [float(x) for x in values]
        return relevant_scif[relevant_scif[new_type_name].isin(values)]

    @staticmethod
    def exclude_scif(exclude_line, relevant_scif):
        """
        filters products out of the scif
        :param exclude_line: line from the exclusion sheet
        :param relevant_scif: current filtered scif
        :return: new scif
        """
        if exclude_line[Const.PRODUCT_EAN] != "":
            exclude_products = exclude_line[Const.PRODUCT_EAN].split(', ')
            relevant_scif = relevant_scif[~(
                relevant_scif['product_ean_code'].isin(exclude_products))]
        if exclude_line[Const.BRAND] != "":
            exclude_brands = exclude_line[Const.BRAND].split(', ')
            relevant_scif = relevant_scif[~(
                relevant_scif['brand_name'].isin(exclude_brands))]
        return relevant_scif

    @staticmethod
    def does_exist(kpi_line, column_name):
        """
        checks if kpi_line has values in this column, and if it does - returns a list of these values
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return cell.split(", ")
        return None
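
    # Illustrative behaviour with hypothetical template cells (string cells are split on
    # a comma followed by a space, numeric cells are wrapped in a list, empty cells give None):
    #   does_exist({'Scene Type': 'Cooler, Shelf'}, 'Scene Type')  ->  ['Cooler', 'Shelf']
    #   does_exist({'Group Target': 2}, 'Group Target')            ->  [2]
    #   does_exist({'Group Target': ''}, 'Group Target')           ->  None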

    def get_kpi_function(self, kpi_type):
        """
        maps every KPI type to its calculation function
        :param kpi_type: value from "sheet" column in the main sheet
        :return: function
        """
        if kpi_type == Const.SURVEY:
            return self.calculate_survey_specific
        elif kpi_type == Const.AVAILABILITY:
            return self.calculate_availability
        elif kpi_type == Const.SOS:
            return self.calculate_sos
        elif kpi_type == Const.SOS_MAJOR:
            return self.calculate_sos_maj
        else:
            Log.warning(
                "The value '{}' in column sheet in the template is not recognized"
                .format(kpi_type))
            return None

    def choose_and_write_results(self):
        """
        writes all the KPIs to the DB: first the session-level ones, then the scene-level ones, and finally the ones
        that depend on previous KPIs. At the end it writes the red score.
        """
        # self.scenes_results.to_csv('results/{}/scene {}.csv'.format(self.calculation_type, self.session_uid))####
        # self.session_results.to_csv('results/{}/session {}.csv'.format(self.calculation_type, self.session_uid))####
        main_template = self.templates[Const.KPIS]
        self.write_session_kpis(main_template)
        if self.calculation_type == Const.SOVI:
            self.write_scene_kpis(main_template)
        self.write_condition_kpis(main_template)
        self.write_missings(main_template)
        self.write_to_db(self.RED_SCORE, self.red_score)
        # result_dict = {Const.KPI_NAME: 'RED SCORE', Const.SCORE: self.red_score}####
        # self.all_results = self.all_results.append(result_dict, ignore_index=True)####
        # self.all_results.to_csv('results/{}/{}.csv'.format(self.calculation_type, self.session_uid))####

    def write_missings(self, main_template):
        """
        writes 0 for every KPI that did not get a score
        :param main_template:
        """
        for i, main_line in main_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            if not self.all_results[self.all_results[Const.KPI_NAME] ==
                                    kpi_name].empty:
                continue
            result = 0
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            self.write_to_all_levels(kpi_name, result, display_text, weight)

    def write_session_kpis(self, main_template):
        """
        iterates all the session's KPIs and saves them
        :param main_template: main_sheet.
        """
        session_template = main_template[main_template[Const.CONDITION] == ""]
        if self.calculation_type == Const.SOVI:
            session_template = session_template[session_template[
                Const.SESSION_LEVEL] == Const.V]
        for i, main_line in session_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            result = self.session_results[self.session_results[Const.KPI_NAME]
                                          == kpi_name]
            if result.empty:
                continue
            result = result.iloc[0][Const.RESULT]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            self.write_to_all_levels(kpi_name, result, display_text, weight)

    def write_incremental_kpis(self, scene_template):
        """
        lets the incremental KPIs choose their scenes (if they passed).
        If a KPI passed in several scenes, the scene with the highest result is chosen.
        :param scene_template: filtered main_sheet
        :return: the new template (without the KPI written already)
        """
        incremental_template = scene_template[
            scene_template[Const.INCREMENTAL] != ""]
        while not incremental_template.empty:
            for i, main_line in incremental_template.iterrows():
                kpi_name = main_line[Const.KPI_NAME]
                reuse_scene = main_line[Const.REUSE_SCENE] == Const.V
                kpi_results = self.scenes_results[self.scenes_results[
                    Const.KPI_NAME] == kpi_name]
                if not reuse_scene:
                    kpi_results = kpi_results[~(
                        kpi_results[Const.SCENE_FK].isin(self.used_scenes))]
                true_results = kpi_results[kpi_results[Const.RESULT] > 0]
                increments = main_line[Const.INCREMENTAL]
                if ', ' in increments:
                    first_kpi = increments.split(', ')[0]
                    others = increments.replace('{}, '.format(first_kpi), '')
                    scene_template.loc[scene_template[Const.KPI_NAME] ==
                                       first_kpi, Const.INCREMENTAL] = others
                if true_results.empty:
                    scene_template.loc[scene_template[Const.KPI_NAME] ==
                                       kpi_name, Const.INCREMENTAL] = ""
                else:
                    true_results = true_results.sort_values(by=Const.RESULT,
                                                            ascending=False)
                    display_text = main_line[Const.DISPLAY_TEXT]
                    weight = main_line[Const.WEIGHT]
                    scene_fk = true_results.iloc[0][Const.SCENE_FK]
                    self.write_to_all_levels(
                        kpi_name,
                        true_results.iloc[0][Const.RESULT],
                        display_text,
                        weight,
                        scene_fk=scene_fk,
                        reuse_scene=reuse_scene)
                    scene_template = scene_template[~(
                        scene_template[Const.KPI_NAME] == kpi_name)]
            incremental_template = scene_template[
                scene_template[Const.INCREMENTAL] != ""]
        return scene_template

    def write_regular_scene_kpis(self, scene_template):
        """
        lets the regular KPIs choose their scenes (if they passed).
        Like in the incremental part, if a KPI passed in several scenes, the scene with the highest result is chosen.
        :param scene_template: filtered main_sheet (only scene KPIs, and without the passed incremental)
        :return: the new template (without the KPI written already)
        """
        for i, main_line in scene_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            reuse_scene = main_line[Const.REUSE_SCENE] == Const.V
            kpi_results = self.scenes_results[self.scenes_results[
                Const.KPI_NAME] == kpi_name]
            if not reuse_scene:
                kpi_results = kpi_results[~(
                    kpi_results[Const.SCENE_FK].isin(self.used_scenes))]
            true_results = kpi_results[kpi_results[Const.RESULT] > 0]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            if true_results.empty:
                continue
            true_results = true_results.sort_values(by=Const.RESULT,
                                                    ascending=False)
            scene_fk = true_results.iloc[0][Const.SCENE_FK]
            self.write_to_all_levels(kpi_name,
                                     true_results.iloc[0][Const.RESULT],
                                     display_text,
                                     weight,
                                     scene_fk=scene_fk,
                                     reuse_scene=reuse_scene)
            scene_template = scene_template[~(
                scene_template[Const.KPI_NAME] == kpi_name)]
        return scene_template

    def write_not_passed_scene_kpis(self, scene_template):
        """
        lets the KPIs that did not pass choose a scene.
        :param scene_template: filtered main_sheet (only scene KPIs, and without the passed KPIs)
        """
        for i, main_line in scene_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            reuse_scene = main_line[Const.REUSE_SCENE] == Const.V
            kpi_results = self.scenes_results[self.scenes_results[
                Const.KPI_NAME] == kpi_name]
            if not reuse_scene:
                kpi_results = kpi_results[~(
                    kpi_results[Const.SCENE_FK].isin(self.used_scenes))]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            if kpi_results.empty:
                continue
            scene_fk = kpi_results.iloc[0][Const.SCENE_FK]
            self.write_to_all_levels(kpi_name,
                                     0,
                                     display_text,
                                     weight,
                                     scene_fk=scene_fk,
                                     reuse_scene=reuse_scene)

    def write_scene_kpis(self, main_template):
        """
        iterates over every scene KPI that does not depend on others and chooses the scene it will take:
        1. the incremental KPIs take their scene (if they passed).
        2. the regular KPIs that passed choose their scenes.
        3. the ones that did not pass take an arbitrary scene.
        :param main_template: main_sheet.
        """
        scene_template = main_template[
            (main_template[Const.SESSION_LEVEL] != Const.V)
            & (main_template[Const.CONDITION] == "")]
        scene_template = self.write_incremental_kpis(scene_template)
        scene_template = self.write_regular_scene_kpis(scene_template)
        self.write_not_passed_scene_kpis(scene_template)

    def write_condition_kpis(self, main_template):
        """
        writes all the KPIs that depend on other KPIs, checking whether the parent KPI passed and in which scene.
        :param main_template: main_sheet
        """
        condition_template = main_template[
            main_template[Const.CONDITION] != '']
        for i, main_line in condition_template.iterrows():
            condition = main_line[Const.CONDITION]
            kpi_name = main_line[Const.KPI_NAME]
            if self.calculation_type == Const.MANUAL or main_line[
                    Const.SESSION_LEVEL] == Const.V:
                kpi_results = self.session_results[self.session_results[
                    Const.KPI_NAME] == kpi_name]
            else:
                kpi_results = self.scenes_results[self.scenes_results[
                    Const.KPI_NAME] == kpi_name]
            condition_result = self.all_results[
                (self.all_results[Const.KPI_NAME] == condition)
                & (self.all_results[Const.RESULT] > 0)]
            if condition_result.empty:
                continue
            condition_result = condition_result.iloc[0]
            condition_scene = condition_result[Const.SCENE_FK]
            if condition_scene and Const.SCENE_FK in kpi_results:
                results = kpi_results[kpi_results[Const.SCENE_FK] ==
                                      condition_scene]
            else:
                results = kpi_results
            if results.empty:
                continue
            result = results.iloc[0][Const.RESULT]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            scene_fk = results.iloc[0][
                Const.SCENE_FK] if Const.SCENE_FK in kpi_results else None
            self.write_to_all_levels(kpi_name,
                                     result,
                                     display_text,
                                     weight,
                                     scene_fk=scene_fk)
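        # Worked example (assumed KPI names): if KPI "Cooler Availability" has the condition
        # "Cooler Survey" and the survey passed on scene 12, only the "Cooler Availability"
        # scene results from scene 12 are considered; if the condition never passed, the KPI
        # is skipped entirely.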

    def get_united_scenes(self):
        return self.scif[self.scif['United Deliver'] ==
                         'Y']['scene_id'].unique().tolist()

    def get_weight_factor(self):
        sum_weights = self.templates[Const.KPIS][Const.WEIGHT].sum()
        return sum_weights / 100.0

    def get_score(self, weight):
        return weight / self.weight_factor
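        # Worked example (assumed numbers, not project data): if the template weights sum to
        # 120, weight_factor is 1.2, so a KPI with weight 12 contributes 12 / 1.2 = 10 points.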

    def write_to_db(self, kpi_name, score, display_text=''):
        """
        Writes the result to the DB.
        :param kpi_name: str
        :param score: float
        :param display_text: str
        """
        if kpi_name == self.RED_SCORE:
            self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name(
                self.RED_SCORE, 1),
                                    score=score,
                                    level=1)
            if self.common_db_integ:
                self.write_to_db_result(
                    self.common_db_integ.get_kpi_fk_by_kpi_name(
                        self.RED_SCORE_INTEG, 1),
                    score=score,
                    level=1,
                    set_type=Const.MANUAL)
        else:
            self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name(
                kpi_name, 2),
                                    score=score,
                                    level=2)
            self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name(
                kpi_name, 3),
                                    score=score,
                                    level=3,
                                    display_text=display_text)
            if self.common_db_integ:
                self.write_to_db_result(
                    self.common_db_integ.get_kpi_fk_by_kpi_name(kpi_name, 3),
                    score=score,
                    level=3,
                    display_text=kpi_name,
                    set_type=Const.MANUAL)

    def write_to_db_result(self,
                           fk,
                           level,
                           score,
                           set_type=Const.SOVI,
                           **kwargs):
        """
        This function creates the result data frame of every KPI (atomic KPI / KPI / KPI set)
        and appends the insert SQL query to the queries list, to be written to the DB later.
        """
        if kwargs:
            kwargs['score'] = score
            attributes = self.create_attributes_dict(fk=fk,
                                                     level=level,
                                                     set_type=set_type,
                                                     **kwargs)
        else:
            attributes = self.create_attributes_dict(fk=fk,
                                                     score=score,
                                                     set_type=set_type,
                                                     level=level)
        if level == self.common_db.LEVEL1:
            table = self.common_db.KPS_RESULT
        elif level == self.common_db.LEVEL2:
            table = self.common_db.KPK_RESULT
        elif level == self.common_db.LEVEL3:
            table = self.common_db.KPI_RESULT
        else:
            return
        query = insert(attributes, table)
        if set_type == Const.SOVI:
            self.common_db.kpi_results_queries.append(query)
        else:
            self.common_db_integ.kpi_results_queries.append(query)

    def create_attributes_dict(self,
                               score,
                               fk=None,
                               level=None,
                               display_text=None,
                               set_type=Const.SOVI,
                               **kwargs):
        """
        This function creates a data frame with all the attributes needed for saving in the KPI results tables.
        Alternatively, you can send a dict with all the values via kwargs.
        """
        kpi_static_data = self.kpi_static_data if set_type == Const.SOVI else self.kpi_static_data_integ
        if level == self.common_db.LEVEL1:
            if kwargs:
                kwargs['score'] = score
                values = [val for val in kwargs.values()]
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame(values, columns=col)
            else:
                kpi_set_name = kpi_static_data[kpi_static_data['kpi_set_fk'] ==
                                               fk]['kpi_set_name'].values[0]
                attributes = pd.DataFrame(
                    [(kpi_set_name, self.session_uid, self.store_id,
                      self.visit_date.isoformat(), format(score, '.2f'), fk)],
                    columns=[
                        'kps_name', 'session_uid', 'store_fk', 'visit_date',
                        'score_1', 'kpi_set_fk'
                    ])
        elif level == self.common_db.LEVEL2:
            if kwargs:
                kwargs['score'] = score
                values = [val for val in kwargs.values()]
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame(values, columns=col)
            else:
                kpi_name = kpi_static_data[kpi_static_data['kpi_fk'] ==
                                           fk]['kpi_name'].values[0].replace(
                                               "'", "\\'")
                attributes = pd.DataFrame(
                    [(self.session_uid, self.store_id,
                      self.visit_date.isoformat(), fk, kpi_name, score)],
                    columns=[
                        'session_uid', 'store_fk', 'visit_date', 'kpi_fk',
                        'kpk_name', 'score'
                    ])
        elif level == self.common_db.LEVEL3:
            if kwargs:
                kwargs['score'] = score
                values = tuple([val for val in kwargs.values()])
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame([values], columns=col)
            else:
                data = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]
                kpi_fk = data['kpi_fk'].values[0]
                kpi_set_name = kpi_static_data[kpi_static_data['atomic_kpi_fk']
                                               == fk]['kpi_set_name'].values[0]
                attributes = pd.DataFrame(
                    [(display_text, self.session_uid, kpi_set_name,
                      self.store_id, self.visit_date.isoformat(),
                      datetime.utcnow().isoformat(), score, kpi_fk, fk)],
                    columns=[
                        'display_text', 'session_uid', 'kps_name', 'store_fk',
                        'visit_date', 'calculation_time', 'score', 'kpi_fk',
                        'atomic_kpi_fk'
                    ])
        else:
            attributes = pd.DataFrame()
        return attributes.to_dict()

    def commit_results(self):
        """
        Commits the results for both sets.
        """
        self.common_db.delete_results_data_by_kpi_set()
        self.common_db.commit_results_data_without_delete()
        if self.common_db_integ:
            self.common_db_integ.delete_results_data_by_kpi_set()
            self.common_db_integ.commit_results_data_without_delete()
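
# A minimal standalone sketch (not part of the toolbox above) of the level-to-table routing
# that write_to_db_result performs: level 1 goes to the KPI-set results table, level 2 to the
# KPI results table and level 3 to the atomic-KPI results table; any other level is skipped.
# The table names below are placeholders standing in for common_db.KPS_RESULT / KPK_RESULT /
# KPI_RESULT.
def route_result_table(level):
    tables = {1: 'report.kps_results', 2: 'report.kpk_results', 3: 'report.kpi_results'}
    return tables.get(level)  # None for unsupported levels, mirroring the early return

# route_result_table(1) -> 'report.kps_results'; route_result_table(4) -> None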
Example #13
0
class CBCDAIRYILToolBox:
    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.common = Common(self.data_provider)
        self.old_common = oldCommon(self.data_provider)
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.session_fk = self.data_provider.session_id
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.survey = Survey(self.data_provider)
        self.block = Block(self.data_provider)
        self.general_toolbox = GENERALToolBox(self.data_provider)
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.template_path = self.get_relevant_template()
        self.gap_data = self.get_gap_data()
        self.kpi_weights = parse_template(self.template_path,
                                          Consts.KPI_WEIGHT,
                                          lower_headers_row_index=0)
        self.template_data = self.parse_template_data()
        self.kpis_gaps = list()
        self.passed_availability = list()
        self.kpi_static_data = self.old_common.get_kpi_static_data()
        self.own_manufacturer_fk = int(
            self.data_provider.own_manufacturer.param_value.values[0])
        self.parser = Parser
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]

    def get_relevant_template(self):
        """
        This function returns the relevant template according to the visit date.
        Because of a change that was made in the logic, there are 3 templates that match different dates.
        :return: Full template path
        """
        if self.visit_date <= datetime.date(datetime(2019, 12, 31)):
            return "{}/{}/{}".format(
                Consts.TEMPLATE_PATH, Consts.PREVIOUS_TEMPLATES,
                Consts.PROJECT_TEMPLATE_NAME_UNTIL_2019_12_31)
        else:
            return "{}/{}".format(Consts.TEMPLATE_PATH,
                                  Consts.CURRENT_TEMPLATE)

    def get_gap_data(self):
        """
        This function parses the gap data template and returns the gap priorities.
        :return: A dict with the priorities according to kpi_names. E.g: {kpi_name1: 1, kpi_name2: 2 ...}
        """
        gap_sheet = parse_template(self.template_path,
                                   Consts.KPI_GAP,
                                   lower_headers_row_index=0)
        gap_data = zip(gap_sheet[Consts.KPI_NAME], gap_sheet[Consts.ORDER])
        gap_data = {kpi_name: int(order) for kpi_name, order in gap_data}
        return gap_data

    def main_calculation(self):
        """
        This function calculates the KPI results.
        First it fetches the relevant sets (according to the store's attributes) and goes over all of the relevant
        atomic KPIs based on the project's template.
        Then it aggregates the results per KPI using the weights, and finally aggregates them at the set level.
        """
        self.calculate_hierarchy_sos()
        self.calculate_oos()
        if self.template_data.empty:
            Log.warning(Consts.EMPTY_TEMPLATE_DATA_LOG.format(self.store_id))
            return
        kpi_set, kpis = self.get_relevant_kpis_for_calculation()
        kpi_set_fk = self.common.get_kpi_fk_by_kpi_type(Consts.TOTAL_SCORE)
        old_kpi_set_fk = self.get_kpi_fk_by_kpi_name(Consts.TOTAL_SCORE, 1)
        total_set_scores = list()
        for kpi_name in kpis:
            kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name)
            old_kpi_fk = self.get_kpi_fk_by_kpi_name(kpi_name, 2)
            kpi_weight = self.get_kpi_weight(kpi_name, kpi_set)
            atomics_df = self.get_atomics_to_calculate(kpi_name)
            atomic_results = self.calculate_atomic_results(
                kpi_fk, atomics_df)  # Atomic level
            kpi_results = self.calculate_kpis_and_save_to_db(
                atomic_results, kpi_fk, kpi_weight, kpi_set_fk)  # KPI lvl
            self.old_common.old_write_to_db_result(fk=old_kpi_fk,
                                                   level=2,
                                                   score=format(
                                                       kpi_results, '.2f'))
            total_set_scores.append(kpi_results)
        kpi_set_score = self.calculate_kpis_and_save_to_db(
            total_set_scores, kpi_set_fk)  # Set level
        self.old_common.write_to_db_result(fk=old_kpi_set_fk,
                                           level=1,
                                           score=kpi_set_score)
        self.handle_gaps()

    def calculate_oos(self):
        numerator = total_facings = 0
        store_kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_type=Consts.OOS)
        sku_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.OOS_SKU)
        leading_skus_df = self.template_data[self.template_data[
            Consts.KPI_NAME].str.encode(
                "utf8") == Consts.LEADING_PRODUCTS.encode("utf8")]
        skus_ean_list = leading_skus_df[Consts.PARAMS_VALUE_1].tolist()
        skus_ean_set = set([
            ean_code.strip() for values in skus_ean_list
            for ean_code in values.split(",")
        ])
        product_fks = self.all_products[self.all_products[
            'product_ean_code'].isin(skus_ean_set)]['product_fk'].tolist()
        # sku level oos
        for sku in product_fks:
            # 2 for distributed and 1 for oos
            product_df = self.scif[self.scif['product_fk'] == sku]
            if product_df.empty:
                numerator += 1
                self.common.write_to_db_result(fk=sku_kpi_fk,
                                               numerator_id=sku,
                                               denominator_id=self.store_id,
                                               result=1,
                                               numerator_result=1,
                                               denominator_result=1,
                                               score=0,
                                               identifier_parent="OOS",
                                               should_enter=True)

        # store level oos
        denominator = len(product_fks)
        if denominator == 0:
            numerator = result = 0
        else:
            result = round(numerator / float(denominator), 4)
        self.common.write_to_db_result(fk=store_kpi_fk,
                                       numerator_id=self.own_manufacturer_fk,
                                       denominator_id=self.store_id,
                                       result=result,
                                       numerator_result=numerator,
                                       denominator_result=denominator,
                                       score=total_facings,
                                       identifier_result="OOS")

    def calculate_hierarchy_sos(self):
        store_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN)
        category_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN_CAT)
        brand_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN_CAT_BRAND)
        sku_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN_CAT_BRAND_SKU)
        sos_df = self.scif[self.scif['rlv_sos_sc'] == 1]
        # store level sos
        store_res, store_num, store_den = self.calculate_own_manufacturer_sos(
            filters={}, df=sos_df)
        self.common.write_to_db_result(fk=store_kpi_fk,
                                       numerator_id=self.own_manufacturer_fk,
                                       denominator_id=self.store_id,
                                       result=store_res,
                                       numerator_result=store_num,
                                       denominator_result=store_den,
                                       score=store_res,
                                       identifier_result="OWN_SOS")
        # category level sos
        session_categories = set(
            self.parser.filter_df(
                conditions={'manufacturer_fk': self.own_manufacturer_fk},
                data_frame_to_filter=self.scif)['category_fk'])
        for category_fk in session_categories:
            filters = {'category_fk': category_fk}
            cat_res, cat_num, cat_den = self.calculate_own_manufacturer_sos(
                filters=filters, df=sos_df)
            self.common.write_to_db_result(
                fk=category_kpi_fk,
                numerator_id=category_fk,
                denominator_id=self.store_id,
                result=cat_res,
                numerator_result=cat_num,
                denominator_result=cat_den,
                score=cat_res,
                identifier_parent="OWN_SOS",
                should_enter=True,
                identifier_result="OWN_SOS_cat_{}".format(str(category_fk)))
            # brand-category level sos
            filters['manufacturer_fk'] = self.own_manufacturer_fk
            cat_brands = set(
                self.parser.filter_df(conditions=filters,
                                      data_frame_to_filter=sos_df)['brand_fk'])
            for brand_fk in cat_brands:
                filters['brand_fk'] = brand_fk
                brand_df = self.parser.filter_df(conditions=filters,
                                                 data_frame_to_filter=sos_df)
                brand_num = brand_df['facings'].sum()
                brand_res, brand_num, cat_num = self.calculate_sos_res(
                    brand_num, cat_num)
                self.common.write_to_db_result(
                    fk=brand_kpi_fk,
                    numerator_id=brand_fk,
                    denominator_id=category_fk,
                    result=brand_res,
                    numerator_result=brand_num,
                    should_enter=True,
                    denominator_result=cat_num,
                    score=brand_res,
                    identifier_parent="OWN_SOS_cat_{}".format(
                        str(category_fk)),
                    identifier_result="OWN_SOS_cat_{}_brand_{}".format(
                        str(category_fk), str(brand_fk)))
                product_fks = set(
                    self.parser.filter_df(
                        conditions=filters,
                        data_frame_to_filter=sos_df)['product_fk'])
                for sku in product_fks:
                    filters['product_fk'] = sku
                    product_df = self.parser.filter_df(
                        conditions=filters, data_frame_to_filter=sos_df)
                    sku_facings = product_df['facings'].sum()
                    sku_result, sku_num, sku_den = self.calculate_sos_res(
                        sku_facings, brand_num)
                    self.common.write_to_db_result(
                        fk=sku_kpi_fk,
                        numerator_id=sku,
                        denominator_id=brand_fk,
                        result=sku_result,
                        numerator_result=sku_facings,
                        should_enter=True,
                        denominator_result=brand_num,
                        score=sku_facings,
                        identifier_parent="OWN_SOS_cat_{}_brand_{}".format(
                            str(category_fk), str(brand_fk)))
                del filters['product_fk']
            del filters['brand_fk']

    def calculate_own_manufacturer_sos(self, filters, df):
        filters['manufacturer_fk'] = self.own_manufacturer_fk
        numerator_df = self.parser.filter_df(conditions=filters,
                                             data_frame_to_filter=df)
        del filters['manufacturer_fk']
        denominator_df = self.parser.filter_df(conditions=filters,
                                               data_frame_to_filter=df)
        if denominator_df.empty:
            return 0, 0, 0
        denominator = denominator_df['facings'].sum()
        if numerator_df.empty:
            numerator = 0
        else:
            numerator = numerator_df['facings'].sum()
        return self.calculate_sos_res(numerator, denominator)

    @staticmethod
    def calculate_sos_res(numerator, denominator):
        if denominator == 0:
            return 0, 0, 0
        result = round(numerator / float(denominator), 3)
        return result, numerator, denominator
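        # Worked example (assumed facings): numerator=30, denominator=120 ->
        # round(30 / 120.0, 3) = 0.25, returned as (0.25, 30, 120).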

    def add_gap(self, atomic_kpi, score, atomic_weight):
        """
        If the score is not perfect, the gap is added to the gaps list.
        :param atomic_weight: The Atomic KPI's weight.
        :param score: Atomic KPI score.
        :param atomic_kpi: A Series with data about the Atomic KPI.
        """
        parent_kpi_name = atomic_kpi[Consts.KPI_NAME]
        atomic_name = atomic_kpi[Consts.KPI_ATOMIC_NAME]
        atomic_fk = self.common.get_kpi_fk_by_kpi_type(atomic_name)
        current_gap_dict = {
            Consts.ATOMIC_FK: atomic_fk,
            Consts.PRIORITY: self.gap_data[parent_kpi_name],
            Consts.SCORE: score,
            Consts.WEIGHT: atomic_weight
        }
        self.kpis_gaps.append(current_gap_dict)

    @staticmethod
    def sort_by_priority(gap_dict):
        """ This is a util function for the kpi's gaps sorting by priorities"""
        return gap_dict[Consts.PRIORITY], gap_dict[Consts.SCORE]

    def handle_gaps(self):
        """ This function takes the top 5 gaps (by priority) and saves it to the DB (pservice.custom_gaps table) """
        self.kpis_gaps.sort(key=self.sort_by_priority)
        gaps_total_score = 0
        gaps_per_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GAP_PER_ATOMIC_KPI)
        gaps_total_score_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GAPS_TOTAL_SCORE_KPI)
        for gap in self.kpis_gaps[:5]:
            current_gap_score = gap[Consts.WEIGHT] - (gap[Consts.SCORE] / 100 *
                                                      gap[Consts.WEIGHT])
            gaps_total_score += current_gap_score
            self.insert_gap_results(gaps_per_kpi_fk,
                                    current_gap_score,
                                    gap[Consts.WEIGHT],
                                    numerator_id=gap[Consts.ATOMIC_FK],
                                    parent_fk=gaps_total_score_kpi_fk)
        total_weight = sum(
            map(lambda res: res[Consts.WEIGHT], self.kpis_gaps[:5]))
        self.insert_gap_results(gaps_total_score_kpi_fk, gaps_total_score,
                                total_weight)
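        # Worked example (assumed values): a gap with weight 0.2 and score 75.0 contributes
        # 0.2 - (75.0 / 100 * 0.2) = 0.05 to gaps_total_score.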

    def insert_gap_results(self,
                           gap_kpi_fk,
                           score,
                           weight,
                           numerator_id=Consts.CBC_MANU,
                           parent_fk=None):
        """ This is a utility function that insert results to the DB for the GAP """
        should_enter = True if parent_fk else False
        score, weight = score * 100, round(weight * 100, 2)
        self.common.write_to_db_result(fk=gap_kpi_fk,
                                       numerator_id=numerator_id,
                                       numerator_result=score,
                                       denominator_id=self.store_id,
                                       denominator_result=weight,
                                       weight=weight,
                                       identifier_result=gap_kpi_fk,
                                       identifier_parent=parent_fk,
                                       result=score,
                                       score=score,
                                       should_enter=should_enter)

    def calculate_kpis_and_save_to_db(self,
                                      kpi_results,
                                      kpi_fk,
                                      parent_kpi_weight=1.0,
                                      parent_fk=None):
        """
        This method aggregates the scores by their weights and saves the results to the DB.
        :param kpi_results: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        :param kpi_fk: The relevant KPI fk.
        :param parent_kpi_weight: The parent's KPI total weight.
        :param parent_fk: The KPI set FK that the KPI "belongs" to, if it exists.
        :return: The aggregated KPI score.
        """
        should_enter = True if parent_fk else False
        ignore_weight = not should_enter  # Weights should be ignored only in the set level!
        kpi_score = self.calculate_kpi_result_by_weight(
            kpi_results, parent_kpi_weight, ignore_weights=ignore_weight)
        total_weight = round(parent_kpi_weight * 100, 2)
        target = None if parent_fk else round(80,
                                              2)  # Requested for visualization
        self.common.write_to_db_result(fk=kpi_fk,
                                       numerator_id=Consts.CBC_MANU,
                                       numerator_result=kpi_score,
                                       denominator_id=self.store_id,
                                       denominator_result=total_weight,
                                       target=target,
                                       identifier_result=kpi_fk,
                                       identifier_parent=parent_fk,
                                       should_enter=should_enter,
                                       weight=total_weight,
                                       result=kpi_score,
                                       score=kpi_score)

        if not parent_fk:  # required only for writing the set score into another KPI needed for the dashboard
            kpi_fk = self.common.get_kpi_fk_by_kpi_type(
                Consts.TOTAL_SCORE_FOR_DASHBOARD)
            self.common.write_to_db_result(fk=kpi_fk,
                                           numerator_id=Consts.CBC_MANU,
                                           numerator_result=kpi_score,
                                           denominator_id=self.store_id,
                                           denominator_result=total_weight,
                                           target=target,
                                           identifier_result=kpi_fk,
                                           identifier_parent=parent_fk,
                                           should_enter=should_enter,
                                           weight=total_weight,
                                           result=kpi_score,
                                           score=kpi_score)

        return kpi_score

    def calculate_kpi_result_by_weight(self,
                                       kpi_results,
                                       parent_kpi_weight,
                                       ignore_weights=False):
        """
        This function aggregates the KPI results by scores and weights.
        :param ignore_weights: If True the function just sums the results.
        :param parent_kpi_weight: The parent's KPI total weight.
        :param kpi_results: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        :return: The aggregated KPI score.
        """
        if ignore_weights or len(kpi_results) == 0:
            return sum(kpi_results)
        weights_list = map(lambda res: res[1], kpi_results)
        if None in weights_list:  # Ignoring weights and dividing equally by length!
            kpi_score = sum(map(lambda res: res[0], kpi_results)) / float(
                len(kpi_results))
        elif round(
                sum(weights_list), 2
        ) < parent_kpi_weight:  # The missing weight needs to be divided among the KPIs
            kpi_score = self.divide_missing_percentage(kpi_results,
                                                       parent_kpi_weight,
                                                       sum(weights_list))
        else:
            kpi_score = sum([score * weight for score, weight in kpi_results])
        return kpi_score
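        # Worked example (assumed values): kpi_results=[(100, 0.6), (0, 0.4)] with
        # parent_kpi_weight=1.0 hits the weighted-sum branch: 100 * 0.6 + 0 * 0.4 = 60.0.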

    @staticmethod
    def divide_missing_percentage(kpi_results, parent_weight, total_weights):
        """
        This function is activated when the total of the KPI weights doesn't equal 100%.
        It divides the missing percentage among the other KPIs and calculates the score.
        :param parent_weight: Parent KPI's weight.
        :param total_weights: The total number of weights that were calculated earlier.
        :param kpi_results: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        :return: KPI aggregated score.
        """
        missing_weight = parent_weight - total_weights
        weight_addition = missing_weight / float(
            len(kpi_results)) if kpi_results else 0
        kpi_score = sum([
            score * (weight + weight_addition) for score, weight in kpi_results
        ])
        return kpi_score
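        # Worked example (assumed values): parent_weight=1.0 and kpi_results=[(100, 0.3), (50, 0.3)]
        # give total_weights=0.6, so the missing 0.4 is split equally (0.2 each) and the score is
        # 100 * 0.5 + 50 * 0.5 = 75.0.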

    def calculate_atomic_results(self, kpi_fk, atomics_df):
        """
        This method calculates the result for every atomic KPI (the lowest level) that is relevant for the kpi_fk.
        :param kpi_fk: The KPI FK that the atomic KPI "belongs" to.
        :param atomics_df: The relevant Atomic KPIs from the project's template.
        :return: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        """
        total_scores = list()
        for i in atomics_df.index:
            current_atomic = atomics_df.loc[i]
            kpi_type, atomic_weight, general_filters = self.get_relevant_data_per_atomic(
                current_atomic)
            if general_filters is None:
                continue
            num_result, den_result, atomic_score = self.calculate_atomic_kpi_by_type(
                kpi_type, **general_filters)
            # Handling Atomic KPIs results
            if atomic_score is None:  # In cases where we need to ignore the KPI and divide its weight
                continue
            elif atomic_score < 100:
                self.add_gap(current_atomic, atomic_score, atomic_weight)
            total_scores.append((atomic_score, atomic_weight))
            atomic_fk_lvl_2 = self.common.get_kpi_fk_by_kpi_type(
                current_atomic[Consts.KPI_ATOMIC_NAME].strip())
            old_atomic_fk = self.get_kpi_fk_by_kpi_name(
                current_atomic[Consts.KPI_ATOMIC_NAME].strip(), 3)
            self.common.write_to_db_result(fk=atomic_fk_lvl_2,
                                           numerator_id=Consts.CBC_MANU,
                                           numerator_result=num_result,
                                           denominator_id=self.store_id,
                                           weight=round(
                                               atomic_weight * 100, 2),
                                           denominator_result=den_result,
                                           should_enter=True,
                                           identifier_parent=kpi_fk,
                                           result=atomic_score,
                                           score=atomic_score * atomic_weight)
            self.old_common.old_write_to_db_result(
                fk=old_atomic_fk,
                level=3,
                result=str(format(atomic_score * atomic_weight, '.2f')),
                score=atomic_score)
        return total_scores

    def get_kpi_fk_by_kpi_name(self, kpi_name, kpi_level):
        if kpi_level == 1:
            column_key = 'kpi_set_fk'
            column_value = 'kpi_set_name'
        elif kpi_level == 2:
            column_key = 'kpi_fk'
            column_value = 'kpi_name'
        elif kpi_level == 3:
            column_key = 'atomic_kpi_fk'
            column_value = 'atomic_kpi_name'
        else:
            raise ValueError('invalid level')

        try:
            if column_key and column_value:
                return self.kpi_static_data[
                    self.kpi_static_data[column_value].str.encode('utf-8') ==
                    kpi_name.encode('utf-8')][column_key].values[0]

        except IndexError:
            Log.error(
                'KPI name: {} does not match any KPI name in the static table'.
                format(kpi_name))
            return None

    def get_relevant_data_per_atomic(self, atomic_series):
        """
        This function returns the relevant data per atomic KPI.
        :param atomic_series: The Atomic row from the Template.
        :return: A tuple with data: (atomic_type, atomic_weight, general_filters)
        """
        kpi_type = atomic_series.get(Consts.KPI_TYPE)
        atomic_weight = float(atomic_series.get(
            Consts.WEIGHT)) if atomic_series.get(Consts.WEIGHT) else None
        general_filters = self.get_general_filters(atomic_series)
        return kpi_type, atomic_weight, general_filters

    def calculate_atomic_kpi_by_type(self, atomic_type, **general_filters):
        """
        This function calculates the result according to the relevant Atomic Type.
        :param atomic_type: KPI Family from the template.
        :param general_filters: Relevant attributes and values to calculate by.
        :return: A tuple with results: (numerator_result, denominator_result, total_score).
        """
        num_result = denominator_result = 0
        if atomic_type in [Consts.AVAILABILITY]:
            atomic_score = self.calculate_availability(**general_filters)
        elif atomic_type == Consts.AVAILABILITY_FROM_BOTTOM:
            atomic_score = self.calculate_availability_from_bottom(
                **general_filters)
        elif atomic_type == Consts.MIN_2_AVAILABILITY:
            num_result, denominator_result, atomic_score = self.calculate_min_2_availability(
                **general_filters)
        elif atomic_type == Consts.SURVEY:
            atomic_score = self.calculate_survey(**general_filters)
        elif atomic_type == Consts.BRAND_BLOCK:
            atomic_score = self.calculate_brand_block(**general_filters)
        elif atomic_type == Consts.EYE_LEVEL:
            num_result, denominator_result, atomic_score = self.calculate_eye_level(
                **general_filters)
        else:
            Log.warning(Consts.UNSUPPORTED_KPI_LOG.format(atomic_type))
            atomic_score = None
        return num_result, denominator_result, atomic_score

    def get_relevant_kpis_for_calculation(self):
        """
        This function retrieves the relevant KPIs to calculate from the template.
        :return: A tuple: (set_name, [kpi1, kpi2, kpi3...]) to calculate.
        """
        kpi_set = self.template_data[Consts.KPI_SET].values[0]
        kpis = self.template_data[self.template_data[
            Consts.KPI_SET].str.encode('utf-8') == kpi_set.encode('utf-8')][
                Consts.KPI_NAME].unique().tolist()
        # Planogram KPI should be calculated last because of the MINIMUM 2 FACINGS KPI.
        if Consts.PLANOGRAM_KPI in kpis and kpis.index(
                Consts.PLANOGRAM_KPI) != len(kpis) - 1:
            kpis.append(kpis.pop(kpis.index(Consts.PLANOGRAM_KPI)))
        return kpi_set, kpis

    def get_atomics_to_calculate(self, kpi_name):
        """
        This method filters the KPI data down to the relevant atomic KPIs.
        :param kpi_name: The Hebrew KPI name from the template.
        :return: A DataFrame that contains data about the relevant Atomic KPIs.
        """
        atomics = self.template_data[self.template_data[
            Consts.KPI_NAME].str.encode('utf-8') == kpi_name.encode('utf-8')]
        return atomics

    def get_store_attributes(self, attributes_names):
        """
        This function returns the relevant store attributes.
        :param attributes_names: List of requested store attributes to return.
        :return: A dictionary with the requested attributes, E.g: {attr_name: attr_val, ...}
        """
        # Filter store attributes
        store_info_dict = self.store_info.iloc[0].to_dict()
        filtered_store_info = {
            store_att: store_info_dict[store_att]
            for store_att in attributes_names
        }
        return filtered_store_info

    def parse_template_data(self):
        """
        This function is responsible for filtering the relevant template data.
        :return: A DataFrame with filtered Data by store attributes.
        """
        kpis_template = parse_template(self.template_path,
                                       Consts.KPI_SHEET,
                                       lower_headers_row_index=1)
        relevant_store_info = self.get_store_attributes(
            Consts.STORE_ATTRIBUTES_TO_FILTER_BY)
        filtered_data = self.filter_template_by_store_att(
            kpis_template, relevant_store_info)
        return filtered_data

    @staticmethod
    def filter_template_by_store_att(kpis_template, store_attributes):
        """
        This function gets a dictionary with store type, additional attribute 1, 2 and 3 and filters the template by it.
        :param kpis_template: KPI sheet of the project's template.
        :param store_attributes: {store_type: X, additional_attribute_1: Y, ... }.
        :return: A filtered DataFrame.
        """
        for store_att, store_val in store_attributes.iteritems():
            if store_val is None:
                store_val = ""
            kpis_template = kpis_template[(
                kpis_template[store_att].str.encode('utf-8') ==
                store_val.encode('utf-8')) | (kpis_template[store_att] == "")]
        return kpis_template

    def get_relevant_scenes_by_params(self, params):
        """
        This function returns the relevant scene_fks to calculate.
        :param params: The Atomic KPI row filters from the template.
        :return: List of scene fks.
        """
        template_names = params[Consts.TEMPLATE_NAME].split(Consts.SEPARATOR)
        template_groups = params[Consts.TEMPLATE_GROUP].split(Consts.SEPARATOR)
        filtered_scif = self.scif[[
            Consts.SCENE_ID, 'template_name', 'template_group'
        ]]
        if template_names and any(template_names):
            filtered_scif = filtered_scif[filtered_scif['template_name'].isin(
                template_names)]
        if template_groups and any(template_groups):
            filtered_scif = filtered_scif[filtered_scif['template_group'].isin(
                template_groups)]
        return filtered_scif[Consts.SCENE_ID].unique().tolist()

    def get_general_filters(self, params):
        """
        This function returns the relevant KPI filters according to the template.
        Filter params 1 & 2 are included and param 3 is for exclusion.
        :param params: The Atomic KPI row in the template
        :return: A dictionary with the relevant filters.
        """
        general_filters = {
            Consts.TARGET: params[Consts.TARGET],
            Consts.SPLIT_SCORE: params[Consts.SPLIT_SCORE],
            Consts.KPI_FILTERS: dict()
        }
        relevant_scenes = self.get_relevant_scenes_by_params(params)
        if not relevant_scenes:
            return None
        else:
            general_filters[Consts.KPI_FILTERS][
                Consts.SCENE_ID] = relevant_scenes
        for type_col, value_col in Consts.KPI_FILTER_VALUE_LIST:
            if params[value_col]:
                should_included = Consts.INCLUDE_VAL if value_col != Consts.PARAMS_VALUE_3 else Consts.EXCLUDE_VAL
                param_type, param_value = params[type_col], params[value_col]
                filter_param = self.handle_param_values(
                    param_type, param_value)
                general_filters[Consts.KPI_FILTERS][param_type] = (
                    filter_param, should_included)

        return general_filters

    @staticmethod
    def handle_param_values(param_type, param_value):
        """
        :param param_type: The param type to filter by. E.g: product_ean_code or brand_name
        :param param_value: The value to filter by.
        :return: list of param values.
        """
        values_list = param_value.split(Consts.SEPARATOR)
        params = map(
            lambda val: float(val) if unicode.isdigit(val) and param_type !=
            Consts.EAN_CODE else val.strip(), values_list)
        return params

    def get_kpi_weight(self, kpi, kpi_set):
        """
        This method returns the KPI weight according to the project's template.
        :param kpi: The KPI name.
        :param kpi_set: Set KPI name.
        :return: The kpi weight (Float).
        """
        row = self.kpi_weights[(self.kpi_weights[Consts.KPI_SET].str.encode(
            'utf-8') == kpi_set.encode('utf-8')) & (self.kpi_weights[
                Consts.KPI_NAME].str.encode('utf-8') == kpi.encode('utf-8'))]
        weight = row.get(Consts.WEIGHT)
        return float(weight.values[0]) if not weight.empty else None

    def merge_and_filter_scif_and_matches_for_eye_level(self, **kpi_filters):
        """
        This function merges the scene_item_facts and match_product_in_scene DataFrames and filters the merged DF
        according to the @param kpi_filters.
        :param kpi_filters: Dictionary with attributes and values to filter the DataFrame by.
        :return: The merged and filtered DataFrame.
        """
        scif_matches_diff = self.match_product_in_scene[
            ['scene_fk', 'product_fk'] +
            list(self.match_product_in_scene.keys().difference(
                self.scif.keys()))]
        merged_df = pd.merge(self.scif[self.scif.facings != 0],
                             scif_matches_diff,
                             how='outer',
                             left_on=['scene_id', 'item_id'],
                             right_on=[Consts.SCENE_FK, Consts.PRODUCT_FK])
        merged_df = merged_df[self.general_toolbox.get_filter_condition(
            merged_df, **kpi_filters)]
        return merged_df

    @kpi_runtime()
    def calculate_eye_level(self, **general_filters):
        """
        This function calculates the Eye Level KPI. It filters the products according to the template and
        returns a tuple: (eye_level_facings, total_facings, score).
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: E.g: (10, 20, 50) or (8, 10, 100) --> score >= 75 turns to 100.
        """
        merged_df = self.merge_and_filter_scif_and_matches_for_eye_level(
            **general_filters[Consts.KPI_FILTERS])
        relevant_scenes = merged_df['scene_id'].unique().tolist()
        total_number_of_facings = eye_level_facings = 0
        for scene in relevant_scenes:
            scene_merged_df = merged_df[merged_df['scene_id'] == scene]
            scene_matches = self.match_product_in_scene[
                self.match_product_in_scene['scene_fk'] == scene]
            total_number_of_facings += len(scene_merged_df)
            scene_merged_df = self.filter_df_by_shelves(
                scene_merged_df, scene_matches, Consts.EYE_LEVEL_PER_SHELF)
            eye_level_facings += len(scene_merged_df)
        total_score = eye_level_facings / float(
            total_number_of_facings) if total_number_of_facings else 0
        total_score = 100 if total_score >= 0.75 else total_score * 100
        return eye_level_facings, total_number_of_facings, total_score

    @staticmethod
    def filter_df_by_shelves(df, scene_matches, eye_level_definition):
        """
        This function filters the df according to the eye-level definition
        :param df: data frame to filter
        :param scene_matches: match_product_in_scene for particular scene
        :param eye_level_definition: definition for eye level shelves
        :return: filtered data frame
        """
        # number_of_shelves = df.shelf_number_from_bottom.max()
        number_of_shelves = max(scene_matches.shelf_number_from_bottom.max(),
                                scene_matches.shelf_number.max())
        top, bottom = 0, 0
        for json_def in eye_level_definition:
            if json_def[Consts.MIN] <= number_of_shelves <= json_def[
                    Consts.MAX]:
                top = json_def[Consts.TOP]
                bottom = json_def[Consts.BOTTOM]
        return df[(df.shelf_number > top)
                  & (df.shelf_number_from_bottom > bottom)]

    @kpi_runtime()
    def calculate_availability_from_bottom(self, **general_filters):
        """
        This function checks whether *all* of the relevant products are on the lowest shelf.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return:
        """
        allowed_products_dict = self.get_allowed_product_by_params(
            **general_filters)
        filtered_matches = self.match_product_in_scene[
            self.match_product_in_scene[Consts.PRODUCT_FK].isin(
                allowed_products_dict[Consts.PRODUCT_FK])]
        relevant_shelves_to_check = set(
            filtered_matches[Consts.SHELF_NUM_FROM_BOTTOM].unique().tolist())
        # Check bottom shelf condition
        return 0 if len(
            relevant_shelves_to_check
        ) != 1 or Consts.LOWEST_SHELF not in relevant_shelves_to_check else 100
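        # Illustrative outcomes (assuming Consts.LOWEST_SHELF == 1): facings found only on
        # shelf {1} -> 100; facings on shelves {1, 2} or only on {2} -> 0, since *all* facings
        # must sit on the single lowest shelf.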

    @kpi_runtime()
    def calculate_brand_block(self, **general_filters):
        """
        This function calculates the brand block KPI. It filters and excludes products according to the template and
        then checks whether at least one scene has a block.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: 100 if at least one scene has a block, 0 otherwise.
        """
        products_dict = self.get_allowed_product_by_params(**general_filters)
        block_result = self.block.network_x_block_together(
            population=products_dict,
            additional={
                'minimum_block_ratio': Consts.MIN_BLOCK_RATIO,
                'minimum_facing_for_block': Consts.MIN_FACINGS_IN_BLOCK,
                'allowed_products_filters': {
                    'product_type': ['Empty']
                },
                'calculate_all_scenes': False,
                'include_stacking': True,
                'check_vertical_horizontal': False
            })

        result = 100 if not block_result.empty and not block_result[
            block_result.is_block].empty else 0
        return result

    def get_allowed_product_by_params(self, **filters):
        """
        This function filters the relevant products for the block-together KPI and excludes the ones that need to be
        excluded by the template.
        :param filters: Atomic KPI filters.
        :return: A Dictionary with the relevant products. E.g: {'product_fk': [1,2,3,4,5]}.
        """
        allowed_product = dict()
        filtered_scif = self.calculate_availability(return_df=True, **filters)
        allowed_product[Consts.PRODUCT_FK] = filtered_scif[
            Consts.PRODUCT_FK].unique().tolist()
        return allowed_product

    @kpi_runtime()
    def calculate_survey(self, **general_filters):
        """
        This function calculates the result for Survey KPI.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: 100 if the answer is yes, else 0.
        """
        if Consts.QUESTION_ID not in general_filters[
                Consts.KPI_FILTERS].keys():
            Log.warning(Consts.MISSING_QUESTION_LOG)
            return 0
        survey_question_id = general_filters[Consts.KPI_FILTERS].get(
            Consts.QUESTION_ID)
        # get_general_filters returns values formatted for DataFrame filtering, so we need to adjust them here.
        if isinstance(survey_question_id, tuple):
            survey_question_id = survey_question_id[0]  # Get rid of the tuple
        if isinstance(survey_question_id, list):
            survey_question_id = int(
                survey_question_id[0])  # Get rid of the list
        target_answer = general_filters[Consts.TARGET]
        survey_answer = self.survey.get_survey_answer(
            (Consts.QUESTION_FK, survey_question_id))
        if survey_answer in Consts.SURVEY_ANSWERS_TO_IGNORE:
            return None
        elif survey_answer:
            return 100 if survey_answer.strip() == target_answer else 0
        return 0

    @kpi_runtime()
    def calculate_availability(self, return_df=False, **general_filters):
        """
        This function checks for availability by filters.
        During the calculation, if the KPI passed, the result is saved for future use by the
        "MIN 2 AVAILABILITY" KPI.
        :param return_df: If True, the function returns the filtered scene item facts, else, returns the score.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: See @param return_df.
        """
        filtered_scif = self.scif[self.general_toolbox.get_filter_condition(
            self.scif, **general_filters[Consts.KPI_FILTERS])]
        if return_df:
            return filtered_scif
        if not filtered_scif.empty:
            tested_products = general_filters[Consts.KPI_FILTERS][
                Consts.EAN_CODE][0]
            self.passed_availability.append(tested_products)
            return 100
        return 0

    @staticmethod
    def get_number_of_facings_per_product_dict(df, ignore_stack=False):
        """
        This function gets a DataFrame and returns a dictionary with number of facings per products.
        :param df: Pandas.DataFrame with 'product_ean_code' and 'facings' / 'facings_ign_stack' fields.
        :param ignore_stack: If True will use 'facings_ign_stack' field, else 'facings' field.
        :return: E.g: {ean_code1: 10, ean_code2: 5, ean_code3: 1...}
        """
        stacking_field = Consts.FACINGS_IGN_STACK if ignore_stack else Consts.FACINGS
        df = df[[Consts.EAN_CODE, stacking_field]].dropna()
        df = df[df[stacking_field] > 0]
        facings_dict = dict(zip(df[Consts.EAN_CODE], df[stacking_field]))
        return facings_dict

    @kpi_runtime()
    def calculate_min_2_availability(self, **general_filters):
        """
        This KPI checks, for all of the Availability atomic KPIs that passed, whether the tested products have at
        least 2 facings when stacking is ignored.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: numerator result, denominator result and total_score
        """
        score = 0
        filtered_df = self.calculate_availability(return_df=True,
                                                  **general_filters)
        facings_counter = self.get_number_of_facings_per_product_dict(
            filtered_df, ignore_stack=True)
        for products in self.passed_availability:
            score += 1 if sum([
                facings_counter[product]
                for product in products if product in facings_counter
            ]) > 1 else 0
        total_score = (score / float(len(self.passed_availability))
                       ) * 100 if self.passed_availability else 0
        return score, len(self.passed_availability), total_score
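
# A minimal standalone sketch (assumed data, not the project's scif) of the MIN 2 AVAILABILITY
# aggregation above: each previously passed availability group earns a point only when its
# products have more than one facing in total (stacking ignored), and the final score is the
# passing share expressed as a percentage. The EAN codes are made up for illustration.
facings_counter = {'7290000000001': 1, '7290000000002': 2, '7290000000003': 0}
passed_availability = [['7290000000001', '7290000000002'], ['7290000000003']]
passed = sum(1 for group in passed_availability
             if sum(facings_counter.get(ean, 0) for ean in group) > 1)
total_score = (passed / float(len(passed_availability))) * 100 if passed_availability else 0
# passed == 1, total_score == 50.0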
Example #14
0
class ComidasToolBox(GlobalSessionToolBox):
    def __init__(self, data_provider, output, common):
        GlobalSessionToolBox.__init__(self, data_provider, output, common)
        self.ps_data_provider = PsDataProvider(data_provider)
        self.own_manufacturer = int(self.data_provider.own_manufacturer.param_value.values[0])
        self.all_templates = self.data_provider[Data.ALL_TEMPLATES]
        self.project_templates = {}
        self.parse_template()
        self.store_type = self.store_info['store_type'].iloc[0]
        self.survey = Survey(self.data_provider, output, ps_data_provider=self.ps_data_provider, common=self.common)
        self.att2 = self.store_info['additional_attribute_2'].iloc[0]
        self.results_df = pd.DataFrame(columns=['kpi_name', 'kpi_fk', 'numerator_id', 'numerator_result',
                                                'denominator_id', 'denominator_result', 'result', 'score',
                                                'identifier_result', 'identifier_parent', 'should_enter'])

        self.products = self.data_provider[Data.PRODUCTS]
        scif = self.scif[['brand_fk', 'facings', 'product_type']].groupby(by='brand_fk').sum()
        self.mpis = self.matches \
            .merge(self.products, on='product_fk', suffixes=['', '_p']) \
            .merge(self.scene_info, on='scene_fk', suffixes=['', '_s']) \
            .merge(self.all_templates[['template_fk', TEMPLATE_GROUP]], on='template_fk') \
            .merge(scif, on='brand_fk')[COLUMNS]
        self.mpis['store_fk'] = self.store_id

        self.calculations = {
            COMBO: self.calculate_combo,
            POSM_AVAILABILITY: self.calculate_posm_availability,
            SCORING: self.calculate_scoring,
            SHARE_OF_EMPTY: self.calculate_share_of_empty,
            SOS: self.calculate_sos,
            SURVEY: self.calculate_survey,
        }

    def parse_template(self):
        for sheet in SHEETS:
            self.project_templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheet_name=sheet)

    def main_calculation(self):
        if not self.store_type == 'Fondas-Rsr':
            return

        relevant_kpi_template = self.project_templates[KPIS]
        sos_kpi_template = self.filter_df(relevant_kpi_template, filters={KPI_TYPE: SOS})
        soe_kpi_template = self.filter_df(relevant_kpi_template, filters={KPI_TYPE: SHARE_OF_EMPTY})
        survey_kpi_template = self.filter_df(relevant_kpi_template, filters={KPI_TYPE: SURVEY})
        posm_kpi_template = self.filter_df(relevant_kpi_template, filters={KPI_TYPE: POSM_AVAILABILITY})
        combo_kpi_template = self.filter_df(relevant_kpi_template, filters={KPI_TYPE: COMBO})
        scoring_kpi_template = self.filter_df(relevant_kpi_template, filters={KPI_TYPE: SCORING})
        sub_scoring_kpi_template = self.filter_df(scoring_kpi_template, filters={KPI_NAME: scoring_kpi_template[PARENT_KPI]}, exclude=True)
        meta_scoring_kpi_template = self.filter_df(scoring_kpi_template, filters={KPI_NAME: scoring_kpi_template[PARENT_KPI]})

        self._calculate_kpis_from_template(sos_kpi_template)
        self._calculate_kpis_from_template(soe_kpi_template)
        self._calculate_kpis_from_template(survey_kpi_template)
        self.calculate_distribution()
        self._calculate_kpis_from_template(posm_kpi_template)
        self._calculate_kpis_from_template(sub_scoring_kpi_template)
        self._calculate_kpis_from_template(combo_kpi_template)
        self._calculate_kpis_from_template(meta_scoring_kpi_template)
        self.save_results_to_db()

    def _calculate_kpis_from_template(self, template_df):
        for i, row in template_df.iterrows():
            calculation_function = self.calculations.get(row[KPI_TYPE])
            try:
                kpi_row = self.project_templates[row[KPI_TYPE]][
                    self.project_templates[row[KPI_TYPE]][KPI_NAME].str.encode('utf-8') == row[KPI_NAME].encode('utf-8')
                    ].iloc[0]
            except IndexError:
                return

            result_data = calculation_function(kpi_row)
            if result_data:
                weight = row['Score']
                if weight and pd.notna(weight) and pd.notna(result_data['result']) and 'score' not in result_data:
                    result_data['score'] = weight * result_data['result']
                parent_kpi_name = self._get_parent_name_from_kpi_name(result_data['kpi_name'])
                if parent_kpi_name and 'identifier_parent' not in result_data.keys():
                    result_data['identifier_parent'] = parent_kpi_name
                if 'identifier_result' not in result_data:
                    result_data['identifier_result'] = result_data['kpi_name']
                if result_data['result'] <= 1:
                    result_data['result'] = result_data['result'] * 100
                if 'numerator_id' not in result_data:
                    result_data['numerator_id'] = self.own_manufacturer
                if 'denominator_id' not in result_data:
                    result_data['denominator_id'] = self.store_id
                self.results_df.loc[len(self.results_df), result_data.keys()] = result_data
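        # Worked example (assumed row): a SOS KPI row with a template Score (weight) of 20 and a
        # calculated result of 0.8 gets score = 20 * 0.8 = 16, and the result itself is then
        # rescaled to 80 because results of at most 1 are expressed as percentages.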

    def calculate_distribution(self):
        distribution_template = self.project_templates[DISTRIBUTION] \
            .rename(columns={'store_additional_attribute_2': 'store_size'})
        distribution_template['additional_brands'] = distribution_template \
            .apply(lambda row: int(row['constraint'].split()[0]), axis=1)

        kpi_name = distribution_template.at[0, KPI_NAME]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi_name)

        # anchor_brands = self.sanitize_values(distribution_template.at[0, 'a_value'])
        try:
            anchor_brands = [int(brand) for brand in distribution_template.at[0, 'a_value'].split(",")]
        except AttributeError:
            anchor_brands = [distribution_template.at[0, 'a_value']]

        try:
            template_groups = [template_group.strip() for template_group in distribution_template.at[0, TEMPLATE_GROUP].split(',')]
        except AttributeError:
            template_groups = [distribution_template.at[0, TEMPLATE_GROUP]]

        anchor_threshold = distribution_template.at[0, 'a_test_threshold_2']
        anchor_df = self.filter_df(self.mpis, filters={TEMPLATE_GROUP: template_groups, 'brand_fk': anchor_brands})
        # the anchor requirement passes only if at least one anchor-brand row meets the facings threshold
        anchor_passed = (anchor_df['facings'] >= anchor_threshold).any()

        try:
            target_brands = [int(brand) for brand in distribution_template.at[0, 'b_value'].split(",")]
        except AttributeError:
            target_brands = [distribution_template.at[0, 'b_value']]

        target_threshold = distribution_template.at[0, 'b_threshold_2']
        target_df = self.filter_df(self.mpis, filters={TEMPLATE_GROUP: template_groups, 'brand_fk': target_brands})
        num_target_brands = len(target_df[target_df['facings'] >= target_threshold]['brand_fk'].unique())
        store_size = self.store_info.at[0, 'additional_attribute_2']

        distribution = self.filter_df(
            distribution_template,
            filters={'additional_brands': num_target_brands, 'store_size': store_size})

        if distribution.empty:
            max_constraints = distribution_template \
                .groupby(by=['store_size'], as_index=False) \
                .max()
            distribution = self.filter_df(max_constraints, filters={'store_size': store_size})

        parent_kpi = distribution.iloc[0][PARENT_KPI]
        max_score = self.filter_df(self.project_templates[KPIS], filters={KPI_NAME: parent_kpi}).iloc[0]['Score']
        score = distribution.iloc[0]['Score'] if anchor_passed else 0
        result = score / max_score * 100
        numerator_result = len(self.filter_df(self.mpis, filters={
            TEMPLATE_GROUP: template_groups,
            'manufacturer_fk': self.own_manufacturer,
            'product_type': 'SKU'}))
        denominator_result = len(self.filter_df(self.mpis, filters={
            TEMPLATE_GROUP: template_groups,
            'product_type': ['SKU', 'Irrelevant']}))

        result_dict = {
            'kpi_name': kpi_name,
            'kpi_fk': kpi_id,
            'numerator_id': self.own_manufacturer,
            'numerator_result': numerator_result,
            'denominator_id': self.store_id,
            'denominator_result': denominator_result,
            'result': result,
            'score': score,
            'identifier_parent': parent_kpi,
            'identifier_result': kpi_name
        }

        self.results_df.loc[len(self.results_df), result_dict.keys()] = result_dict
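    # Hedged walk-through of the lookup above (template values are made up): a 'constraint' cell such as
    # "2 additional brands" yields additional_brands == 2; the distribution row is then matched on
    # (store_size, additional_brands), falling back to the per-store-size maximum constraint row, and its
    # 'Score' is reported as a percentage of the parent KPI's 'Score'.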

    def calculate_share_of_empty(self, row):
        target = row['target']
        numerator_param1 = row[NUMERATOR_PARAM_1]
        numerator_value1 = row[NUMERATOR_VALUE_1]

        kpi_name = row[KPI_NAME]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi_name)
        template_groups = row[TEMPLATE_GROUP].split(',')
        denominator_scif = self.filter_df(self.scif, filters={TEMPLATE_GROUP: template_groups})
        denominator_scif = self.filter_df(denominator_scif, filters={'product_type': 'POS'}, exclude=True)
        numerator_scif = self.filter_df(denominator_scif, filters={numerator_param1: numerator_value1})
        template_id = self.filter_df(self.all_templates, filters={TEMPLATE_GROUP: template_groups})['template_fk'].unique()[0]

        result_dict = {
            'kpi_name': kpi_name,
            'kpi_fk': kpi_id,
            'numerator_id': self.own_manufacturer,
            'denominator_id': template_id,
            'result': 0}

        if not numerator_scif.empty:
            denominator_result = denominator_scif.facings.sum()
            numerator_result = numerator_scif.facings.sum()
            result = (numerator_result / denominator_result)
            result_dict['numerator_result'] = numerator_result
            result_dict['denominator_result'] = denominator_result
            result_dict['result'] = self.calculate_sos_score(target, result)

        return result_dict

    def calculate_sos(self, row):
        kpi_name = row[KPI_NAME]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi_name)
        template_groups = self.sanitize_values(row[TEMPLATE_GROUP])
        product_types = row['product_type'].split(",")

        den_df = self.filter_df(self.mpis, filters={TEMPLATE_GROUP: template_groups, 'product_type': product_types})
        num_param = row[NUMERATOR_PARAM_1]
        num_val = row[NUMERATOR_VALUE_1]
        num_df = self.filter_df(den_df, filters={num_param: num_val})

        try:
            # float() avoids Python 2 integer division when both counts are ints
            ratio = len(num_df) / float(len(den_df))
        except ZeroDivisionError:
            ratio = 0

        target = row['target']
        result = self.calculate_sos_score(target, ratio)

        result_dict = {
            'kpi_name': kpi_name,
            'kpi_fk': kpi_id,
            'numerator_id': self.own_manufacturer,
            'denominator_id': num_df[row[DENOMINATOR_ENTITY]].mode().iloc[0],
            'result': result
        }

        return result_dict

    def calculate_posm_availability(self, row):
        # if the dominant KPI in this group already passed, this non-dominant KPI scores 0
        result = 100
        max_score = row['KPI Total Points']
        if row['Dominant KPI'] != 'Y':
            result = 50
            dom_kpi = self.filter_df(self.project_templates['POSM Availability'],
                                     filters={'Parent KPI': row['Parent KPI'], 'Dominant KPI': 'Y'}
                                     )
            dom_name = dom_kpi.iloc[0][KPI_NAME]
            max_score = dom_kpi.iloc[0]['KPI Total Points']
            dom_score = self.filter_df(self.results_df, filters={'kpi_name': dom_name}).iloc[0]['result']
            if dom_score > 0:
                result = 0

        kpi_name = row['KPI Name']
        kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name)
        product_fks = [int(product) for product in str(row['product_fk']).split(',')]
        template_fks = self.get_template_fk(row['template_name'])
        filtered_df = self.filter_df(self.mpis, filters={'template_fk': template_fks, 'product_fk': product_fks})

        if filtered_df.empty:
            result = 0

        score = max_score * result / 100

        try:
            denominator_id = filtered_df['template_fk'].mode().iloc[0]
        except IndexError:
            denominator_id = template_fks[0]

        result_dict = {
            'kpi_fk': kpi_fk,
            'kpi_name': kpi_name,
            'denominator_id': denominator_id,
            'result': result,
            'score': score,
        }

        return result_dict

    def calculate_survey(self, row):
        """
        Determines whether the KPI passes based on whether the survey response for the question in `row` is 'Si'.

        :param row: Row of template containing Survey question data.
        :return: Dictionary containing KPI results.
        """

        kpi_name = row[KPI_NAME]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi_name)
        result = 1 if self.survey.get_survey_answer(row['KPI Question']).lower() == 'si' else 0
        result_dict = {
            'kpi_name': kpi_name,
            'kpi_fk': kpi_id,
            'numerator_id': self.own_manufacturer,
            'denominator_id': self.store_id,
            'result': result
        }

        return result_dict

    def calculate_scoring(self, row):
        kpi_name = row[KPI_NAME]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi_name)
        component_kpi = [comp.strip() for comp in row['Component KPIs'].split(',')]
        component_df = self.filter_df(self.results_df, filters={'kpi_name': component_kpi})
        score = component_df['score'].sum()
        result = score if kpi_name == "ICE-Fondas-Rsr" else score / row['Score'] * 100

        result_dict = {
            'kpi_name': kpi_name,
            'kpi_fk': kpi_id,
            'numerator_id': self.own_manufacturer,
            'denominator_id': self.store_id,
            'result': result,
            'score': score,
        }

        return result_dict

    def calculate_combo(self, row):
        kpi_name = row[KPI_NAME]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi_name)

        a_filter = row['a_filter']
        a_value = row['a_value']

        scoring_row = self.filter_df(self.project_templates['Scoring'], filters={KPI_NAME: a_value}).iloc[0]
        component_kpi = [comp.strip() for comp in scoring_row['Component KPIs'].split(",")]
        component_df = self.filter_df(self.results_df, filters={'kpi_name': component_kpi})
        a_test = row['a_test']
        a_score = component_df[a_test].sum()
        a_threshold = row['a_threshold']
        a_check = a_score >= a_threshold

        template_groups = row[TEMPLATE_GROUP]
        b_filter = row['b_filter']
        b_value = row['b_value'].split(",")
        b_threshold = row['b_threshold']
        b_check = len(self.filter_df(self.mpis, filters={TEMPLATE_GROUP: template_groups, b_filter: b_value})) >= b_threshold

        func = LOGIC.get(row['b_logic'].lower())
        result = int(func(a_check, b_check))

        result_dict = {
            'kpi_name': kpi_name,
            'kpi_fk': kpi_id,
            'result': result,
        }

        return result_dict

    # def calculate_scoring(self, row):
    #     kpi_name = row[KPI_NAME]
    #     kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name)
    #     numerator_id = self.own_manuf_fk
    #     denominator_id = self.store_id
    #
    #     result_dict = {'kpi_name': kpi_name, 'kpi_fk': kpi_fk, 'numerator_id': numerator_id,
    #                    'denominator_id': denominator_id}
    #
    #     component_kpis = self.sanitize_values(row['Component KPIs'])
    #     dependency_kpis = self.sanitize_values(row['Dependency'])
    #     relevant_results = self.results_df[self.results_df['kpi_name'].isin(component_kpis)]
    #     passing_results = relevant_results[(relevant_results['result'] != 0) &
    #                                        (relevant_results['result'].notna()) &
    #                                        (relevant_results['score'] != 0)]
    #     nan_results = relevant_results[relevant_results['result'].isna()]
    #     if len(relevant_results) > 0 and len(relevant_results) == len(nan_results):
    #         result_dict['result'] = pd.np.nan
    #     elif row['Component aggregation'] == 'one-passed':
    #         if len(relevant_results) > 0 and len(passing_results) > 0:
    #             result_dict['result'] = 1
    #         else:
    #             result_dict['result'] = 0
    #     elif row['Component aggregation'] == 'sum':
    #         if len(relevant_results) > 0:
    #             result_dict['score'] = relevant_results['score'].sum()
    #             if 'result' not in result_dict.keys():
    #                 if row['score_based_result'] == 'y':
    #                     result_dict['result'] = 0 if result_dict['score'] == 0 else result_dict['score'] / row['Score']
    #                 elif row['composition_based_result'] == 'y':
    #                     result_dict['result'] = 0 if passing_results.empty else float(len(passing_results)) / len(
    #                         relevant_results)
    #                 else:
    #                     result_dict['result'] = result_dict['score']
    #         else:
    #             result_dict['score'] = 0
    #             if 'result' not in result_dict.keys():
    #                 result_dict['result'] = result_dict['score']
    #     if dependency_kpis and dependency_kpis is not pd.np.nan:
    #         dependency_results = self.results_df[self.results_df['kpi_name'].isin(dependency_kpis)]
    #         passing_dependency_results = dependency_results[dependency_results['result'] != 0]
    #         if len(dependency_results) > 0 and len(dependency_results) == len(passing_dependency_results):
    #             result_dict['result'] = 1
    #         else:
    #             result_dict['result'] = 0
    #
    #     return result_dict

    def _filter_df_based_on_row(self, row, df):
        columns_in_scif = row.index[np.in1d(row.index, df.columns)]
        for column_name in columns_in_scif:
            if pd.notna(row[column_name]):
                df = df[df[column_name].isin(self.sanitize_values(row[column_name]))]
            if df.empty:
                break
        return df

    def _get_kpi_name_and_fk(self, row, generic_num_dem_id=False):
        kpi_name = row[KPI_NAME]
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name)
        output = [kpi_name, kpi_fk]
        if generic_num_dem_id:
            numerator_id = self.scif[row[NUMERATOR_ENTITY]].mode().iloc[0]
            denominator_id = self.scif[row[DENOMINATOR_ENTITY]].mode().iloc[0]
            output.append(numerator_id)
            output.append(denominator_id)
        return output

    def _get_parent_name_from_kpi_name(self, kpi_name):
        template = self.project_templates[KPIS]
        parent_kpi_name = \
            template[template[KPI_NAME].str.encode('utf-8') == kpi_name.encode('utf-8')][PARENT_KPI].iloc[0]
        if parent_kpi_name and pd.notna(parent_kpi_name):
            return parent_kpi_name
        else:
            return None

    @staticmethod
    def sanitize_values(item):
        if pd.isna(item):
            return item
        else:
            if type(item) == int:
                return str(item)
            else:
                items = [item.strip() for item in item.split(',')]
                return items
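    # Illustrative behaviour of sanitize_values (inputs are examples only):
    #   sanitize_values('Cooler, Rack')  -> ['Cooler', 'Rack']
    #   sanitize_values(5)               -> '5'
    #   sanitize_values(np.nan)          -> nan (returned unchanged)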

    def save_results_to_db(self):
        self.results_df.drop(columns=['kpi_name'], inplace=True)
        self.results_df.rename(columns={'kpi_fk': 'fk'}, inplace=True)
        self.results_df.loc[self.results_df['identifier_parent'].notnull(), 'should_enter'] = True
        # set result to NaN for records that do not have a parent
        # identifier_results = self.results_df[self.results_df['result'].notna()]['identifier_result'].unique().tolist()
        # self.results_df['result'] = self.results_df.apply(
        #     lambda row: pd.np.nan if (pd.notna(row['identifier_parent']) and row[
        #         'identifier_parent'] not in identifier_results) else row['result'], axis=1)
        self.results_df['result'] = self.results_df.apply(
            lambda row: row['result'] if (
                    pd.notna(row['identifier_parent']) or pd.notna(row['identifier_result'])) else np.nan, axis=1)
        # get rid of 'not applicable' results
        self.results_df.dropna(subset=['result'], inplace=True)
        self.results_df.fillna(0, inplace=True)
        results = self.results_df.to_dict('records')
        for result in results:
            self.write_to_db(**result)

    @staticmethod
    def calculate_sos_score(target, result):
        """
        Determines whether `result` is greater than or within the range of `target`.

        :param target: Target value as either a minimum value or a '-'-separated range.
        :param result: Calculation result to compare to `target`.
        :return: 1 if `result` is at least `target` or falls within the `target` range, otherwise 0.
        """

        score = 0
        if pd.notna(target):
            target = [int(n) for n in str(target).split('-')]  # str() handles targets stored as numbers
            if len(target) == 1:
                score = int(result*100 >= target[0])
            if len(target) == 2:
                score = int(target[0] <= result*100 <= target[1])
        return score
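    # Illustrative behaviour (example values): `result` is a ratio while `target` is in percent, so
    #   calculate_sos_score(50, 0.60)      -> 1   (60% >= 50%)
    #   calculate_sos_score('30-40', 0.35) -> 1   (35% falls inside the 30-40 range)
    #   calculate_sos_score('30-40', 0.45) -> 0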

    @staticmethod
    def filter_df(df, filters, exclude=False, func=pd.Series.isin):
        """
        :param df: DataFrame to filter.
        :param filters: Dictionary of column-value list pairs to filter by.
        :param exclude: If True, invert the filter and drop the matching rows.
        :param func: Function used to determine inclusion (defaults to pd.Series.isin).
        :return: Filtered DataFrame.
        """

        vert = op.inv if exclude else op.pos
        func = LOGIC.get(func, func)

        for col, val in filters.items():
            if not hasattr(val, '__iter__'):
                val = [val]
            try:
                if (isinstance(val, pd.Series) and val.any()) or pd.notna(val[0]):
                    df = df[vert(func(df[col], val))]
            except TypeError:
                df = df[vert(func(df[col]))]
        return df
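    # Illustrative usage of filter_df (column values are examples only):
    #   filter_df(self.mpis, filters={'brand_fk': [1, 2]})                   -> rows whose brand_fk is 1 or 2
    #   filter_df(self.mpis, filters={'product_type': 'POS'}, exclude=True)  -> every row except POS
    # `func` defaults to pd.Series.isin, and exclude=True inverts the resulting mask via operator.inv.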

    def get_template_fk(self, template_name):
        """
        :param template_name: Name of template.
        :return: ID of template.
        """

        template_df = self.filter_df(self.all_templates, filters={'template_name': template_name})
        template_fks = template_df['template_fk'].unique()

        return template_fks
Example #15
0
class CMAToolBox:
    EXCLUDE_FILTER = 0
    INCLUDE_FILTER = 1
    CONTAIN_FILTER = 2

    def __init__(self, data_provider, output, common_db2):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.manufacturer_fk = 1
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        # we don't need to check scenes without United products
        self.united_scenes = self.get_united_scenes()
        self.survey = Survey(self.data_provider, self.output)
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.sos = SOS(self.data_provider, self.output)
        self.templates = {}
        self.common_db = Common(self.data_provider, SUB_PROJECT)
        self.common_db2 = common_db2
        self.result_values = self.ps_data_provider.get_result_values()
        self.region = self.store_info['region_name'].iloc[0]
        self.store_type = self.store_info['store_type'].iloc[0]
        self.program = self.store_info['additional_attribute_14'].iloc[0]
        self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
        if self.store_type in STORE_TYPES:
            self.store_type = STORE_TYPES[self.store_type]
        self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
        self.kpi_static_data = self.common_db.get_kpi_static_data()
        self.ignore_stacking = False
        self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
        self.total_score = 0
        self.total_count = 0
        for sheet in Const.SHEETS_CMA:
            self.templates[sheet] = pd.read_excel(TEMPLATE_PATH,
                                                  sheetname=sheet).fillna('')
        self.tools = Shared(self.data_provider, self.output)

    # main functions:

    def main_calculation(self, *args, **kwargs):
        """
            Calculates every session-level KPI from the main template (for the relevant regions),
            aggregates the pass/fail counts and writes the overall result to the DB.
        """
        main_template = self.templates[Const.KPIS]
        if self.region in Const.REGIONS:
            for i, main_line in main_template.iterrows():
                store_type = self.does_exist(main_line, Const.STORE_TYPE)
                if store_type is None or self.store_type in self.does_exist(
                        main_line, Const.STORE_TYPE):
                    self.calculate_main_kpi(main_line)
            kpi_fk = self.common_db2.get_kpi_fk_by_kpi_name(SUB_PROJECT)

            result = 0
            if self.total_count:
                result = self.total_score * 100.0 / self.total_count
            self.common_db2.write_to_db_result(
                fk=kpi_fk,
                result=result,
                numerator_result=self.total_score,
                numerator_id=self.manufacturer_fk,
                denominator_result=self.total_count,
                denominator_id=self.store_id,
                identifier_result=self.common_db2.get_dictionary(
                    parent_name=SUB_PROJECT))
            self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name(
                SUB_PROJECT, 1),
                                    score=self.total_score,
                                    level=1)

    def calculate_main_kpi(self, main_line):
        """
        Takes one line from the main sheet, finds all the lines with the same KPI name in the matching
        type-specific sheet, calculates them and writes the result.
        :param main_line: series from the template of the main_sheet.
        """
        kpi_name = main_line[Const.KPI_NAME]
        kpi_type = main_line[Const.TYPE]
        relevant_scif = self.scif[self.scif['scene_id'].isin(
            self.united_scenes)]
        scene_types = self.does_exist(main_line, Const.SCENE_TYPE)
        result = score = target = None
        general_filters = {}
        if scene_types:
            relevant_scif = relevant_scif[relevant_scif['template_name'].isin(
                scene_types)]
            general_filters['template_name'] = scene_types
        scene_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP)
        if scene_groups:
            relevant_scif = relevant_scif[relevant_scif['template_group'].isin(
                scene_groups)]
            general_filters['template_group'] = scene_groups
        if kpi_type == Const.SOS:
            isnt_dp = self.store_attr != Const.DP and main_line[Const.STORE_ATTRIBUTE] == Const.DP
            relevant_template = self.templates[kpi_type]
            relevant_template = relevant_template[relevant_template[
                Const.KPI_NAME] == kpi_name]
            kpi_function = self.get_kpi_function(kpi_type)
            for i, kpi_line in relevant_template.iterrows():
                result, score, target = kpi_function(kpi_line, relevant_scif,
                                                     isnt_dp, general_filters)
        else:
            pass
        self.total_count += 1
        if score > 0:
            self.total_score += 1
        if isinstance(result, tuple):
            self.write_to_all_levels(kpi_name=kpi_name,
                                     result=result[0],
                                     score=score,
                                     target=target,
                                     num=result[1],
                                     den=result[2])
        else:
            self.write_to_all_levels(kpi_name=kpi_name,
                                     result=result,
                                     score=score,
                                     target=target)

    # write in DF:
    def write_to_all_levels(self,
                            kpi_name,
                            result,
                            score,
                            target=None,
                            num=None,
                            den=None):
        """
        Writes the final result in the "all" DF, add the score to the red score and writes the KPI in the DB
        :param kpi_name: str
        :param result: int
        :param display_text: str
        :param weight: int/float
        :param scene_fk: for the scene's kpi
        :param reuse_scene: this kpi can use scenes that were used
        """
        # result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result, Const.SCORE: score, Const.THRESHOLD: target}
        # self.all_results = self.all_results.append(result_dict, ignore_index=True)
        self.write_to_db(kpi_name,
                         score,
                         result=result,
                         target=target,
                         num=num,
                         den=den)

    # survey:

    def calculate_survey_specific(self,
                                  kpi_line,
                                  relevant_scif=None,
                                  isnt_dp=None):
        """
        Checks whether the survey question in the line got one of the accepted answers.
        :param kpi_line: line from the survey sheet
        :param relevant_scif: unused, kept for the generic KPI-function signature
        :param isnt_dp: unused, kept for the generic KPI-function signature
        :return: True if the question got an accepted answer, otherwise False
        """
        question = kpi_line[Const.Q_TEXT]
        if not question:
            question_id = kpi_line[Const.Q_ID]
            if question_id == "":
                Log.warning(
                    "The template has a survey question without ID or text")
                return False
            question = ('question_fk', int(question_id))
        answers = kpi_line[Const.ACCEPTED_ANSWER].split(',')
        min_answer = None if kpi_line[Const.REQUIRED_ANSWER] == '' else True
        for answer in answers:
            if self.survey.check_survey_answer(survey_text=question,
                                               target_answer=answer,
                                               min_required_answer=min_answer):
                return True
        return False

    # availability:

    def calculate_availability_with_same_pack(self, relevant_template,
                                              relevant_scif, isnt_dp):
        """
        checks that every line in the availability sheet passes the KPI, AND that the filtered scifs share
        at least one common product pack (the same size and number of sub_packages).
        :param relevant_template: all the match lines from the availability sheet.
        :param relevant_scif: filtered scif
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate
        DP lines
        :return: boolean
        """
        packages = None
        for i, kpi_line in relevant_template.iterrows():
            if isnt_dp and kpi_line[Const.MANUFACTURER] in Const.DP_MANU:
                continue
            filtered_scif = self.filter_scif_availability(
                kpi_line, relevant_scif)
            filtered_scif = filtered_scif.fillna("NAN")
            target = kpi_line[Const.TARGET]
            sizes = filtered_scif['size'].tolist()
            sub_packages_nums = filtered_scif['number_of_sub_packages'].tolist(
            )
            cur_packages = set(zip(sizes, sub_packages_nums))
            if packages is None:
                packages = cur_packages
            else:
                packages = cur_packages & packages
                if len(packages) == 0:
                    return False
            if filtered_scif[
                    filtered_scif['facings'] > 0]['facings'].count() < target:
                return False
        if len(packages) > 1:
            return False
        return True
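    # Worked sketch of the "same pack" rule above (pack values are illustrative): if the first line leaves
    # packages == {(500, 6), (355, 12)} and the next line yields {(500, 6)}, the intersection {(500, 6)}
    # keeps the check alive; an empty intersection, or more than one surviving (size, sub_packages) pair
    # at the end, fails the KPI.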

    def calculate_availability(self, kpi_line, relevant_scif, isnt_dp):
        """
        checks whether the availability line passes the KPI (the relevant scif contains at least `target`
        products, with facings, that match the line's attributes).
        :param relevant_scif: filtered scif
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate
        DP lines
        :param kpi_line: line from the availability sheet
        :return: boolean
        """
        if isnt_dp and kpi_line[Const.MANUFACTURER] in Const.DP_MANU:
            return True
        filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif)
        target = kpi_line[Const.TARGET]
        return filtered_scif[
            filtered_scif['facings'] > 0]['facings'].count() >= target

    def filter_scif_specific(self, relevant_scif, kpi_line, name_in_template,
                             name_in_scif):
        """
        takes scif and filters it from the template
        :param relevant_scif: the current filtered scif
        :param kpi_line: line from one sheet (availability for example)
        :param name_in_template: the column name in the template
        :param name_in_scif: the column name in SCIF
        :return: filtered scif
        """
        values = self.does_exist(kpi_line, name_in_template)
        if values:
            if name_in_scif in Const.NUMERIC_VALUES_TYPES:
                values = [float(x) for x in values]
            return relevant_scif[relevant_scif[name_in_scif].isin(values)]
        return relevant_scif

    def filter_scif_availability(self, kpi_line, relevant_scif):
        """
        calls filter_scif_specific for every column in the template of availability
        :param kpi_line:
        :param relevant_scif:
        :return:
        """
        names_of_columns = {
            Const.MANUFACTURER: "manufacturer_name",
            Const.BRAND: "brand_name",
            Const.TRADEMARK: "att2",
            Const.SIZE: "size",
            Const.NUM_SUB_PACKAGES: "number_of_sub_packages",
        }
        for name in names_of_columns:
            relevant_scif = self.filter_scif_specific(relevant_scif, kpi_line,
                                                      name,
                                                      names_of_columns[name])
        return relevant_scif

    # SOS:

    def calculate_sos(self, kpi_line, relevant_scif, isnt_dp, general_filters):
        """
        calculates SOS line in the relevant scif.
        :param kpi_line: line from SOS sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator.
        :return: ((sos_value, numerator, denominator), score, target)
        """
        kpi_name = kpi_line[Const.KPI_NAME]
        den_type = kpi_line[Const.DEN_TYPES_1]
        den_value = kpi_line[Const.DEN_VALUES_1].split(',')
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1].split(',')
        target = self.get_sos_targets(kpi_name)
        general_filters[den_type] = den_value
        if kpi_line[Const.DEN_TYPES_2]:
            den_type_2 = kpi_line[Const.DEN_TYPES_2]
            den_value_2 = kpi_line[Const.DEN_VALUES_2].split(',')
            general_filters[den_type_2] = den_value_2
        sos_filters = {num_type: num_value}
        if isnt_dp:
            sos_filters['manufacturer_name'] = (Const.DP_MANU, 0)
        if kpi_line[Const.NUM_TYPES_2]:
            num_type_2 = kpi_line[Const.NUM_TYPES_2]
            num_value_2 = kpi_line[Const.NUM_VALUES_2].split(',')
            sos_filters[num_type_2] = num_value_2

        num_scif = relevant_scif[self.get_filter_condition(
            relevant_scif, **sos_filters)]
        den_scif = relevant_scif[self.get_filter_condition(
            relevant_scif, **general_filters)]
        sos_value, num, den = self.tools.sos_with_num_and_dem(
            kpi_line, num_scif, den_scif, self.facings_field)
        # sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters)
        # sos_value *= 100
        # sos_value = round(sos_value, 2)

        if target:
            target *= 100
            score = 1 if sos_value >= target else 0
        else:
            score = 0
            target = 0
        return (sos_value, num, den), score, target

    # SOS majority:

    def get_sos_targets(self, kpi_name):
        targets_template = self.templates[Const.TARGETS]
        store_targets = targets_template.loc[
            (targets_template['program'] == self.program)
            & (targets_template['channel'] == self.store_type)]
        filtered_targets_to_kpi = store_targets.loc[
            store_targets['KPI name'] == kpi_name]
        if not filtered_targets_to_kpi.empty:
            target = filtered_targets_to_kpi[Const.TARGET].values[0]
        else:
            target = None
        return target

    def calculate_sos_maj(self, kpi_line, relevant_scif, isnt_dp):
        """
        calculates SOS majority line in the relevant scif. Filters the denominator and sends the line to the
        match function (majority or dominant)
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator (and the denominator of the dominant part).
        :return: boolean
        """
        kpi_name = kpi_line[Const.KPI_NAME]
        if kpi_line[Const.EXCLUSION_SHEET] == Const.V:
            exclusion_sheet = self.templates[Const.SKU_EXCLUSION]
            relevant_exclusions = exclusion_sheet[exclusion_sheet[
                Const.KPI_NAME] == kpi_name]
            for i, exc_line in relevant_exclusions.iterrows():
                relevant_scif = self.exclude_scif(exc_line, relevant_scif)
        relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"]
        den_type = kpi_line[Const.DEN_TYPES_1]
        den_value = kpi_line[Const.DEN_VALUES_1]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        den_type = kpi_line[Const.DEN_TYPES_2]
        den_value = kpi_line[Const.DEN_VALUES_2]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        if kpi_line[Const.MAJ_DOM] == Const.MAJOR:
            answer = self.calculate_majority_part(kpi_line, relevant_scif,
                                                  isnt_dp)
        elif kpi_line[Const.MAJ_DOM] == Const.DOMINANT:
            answer = self.calculate_dominant_part(kpi_line, relevant_scif,
                                                  isnt_dp)
        else:
            Log.warning("SOS majority does not know '{}' part".format(
                kpi_line[Const.MAJ_DOM]))
            answer = False
        return answer

    def calculate_majority_part(self, kpi_line, relevant_scif, isnt_dp):
        """
        filters the numerator and checks if the SOS is bigger than 50%.
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator.
        :return: boolean
        """
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1]
        num_scif = self.filter_by_type_value(relevant_scif, num_type,
                                             num_value)
        num_type = kpi_line[Const.NUM_TYPES_2]
        num_value = kpi_line[Const.NUM_VALUES_2]
        num_scif = self.filter_by_type_value(num_scif, num_type, num_value)
        if num_scif.empty:
            return None
        if isnt_dp:
            num_scif = num_scif[~(
                num_scif['manufacturer_name'].isin(Const.DP_MANU))]
        target = Const.MAJORITY_TARGET
        return num_scif['facings'].sum() / relevant_scif['facings'].sum(
        ) >= target

    def calculate_dominant_part(self, kpi_line, relevant_scif, isnt_dp):
        """
        filters the numerator and checks if the given value in the given type is the one with the most facings.
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out.
        :return: boolean
        """
        if isnt_dp:
            relevant_scif = relevant_scif[~(
                relevant_scif['manufacturer_name'].isin(Const.DP_MANU))]
        type_name = self.get_column_name(kpi_line[Const.NUM_TYPES_1],
                                         relevant_scif)
        values = str(kpi_line[Const.NUM_VALUES_1]).split(', ')
        if type_name in Const.NUMERIC_VALUES_TYPES:
            values = [float(x) for x in values]
        max_facings, needed_one = 0, 0
        values_type = relevant_scif[type_name].unique().tolist()
        if None in values_type:
            values_type.remove(None)
            current_sum = relevant_scif[
                relevant_scif[type_name].isnull()]['facings'].sum()
            if current_sum > max_facings:
                max_facings = current_sum
        for value in values_type:
            current_sum = relevant_scif[relevant_scif[type_name] ==
                                        value]['facings'].sum()
            if current_sum > max_facings:
                max_facings = current_sum
            if value in values:
                needed_one += current_sum
        return needed_one >= max_facings

    def get_filter_condition(self, df, **filters):
        """
        :param df: The data frame to be filtered.
        :param filters: These are the parameters which the data frame is filtered by.
                       Every parameter would be a tuple of the value and an include/exclude flag.
                       INPUT EXAMPLE (1):   manufacturer_name = ('Diageo', DIAGEOAUPNGAMERICAGENERALToolBox.INCLUDE_FILTER)
                       INPUT EXAMPLE (2):   manufacturer_name = 'Diageo'
        :return: a filtered Scene Item Facts data frame.
        """
        if not filters:
            return df['pk'].apply(bool)
        if self.facings_field in df.keys():
            filter_condition = (df[self.facings_field] > 0)
        else:
            filter_condition = None
        for field in filters.keys():
            if field in df.keys():
                if isinstance(filters[field], tuple):
                    value, exclude_or_include = filters[field]
                else:
                    value, exclude_or_include = filters[
                        field], self.INCLUDE_FILTER
                if not value:
                    continue
                if not isinstance(value, list):
                    value = [value]
                if exclude_or_include == self.INCLUDE_FILTER:
                    condition = (df[field].isin(value))
                elif exclude_or_include == self.EXCLUDE_FILTER:
                    condition = (~df[field].isin(value))
                elif exclude_or_include == self.CONTAIN_FILTER:
                    condition = (df[field].str.contains(value[0], regex=False))
                    for v in value[1:]:
                        condition |= df[field].str.contains(v, regex=False)
                else:
                    continue
                if filter_condition is None:
                    filter_condition = condition
                else:
                    filter_condition &= condition
            else:
                Log.warning('field {} is not in the Data Frame'.format(field))

        return filter_condition
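    # Illustrative filter specs for get_filter_condition (values are examples only):
    #   get_filter_condition(scif, manufacturer_name='Diageo')                        # include one value
    #   get_filter_condition(scif, brand_name=(['A', 'B'], self.EXCLUDE_FILTER))      # exclude a list
    #   get_filter_condition(scif, template_name=(['Cooler'], self.CONTAIN_FILTER))   # substring match
    # The returned boolean Series is ANDed with `facings > 0` whenever the facings field exists in the DF.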

    # helpers:
    @staticmethod
    def get_column_name(field_name, df):
        """
        checks what the real field name in the DataFrame is (returns it only if it exists in the DF).
        :param field_name: str
        :param df: scif/products
        :return: real column name (if exists)
        """
        if field_name in df.columns:
            return field_name
        return None

    def filter_by_type_value(self, relevant_scif, type_name, value):
        """
        filters scif with the type and value
        :param relevant_scif: current filtered scif
        :param type_name: str (from the template)
        :param value: str
        :return: new scif
        """
        if type_name == "":
            return relevant_scif
        values = value.split(', ')
        new_type_name = self.get_column_name(type_name, relevant_scif)
        if not new_type_name:
            print "There is no field '{}'".format(type_name)
            return relevant_scif
        if new_type_name in Const.NUMERIC_VALUES_TYPES:
            values = [float(x) for x in values]
        return relevant_scif[relevant_scif[new_type_name].isin(values)]

    @staticmethod
    def exclude_scif(exclude_line, relevant_scif):
        """
        filters products out of the scif
        :param exclude_line: line from the exclusion sheet
        :param relevant_scif: current filtered scif
        :return: new scif
        """
        exclude_products = exclude_line[Const.PRODUCT_EAN].split(', ')
        return relevant_scif[~(
            relevant_scif['product_ean_code'].isin(exclude_products))]

    @staticmethod
    def does_exist(kpi_line, column_name):
        """
        checks if kpi_line has values in this column, and if it does - returns a list of these values
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                if ", " in cell:
                    return cell.split(", ")
                else:
                    return cell.split(',')
        return None
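    # Illustrative behaviour of does_exist (cell values are examples only):
    #   a cell holding "Cooler, Rack" -> ['Cooler', 'Rack']
    #   a numeric cell holding 3      -> [3]
    #   an empty cell ("")            -> None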

    def get_kpi_function(self, kpi_type):
        """
        transfers every kpi to its own function
        :param kpi_type: value from "sheet" column in the main sheet
        :return: function
        """
        if kpi_type == Const.SURVEY:
            return self.calculate_survey_specific
        elif kpi_type == Const.AVAILABILITY:
            return self.calculate_availability
        elif kpi_type == Const.SOS:
            return self.calculate_sos
        elif kpi_type == Const.SOS_MAJOR:
            return self.calculate_sos_maj
        else:
            Log.warning(
                "The value '{}' in the 'sheet' column of the template is not recognized"
                .format(kpi_type))
            return None

    def get_united_scenes(self):
        return self.scif[self.scif['United Deliver'] ==
                         'Y']['scene_id'].unique().tolist()

    def get_pks_of_result(self, result):
        """
        converts string result to its pk (in static.kpi_result_value)
        :param result: str
        :return: int
        """
        pk = self.result_values[self.result_values['value'] ==
                                result]['pk'].iloc[0]
        return pk

    def write_to_db(self,
                    kpi_name,
                    score,
                    result=None,
                    target=None,
                    num=None,
                    den=None):
        """
        writes the result in the DB
        :param kpi_name: str
        :param score: float
        :param result: str
        :param target: int
        :param num: numerator result
        :param den: denominator result
        """
        if target and score == 0:
            delta = den * (target / 100.0) - num  # facings still missing to reach the target share
        else:
            delta = 0
        score_value = Const.PASS if score == 1 else Const.FAIL
        score = self.get_pks_of_result(score_value)
        kpi_fk = self.common_db2.get_kpi_fk_by_kpi_type(SUB_PROJECT + " " +
                                                        kpi_name)
        self.common_db2.write_to_db_result(
            fk=kpi_fk,
            result=result,
            score=score,
            should_enter=True,
            target=target,
            numerator_result=num,
            denominator_result=den,
            weight=delta,
            numerator_id=Const.MANUFACTURER_FK,
            denominator_id=self.store_id,
            identifier_parent=self.common_db2.get_dictionary(
                parent_name=SUB_PROJECT))
        self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name(
            kpi_name, 2),
                                score=score,
                                level=2)
        self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name(
            kpi_name, 3),
                                score=score,
                                level=3,
                                threshold=target,
                                result=result)
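    # Hedged worked example of the delta stored in `weight` above (numbers are illustrative): with a
    # denominator of 20 facings, a target of 60 (percent) and 9 numerator facings, a failed KPI gives
    # delta = 20 * (60 / 100.0) - 9 = 3.0, i.e. three more facings were needed to reach the target share.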

    def write_to_db_result(self,
                           fk,
                           level,
                           score,
                           set_type=Const.SOVI,
                           **kwargs):
        """
        This function creates the result data frame of every KPI (atomic KPI/KPI/KPI set),
        and appends the insert SQL query into the queries' list, later to be written to the DB.
        """
        if kwargs:
            kwargs['score'] = score
            attributes = self.create_attributes_dict(fk=fk,
                                                     level=level,
                                                     **kwargs)
        else:
            attributes = self.create_attributes_dict(fk=fk,
                                                     score=score,
                                                     level=level)
        if level == self.common_db.LEVEL1:
            table = self.common_db.KPS_RESULT
        elif level == self.common_db.LEVEL2:
            table = self.common_db.KPK_RESULT
        elif level == self.common_db.LEVEL3:
            table = self.common_db.KPI_RESULT
        else:
            return
        query = insert(attributes, table)
        self.common_db.kpi_results_queries.append(query)

    def create_attributes_dict(self,
                               score,
                               fk=None,
                               level=None,
                               display_text=None,
                               set_type=Const.SOVI,
                               **kwargs):
        """
        This function creates a data frame with all attributes needed for saving in KPI results tables.
        or
        you can send dict with all values in kwargs
        """
        kpi_static_data = self.kpi_static_data if set_type == Const.SOVI else self.kpi_static_data_integ
        if level == self.common_db.LEVEL1:
            if kwargs:
                kwargs['score'] = score
                values = [val for val in kwargs.values()]
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame(values, columns=col)
            else:
                kpi_set_name = kpi_static_data[kpi_static_data['kpi_set_fk'] ==
                                               fk]['kpi_set_name'].values[0]
                attributes = pd.DataFrame(
                    [(kpi_set_name, self.session_uid, self.store_id,
                      self.visit_date.isoformat(), format(score, '.2f'), fk)],
                    columns=[
                        'kps_name', 'session_uid', 'store_fk', 'visit_date',
                        'score_1', 'kpi_set_fk'
                    ])
        elif level == self.common_db.LEVEL2:
            if kwargs:
                kwargs['score'] = score
                values = [val for val in kwargs.values()]
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame(values, columns=col)
            else:
                kpi_name = kpi_static_data[kpi_static_data['kpi_fk'] ==
                                           fk]['kpi_name'].values[0].replace(
                                               "'", "\\'")
                attributes = pd.DataFrame(
                    [(self.session_uid, self.store_id,
                      self.visit_date.isoformat(), fk, kpi_name, score)],
                    columns=[
                        'session_uid', 'store_fk', 'visit_date', 'kpi_fk',
                        'kpk_name', 'score'
                    ])
        elif level == self.common_db.LEVEL3:
            data = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]
            kpi_fk = data['kpi_fk'].values[0]
            kpi_set_name = kpi_static_data[kpi_static_data['atomic_kpi_fk'] ==
                                           fk]['kpi_set_name'].values[0]
            display_text = data['kpi_name'].values[0]
            if kwargs:
                kwargs = self.add_additional_data_to_attributes(
                    kwargs, score, kpi_set_name, kpi_fk, fk,
                    datetime.utcnow().isoformat(), display_text)

                values = tuple([val for val in kwargs.values()])
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame([values], columns=col)
            else:
                attributes = pd.DataFrame(
                    [(display_text, self.session_uid, kpi_set_name,
                      self.store_id, self.visit_date.isoformat(),
                      datetime.utcnow().isoformat(), score, kpi_fk, fk)],
                    columns=[
                        'display_text', 'session_uid', 'kps_name', 'store_fk',
                        'visit_date', 'calculation_time', 'score', 'kpi_fk',
                        'atomic_kpi_fk'
                    ])
        else:
            attributes = pd.DataFrame()
        return attributes.to_dict()

    def add_additional_data_to_attributes(self, kwargs_dict, score,
                                          kpi_set_name, kpi_fk, fk, calc_time,
                                          display_text):
        kwargs_dict['score'] = score
        kwargs_dict['kps_name'] = kpi_set_name
        kwargs_dict['kpi_fk'] = kpi_fk
        kwargs_dict['atomic_kpi_fk'] = fk
        kwargs_dict['calculation_time'] = calc_time
        kwargs_dict['session_uid'] = self.session_uid
        kwargs_dict['store_fk'] = self.store_id
        kwargs_dict['visit_date'] = self.visit_date.isoformat()
        kwargs_dict['display_text'] = display_text
        return kwargs_dict

    def commit_results(self):
        """
        committing the results in both sets
        """
        self.common_db.delete_results_data_by_kpi_set()
        self.common_db.commit_results_data_without_delete()


class FunctionsToolBox:
    def __init__(self, data_provider, output, templates, store_attr):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.survey = Survey(self.data_provider, self.output)
        self.templates = templates
        if Const.CONVERTERS in self.templates:
            self.converters = self.templates[Const.CONVERTERS]
            self.exclusion_sheet = self.templates[Const.SKU_EXCLUSION]
        self.store_attr = store_attr

    # survey:

    def calculate_survey_specific(self,
                                  kpi_line,
                                  relevant_scif=None,
                                  isnt_dp=None):
        """
        Checks whether the survey question in the line got one of the accepted answers.
        :param kpi_line: line from the survey sheet
        :param relevant_scif: unused, for the general function
        :param isnt_dp: unused, for the general function
        :return: True if the question got an accepted answer, otherwise False
        """
        question = kpi_line[Const.Q_TEXT]
        if not question:
            question_id = kpi_line[Const.Q_ID]
            if question_id == "":
                Log.warning(
                    "The template has a survey question without ID or text")
                return False
            question = ('question_fk', int(question_id))
        answers = kpi_line[Const.ACCEPTED_ANSWER].split(',')
        min_answer = None if kpi_line[Const.REQUIRED_ANSWER] == '' else True
        for answer in answers:
            if self.survey.check_survey_answer(survey_text=question,
                                               target_answer=answer,
                                               min_required_answer=min_answer):
                return True
        return False

    # pointer the other kpi_functions:

    def calculate_kpi_by_type(self, main_line, filtered_scif):
        """
        calculates one KPI line from the main template by dispatching to its sheet-specific function
        :param main_line: one kpi line from the main template
        :param filtered_scif:
        :return: boolean, or None if the result should not be written to the DB
        """
        kpi_type = main_line[Const.SHEET]
        relevant_template = self.templates[kpi_type]
        relevant_template = relevant_template[relevant_template[Const.KPI_NAME]
                                              == main_line[Const.KPI_NAME]]
        target = len(relevant_template) if main_line[Const.GROUP_TARGET] == Const.ALL \
            else main_line[Const.GROUP_TARGET]
        isnt_dp = self.store_attr != Const.DP and main_line[Const.STORE_ATTRIBUTE] == Const.DP
        if main_line[Const.SAME_PACK] == Const.V:
            filtered_scif = filtered_scif.fillna("NAN")
            # only items categorized as SSD should be evaluated in this calculation; see PROS-6342
            filtered_scif = filtered_scif[filtered_scif['att4'] == 'SSD']
            if filtered_scif.empty:
                return False
            sizes = filtered_scif['size'].tolist()
            sub_packages_nums = filtered_scif['number_of_sub_packages'].tolist(
            )
            packages = set(zip(sizes, sub_packages_nums))
            for package in packages:
                # filter per package instead of narrowing filtered_scif itself on every iteration
                package_scif = filtered_scif[
                    (filtered_scif['size'] == package[0])
                    & (filtered_scif['number_of_sub_packages'] == package[1])]
                result = self.calculate_specific_kpi(
                    relevant_template, package_scif, isnt_dp, target,
                    self.calculate_availability)
                if result is False:
                    return result
            return True
        kpi_function = self.get_kpi_function(kpi_type)
        return self.calculate_specific_kpi(relevant_template, filtered_scif,
                                           isnt_dp, target, kpi_function)

    @staticmethod
    def calculate_specific_kpi(relevant_template, filtered_scif, isnt_dp,
                               target, kpi_function):
        """
        checks whether at least `target` of the KPI's lines pass
        :param relevant_template: specific template filtered with the specific kpi lines
        :param filtered_scif:
        :param isnt_dp: the main_line has "DP" flag and the store_attr is not DP
        :param target: integer
        :param kpi_function: specific function for the calculation
        :return: boolean, or None if the result should not be written to the DB
        """
        passed_counter = 0
        for i, kpi_line in relevant_template.iterrows():
            answer = kpi_function(kpi_line, filtered_scif, isnt_dp)
            if answer:
                passed_counter += 1
            elif answer is None:
                return None
        return passed_counter >= target
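    # Illustrative reading of calculate_specific_kpi (values are examples only): with three availability
    # lines for the KPI and target=2, the group passes when at least two lines return True; any line that
    # returns None short-circuits the whole group to None, so nothing is written to the DB.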

    # availability:

    def calculate_availability(self, kpi_line, relevant_scif, isnt_dp):  # V
        """
        checks whether the availability line passes the KPI (the relevant scif contains at least `target`
        products, with facings, that match the line's attributes).
        :param relevant_scif: filtered scif
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate
        DP lines
        :param kpi_line: line from the availability sheet
        :return: boolean
        """
        if isnt_dp and kpi_line[Const.MANUFACTURER] in Const.DP_MANU:
            return True
        filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif)
        target = kpi_line[Const.TARGET]
        return filtered_scif[
            filtered_scif['facings'] > 0]['facings'].count() >= target

    def filter_scif_availability(self, kpi_line, relevant_scif):  # V
        """
        calls filter_scif_specific for every column in the template of availability
        :param kpi_line:
        :param relevant_scif:
        :return:
        """
        names_of_columns = {
            Const.MANUFACTURER: "manufacturer_name",
            Const.BRAND: "brand_name",
            Const.TRADEMARK: "att2",
            Const.SIZE: "size",
            Const.NUM_SUB_PACKAGES: "number_of_sub_packages",
            Const.PREMIUM_SSD: "Premium SSD",
            Const.INNOVATION_BRAND: "Innovation Brand",
        }
        for name in names_of_columns:
            relevant_scif = self.filter_scif_specific(relevant_scif, kpi_line,
                                                      name,
                                                      names_of_columns[name])
        return relevant_scif

    def filter_scif_specific(self, relevant_scif, kpi_line, name_in_template,
                             name_in_scif):  # V
        """
        takes scif and filters it from the template
        :param relevant_scif: the current filtered scif
        :param kpi_line: line from one sheet (availability for example)
        :param name_in_template: the column name in the template
        :param name_in_scif: the column name in SCIF
        :return:
        """
        values = self.does_exist(kpi_line, name_in_template)
        if values:
            if name_in_scif in Const.NUMERIC_VALUES_TYPES:
                values = [float(x) for x in values]
            return relevant_scif[relevant_scif[name_in_scif].isin(values)]
        return relevant_scif
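    # Hedged sketch (hypothetical helper and data, not from the original template):
    # filtering the scif by one template column boils down to a pandas isin() mask
    # over the mapped scif column, as filter_scif_specific does above.
    @staticmethod
    def _sketch_isin_filter():
        demo_scif = pd.DataFrame({'brand_name': ['A', 'B', 'C'], 'facings': [3, 1, 2]})
        # keeps only the rows whose brand is in the requested value list (A and C)
        return demo_scif[demo_scif['brand_name'].isin(['A', 'C'])]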

    # SOS:

    def calculate_sos(self, kpi_line, relevant_scif, isnt_dp):
        """
        calculates SOS line in the relevant scif.
        :param kpi_line: line from SOS sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator.
        :return: boolean
        """
        relevant_scif = self.sos_first_filtering(kpi_line, relevant_scif)
        if kpi_line[Const.SSD_STILL] != "":
            relevant_scif = self.filter_by_type_value(
                relevant_scif, Const.SSD_STILL, kpi_line[Const.SSD_STILL])
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1]
        num_scif = self.filter_by_type_value(relevant_scif, num_type,
                                             num_value)
        if isnt_dp:
            num_scif = num_scif[~(
                num_scif['manufacturer_name'].isin(Const.DP_MANU))]
        target = float(kpi_line[Const.TARGET]) / 100
        percentage = num_scif['facings'].sum() / relevant_scif['facings'].sum() if relevant_scif['facings'].sum() > 0 \
            else 0
        return percentage >= target
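    # Hedged sketch (hypothetical helper and numbers): an SOS line passes when the
    # numerator facings share of the denominator facings meets the template target.
    @staticmethod
    def _sketch_sos_share(num_facings, den_facings, target_percent):
        # e.g. _sketch_sos_share(30.0, 45.0, 60) -> True, since 30/45 ~= 0.67 >= 0.60
        share = float(num_facings) / den_facings if den_facings > 0 else 0
        return share >= target_percent / 100.0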

    def sos_first_filtering(self, kpi_line, relevant_scif):
        """
        The common part of both SOS functions, first filtering
        :param kpi_line: line from the SOS/SOS_maj template
        :param relevant_scif: filtered scif
        :return: new filtered scif
        """
        kpi_name = kpi_line[Const.KPI_NAME]
        if kpi_line[Const.EXCLUSION_SHEET] == Const.V:
            relevant_scif = self.exclude_from_scif(kpi_name, relevant_scif)
        relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"]
        den_type = kpi_line[Const.DEN_TYPES_1]
        den_value = kpi_line[Const.DEN_VALUES_1]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        return relevant_scif

    # SOS majority:

    def calculate_sos_maj(self, kpi_line, relevant_scif, isnt_dp):
        """
        calculates SOS majority line in the relevant scif. Filters the denominator and sends the line to the
        match function (majority or dominant)
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator (and the denominator of the dominant part).
        :return: boolean
        """
        relevant_scif = self.sos_first_filtering(kpi_line, relevant_scif)
        if kpi_line[Const.EXCLUSION_SHEET] == Const.V:
            kpi_name = kpi_line[Const.KPI_NAME]
            relevant_scif = self.exclude_from_scif(kpi_name, relevant_scif)
        den_type = kpi_line[Const.DEN_TYPES_2]
        den_value = kpi_line[Const.DEN_VALUES_2]
        relevant_scif = self.filter_by_type_value(relevant_scif, den_type,
                                                  den_value)
        if kpi_line[Const.MAJ_DOM] == Const.MAJOR:
            answer = self.calculate_majority_part(kpi_line, relevant_scif,
                                                  isnt_dp)
        elif kpi_line[Const.MAJ_DOM] == Const.DOMINANT:
            answer = self.calculate_dominant_part(kpi_line, relevant_scif,
                                                  isnt_dp)
        else:
            Log.warning("SOS majority does not know '{}' part".format(
                kpi_line[Const.MAJ_DOM]))
            answer = False
        return answer

    def calculate_majority_part(self, kpi_line, relevant_scif, isnt_dp):
        """
        filters the numerator and checks if the SOS is bigger than 50%.
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out of the numerator.
        :return: boolean
        """
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1]
        num_scif = self.filter_by_type_value(relevant_scif, num_type,
                                             num_value)
        num_type = kpi_line[Const.NUM_TYPES_2]
        num_value = kpi_line[Const.NUM_VALUES_2]
        num_scif = self.filter_by_type_value(num_scif, num_type, num_value)
        if num_scif.empty:
            return None
        if isnt_dp:
            num_scif = num_scif[~(
                num_scif['manufacturer_name'].isin(Const.DP_MANU))]
        target = Const.MAJORITY_TARGET
        return num_scif['facings'].sum() / relevant_scif['facings'].sum() >= target

    def calculate_dominant_part(self, kpi_line, relevant_scif, isnt_dp):
        """
        filters the numerator and checks if the given value in the given type is the one with the most facings.
        :param kpi_line: line from SOS majority sheet.
        :param relevant_scif: filtered scif.
        :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter
        all the DP products out.
        :return: boolean
        """
        type_name = self.get_column_name(kpi_line[Const.NUM_TYPES_1],
                                         relevant_scif)
        values = str(kpi_line[Const.NUM_VALUES_1]).split(', ')
        if isnt_dp:
            relevant_scif = relevant_scif[~(
                relevant_scif['manufacturer_name'].isin(Const.DP_MANU))]
            if kpi_line[Const.ADD_IF_NOT_DP] != "":
                values_to_add = str(kpi_line[Const.ADD_IF_NOT_DP]).split(', ')
                values = values + values_to_add
        if type_name in Const.NUMERIC_VALUES_TYPES:
            values = [float(x) for x in values]
        max_facings, needed_one = 0, 0
        values_type = relevant_scif[type_name].unique().tolist()
        if None in values_type:
            values_type.remove(None)
            current_sum = relevant_scif[
                relevant_scif[type_name].isnull()]['facings'].sum()
            if current_sum > max_facings:
                max_facings = current_sum
        for value in values_type:
            current_sum = relevant_scif[relevant_scif[type_name] ==
                                        value]['facings'].sum()
            if current_sum > max_facings:
                max_facings = current_sum
            if value in values:
                needed_one += current_sum
        return needed_one >= max_facings
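    # Hedged sketch (hypothetical helper and facings counts): "dominant" means the
    # requested value(s) together hold at least as many facings as the single
    # largest value in the filtered scif.
    @staticmethod
    def _sketch_dominant(facings_by_value, wanted_values):
        # e.g. _sketch_dominant({'Coca-Cola': 12, 'Other': 9}, ['Coca-Cola']) -> True
        needed = sum(facings_by_value.get(value, 0) for value in wanted_values)
        return needed >= max(facings_by_value.values())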

    # helpers:

    def get_column_name(self, field_name, df):
        """
        checks what the real field name in the DataFrame is (if it exists in the DF or in the "converter" sheet).
        :param field_name: str
        :param df: scif/products
        :return: real column name (if exists)
        """
        if field_name in df.columns:
            return field_name
        if field_name.upper() in self.converters[
                Const.NAME_IN_TEMP].str.upper().tolist():
            field_name = self.converters[self.converters[
                Const.NAME_IN_TEMP].str.upper() == field_name.upper()][
                    Const.NAME_IN_DB].iloc[0]
            return field_name
        return None

    def filter_by_type_value(self, relevant_scif, type_name, value):
        """
        filters scif with the type and value
        :param relevant_scif: current filtered scif
        :param type_name: str (from the template)
        :param value: str
        :return: new scif
        """
        if type_name == "":
            return relevant_scif
        values = value.split(', ')
        new_type_name = self.get_column_name(type_name, relevant_scif)
        if not new_type_name:
            print "There is no field '{}'".format(type_name)
            return relevant_scif
        if new_type_name in Const.NUMERIC_VALUES_TYPES:
            values = [float(x) for x in values]
        return relevant_scif[relevant_scif[new_type_name].isin(values)]

    @staticmethod
    def exclude_scif(exclude_line, relevant_scif):
        """
        filters products out of the scif
        :param exclude_line: line from the exclusion sheet
        :param relevant_scif: current filtered scif
        :return: new scif
        """
        if exclude_line[Const.PRODUCT_EAN] != "":
            exclude_products = exclude_line[Const.PRODUCT_EAN].split(', ')
            relevant_scif = relevant_scif[~(
                relevant_scif['product_ean_code'].isin(exclude_products))]
        if exclude_line[Const.BRAND] != "":
            exclude_brands = exclude_line[Const.BRAND].split(', ')
            relevant_scif = relevant_scif[~(
                relevant_scif['brand_name'].isin(exclude_brands))]
        return relevant_scif

    def exclude_from_scif(self, kpi_name, relevant_scif):
        """
        excludes all the necessary types from the scif
        :param kpi_name:
        :param relevant_scif:
        :return: filtered relevant_scif
        """
        relevant_exclusions = self.exclusion_sheet[self.exclusion_sheet[
            Const.KPI_NAME] == kpi_name]
        for i, exc_line in relevant_exclusions.iterrows():
            relevant_scif = self.exclude_scif(exc_line, relevant_scif)
        return relevant_scif

    @staticmethod
    def does_exist(kpi_line, column_name):
        """
        checks if kpi_line has values in this column, and if it does - returns a list of these values
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return cell.split(", ")
        return None
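    # Hedged sketch (hypothetical helper and cells): does_exist turns a template cell
    # into a list of values - numeric cells become one-element lists and text cells
    # are split on ", ".
    @staticmethod
    def _sketch_cell_to_values(cell):
        # e.g. _sketch_cell_to_values("Brand A, Brand B") -> ['Brand A', 'Brand B']
        # e.g. _sketch_cell_to_values(330) -> [330]
        if isinstance(cell, (int, float)):
            return [cell]
        return cell.split(", ")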

    def get_kpi_function(self, kpi_type):
        """
        transfers every kpi to its own function
        :param kpi_type: value from "sheet" column in the main sheet
        :return: function
        """
        if kpi_type == Const.SURVEY:
            return self.calculate_survey_specific
        elif kpi_type == Const.AVAILABILITY:
            return self.calculate_availability
        elif kpi_type == Const.SOS:
            return self.calculate_sos
        elif kpi_type == Const.SOS_MAJOR:
            return self.calculate_sos_maj
        else:
            Log.warning(
                "The value '{}' in column sheet in the template is not recognized"
                .format(kpi_type))
            return None
Example #17
0
    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = CommonV2  # remove later
        self.common_v2 = CommonV2(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        self.kpi_results_queries = []
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.survey = Survey(self.data_provider, output=self.output, ps_data_provider=self.ps_data_provider,
                             common=self.common_v2)
        self.store_sos_policies = self.ps_data_provider.get_store_policies()
        self.labels = self.ps_data_provider.get_labels()
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_info = self.ps_data_provider.get_ps_store_info(self.store_info)
        self.country = self.store_info['country'].iloc[0]
        self.current_date = datetime.now()
        self.extra_spaces_template = pd.read_excel(Const.EXTRA_SPACES_RELEVANT_SUB_CATEGORIES_PATH)
        self.store_targets = pd.read_excel(Const.STORE_TARGETS_PATH)
        self.sub_category_weight = pd.read_excel(Const.SUB_CATEGORY_TARGET_PATH, sheetname='category_score')
        self.kpi_weights = pd.read_excel(Const.SUB_CATEGORY_TARGET_PATH, sheetname='max_weight')
        self.targets = self.ps_data_provider.get_kpi_external_targets()
        self.store_assortment = PSAssortmentDataProvider(
            self.data_provider).execute(policy_name=None)
        self.supervisor_target = self.get_supervisor_target()
        try:
            self.sub_category_assortment = pd.merge(self.store_assortment,
                                                    self.all_products.loc[:, ['product_fk', 'sub_category',
                                                                              'sub_category_fk']],
                                                    how='left', on='product_fk')
            self.sub_category_assortment = \
                self.sub_category_assortment[~self.sub_category_assortment['assortment_name'].str.contains(
                    'ASSORTMENT')]
            self.sub_category_assortment = pd.merge(self.sub_category_assortment, self.sub_category_weight, how='left',
                                                    left_on='sub_category',
                                                    right_on='Category')
        except KeyError:
            self.sub_category_assortment = pd.DataFrame()
        self.update_score_sub_category_weights()
        try:
            self.store_assortment_without_powerskus = \
                self.store_assortment[self.store_assortment['assortment_name'].str.contains('ASSORTMENT')]
        except KeyError:
            self.store_assortment_without_powerskus = pd.DataFrame()

        self.adherence_results = pd.DataFrame(columns=['product_fk', 'trax_average',
                                                       'suggested_price', 'into_interval', 'min_target', 'max_target',
                                                       'percent_range'])
        self.extra_spaces_results = pd.DataFrame(
            columns=['sub_category_fk', 'template_fk', 'count'])

        self.powersku_scores = {}
        self.powersku_empty = {}

        self.powersku_bonus = {}
        self.powersku_price = {}
        self.powersku_sos = {}
Example #18
0
class HEINZCRToolBox:
    LVL3_HEADERS = ['assortment_group_fk', 'assortment_fk', 'target', 'product_fk',
                    'in_store', 'kpi_fk_lvl1', 'kpi_fk_lvl2', 'kpi_fk_lvl3', 'group_target_date',
                    'assortment_super_group_fk']
    LVL2_HEADERS = ['assortment_group_fk', 'assortment_fk', 'target', 'passes', 'total',
                    'kpi_fk_lvl1', 'kpi_fk_lvl2', 'group_target_date']
    LVL1_HEADERS = ['assortment_group_fk', 'target', 'passes', 'total', 'kpi_fk_lvl1']
    ASSORTMENT_FK = 'assortment_fk'
    ASSORTMENT_GROUP_FK = 'assortment_group_fk'
    ASSORTMENT_SUPER_GROUP_FK = 'assortment_super_group_fk'
    BRAND_VARIENT = 'brand_varient'
    NUMERATOR = 'numerator'
    DENOMINATOR = 'denominator'
    DISTRIBUTION_KPI = 'Distribution - SKU'
    OOS_SKU_KPI = 'OOS - SKU'
    OOS_KPI = 'OOS'

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = CommonV2  # remove later
        self.common_v2 = CommonV2(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        self.kpi_results_queries = []
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.survey = Survey(self.data_provider, output=self.output, ps_data_provider=self.ps_data_provider,
                             common=self.common_v2)
        self.store_sos_policies = self.ps_data_provider.get_store_policies()
        self.labels = self.ps_data_provider.get_labels()
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_info = self.ps_data_provider.get_ps_store_info(self.store_info)
        self.country = self.store_info['country'].iloc[0]
        self.current_date = datetime.now()
        self.extra_spaces_template = pd.read_excel(Const.EXTRA_SPACES_RELEVANT_SUB_CATEGORIES_PATH)
        self.store_targets = pd.read_excel(Const.STORE_TARGETS_PATH)
        self.sub_category_weight = pd.read_excel(Const.SUB_CATEGORY_TARGET_PATH, sheetname='category_score')
        self.kpi_weights = pd.read_excel(Const.SUB_CATEGORY_TARGET_PATH, sheetname='max_weight')
        self.targets = self.ps_data_provider.get_kpi_external_targets()
        self.store_assortment = PSAssortmentDataProvider(
            self.data_provider).execute(policy_name=None)
        self.supervisor_target = self.get_supervisor_target()
        try:
            self.sub_category_assortment = pd.merge(self.store_assortment,
                                                    self.all_products.loc[:, ['product_fk', 'sub_category',
                                                                              'sub_category_fk']],
                                                    how='left', on='product_fk')
            self.sub_category_assortment = \
                self.sub_category_assortment[~self.sub_category_assortment['assortment_name'].str.contains(
                    'ASSORTMENT')]
            self.sub_category_assortment = pd.merge(self.sub_category_assortment, self.sub_category_weight, how='left',
                                                    left_on='sub_category',
                                                    right_on='Category')
        except KeyError:
            self.sub_category_assortment = pd.DataFrame()
        self.update_score_sub_category_weights()
        try:
            self.store_assortment_without_powerskus = \
                self.store_assortment[self.store_assortment['assortment_name'].str.contains('ASSORTMENT')]
        except KeyError:
            self.store_assortment_without_powerskus = pd.DataFrame()

        self.adherence_results = pd.DataFrame(columns=['product_fk', 'trax_average',
                                                       'suggested_price', 'into_interval', 'min_target', 'max_target',
                                                       'percent_range'])
        self.extra_spaces_results = pd.DataFrame(
            columns=['sub_category_fk', 'template_fk', 'count'])

        self.powersku_scores = {}
        self.powersku_empty = {}

        self.powersku_bonus = {}
        self.powersku_price = {}
        self.powersku_sos = {}

    def main_calculation(self, *args, **kwargs):
        """
        This function calculates the KPI results.
        """
        if self.scif.empty:
            return
        # these functions must run first
        #  self.adherence_results = self.heinz_global_price_adherence(pd.read_excel(Const.PRICE_ADHERENCE_TEMPLATE_PATH,
        #                                                                          sheetname="Price Adherence"))
        self.adherence_results = self.heinz_global_price_adherence(self.targets)
        self.extra_spaces_results = self.heinz_global_extra_spaces()
        self.set_relevant_sub_categories()

        # this isn't relevant to the 'Perfect Score' calculation
        self.heinz_global_distribution_per_category()
        self.calculate_assortment()

        self.calculate_powersku_assortment()
        self.main_sos_calculation()
        self.calculate_powersku_price_adherence()
        self.calculate_perfect_store_extra_spaces()
        self.check_bonus_question()

        self.calculate_perfect_sub_category()

    def calculate_assortment(self):
        if self.store_assortment_without_powerskus.empty:
            return

        products_in_store = self.scif[self.scif['facings'] > 0]['product_fk'].unique().tolist()
        pass_count = 0

        total_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type('Distribution')
        identifier_dict = self.common_v2.get_dictionary(kpi_fk=total_kpi_fk)

        oos_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type('OOS')
        oos_identifier_dict = self.common_v2.get_dictionary(kpi_fk=oos_kpi_fk)

        for row in self.store_assortment_without_powerskus.itertuples():
            result = 0
            if row.product_fk in products_in_store:
                result = 1
                pass_count += 1

            sku_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type('Distribution - SKU')
            self.common_v2.write_to_db_result(sku_kpi_fk, numerator_id=row.product_fk, denominator_id=row.assortment_fk,
                                              result=result, identifier_parent=identifier_dict, should_enter=True)

            oos_result = 0 if result else 1
            oos_sku_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type('OOS - SKU')
            self.common_v2.write_to_db_result(oos_sku_kpi_fk, numerator_id=row.product_fk,
                                              denominator_id=row.assortment_fk,
                                              result=oos_result, identifier_parent=oos_identifier_dict,
                                              should_enter=True)

        number_of_products_in_assortment = len(self.store_assortment_without_powerskus)
        if number_of_products_in_assortment:
            total_result = (pass_count / float(number_of_products_in_assortment)) * 100
            oos_products = number_of_products_in_assortment - pass_count
            oos_result = (oos_products / float(number_of_products_in_assortment)) * 100
        else:
            total_result = 0
            oos_products = number_of_products_in_assortment
            oos_result = number_of_products_in_assortment
        self.common_v2.write_to_db_result(total_kpi_fk, numerator_id=Const.OWN_MANUFACTURER_FK,
                                          denominator_id=self.store_id,
                                          numerator_result=pass_count,
                                          denominator_result=number_of_products_in_assortment,
                                          result=total_result, identifier_result=identifier_dict)
        self.common_v2.write_to_db_result(oos_kpi_fk, numerator_id=Const.OWN_MANUFACTURER_FK,
                                          denominator_id=self.store_id,
                                          numerator_result=oos_products,
                                          denominator_result=number_of_products_in_assortment,
                                          result=oos_result, identifier_result=oos_identifier_dict)
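    # Hedged sketch (hypothetical helper and counts): Distribution and OOS are
    # complementary percentages over the non-PowerSKU assortment, as computed above.
    # Assumes a non-empty assortment, matching the guarded branch above.
    @staticmethod
    def _sketch_distribution_and_oos(pass_count, assortment_size):
        # e.g. _sketch_distribution_and_oos(7, 10) -> (70.0, 30.0)
        distribution = (pass_count / float(assortment_size)) * 100
        oos = ((assortment_size - pass_count) / float(assortment_size)) * 100
        return distribution, oos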

    def calculate_powersku_assortment(self):
        if self.sub_category_assortment.empty:
            return 0

        sub_category_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.POWER_SKU_SUB_CATEGORY)
        sku_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.POWER_SKU)
        target_kpi_weight = float(
            self.kpi_weights['Score'][self.kpi_weights['KPIs'] == Const.KPI_WEIGHTS['POWERSKU']].iloc[
                0])

        kpi_weight = self.get_kpi_weight('POWERSKU')

        products_in_session = self.scif[self.scif['facings'] > 0]['product_fk'].unique().tolist()
        self.sub_category_assortment['in_session'] = \
            self.sub_category_assortment.loc[:, 'product_fk'].isin(products_in_session)

        # save PowerSKU results at SKU level
        for sku in self.sub_category_assortment[
            ['product_fk', 'sub_category_fk', 'in_session', 'sub_category']].itertuples():
            parent_dict = self.common_v2.get_dictionary(
                kpi_fk=sub_category_kpi_fk, sub_category_fk=sku.sub_category_fk)
            relevant_sub_category_df = self.sub_category_assortment[
                self.sub_category_assortment['sub_category'] == sku.sub_category]
            if relevant_sub_category_df.empty:
                sub_category_count = 0
            else:
                sub_category_count = len(relevant_sub_category_df)

            result = 1 if sku.in_session else 0

            score = result * (target_kpi_weight / float(sub_category_count))
            self.common_v2.write_to_db_result(sku_kpi_fk, numerator_id=sku.product_fk,
                                              denominator_id=sku.sub_category_fk, score=score,
                                              result=result, identifier_parent=parent_dict, should_enter=True)
        # save PowerSKU results at sub_category level

        aggregated_results = self.sub_category_assortment.groupby('sub_category_fk').agg(
            {'in_session': 'sum', 'product_fk': 'count'}).reset_index().rename(
            columns={'product_fk': 'product_count'})
        aggregated_results['percent_complete'] = \
            aggregated_results.loc[:, 'in_session'] / aggregated_results.loc[:, 'product_count']
        aggregated_results['result'] = aggregated_results['percent_complete']
        for sub_category in aggregated_results.itertuples():
            identifier_dict = self.common_v2.get_dictionary(kpi_fk=sub_category_kpi_fk,
                                                            sub_category_fk=sub_category.sub_category_fk)

            result = sub_category.result
            score = result * kpi_weight

            self.powersku_scores[sub_category.sub_category_fk] = score
            self.common_v2.write_to_db_result(sub_category_kpi_fk, numerator_id=sub_category.sub_category_fk,
                                              denominator_id=self.store_id,
                                              identifier_parent=sub_category.sub_category_fk,
                                              identifier_result=identifier_dict, result=result * 100, score=score,
                                              weight=target_kpi_weight, target=target_kpi_weight,
                                              should_enter=True)
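    # Hedged sketch (hypothetical helper and numbers): each PowerSKU contributes an
    # equal share of the KPI weight, so a present SKU scores weight / (number of
    # SKUs in its sub-category), as in the SKU-level loop above.
    @staticmethod
    def _sketch_powersku_score(in_session, kpi_weight, skus_in_sub_category):
        # e.g. _sketch_powersku_score(True, 3.0, 4) -> 0.75
        result = 1 if in_session else 0
        return result * (kpi_weight / float(skus_in_sub_category))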

    def heinz_global_distribution_per_category(self):
        relevant_stores = pd.DataFrame(columns=self.store_sos_policies.columns)
        for row in self.store_sos_policies.itertuples():
            policies = json.loads(row.store_policy)
            df = self.store_info
            df_1 = df
            for key, value in policies.items():
                try:
                    df_1 = df[df[key].isin(value)]
                except KeyError:
                    continue
            if not df_1.empty:
                stores = self.store_sos_policies[
                    (self.store_sos_policies['store_policy'] == row.store_policy.encode('utf-8')) &
                    (self.store_sos_policies['target_validity_start_date'] <= datetime.date(self.current_date))]
                if stores.empty:
                    relevant_stores = stores
                else:
                    relevant_stores = relevant_stores.append(stores, ignore_index=True)
        relevant_stores = relevant_stores.drop_duplicates(subset=['kpi', 'sku_name', 'target', 'sos_policy'],
                                                          keep='last')
        for row in relevant_stores.itertuples():
            sos_policy = json.loads(row.sos_policy)
            numerator_key = sos_policy[self.NUMERATOR].keys()[0]
            denominator_key = sos_policy[self.DENOMINATOR].keys()[0]
            numerator_val = sos_policy[self.NUMERATOR][numerator_key]
            denominator_val = sos_policy[self.DENOMINATOR][denominator_key]
            target = row.target * 100
            if numerator_key == 'manufacturer':
                numerator_key = numerator_key + '_name'

            if denominator_key == 'sub_category' \
                    and denominator_val.lower() != 'all' \
                    and json.loads(row.store_policy).get('store_type') \
                    and len(json.loads(row.store_policy).get('store_type')) == 1:
                try:
                    denominator_id = self.all_products[self.all_products[denominator_key] == denominator_val][
                        denominator_key + '_fk'].values[0]
                    numerator_id = self.all_products[self.all_products[numerator_key] == numerator_val][
                        numerator_key.split('_')[0] + '_fk'].values[0]

                    # self.common.write_to_db_result_new_tables(fk=12, numerator_id=numerator_id,
                    #                                           numerator_result=None,
                    #                                           denominator_id=denominator_id,
                    #                                           denominator_result=None,
                    #                                           result=target)
                    self.common_v2.write_to_db_result(fk=12, numerator_id=numerator_id, numerator_result=None,
                                                      denominator_id=denominator_id, denominator_result=None,
                                                      result=target)
                except Exception as e:
                    Log.warning(denominator_key + ' - - ' + denominator_val)

    def calculate_perfect_store(self):
        pass

    def calculate_perfect_sub_category(self):
        kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.PERFECT_STORE_SUB_CATEGORY)
        parent_kpi = self.common_v2.get_kpi_fk_by_kpi_type(Const.PERFECT_STORE)

        total_score = 0
        sub_category_fk_list = []
        kpi_type_dict_scores = [self.powersku_scores, self.powersku_empty, self.powersku_price,
                                self.powersku_sos]

        for kpi_dict in kpi_type_dict_scores:
            sub_category_fk_list.extend(kpi_dict.keys())

        kpi_weight_perfect_store = 0
        if self.country in self.sub_category_weight.columns.to_list():
            kpi_weight_perfect_store = self.sub_category_weight[self.country][
                self.sub_category_weight['Category'] == Const.PERFECT_STORE_KPI_WEIGHT]

            if not kpi_weight_perfect_store.empty:
                kpi_weight_perfect_store = kpi_weight_perfect_store.iloc[0]

        unique_sub_cat_fks = list(dict.fromkeys(sub_category_fk_list))

        sub_category_fks = self.sub_category_weight.sub_category_fk.unique().tolist()
        relevant_sub_cat_list = [x for x in sub_category_fks if str(x) != 'nan']

        # relevant_sub_cat_list = self.sub_category_assortment['sub_category_fk'][
        #     self.sub_category_assortment['Category'] != pd.np.nan].unique().tolist()
        for sub_cat_fk in unique_sub_cat_fks:
            if sub_cat_fk in relevant_sub_cat_list:
                bonus_score = self.powersku_bonus.get(sub_cat_fk, 0)

                sub_cat_weight = self.get_weight(sub_cat_fk)
                sub_cat_score = self.calculate_sub_category_sum(kpi_type_dict_scores, sub_cat_fk)

                result = sub_cat_score

                score = (result * sub_cat_weight) + bonus_score
                total_score += score

                self.common_v2.write_to_db_result(kpi_fk, numerator_id=sub_cat_fk,
                                                  denominator_id=self.store_id,
                                                  result=result, score=score,
                                                  identifier_parent=parent_kpi,
                                                  identifier_result=sub_cat_fk,
                                                  weight=sub_cat_weight * 100,
                                                  should_enter=True)

        self.common_v2.write_to_db_result(parent_kpi, numerator_id=Const.OWN_MANUFACTURER_FK,
                                          denominator_id=self.store_id,
                                          result=total_score, score=total_score,
                                          identifier_result=parent_kpi,
                                          target=kpi_weight_perfect_store,
                                          should_enter=True)

    def main_sos_calculation(self):
        relevant_stores = pd.DataFrame(columns=self.store_sos_policies.columns)
        for row in self.store_sos_policies.itertuples():
            policies = json.loads(row.store_policy)
            df = self.store_info
            df1 = df
            for key, value in policies.items():
                try:
                    if key != 'additional_attribute_3':
                        df1 = df[df[key].isin(value)]
                except KeyError:
                    continue
            if not df1.empty:
                stores = \
                    self.store_sos_policies[(self.store_sos_policies['store_policy'].str.encode(
                                                'utf-8') == row.store_policy.encode('utf-8')) &
                                            (self.store_sos_policies['target_validity_start_date'] <= datetime.date(
                                                self.current_date))]
                if stores.empty:
                    relevant_stores = stores
                else:
                    relevant_stores = relevant_stores.append(stores, ignore_index=True)

        relevant_stores = relevant_stores.drop_duplicates(subset=['kpi', 'sku_name', 'target', 'sos_policy'],
                                                          keep='last')

        results_df = pd.DataFrame(columns=['sub_category', 'sub_category_fk', 'score'])

        sos_sub_category_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.SOS_SUB_CATEGORY)

        for row in relevant_stores.itertuples():
            sos_policy = json.loads(row.sos_policy)
            numerator_key = sos_policy[self.NUMERATOR].keys()[0]
            denominator_key = sos_policy[self.DENOMINATOR].keys()[0]
            numerator_val = sos_policy[self.NUMERATOR][numerator_key]
            denominator_val = sos_policy[self.DENOMINATOR][denominator_key]
            json_policy = json.loads(row.store_policy)
            kpi_fk = row.kpi

            # This is to assign the KPI to SOS_manufacturer_category_GLOBAL
            if json_policy.get('store_type') and len(json_policy.get('store_type')) > 1:
                kpi_fk = 8

            if numerator_key == 'manufacturer':
                numerator_key = numerator_key + '_name'
                # we need to include 'Philadelphia' as a manufacturer for all countries EXCEPT Chile
                if self.country == 'Chile':
                    numerator_values = [numerator_val]
                else:
                    numerator_values = [numerator_val, 'Philadelphia']
            else:
                # if the numerator isn't 'manufacturer', we just need to convert the value to a list
                numerator_values = [numerator_val]

            if denominator_key == 'sub_category':
                include_stacking_list = ['Nuts', 'DRY CHEESE', 'IWSN', 'Shredded', 'SNACK']
                if denominator_val in include_stacking_list:
                    facings_field = 'facings'
                else:
                    facings_field = 'facings_ign_stack'
            else:
                facings_field = 'facings_ign_stack'

            if denominator_key == 'sub_category' and denominator_val.lower() == 'all':
                # Here we are dealing with a KPI whose target has no denominator,
                # so the calculation should be done on the numerator only
                numerator = self.scif[(self.scif[numerator_key] == numerator_val) &
                                      (self.scif['location_type'] == 'Primary Shelf')
                                      ][facings_field].sum()
                kpi_fk = 9
                denominator = None
                denominator_id = None
            else:
                numerator = self.scif[(self.scif[numerator_key].isin(numerator_values)) &
                                      (self.scif[denominator_key] == denominator_val) &
                                      (self.scif['location_type'] == 'Primary Shelf')][facings_field].sum()
                denominator = self.scif[(self.scif[denominator_key] == denominator_val) &
                                        (self.scif['location_type'] == 'Primary Shelf')][facings_field].sum()

            try:
                if denominator is not None:
                    denominator_id = self.all_products[self.all_products[denominator_key] == denominator_val][
                        denominator_key + '_fk'].values[0]
                if numerator is not None:
                    numerator_id = self.all_products[self.all_products[numerator_key] == numerator_val][
                        numerator_key.split('_')[0] + '_fk'].values[0]

                sos = 0
                if numerator and denominator:
                    sos = np.divide(float(numerator), float(denominator)) * 100
                score = 0
                target = row.target * 100
                if sos >= target:
                    score = 100

                identifier_parent = None
                should_enter = False
                if denominator_key == 'sub_category' and kpi_fk == row.kpi:
                    # if this is a sub_category result, save it to results_df for the 'Perfect Store' score
                    results_df.loc[len(results_df)] = [denominator_val, denominator_id, score / 100]
                    identifier_parent = self.common_v2.get_dictionary(kpi_fk=sos_sub_category_kpi_fk,
                                                                      sub_category_fk=denominator_id)
                    should_enter = True

                manufacturer = None
                self.common_v2.write_to_db_result(kpi_fk, numerator_id=numerator_id, numerator_result=numerator,
                                                  denominator_id=denominator_id, denominator_result=denominator,
                                                  result=target, score=sos, target=target,
                                                  score_after_actions=manufacturer, identifier_parent=identifier_parent,
                                                  should_enter=should_enter)
            except Exception as e:
                Log.warning(denominator_key + ' - - ' + denominator_val)

        # if there are no sub_category sos results, there's no perfect store information to be saved
        if len(results_df) == 0:
            return 0

        # save aggregated results for each sub category
        kpi_weight = self.get_kpi_weight('SOS')
        for row in results_df.itertuples():
            identifier_result = \
                self.common_v2.get_dictionary(kpi_fk=sos_sub_category_kpi_fk,
                                              sub_category_fk=row.sub_category_fk)

            # sub_cat_weight = self.get_weight(row.sub_category_fk)
            result = row.score
            score = result * kpi_weight

            self.powersku_sos[row.sub_category_fk] = score
            # limit results so that aggregated results can only add up to 3
            self.common_v2.write_to_db_result(sos_sub_category_kpi_fk,
                                              numerator_id=row.sub_category_fk,
                                              denominator_id=self.store_id,
                                              result=row.score, score=score,
                                              identifier_parent=row.sub_category_fk,
                                              identifier_result=identifier_result,
                                              weight=kpi_weight,
                                              target=kpi_weight,
                                              should_enter=True)

    def calculate_powersku_price_adherence(self):
        adherence_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.POWER_SKU_PRICE_ADHERENCE)
        adherence_sub_category_kpi_fk = \
            self.common_v2.get_kpi_fk_by_kpi_type(Const.POWER_SKU_PRICE_ADHERENCE_SUB_CATEGORY)

        if self.sub_category_assortment.empty:
            return False

        results = pd.merge(self.sub_category_assortment,
                           self.adherence_results, how='left', on='product_fk')
        results['into_interval'].fillna(0, inplace=True)

        for row in results.itertuples():
            parent_dict = self.common_v2.get_dictionary(kpi_fk=adherence_sub_category_kpi_fk,
                                                        sub_category_fk=row.sub_category_fk)

            score_value = 'Not Present'
            in_session = row.in_session
            if in_session:
                if not pd.isna(row.trax_average) and row.suggested_price:
                    price_in_interval = 1 if row.into_interval == 1 else 0
                    if price_in_interval == 1:
                        score_value = 'Pass'
                    else:
                        score_value = 'Fail'
                else:
                    score_value = 'No Price'

            score = Const.PRESENCE_PRICE_VALUES[score_value]
            self.common_v2.write_to_db_result(adherence_kpi_fk, numerator_id=row.product_fk,
                                              denominator_id=row.sub_category_fk, result=row.trax_average,
                                              score=score, target=row.suggested_price, numerator_result=row.min_target,
                                              denominator_result=row.max_target,
                                              weight=row.percent_range,
                                              identifier_parent=parent_dict, should_enter=True)

        aggregated_results = results.groupby('sub_category_fk').agg(
            {'into_interval': 'sum', 'product_fk': 'count'}).reset_index().rename(
            columns={'product_fk': 'product_count'})
        aggregated_results['percent_complete'] = \
            aggregated_results.loc[:, 'into_interval'] / aggregated_results.loc[:, 'product_count']

        for row in aggregated_results.itertuples():
            identifier_result = self.common_v2.get_dictionary(kpi_fk=adherence_sub_category_kpi_fk,
                                                              sub_category_fk=row.sub_category_fk)
            kpi_weight = self.get_kpi_weight('PRICE')
            result = row.percent_complete
            score = result * kpi_weight

            self.powersku_price[row.sub_category_fk] = score

            self.common_v2.write_to_db_result(adherence_sub_category_kpi_fk, numerator_id=row.sub_category_fk,
                                              denominator_id=self.store_id, result=result, score=score,
                                              numerator_result=row.into_interval, denominator_result=row.product_count,
                                              identifier_parent=row.sub_category_fk,
                                              identifier_result=identifier_result,
                                              weight=kpi_weight, target=kpi_weight,
                                              should_enter=True)

    def heinz_global_price_adherence(self, config_df):
        config_df = config_df.sort_values(by=["received_time"], ascending=False).drop_duplicates(
            subset=['start_date', 'end_date', 'ean_code', 'store_type'], keep="first")

        if config_df.empty:
            Log.warning("No external_targets data found - Price Adherence will not be calculated")
            return self.adherence_results

        self.match_product_in_scene.loc[self.match_product_in_scene['price'].isna(), 'price'] = \
            self.match_product_in_scene.loc[self.match_product_in_scene['price'].isna(), 'promotion_price']
        # =============== remove after updating logic to support promotional pricing ===============
        results_df = self.adherence_results
        my_config_df = \
            config_df[config_df['store_type'].str.encode('utf-8') == self.store_info.store_type[0].encode('utf-8')]

        products_in_session = self.scif['product_ean_code'].unique().tolist()
        products_in_session = [ean for ean in products_in_session if ean is not pd.np.nan and ean is not None]

        my_config_df = my_config_df[my_config_df['ean_code'].isin(products_in_session)]

        for row in my_config_df.itertuples():
            product_pk = \
                self.all_products[self.all_products['product_ean_code']
                                  == row.ean_code]['product_fk'].iloc[0]

            mpisc_df_price = \
                self.match_product_in_scene[(self.match_product_in_scene['product_fk'] == product_pk) |
                                            (self.match_product_in_scene[
                                                 'substitution_product_fk'] == product_pk)]['price']
            try:
                suggested_price = float(row.suggested_price)
            except Exception as e:
                Log.error("Product with ean_code {} is not in the configuration file for customer type {}"
                          .format(row.ean_code, self.store_info.store_type[0].encode('utf-8')))
                break
            percentage_weight = int(row.percentage_weight)
            upper_percentage = (100 + percentage_weight) / float(100)
            lower_percentage = (100 - percentage_weight) / float(100)
            min_price = suggested_price * lower_percentage
            max_price = suggested_price * upper_percentage
            percentage_sku = percentage_weight
            into_interval = 0
            prices_sum = 0
            count = 0
            trax_average = None
            for price in mpisc_df_price:
                if price and pd.notna(price):
                    prices_sum += price
                    count += 1

            if prices_sum > 0:
                trax_average = prices_sum / count
                into_interval = 0

            if not np.isnan(suggested_price):
                if min_price <= trax_average <= max_price:
                    into_interval = 100

            results_df.loc[len(results_df)] = [product_pk, trax_average,
                                               suggested_price, into_interval / 100, min_price, max_price,
                                               percentage_sku]

            self.common_v2.write_to_db_result(10, numerator_id=product_pk,
                                              numerator_result=suggested_price,
                                              denominator_id=product_pk,
                                              denominator_result=trax_average,
                                              result=row.percentage_weight,
                                              score=into_interval)
            if trax_average:
                mark_up = (np.divide(np.divide(float(trax_average), float(1.13)),
                                     float(suggested_price)) - 1) * 100

                self.common_v2.write_to_db_result(11, numerator_id=product_pk,
                                                  numerator_result=suggested_price,
                                                  denominator_id=product_pk,
                                                  denominator_result=trax_average,
                                                  score=mark_up,
                                                  result=mark_up)

        return results_df
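    # Hedged sketch (hypothetical helper and prices): the adherence interval built
    # above is a symmetric band of +/- percentage_weight percent around the
    # suggested price.
    @staticmethod
    def _sketch_price_band(suggested_price, percentage_weight, observed_average):
        # e.g. _sketch_price_band(20.0, 10, 19.5) -> True, since 18.0 <= 19.5 <= 22.0
        min_price = suggested_price * (100 - percentage_weight) / float(100)
        max_price = suggested_price * (100 + percentage_weight) / float(100)
        return min_price <= observed_average <= max_price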

    def calculate_perfect_store_extra_spaces(self):
        extra_spaces_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(
            Const.PERFECT_STORE_EXTRA_SPACES_SUB_CATEGORY)

        sub_cats_for_store = self.relevant_sub_categories

        if self.extra_spaces_results.empty:
            pass

        try:
            relevant_sub_categories = [x.strip() for x in self.extra_spaces_template[
                self.extra_spaces_template['country'].str.encode('utf-8') == self.country.encode('utf-8')][
                'sub_category'].iloc[0].split(',')]
        except IndexError:
            Log.warning(
                'No relevant sub_categories for the Extra Spaces KPI found for the following country: {}'.format(
                    self.country))
            relevant_sub_categories = []

        self.extra_spaces_results = pd.merge(self.extra_spaces_results,
                                             self.all_products.loc[:, [
                                                                          'sub_category_fk',
                                                                          'sub_category']].dropna().drop_duplicates(),
                                             how='left', on='sub_category_fk')

        relevant_extra_spaces = \
            self.extra_spaces_results[self.extra_spaces_results['sub_category'].isin(
                relevant_sub_categories)]
        kpi_weight = self.get_kpi_weight('EXTRA')
        for row in relevant_extra_spaces.itertuples():
            self.powersku_empty[row.sub_category_fk] = 1 * kpi_weight
            score = result = 1

            if row.sub_category_fk in sub_cats_for_store:
                sub_cats_for_store.remove(row.sub_category_fk)

            self.common_v2.write_to_db_result(extra_spaces_kpi_fk, numerator_id=row.sub_category_fk,
                                              denominator_id=row.template_fk, result=result, score=score,
                                              identifier_parent=row.sub_category_fk,
                                              target=1, should_enter=True)

        for sub_cat_fk in sub_cats_for_store:
            result = score = 0
            self.powersku_empty[sub_cat_fk] = 0
            self.common_v2.write_to_db_result(extra_spaces_kpi_fk, numerator_id=sub_cat_fk,
                                              denominator_id=0, result=result, score=score,
                                              identifier_parent=sub_cat_fk,
                                              target=1, should_enter=True)

    def heinz_global_extra_spaces(self):
        try:
            supervisor = self.store_info['additional_attribute_3'][0]
            store_target = -1
            # for row in self.store_sos_policies.itertuples():
            #     policies = json.loads(row.store_policy)
            #     for key, value in policies.items():
            #         try:
            #             if key == 'additional_attribute_3' and value[0] == supervisor:
            #                 store_target = row.target
            #                 break
            #         except KeyError:
            #             continue

            for row in self.supervisor_target.itertuples():
                try:
                    if row.supervisor == supervisor:
                        store_target = row.target
                        break
                except:
                    continue
        except Exception as e:
            Log.error("Supervisor target is not configured for the extra spaces report ")
            raise e

        results_df = self.extra_spaces_results

        # limit to only secondary scenes
        relevant_scif = self.scif[(self.scif['location_type_fk'] == float(2)) &
                                  (self.scif['facings'] > 0)]
        if relevant_scif.empty:
            return results_df
        # aggregate facings for every scene/sub_category combination in the visit
        relevant_scif = \
            relevant_scif.groupby(['scene_fk', 'template_fk', 'sub_category_fk'], as_index=False)['facings'].sum()
        # sort sub_categories by number of facings, largest first
        relevant_scif = relevant_scif.sort_values(['facings'], ascending=False)
        # drop all but the sub_category with the largest number of facings for each scene
        relevant_scif = relevant_scif.drop_duplicates(subset=['scene_fk'], keep='first')

        for row in relevant_scif.itertuples():
            results_df.loc[len(results_df)] = [row.sub_category_fk, row.template_fk, row.facings]
            self.common_v2.write_to_db_result(13, numerator_id=row.template_fk,
                                              numerator_result=row.facings,
                                              denominator_id=row.sub_category_fk,
                                              denominator_result=row.facings,
                                              context_id=row.scene_fk,
                                              result=store_target)

        return results_df

    def check_bonus_question(self):
        bonus_kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(Const.BONUS_QUESTION_SUB_CATEGORY)
        bonus_weight = self.kpi_weights['Score'][self.kpi_weights['KPIs'] == Const.KPI_WEIGHTS['Bonus']].iloc[0]

        sub_category_fks = self.sub_category_weight.sub_category_fk.unique().tolist()
        sub_category_fks = [x for x in sub_category_fks if str(x) != 'nan']
        if self.survey.check_survey_answer(('question_fk', Const.BONUS_QUESTION_FK), 'Yes,yes,si,Si'):
            result = 1
        else:
            result = 0

        for sub_cat_fk in sub_category_fks:
            sub_cat_weight = self.get_weight(sub_cat_fk)

            score = result * sub_cat_weight
            target_weight = bonus_weight * sub_cat_weight
            self.powersku_bonus[sub_cat_fk] = score

            self.common_v2.write_to_db_result(bonus_kpi_fk, numerator_id=sub_cat_fk,
                                              denominator_id=self.store_id,
                                              result=result, score=score, identifier_parent=sub_cat_fk,
                                              weight=target_weight, target=target_weight,
                                              should_enter=True)

    def commit_results_data(self):
        self.common_v2.commit_results_data()

    def update_score_sub_category_weights(self):
        all_sub_category_fks = self.all_products[['sub_category', 'sub_category_fk']].drop_duplicates()
        self.sub_category_weight = pd.merge(self.sub_category_weight, all_sub_category_fks, left_on='Category',
                                            right_on='sub_category',
                                            how='left')

    def get_weight(self, sub_category_fk):
        weight_value = 0

        if self.country in self.sub_category_weight.columns.to_list():
            weight_df = self.sub_category_weight[self.country][
                (self.sub_category_weight.sub_category_fk == sub_category_fk)]
            if weight_df.empty:
                return 0

            weight_value = weight_df.iloc[0]

            if pd.isna(weight_value):
                weight_value = 0

        weight = weight_value * 0.01
        return weight

    def get_kpi_weight(self, kpi_name):
        weight = self.kpi_weights['Score'][self.kpi_weights['KPIs'] == Const.KPI_WEIGHTS[kpi_name]].iloc[0]
        return weight

    def get_supervisor_target(self):
        supervisor_target = self.targets[self.targets['kpi_type'] == 'Extra Spaces']
        return supervisor_target

    def calculate_sub_category_sum(self, dict_list, sub_cat_fk):
        total_score = 0
        for item in dict_list:
            try:
                total_score += item[sub_cat_fk]
            except KeyError:
                pass

        return total_score

    def set_relevant_sub_categories(self):
        if self.country in self.sub_category_weight.columns.to_list():

            df = self.sub_category_weight[['Category', 'sub_category_fk', self.country]].dropna()
            self.relevant_sub_categories = df.sub_category_fk.to_list()
        else:
            self.relevant_sub_categories = []
Example #19
class LIBERTYToolBox:

    def __init__(self, data_provider, output, common_db):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.store_info = self.ps_data_provider.get_ps_store_info(self.data_provider[Data.STORE_INFO])
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.scif = self.scif[self.scif['product_type'] != "Irrelevant"]
        self.templates = {}
        self.result_values = self.ps_data_provider.get_result_values()
        for sheet in Const.SHEETS:
            self.templates[sheet] = pd.read_excel(Const.TEMPLATE_PATH, sheetname=sheet).fillna('')
        self.common_db = common_db
        self.survey = Survey(self.data_provider, output=self.output, ps_data_provider=self.ps_data_provider,
                             common=self.common_db)
        self.manufacturer_fk = Const.MANUFACTURER_FK
        self.region = self.store_info['region_name'].iloc[0]
        self.store_type = self.store_info['store_type'].iloc[0]
        self.retailer = self.store_info['retailer_name'].iloc[0]
        self.branch = self.store_info['branch_name'].iloc[0]
        self.additional_attribute_4 = self.store_info['additional_attribute_4'].iloc[0]
        self.additional_attribute_7 = self.store_info['additional_attribute_7'].iloc[0]
        self.body_armor_delivered = self.get_body_armor_delivery_status()

    # main functions:

    def main_calculation(self, *args, **kwargs):
        """
            Iterates over the KPIs sheet, skips KPIs that do not apply to this store's additional_attribute_7,
            adds the weight of every passing KPI to the red score, and writes the total red score to the DB.
        """
        red_score = 0
        main_template = self.templates[Const.KPIS]
        for i, main_line in main_template.iterrows():
            relevant_store_types = self.does_exist(main_line, Const.ADDITIONAL_ATTRIBUTE_7)
            if relevant_store_types and self.additional_attribute_7 not in relevant_store_types:
                continue
            result = self.calculate_main_kpi(main_line)
            if result:
                red_score += main_line[Const.WEIGHT]

        if len(self.common_db.kpi_results) > 0:
            kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(Const.RED_SCORE_PARENT)
            self.common_db.write_to_db_result(kpi_fk, numerator_id=1, denominator_id=self.store_id, result=red_score,
                                              identifier_result=Const.RED_SCORE_PARENT, should_enter=True)
        return

    def calculate_main_kpi(self, main_line):
        """
        Takes one line from the KPIs sheet, filters scif by the line's scene-type, excluded-scene-type and
        template-group columns, and passes the filtered scif to the matching KPI-type calculation.
        :param main_line: series from the KPIs sheet of the template.
        """
        relevant_scif = self.scif
        scene_types = self.does_exist(main_line, Const.SCENE_TYPE)
        if scene_types:
            relevant_scif = relevant_scif[relevant_scif['template_name'].isin(scene_types)]
        excluded_scene_types = self.does_exist(main_line, Const.EXCLUDED_SCENE_TYPE)
        if excluded_scene_types:
            relevant_scif = relevant_scif[~relevant_scif['template_name'].isin(excluded_scene_types)]
        template_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP)
        if template_groups:
            relevant_scif = relevant_scif[relevant_scif['template_group'].isin(template_groups)]

        result = self.calculate_kpi_by_type(main_line, relevant_scif)

        return result

    def calculate_kpi_by_type(self, main_line, relevant_scif):
        """
        Calculates a single KPI according to its type and writes the pass/fail result to the DB.
        :param main_line: one KPI line from the main template
        :param relevant_scif: scif filtered for this KPI
        :return: the KPI result; may be None when it should not be written to the DB
        """
        kpi_type = main_line[Const.KPI_TYPE]
        relevant_template = self.templates[kpi_type]
        kpi_line = relevant_template[relevant_template[Const.KPI_NAME] == main_line[Const.KPI_NAME]].iloc[0]
        kpi_function = self.get_kpi_function(kpi_type)
        weight = main_line[Const.WEIGHT]

        if relevant_scif.empty:
            result = 0
        else:
            result = kpi_function(kpi_line, relevant_scif, weight)

        result_type_fk = self.ps_data_provider.get_pks_of_result(
            Const.PASS) if result > 0 else self.ps_data_provider.get_pks_of_result(Const.FAIL)

        kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
        kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(kpi_name)
        self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                          denominator_id=self.store_id, denominator_result=0, weight=weight,
                                          result=result_type_fk, identifier_parent=Const.RED_SCORE_PARENT,
                                          identifier_result=kpi_name, should_enter=True)

        return result

    # SOS functions
    def calculate_sos(self, kpi_line, relevant_scif, weight):
        market_share_required = self.does_exist(kpi_line, Const.MARKET_SHARE_TARGET)
        if market_share_required:
            market_share_target = self.get_market_share_target()
        else:
            market_share_target = 0

        if not market_share_target:
            market_share_target = 0

        manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
        if manufacturer:
            relevant_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]

        number_of_facings = relevant_scif['facings'].sum()
        result = 1 if number_of_facings > market_share_target else 0

        parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
        kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
        self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                          denominator_id=self.store_id, denominator_result=0, weight=weight,
                                          result=number_of_facings, target=market_share_target,
                                          identifier_parent=parent_kpi_name, should_enter=True)

        return result

    # Availability functions
    def calculate_availability(self, kpi_line, relevant_scif, weight):
        survey_question_skus_required = self.does_exist(kpi_line, Const.SURVEY_QUESTION_SKUS_REQUIRED)
        if survey_question_skus_required:
            survey_question_skus = self.get_relevant_product_assortment_by_kpi_name(kpi_line[Const.KPI_NAME])
            unique_skus = \
                relevant_scif[relevant_scif['product_fk'].isin(survey_question_skus)]['product_fk'].unique().tolist()
        else:
            manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
            if manufacturer:
                relevant_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]
            brand = self.does_exist(kpi_line, Const.BRAND)
            if brand:
                relevant_scif = relevant_scif[relevant_scif['brand_name'].isin(brand)]
            category = self.does_exist(kpi_line, Const.CATEGORY)
            if category:
                relevant_scif = relevant_scif[relevant_scif['category'].isin(category)]
            excluded_brand = self.does_exist(kpi_line, Const.EXCLUDED_BRAND)
            if excluded_brand:
                relevant_scif = relevant_scif[~relevant_scif['brand_name'].isin(excluded_brand)]
            unique_skus = relevant_scif['product_fk'].unique().tolist()

        length_of_unique_skus = len(unique_skus)
        minimum_number_of_skus = kpi_line[Const.MINIMUM_NUMBER_OF_SKUS]

        result = 1 if length_of_unique_skus >= minimum_number_of_skus else 0

        parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
        kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
        self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                          denominator_id=self.store_id, denominator_result=0, weight=weight,
                                          result=length_of_unique_skus, target=minimum_number_of_skus,
                                          identifier_parent=parent_kpi_name, should_enter=True)

        return result

    def get_relevant_product_assortment_by_kpi_name(self, kpi_name):
        template = self.templates[Const.SURVEY_QUESTION_SKUS]
        relevant_template = template[template[Const.KPI_NAME] == kpi_name]
        relevant_ean_codes = relevant_template[Const.EAN_CODE].unique().tolist()
        relevant_ean_codes = [str(int(x)) for x in relevant_ean_codes if x != '']  # EAN codes are read as floats; normalize to strings
        relevant_products = self.all_products[self.all_products['product_ean_code'].isin(relevant_ean_codes)]
        return relevant_products['product_fk'].unique().tolist()

    # Count of Display functions
    def calculate_count_of_display(self, kpi_line, relevant_scif, weight):
        filtered_scif = relevant_scif

        manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
        if manufacturer:
            filtered_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]

        brand = self.does_exist(kpi_line, Const.BRAND)
        if brand:
            filtered_scif = filtered_scif[filtered_scif['brand_name'].isin(brand)]

        ssd_still = self.does_exist(kpi_line, Const.ATT4)
        if ssd_still:
            filtered_scif = filtered_scif[filtered_scif['att4'].isin(ssd_still)]

        size_subpackages = self.does_exist(kpi_line, Const.SIZE_SUBPACKAGES_NUM)
        if size_subpackages:
            # convert all pairings of size and number of subpackages to tuples
            size_subpackages_tuples = [tuple([float(i) for i in x.split(';')]) for x in size_subpackages]
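            # e.g. a template cell '16.9;6' becomes (16.9, 6.0) and is matched against (size, number_of_sub_packages)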
            filtered_scif = filtered_scif[pd.Series(list(zip(filtered_scif['size'],
                                                             filtered_scif['number_of_sub_packages'])),
                                                    index=filtered_scif.index).isin(size_subpackages_tuples)]

        sub_packages = self.does_exist(kpi_line, Const.SUBPACKAGES_NUM)
        if sub_packages:
            if sub_packages == [Const.NOT_NULL]:
                filtered_scif = filtered_scif[~filtered_scif['number_of_sub_packages'].isnull()]
            else:
                filtered_scif = filtered_scif[filtered_scif['number_of_sub_packages'].isin([int(i) for i in sub_packages])]

        if self.does_exist(kpi_line, Const.MINIMUM_FACINGS_REQUIRED):
            number_of_passing_displays = self.get_number_of_passing_displays(filtered_scif)

            parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
            kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
            self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                              denominator_id=self.store_id, denominator_result=0, weight=weight,
                                              result=number_of_passing_displays,
                                              identifier_parent=parent_kpi_name, should_enter=True)
            return 1 if number_of_passing_displays > 0 else 0
        else:
            return 0

    # Share of Display functions
    def calculate_share_of_display(self, kpi_line, relevant_scif, weight):
        filtered_scif = relevant_scif

        manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
        if manufacturer:
            filtered_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]

        ssd_still = self.does_exist(kpi_line, Const.ATT4)
        if ssd_still:
            filtered_scif = filtered_scif[filtered_scif['att4'].isin(ssd_still)]

        if self.does_exist(kpi_line, Const.MARKET_SHARE_TARGET):
            market_share_target = self.get_market_share_target(ssd_still=ssd_still)
        else:
            market_share_target = 0

        if self.does_exist(kpi_line, Const.INCLUDE_BODY_ARMOR) and self.body_armor_delivered:
            body_armor_scif = relevant_scif[relevant_scif['brand_fk'] == Const.BODY_ARMOR_BRAND_FK]
            filtered_scif = filtered_scif.append(body_armor_scif, sort=False)

        if self.does_exist(kpi_line, Const.MINIMUM_FACINGS_REQUIRED):
            number_of_passing_displays = self.get_number_of_passing_displays(filtered_scif)

            result = 1 if number_of_passing_displays > market_share_target else 0

            parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
            kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
            self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                              denominator_id=self.store_id, denominator_result=0, weight=weight,
                                              result=number_of_passing_displays, target=market_share_target,
                                              identifier_parent=parent_kpi_name, should_enter=True)

            return result
        else:
            return 0

    def get_number_of_passing_displays(self, filtered_scif):
        if filtered_scif.empty:
            return 0

        filtered_scif['passed_displays'] = \
            filtered_scif.apply(lambda row: self._calculate_pass_status_of_display(row), axis=1)

        return filtered_scif['passed_displays'].sum()

    def _calculate_pass_status_of_display(self, row):  # need to move to external KPI targets
        template = self.templates[Const.MINIMUM_FACINGS]
        package_category = (row['size'], row['number_of_sub_packages'], row['size_unit'])
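        # e.g. a product with size 12, 6 sub-packages and unit 'OZ' must match a
        # (size, subpackages_num, unit_of_measure) row in the MINIMUM_FACINGS sheet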
        relevant_template = template[pd.Series(zip(template['size'],
                                                   template['subpackages_num'],
                                                   template['unit_of_measure'])) == package_category]
        minimum_facings = relevant_template[Const.MINIMUM_FACINGS_REQUIRED_FOR_DISPLAY].min()
        return 1 if row['facings'] > minimum_facings else 0

    # Survey functions
    def calculate_survey(self, kpi_line, relevant_scif, weight):
        return 1 if self.survey.check_survey_answer(kpi_line[Const.QUESTION_TEXT], 'Yes') else 0

    # helper functions
    def get_market_share_target(self, ssd_still=None):  # need to move to external KPI targets
        template = self.templates[Const.MARKET_SHARE]
        relevant_template = template[(template[Const.ADDITIONAL_ATTRIBUTE_4] == self.additional_attribute_4) &
                                     (template[Const.RETAILER] == self.retailer) &
                                     (template[Const.BRANCH] == self.branch)]

        if relevant_template.empty:
            if ssd_still:
                if ssd_still[0].lower() == Const.SSD.lower():
                    return 49
                elif ssd_still[0].lower() == Const.STILL.lower():
                    return 16
                else:
                    return 0
            else:
                return 26

        if ssd_still:
            if ssd_still[0].lower() == Const.SSD.lower():
                return relevant_template[Const.SSD].iloc[0]
            elif ssd_still[0].lower() == Const.STILL.lower():
                return relevant_template[Const.STILL].iloc[0]

        # no SSD/Still filter requested: return the combined SSD-and-Still target
        return relevant_template[Const.SSD_AND_STILL].iloc[0]

    def get_body_armor_delivery_status(self):
        if self.store_info['additional_attribute_8'].iloc[0] == 'Y':
            return True
        else:
            return False

    def get_kpi_function(self, kpi_type):
        """
        transfers every kpi to its own function
        :param kpi_type: value from "sheet" column in the main sheet
        :return: function
        """
        if kpi_type == Const.SOS:
            return self.calculate_sos
        elif kpi_type == Const.AVAILABILITY:
            return self.calculate_availability
        elif kpi_type == Const.COUNT_OF_DISPLAY:
            return self.calculate_count_of_display
        elif kpi_type == Const.SHARE_OF_DISPLAY:
            return self.calculate_share_of_display
        elif kpi_type == Const.SURVEY:
            return self.calculate_survey
        else:
            Log.warning(
                "The value '{}' in column sheet in the template is not recognized".format(kpi_type))
            return None

    @staticmethod
    def does_exist(kpi_line, column_name):
        """
        checks if kpi_line has values in this column, and if it does - returns a list of these values
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return [x.strip() for x in cell.split(",")]
        return None
Example #20
class INBEVMXToolBox:
    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.session_id = self.data_provider.session_id
        self.products = self.data_provider[Data.PRODUCTS]
        self.common_v2 = Common_V2(self.data_provider)
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.tools = GENERALToolBox(self.data_provider)
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.survey = Survey(self.data_provider, self.output)
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common_v2.get_kpi_static_data()
        self.kpi_results_queries = []
        self.kpi_results_new_tables_queries = []
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.oos_policies = self.get_policies()
        self.result_dict = {}
        self.hierarchy_dict = {}

        try:
            self.store_type_filter = self.store_info['store_type'].values[
                0].strip()
        except (IndexError, AttributeError):
            Log.error("there is no store type in the db")
            return
        try:
            self.region_name_filter = self.store_info['region_name'].values[
                0].strip()
            self.region_fk = self.store_info['region_fk'].values[0]
        except (IndexError, AttributeError):
            Log.error("there is no region in the db")
            return
        try:
            self.att6_filter = self.store_info[
                'additional_attribute_6'].values[0].strip()
        except (IndexError, AttributeError):
            Log.error("there is no additional attribute 6 in the db")
            return
        self.sos_target_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                              Const.SOS_TARGET).fillna("")
        self.survey_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                          Const.SURVEY).fillna("")
        self.survey_combo_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                                Const.SURVEY_COMBO).fillna("")
        self.oos_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                       Const.OOS_KPI).fillna("")

    def get_policies(self):
        query = INBEVMXQueries.get_policies()
        policies = pd.read_sql_query(query, self.rds_conn.db)
        return policies

    def main_calculation(self):
        """
        This function calculates the KPI results.
        """
        kpis_sheet = pd.read_excel(PATH_SURVEY_AND_SOS_TARGET,
                                   Const.KPIS).fillna("")
        for index, row in kpis_sheet.iterrows():
            self.handle_atomic(row)
        self.save_parent_kpis()
        self.common_v2.commit_results_data()

    def calculate_oos_target(self):
        temp = self.oos_sheet[Const.TEMPLATE_STORE_TYPE]
        rows_stores_filter = self.oos_sheet[
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])]
        if rows_stores_filter.empty:
            weight = 0
        else:
            weight = rows_stores_filter[Const.TEMPLATE_SCORE].values[0]
        all_data = pd.merge(
            self.scif[["store_id", "product_fk", "facings", "template_name"]],
            self.store_info,
            left_on="store_id",
            right_on="store_fk")
        if all_data.empty:
            return 0
        json_policies = self.oos_policies.copy()
        json_policies[Const.POLICY] = self.oos_policies[Const.POLICY].apply(
            lambda line: json.loads(line))
        diff_policies = json_policies[
            Const.POLICY].drop_duplicates().reset_index()
        diff_table = json_normalize(diff_policies[Const.POLICY].tolist())

        # remove all lists from df
        diff_table = diff_table.applymap(lambda x: x[0]
                                         if isinstance(x, list) else x)
        for col in diff_table.columns:
            att = all_data.iloc[0][col]
            if att is None:
                return 0
            diff_table = diff_table[diff_table[col] == att]
            all_data = all_data[all_data[col] == att]
        if len(diff_table) > 1:
            Log.warning("There is more than one possible match")
            return 0
        if diff_table.empty:
            return 0
        selected_row = diff_policies.iloc[diff_table.index[0]][Const.POLICY]
        json_policies = json_policies[json_policies[Const.POLICY] ==
                                      selected_row]
        products_to_check = json_policies['product_fk'].tolist()
        products_df = all_data[(
            all_data['product_fk'].isin(products_to_check))][[
                'product_fk', 'facings'
            ]].fillna(0)
        products_df = products_df.groupby('product_fk').sum().reset_index()
        try:
            atomic_pk_sku = self.common_v2.get_kpi_fk_by_kpi_name(
                Const.OOS_SKU_KPI)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        Const.OOS_SKU_KPI)
            return 0
        for product in products_to_check:
            if product not in products_df['product_fk'].values:
                products_df = products_df.append(
                    {
                        'product_fk': product,
                        'facings': 0.0
                    }, ignore_index=True)
        for index, row in products_df.iterrows():
            result = 0 if row['facings'] > 0 else 1
            self.common_v2.write_to_db_result(fk=atomic_pk_sku,
                                              numerator_id=row['product_fk'],
                                              numerator_result=row['facings'],
                                              denominator_id=self.store_id,
                                              result=result,
                                              score=result,
                                              identifier_parent=Const.OOS_KPI,
                                              should_enter=True,
                                              parent_fk=3)

        not_existing_products_len = len(
            products_df[products_df['facings'] == 0])
        result = not_existing_products_len / float(len(products_to_check))
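        # e.g. 3 of 12 policy products with zero facings gives result 0.25; the final OOS score is result * weight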
        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(Const.OOS_KPI)
            result_oos_pk = self.common_v2.get_kpi_fk_by_kpi_name(
                Const.OOS_RESULT_KPI)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        Const.OOS_KPI)
            return 0
        score = result * weight
        self.common_v2.write_to_db_result(
            fk=atomic_pk,
            numerator_id=self.region_fk,
            numerator_result=not_existing_products_len,
            denominator_id=self.store_id,
            denominator_result=len(products_to_check),
            result=result,
            score=score,
            identifier_result=Const.OOS_KPI,
            parent_fk=3)
        self.common_v2.write_to_db_result(
            fk=result_oos_pk,
            numerator_id=self.region_fk,
            numerator_result=not_existing_products_len,
            denominator_id=self.store_id,
            denominator_result=len(products_to_check),
            result=result,
            score=result,
            parent_fk=3)
        return score

    def save_parent_kpis(self):
        for kpi in self.result_dict.keys():
            try:
                kpi_fk = self.common_v2.get_kpi_fk_by_kpi_name(kpi)
            except IndexError:
                Log.warning("There is no matching Kpi fk for kpi name: " + kpi)
                continue
            if kpi not in self.hierarchy_dict:
                self.common_v2.write_to_db_result(fk=kpi_fk,
                                                  numerator_id=self.region_fk,
                                                  denominator_id=self.store_id,
                                                  result=self.result_dict[kpi],
                                                  score=self.result_dict[kpi],
                                                  identifier_result=kpi,
                                                  parent_fk=1)
            else:
                self.common_v2.write_to_db_result(
                    fk=kpi_fk,
                    numerator_id=self.region_fk,
                    denominator_id=self.store_id,
                    result=self.result_dict[kpi],
                    score=self.result_dict[kpi],
                    identifier_result=kpi,
                    identifier_parent=self.hierarchy_dict[kpi],
                    should_enter=True,
                    parent_fk=2)

    def handle_atomic(self, row):
        result = 0
        atomic_id = row[Const.TEMPLATE_KPI_ID]
        atomic_name = row[Const.KPI_LEVEL_3].strip()
        kpi_name = row[Const.KPI_LEVEL_2].strip()
        set_name = row[Const.KPI_LEVEL_1].strip()
        kpi_type = row[Const.TEMPLATE_KPI_TYPE].strip()
        if atomic_name != kpi_name:
            parent_name = kpi_name
        else:
            parent_name = set_name
        if kpi_type == Const.SOS_TARGET:
            if self.scene_info['number_of_probes'].sum() > 1:
                result = self.handle_sos_target_atomics(
                    atomic_id, atomic_name, parent_name)
        elif kpi_type == Const.SURVEY:
            result = self.handle_survey_atomics(atomic_id, atomic_name,
                                                parent_name)
        elif kpi_type == Const.SURVEY_COMBO:
            result = self.handle_survey_combo(atomic_id, atomic_name,
                                              parent_name)
        elif kpi_type == Const.OOS_KPI:
            result = self.calculate_oos_target()

        # Update kpi results
        if atomic_name != kpi_name:
            if kpi_name not in self.result_dict.keys():
                self.result_dict[kpi_name] = result
                self.hierarchy_dict[kpi_name] = set_name
            else:
                self.result_dict[kpi_name] += result

        # Update set results
        if set_name not in self.result_dict.keys():
            self.result_dict[set_name] = result
        else:
            self.result_dict[set_name] += result

    def handle_sos_target_atomics(self, atomic_id, atomic_name, parent_name):

        denominator_number_of_total_facings = 0
        count_result = -1
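        # -1 is a sentinel meaning "no passing SOS result"; it is replaced by the row's score only when the
        # manufacturer's share of facings reaches the target percentage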

        # bring the kpi rows from the sos sheet
        rows = self.sos_target_sheet.loc[self.sos_target_sheet[
            Const.TEMPLATE_KPI_ID] == atomic_id]

        # get a single row
        row = self.find_row(rows)
        if row.empty:
            return 0

        target = row[Const.TEMPLATE_TARGET_PRECENT].values[0]
        score = row[Const.TEMPLATE_SCORE].values[0]
        df = pd.merge(self.scif,
                      self.store_info,
                      how="left",
                      left_on="store_id",
                      right_on="store_fk")

        # get the filters
        filters = self.get_filters_from_row(row.squeeze())
        numerator_number_of_facings = self.count_of_facings(df, filters)
        if numerator_number_of_facings != 0 and count_result == -1:
            if 'manufacturer_name' in filters.keys():
                deno_manufacturer = row[
                    Const.TEMPLATE_MANUFACTURER_DENOMINATOR].values[0].strip()
                deno_manufacturer = deno_manufacturer.split(",")
                filters['manufacturer_name'] = [
                    item.strip() for item in deno_manufacturer
                ]
                denominator_number_of_total_facings = self.count_of_facings(
                    df, filters)
                percentage = 100 * (numerator_number_of_facings /
                                    float(denominator_number_of_total_facings))
                count_result = score if percentage >= target else -1

        if count_result == -1:
            return 0

        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return 0

        self.common_v2.write_to_db_result(
            fk=atomic_pk,
            numerator_id=self.region_fk,
            numerator_result=numerator_number_of_facings,
            denominator_id=self.store_id,
            denominator_result=denominator_number_of_total_facings,
            result=count_result,
            score=count_result,
            identifier_result=atomic_name,
            identifier_parent=parent_name,
            should_enter=True,
            parent_fk=3)
        return count_result

    def find_row(self, rows):
        temp = rows[Const.TEMPLATE_STORE_TYPE]
        rows_stores_filter = rows[(
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])) |
                                  (temp == "")]
        temp = rows_stores_filter[Const.TEMPLATE_REGION]
        rows_regions_filter = rows_stores_filter[(
            temp.apply(lambda r: self.region_name_filter in
                       [item.strip() for item in r.split(",")])) |
                                                 (temp == "")]
        temp = rows_regions_filter[Const.TEMPLATE_ADDITIONAL_ATTRIBUTE_6]
        rows_att6_filter = rows_regions_filter[(
            temp.apply(lambda r: self.att6_filter in
                       [item.strip() for item in r.split(",")])) |
                                               (temp == "")]
        return rows_att6_filter

    def get_filters_from_row(self, row):
        filters = dict(row)

        # no need to be accounted for
        for field in Const.DELETE_FIELDS:
            if field in filters:
                del filters[field]

        # filter all the empty cells
        for key in list(filters.keys()):
            if filters[key] == "":
                del filters[key]
            elif isinstance(filters[key], tuple):
                filters[key] = (filters[key][0].split(","), filters[key][1])
            else:
                filters[key] = filters[key].split(",")
                filters[key] = [item.strip() for item in filters[key]]

        return self.create_filters_according_to_scif(filters)

    def create_filters_according_to_scif(self, filters):
        convert_from_scif = {
            Const.TEMPLATE_GROUP: 'template_group',
            Const.TEMPLATE_MANUFACTURER_NOMINATOR: 'manufacturer_name',
            Const.TEMPLATE_ADDITIONAL_ATTRIBUTE_6: 'additional_attribute_6'
        }
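        # template column headers are renamed to their scif column names,
        # e.g. the manufacturer-numerator column becomes 'manufacturer_name'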

        for key in list(filters.keys()):
            if key in convert_from_scif:
                filters[convert_from_scif[key]] = filters.pop(key)
        return filters

    def count_of_facings(self, df, filters):

        facing_data = df[self.tools.get_filter_condition(df, **filters)]
        number_of_facings = facing_data['facings'].sum()
        return number_of_facings

    def handle_survey_combo(self, atomic_id, atomic_name, parent_name):
        # bring the kpi rows from the survey sheet
        numerator = denominator = 0
        rows = self.survey_combo_sheet.loc[self.survey_combo_sheet[
            Const.TEMPLATE_KPI_ID] == atomic_id]
        temp = rows[Const.TEMPLATE_STORE_TYPE]
        row_store_filter = rows[(
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])) |
                                (temp == "")]
        if row_store_filter.empty:
            return 0

        condition = row_store_filter[Const.TEMPLATE_CONDITION].values[0]
        condition_type = row_store_filter[
            Const.TEMPLATE_CONDITION_TYPE].values[0]
        score = row_store_filter[Const.TEMPLATE_SCORE].values[0]

        # find the answer to the survey in session
        for i, row in row_store_filter.iterrows():
            question_text = row[Const.TEMPLATE_SURVEY_QUESTION_TEXT]
            question_answer_template = row[Const.TEMPLATE_TARGET_ANSWER]

            survey_result = self.survey.get_survey_answer(
                ('question_text', question_text))
            if not survey_result:
                continue
            if '-' in question_answer_template:
                numbers = question_answer_template.split('-')
                try:
                    numeric_survey_result = int(survey_result)
                except (ValueError, TypeError):
                    Log.warning("Survey question - " + str(question_text) +
                                " - doesn't have a numeric result")
                    continue
                if numeric_survey_result < int(
                        numbers[0]) or numeric_survey_result > int(numbers[1]):
                    continue
                numerator_or_denominator = row_store_filter[
                    Const.NUMERATOR_OR_DENOMINATOR].values[0]
                if numerator_or_denominator == Const.DENOMINATOR:
                    denominator += numeric_survey_result
                else:
                    numerator += numeric_survey_result
            else:
                continue
        if condition_type == '%':
            if denominator != 0:
                fraction = 100 * (float(numerator) / float(denominator))
            else:
                if numerator > 0:
                    fraction = 100
                else:
                    fraction = 0
            result = score if fraction >= condition else 0
        else:
            return 0

        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return 0
        self.common_v2.write_to_db_result(fk=atomic_pk,
                                          numerator_id=self.region_fk,
                                          numerator_result=numerator,
                                          denominator_result=denominator,
                                          denominator_id=self.store_id,
                                          result=result,
                                          score=result,
                                          identifier_result=atomic_name,
                                          identifier_parent=parent_name,
                                          should_enter=True,
                                          parent_fk=3)
        return result

    def handle_survey_atomics(self, atomic_id, atomic_name, parent_name):
        # bring the kpi rows from the survey sheet
        rows = self.survey_sheet.loc[self.survey_sheet[Const.TEMPLATE_KPI_ID]
                                     == atomic_id]
        temp = rows[Const.TEMPLATE_STORE_TYPE]
        row_store_filter = rows[(
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])) |
                                (temp == "")]

        if row_store_filter.empty:
            return 0
        else:
            # find the answer to the survey in session
            question_text = row_store_filter[
                Const.TEMPLATE_SURVEY_QUESTION_TEXT].values[0]
            question_answer_template = row_store_filter[
                Const.TEMPLATE_TARGET_ANSWER].values[0]
            score = row_store_filter[Const.TEMPLATE_SCORE].values[0]

            survey_result = self.survey.get_survey_answer(
                ('question_text', question_text))
            if not survey_result:
                return 0
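            # a target answer written as a range, e.g. '1-10', is treated as numeric bounds for the survey answer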
            if '-' in question_answer_template:
                numbers = question_answer_template.split('-')
                try:
                    numeric_survey_result = int(survey_result)
                except (ValueError, TypeError):
                    Log.warning("Survey question - " + str(question_text) +
                                " - doesn't have a numeric result")
                    return 0
                if numeric_survey_result < int(
                        numbers[0]) or numeric_survey_result > int(numbers[1]):
                    return 0
                condition = row_store_filter[
                    Const.TEMPLATE_CONDITION].values[0]
                if condition != "":
                    second_question_text = row_store_filter[
                        Const.TEMPLATE_SECOND_SURVEY_QUESTION_TEXT].values[0]
                    second_survey_result = self.survey.get_survey_answer(
                        ('question_text', second_question_text))
                    if not second_survey_result:
                        second_survey_result = 0
                    second_numeric_survey_result = int(second_survey_result)
                    survey_result = 1 if numeric_survey_result >= second_numeric_survey_result else -1
                else:
                    survey_result = 1
            else:
                question_answer_template = question_answer_template.split(',')
                question_answer_template = [
                    item.strip() for item in question_answer_template
                ]
                if survey_result in question_answer_template:
                    survey_result = 1
                else:
                    survey_result = -1
        final_score = score if survey_result == 1 else 0

        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return 0
        self.common_v2.write_to_db_result(fk=atomic_pk,
                                          numerator_id=self.region_fk,
                                          numerator_result=0,
                                          denominator_result=0,
                                          denominator_id=self.store_id,
                                          result=survey_result,
                                          score=final_score,
                                          identifier_result=atomic_name,
                                          identifier_parent=parent_name,
                                          should_enter=True,
                                          parent_fk=3)
        return final_score

    def get_new_kpi_static_data(self):
        """
            This function extracts the static new KPI data (new tables) and saves it into one global data frame.
            The data is taken from static.kpi_level_2.
            """
        query = INBEVMXQueries.get_new_kpi_data()
        kpi_static_data = pd.read_sql_query(query, self.rds_conn.db)
        return kpi_static_data
Example #21
class LIBERTYToolBox:

    def __init__(self, data_provider, output, common_db):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.store_info = self.ps_data_provider.get_ps_store_info(
            self.data_provider[Data.STORE_INFO])
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.scif = self.scif[self.scif['product_type'] != "Irrelevant"]
        self.result_values = self.ps_data_provider.get_result_values()
        self.templates = self.read_templates()
        self.common_db = common_db
        self.survey = Survey(self.data_provider, output=self.output, ps_data_provider=self.ps_data_provider,
                             common=self.common_db)
        self.manufacturer_fk = Const.MANUFACTURER_FK
        self.region = self.store_info['region_name'].iloc[0]
        self.store_type = self.store_info['store_type'].iloc[0]
        self.retailer = self.store_info['retailer_name'].iloc[0]
        self.branch = self.store_info['branch_name'].iloc[0]
        self.additional_attribute_4 = self.store_info['additional_attribute_4'].iloc[0]
        self.additional_attribute_7 = self.store_info['additional_attribute_7'].iloc[0]
        self.body_armor_delivered = self.get_body_armor_delivery_status()
        self.convert_base_size_and_multi_pack()

    def read_templates(self):
        templates = {}
        for sheet in Const.SHEETS:
            converters = None
            if sheet == Const.MINIMUM_FACINGS:
                converters = {Const.BASE_SIZE_MIN: self.convert_base_size_values,
                              Const.BASE_SIZE_MAX: self.convert_base_size_values}
            templates[sheet] = \
                pd.read_excel(Const.TEMPLATE_PATH, sheet_name=sheet,
                              converters=converters).fillna('')
        return templates

    # main functions:

    def main_calculation(self, *args, **kwargs):
        """
            Runs only for stores in the Liberty region: iterates over the KPIs sheet, skips KPIs that do not apply
            to this store's additional_attribute_7, accumulates weight * result into the red score, and writes it to the DB.
        """
        if self.region != 'Liberty':
            return
        red_score = 0
        main_template = self.templates[Const.KPIS]
        for i, main_line in main_template.iterrows():
            relevant_store_types = self.does_exist(main_line, Const.ADDITIONAL_ATTRIBUTE_7)
            if relevant_store_types and self.additional_attribute_7 not in relevant_store_types:
                continue
            result = self.calculate_main_kpi(main_line)
            if result:
                red_score += main_line[Const.WEIGHT] * result

        if len(self.common_db.kpi_results) > 0:
            kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(Const.RED_SCORE_PARENT)
            self.common_db.write_to_db_result(kpi_fk, numerator_id=1, denominator_id=self.store_id, result=red_score,
                                              identifier_result=Const.RED_SCORE_PARENT, should_enter=True)
        return

    def calculate_main_kpi(self, main_line):
        """
        Takes one line from the KPIs sheet, filters scif by the line's scene-type, excluded-scene-type and
        template-group columns, and passes the filtered scif to the matching KPI-type calculation.
        :param main_line: series from the KPIs sheet of the template.
        """
        relevant_scif = self.scif
        scene_types = self.does_exist(main_line, Const.SCENE_TYPE)
        if scene_types:
            relevant_scif = relevant_scif[relevant_scif['template_name'].isin(scene_types)]
        excluded_scene_types = self.does_exist(main_line, Const.EXCLUDED_SCENE_TYPE)
        if excluded_scene_types:
            relevant_scif = relevant_scif[~relevant_scif['template_name'].isin(
                excluded_scene_types)]
        template_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP)
        if template_groups:
            relevant_scif = relevant_scif[relevant_scif['template_group'].isin(template_groups)]

        result = self.calculate_kpi_by_type(main_line, relevant_scif)

        return result

    def calculate_kpi_by_type(self, main_line, relevant_scif):
        """
        Calculates a single KPI according to its type and writes the pass/fail result to the DB.
        :param main_line: one KPI line from the main template
        :param relevant_scif: scif filtered for this KPI
        :return: the KPI result; child KPIs return 0 so they do not roll up into the Total Score
        """
        kpi_type = main_line[Const.KPI_TYPE]
        relevant_template = self.templates[kpi_type]
        kpi_line = relevant_template[relevant_template[Const.KPI_NAME]
                                     == main_line[Const.KPI_NAME]].iloc[0]
        kpi_function = self.get_kpi_function(kpi_type)
        weight = main_line[Const.WEIGHT]

        if relevant_scif.empty:
            result = 0
        else:
            result = kpi_function(kpi_line, relevant_scif, weight)

        result_type_fk = self.ps_data_provider.get_pks_of_result(
            Const.PASS) if result > 0 else self.ps_data_provider.get_pks_of_result(Const.FAIL)

        if self.does_exist(main_line, Const.PARENT_KPI_NAME):
            # if this is a child KPI, we do not need to return a value to the Total Score KPI
            return 0
        else:  # normal behavior for when this isn't a child KPI
            kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
            kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(kpi_name)
            self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                              denominator_id=self.store_id, denominator_result=0, weight=weight,
                                              result=result_type_fk, identifier_parent=Const.RED_SCORE_PARENT,
                                              identifier_result=kpi_name, should_enter=True)
            return result

    # SOS functions

    def calculate_sos(self, kpi_line, relevant_scif, weight):
        market_share_required = self.does_exist(kpi_line, Const.MARKET_SHARE_TARGET)
        if market_share_required:
            market_share_target = self.get_market_share_target()
        else:
            market_share_target = 0

        if not market_share_target:
            market_share_target = 0

        denominator_facings = relevant_scif['facings'].sum()

        filtered_scif = relevant_scif.copy()

        manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
        if manufacturer:
            filtered_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]

        liberty_truck = self.does_exist(kpi_line, Const.LIBERTY_KEY_MANUFACTURER)
        if liberty_truck:
            liberty_truck_scif = relevant_scif[relevant_scif[Const.LIBERTY_KEY_MANUFACTURER].isin(
                liberty_truck)]
            filtered_scif = filtered_scif.append(liberty_truck_scif, sort=False).drop_duplicates()

        if self.does_exist(kpi_line, Const.INCLUDE_BODY_ARMOR) and self.body_armor_delivered:
            body_armor_scif = relevant_scif[relevant_scif['brand_fk'] == Const.BODY_ARMOR_BRAND_FK]
            filtered_scif = filtered_scif.append(body_armor_scif, sort=False)

        numerator_facings = filtered_scif['facings'].sum()
        sos_value = numerator_facings / float(denominator_facings)
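        # e.g. 40 of 200 facings in scope gives sos_value 0.2, which must exceed the market-share target to pass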
        result = 1 if sos_value > market_share_target else 0

        parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
        kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
        self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=numerator_facings,
                                          denominator_id=self.store_id, denominator_result=denominator_facings,
                                          weight=weight, score=result * weight,
                                          result=sos_value * 100, target=market_share_target * 100,
                                          identifier_parent=parent_kpi_name, should_enter=True)

        return result

    # Availability functions
    def calculate_availability(self, kpi_line, relevant_scif, weight):
        survey_question_skus_required = self.does_exist(
            kpi_line, Const.SURVEY_QUESTION_SKUS_REQUIRED)
        if survey_question_skus_required:
            survey_question_skus, secondary_survey_question_skus = \
                self.get_relevant_product_assortment_by_kpi_name(kpi_line[Const.KPI_NAME])
            unique_skus = \
                relevant_scif[relevant_scif['product_fk'].isin(
                    survey_question_skus)]['product_fk'].unique().tolist()
            if secondary_survey_question_skus:
                secondary_unique_skus = \
                    relevant_scif[relevant_scif['product_fk'].isin(secondary_survey_question_skus)][
                        'product_fk'].unique().tolist()
            else:
                secondary_unique_skus = None

        else:
            secondary_unique_skus = None
            manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
            if manufacturer:
                relevant_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]
            brand = self.does_exist(kpi_line, Const.BRAND)
            if brand:
                relevant_scif = relevant_scif[relevant_scif['brand_name'].isin(brand)]
            category = self.does_exist(kpi_line, Const.CATEGORY)
            if category:
                relevant_scif = relevant_scif[relevant_scif['category'].isin(category)]
            excluded_brand = self.does_exist(kpi_line, Const.EXCLUDED_BRAND)
            if excluded_brand:
                relevant_scif = relevant_scif[~relevant_scif['brand_name'].isin(excluded_brand)]
            excluded_sku = self.does_exist(kpi_line, Const.EXCLUDED_SKU)
            if excluded_sku:
                relevant_scif = relevant_scif[~relevant_scif['product_name'].isin(excluded_sku)]
            unique_skus = relevant_scif['product_fk'].unique().tolist()

        length_of_unique_skus = len(unique_skus)
        minimum_number_of_skus = kpi_line[Const.MINIMUM_NUMBER_OF_SKUS]

        if length_of_unique_skus >= minimum_number_of_skus:
            if secondary_unique_skus:
                length_of_unique_skus = len(secondary_unique_skus)
                minimum_number_of_skus = kpi_line[Const.SECONDARY_MINIMUM_NUMBER_OF_SKUS]
                result = 1 if length_of_unique_skus > minimum_number_of_skus else 0
            else:
                result = 1
        else:
            result = 0

        parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
        kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
        self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                          denominator_id=self.store_id, denominator_result=0, weight=weight,
                                          result=length_of_unique_skus, target=minimum_number_of_skus,
                                          score=result * weight,
                                          identifier_parent=parent_kpi_name, should_enter=True)

        return result

    def get_relevant_product_assortment_by_kpi_name(self, kpi_name):
        template = self.templates[Const.SURVEY_QUESTION_SKUS]
        relevant_template = template[template[Const.KPI_NAME] == kpi_name]
        # EAN codes are read from the template as floats; normalize them to strings
        relevant_template[Const.EAN_CODE] = \
            relevant_template[Const.EAN_CODE].apply(lambda x: str(int(x)) if x != '' else None)
        primary_ean_codes = \
            relevant_template[relevant_template[Const.SECONDARY_GROUP]
                              != 'Y'][Const.EAN_CODE].unique().tolist()
        primary_ean_codes = [code for code in primary_ean_codes if code is not None]
        primary_products = self.all_products[self.all_products['product_ean_code'].isin(
            primary_ean_codes)]
        primary_product_pks = primary_products['product_fk'].unique().tolist()
        secondary_ean_codes = \
            relevant_template[relevant_template[Const.SECONDARY_GROUP]
                              == 'Y'][Const.EAN_CODE].unique().tolist()
        if secondary_ean_codes:
            secondary_products = self.all_products[self.all_products['product_ean_code'].isin(
                secondary_ean_codes)]
            secondary_product_pks = secondary_products['product_fk'].unique().tolist()
        else:
            secondary_product_pks = None
        return primary_product_pks, secondary_product_pks

    # Count of Display functions
    def calculate_count_of_display(self, kpi_line, relevant_scif, weight):
        filtered_scif = relevant_scif.copy()

        manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
        if manufacturer:
            filtered_scif = relevant_scif[relevant_scif['manufacturer_name'].isin(manufacturer)]

        liberty_truck = self.does_exist(kpi_line, Const.LIBERTY_KEY_MANUFACTURER)
        if liberty_truck:
            liberty_truck_scif = relevant_scif[relevant_scif[Const.LIBERTY_KEY_MANUFACTURER].isin(
                liberty_truck)]
            filtered_scif = filtered_scif.append(liberty_truck_scif, sort=False).drop_duplicates()

        brand = self.does_exist(kpi_line, Const.BRAND)
        if brand:
            filtered_scif = filtered_scif[filtered_scif['brand_name'].isin(brand)]

        category = self.does_exist(kpi_line, Const.CATEGORY)
        if category:
            filtered_scif = filtered_scif[filtered_scif['category'].isin(category)]

        excluded_brand = self.does_exist(kpi_line, Const.EXCLUDED_BRAND)
        if excluded_brand:
            filtered_scif = filtered_scif[~filtered_scif['brand_name'].isin(excluded_brand)]

        excluded_category = self.does_exist(kpi_line, Const.EXCLUDED_CATEGORY)
        if excluded_category:
            filtered_scif = filtered_scif[~filtered_scif['category'].isin(excluded_category)]

        ssd_still = self.does_exist(kpi_line, Const.ATT4)
        if ssd_still:
            filtered_scif = filtered_scif[filtered_scif['att4'].isin(ssd_still)]

        if self.does_exist(kpi_line, Const.INCLUDE_BODY_ARMOR) and self.body_armor_delivered:
            body_armor_scif = relevant_scif[relevant_scif['brand_fk'] == Const.BODY_ARMOR_BRAND_FK]
            filtered_scif = filtered_scif.append(body_armor_scif, sort=False)

        size_subpackages = self.does_exist(kpi_line, Const.SIZE_SUBPACKAGES_NUM)
        if size_subpackages:
            # convert each "size;subpackages" pairing into a (base size, multi-pack size) tuple
            size_subpackages_tuples = [tuple([self.convert_base_size_values(i) for i in x.split(';')]) for x in
                                       size_subpackages]
            filtered_scif = filtered_scif[pd.Series(list(zip(filtered_scif['Base Size'],
                                                             filtered_scif['Multi-Pack Size'])),
                                                    index=filtered_scif.index).isin(size_subpackages_tuples)]

        excluded_size_subpackages = self.does_exist(kpi_line, Const.EXCLUDED_SIZE_SUBPACKAGES_NUM)
        if excluded_size_subpackages:
            # convert each excluded "size;subpackages" pairing into a (base size, multi-pack size) tuple
            size_subpackages_tuples = [tuple([self.convert_base_size_values(i) for i in x.split(';')]) for x in
                                       excluded_size_subpackages]
            filtered_scif = filtered_scif[~pd.Series(list(zip(filtered_scif['Base Size'],
                                                              filtered_scif['Multi-Pack Size'])),
                                                     index=filtered_scif.index).isin(size_subpackages_tuples)]

        sub_packages = self.does_exist(kpi_line, Const.SUBPACKAGES_NUM)
        if sub_packages:
            if sub_packages == [Const.NOT_NULL]:
                filtered_scif = filtered_scif[~filtered_scif['Multi-Pack Size'].isnull()]
            elif sub_packages == [Const.GREATER_THAN_ONE]:
                filtered_scif = filtered_scif[filtered_scif['Multi-Pack Size'] > 1]
            else:
                filtered_scif = filtered_scif[filtered_scif['Multi-Pack Size'].isin(
                    [int(i) for i in sub_packages])]

        if self.does_exist(kpi_line, Const.MINIMUM_FACINGS_REQUIRED):
            number_of_passing_displays, _ = self.get_number_of_passing_displays(filtered_scif)

            if self.does_exist(kpi_line, Const.PARENT_KPI_NAME):
                parent_kpi_name = kpi_line[Const.PARENT_KPI_NAME] + Const.LIBERTY + Const.DRILLDOWN
                kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(
                    kpi_line[Const.KPI_NAME] + Const.LIBERTY)
                self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                                  denominator_id=self.store_id, denominator_result=0, weight=weight,
                                                  result=number_of_passing_displays,
                                                  score=number_of_passing_displays,
                                                  identifier_parent=parent_kpi_name, should_enter=True)
                return 0
            else:
                parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
                identifier_result = parent_kpi_name + Const.DRILLDOWN
                kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
                self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0,
                                                  denominator_id=self.store_id, denominator_result=0, weight=weight,
                                                  result=number_of_passing_displays,
                                                  score=number_of_passing_displays * weight,
                                                  identifier_parent=parent_kpi_name,
                                                  identifier_result=identifier_result, should_enter=True)
                return number_of_passing_displays
        else:
            return 0

    # Share of Display functions
    def calculate_share_of_display(self, kpi_line, relevant_scif, weight):
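        """
        compares passing displays of the filtered (numerator) products against all passing
        displays in the SSD/Still segment (denominator) and checks the share against the
        market share target; writes a drill-down result to the DB
        :return: 1 if the share of displays exceeds the target, otherwise 0
        """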
        base_scif = relevant_scif.copy()

        ssd_still = self.does_exist(kpi_line, Const.ATT4)
        if ssd_still:
            ssd_still_scif = base_scif[base_scif['att4'].isin(ssd_still)]
        else:
            ssd_still_scif = base_scif

        denominator_passing_displays, _ = \
            self.get_number_of_passing_displays(ssd_still_scif)

        manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
        if manufacturer:
            filtered_scif = ssd_still_scif[ssd_still_scif['manufacturer_name'].isin(manufacturer)]
        else:
            filtered_scif = ssd_still_scif

        liberty_truck = self.does_exist(kpi_line, Const.LIBERTY_KEY_MANUFACTURER)
        if liberty_truck:
            liberty_truck_scif = ssd_still_scif[ssd_still_scif[Const.LIBERTY_KEY_MANUFACTURER].isin(
                liberty_truck)]
            filtered_scif = filtered_scif.append(liberty_truck_scif, sort=False).drop_duplicates()

        if self.does_exist(kpi_line, Const.MARKET_SHARE_TARGET):
            market_share_target = self.get_market_share_target(ssd_still=ssd_still)
        else:
            market_share_target = 0

        if self.does_exist(kpi_line, Const.INCLUDE_BODY_ARMOR) and self.body_armor_delivered:
            body_armor_scif = relevant_scif[relevant_scif['brand_fk'] == Const.BODY_ARMOR_BRAND_FK]
            filtered_scif = filtered_scif.append(body_armor_scif, sort=False)

        if self.does_exist(kpi_line, Const.MINIMUM_FACINGS_REQUIRED):
            numerator_passing_displays, _ = \
                self.get_number_of_passing_displays(filtered_scif)

            if denominator_passing_displays != 0:
                share_of_displays = \
                    numerator_passing_displays / float(denominator_passing_displays)
            else:
                share_of_displays = 0

            result = 1 if share_of_displays > market_share_target else 0

            parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
            kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
            self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk,
                                              numerator_result=numerator_passing_displays,
                                              denominator_id=self.store_id,
                                              denominator_result=denominator_passing_displays, weight=weight,
                                              result=share_of_displays * 100, target=market_share_target * 100,
                                              score=result * weight,
                                              identifier_parent=parent_kpi_name, should_enter=True)

            return result
        else:
            return 0

    def get_number_of_passing_displays(self, filtered_scif):
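        """
        groups the filtered scif by base size, multi-pack size and scene, and checks each
        group against the minimum facings template
        :return: (number of passing displays, total facings on passing displays)
        """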
        if filtered_scif.empty:
            return 0, 0

        filtered_scif = \
            filtered_scif.groupby(['Base Size', 'Multi-Pack Size', 'scene_id'],
                                  as_index=False)['facings'].sum()

        filtered_scif['passed_displays'] = \
            filtered_scif.apply(lambda row: self._calculate_pass_status_of_display(row), axis=1)

        number_of_displays = filtered_scif['passed_displays'].sum()
        facings_of_displays = filtered_scif[filtered_scif['passed_displays'] == 1]['facings'].sum()

        return number_of_displays, facings_of_displays

    def _calculate_pass_status_of_display(self, row):  # need to move to external KPI targets
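        """
        :param row: a grouped display row with 'Base Size', 'Multi-Pack Size' and 'facings'
        :return: 1 if the display has at least the minimum facings required for its
                 base size / multi-pack size combination, otherwise 0
        """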
        template = self.templates[Const.MINIMUM_FACINGS]
        relevant_template = template[(template[Const.BASE_SIZE_MIN] <= row['Base Size']) &
                                     (template[Const.BASE_SIZE_MAX] >= row['Base Size']) &
                                     (template[Const.MULTI_PACK_SIZE] == row['Multi-Pack Size'])]
        if relevant_template.empty:
            return 0
        minimum_facings = relevant_template[Const.MINIMUM_FACINGS_REQUIRED_FOR_DISPLAY].min()
        return 1 if row['facings'] >= minimum_facings else 0

    # Share of Cooler functions
    def calculate_share_of_coolers(self, kpi_line, relevant_scif, weight):
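        """
        checks, cooler (scene) by cooler, whether the filtered facings reach the Coke facings
        threshold, then compares the share of passing coolers against the market share target;
        writes a drill-down result to the DB
        :return: 1 if the cooler market share exceeds the target, otherwise 0
        """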
        scene_ids = relevant_scif['scene_id'].unique().tolist()

        total_coolers = len(scene_ids)
        if total_coolers == 0:
            return 0

        passing_coolers = 0

        if self.does_exist(kpi_line, Const.MARKET_SHARE_TARGET):
            market_share_target = self.get_market_share_target()
        else:
            market_share_target = 0

        for scene_id in scene_ids:
            cooler_scif = relevant_scif[relevant_scif['scene_id'] == scene_id]

            filtered_scif = cooler_scif.copy()

            manufacturer = self.does_exist(kpi_line, Const.MANUFACTURER)
            if manufacturer:
                filtered_scif = cooler_scif[cooler_scif['manufacturer_name'].isin(manufacturer)]

            liberty_truck = self.does_exist(kpi_line, Const.LIBERTY_KEY_MANUFACTURER)
            if liberty_truck:
                liberty_truck_scif = cooler_scif[cooler_scif[Const.LIBERTY_KEY_MANUFACTURER].isin(
                    liberty_truck)]
                filtered_scif = filtered_scif.append(
                    liberty_truck_scif, sort=False).drop_duplicates()

            if self.does_exist(kpi_line, Const.INCLUDE_BODY_ARMOR) and self.body_armor_delivered:
                body_armor_scif = cooler_scif[cooler_scif['brand_fk'] == Const.BODY_ARMOR_BRAND_FK]
                filtered_scif = filtered_scif.append(body_armor_scif, sort=False).drop_duplicates()

            coke_facings_threshold = self.does_exist(kpi_line, Const.COKE_FACINGS_THRESHOLD)
            # does_exist returns a list of values; use its first entry as the threshold
            coke_facings_threshold = coke_facings_threshold[0] if coke_facings_threshold else 0
            total_cooler_facings = cooler_scif['facings'].sum()
            cooler_sos = filtered_scif['facings'].sum() / float(total_cooler_facings) \
                if total_cooler_facings else 0
            cooler_result = 1 if cooler_sos >= coke_facings_threshold else 0

            passing_coolers += cooler_result

        coke_market_share = passing_coolers / float(total_coolers)
        result = 1 if coke_market_share > market_share_target else 0

        parent_kpi_name = kpi_line[Const.KPI_NAME] + Const.LIBERTY
        kpi_fk = self.common_db.get_kpi_fk_by_kpi_type(parent_kpi_name + Const.DRILLDOWN)
        self.common_db.write_to_db_result(kpi_fk, numerator_id=self.manufacturer_fk,
                                          numerator_result=passing_coolers,
                                          denominator_id=self.store_id,
                                          denominator_result=total_coolers, weight=weight,
                                          result=coke_market_share * 100, target=market_share_target * 100,
                                          score=result * weight,
                                          identifier_parent=parent_kpi_name, should_enter=True)

        return result

    # Survey functions
    def calculate_survey(self, kpi_line, relevant_scif, weight):
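        """
        :return: 1 if the survey question in the template line was answered 'Yes', otherwise 0
        """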
        return 1 if self.survey.check_survey_answer(kpi_line[Const.QUESTION_TEXT], 'Yes') else 0

    # helper functions
    def convert_base_size_and_multi_pack(self):
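        """
        normalizes the 'Base Size' column (e.g. '12 OZ' -> 12.0) and casts 'Multi-Pack Size'
        to int across the whole scif
        """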
        self.scif.loc[:, 'Base Size'] = self.scif['Base Size'].apply(self.convert_base_size_values)
        self.scif.loc[:, 'Multi-Pack Size'] = \
            self.scif['Multi-Pack Size'].apply(lambda x: int(x) if x is not None else None)

    @staticmethod
    def convert_base_size_values(value):
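        """
        :param value: base size value from the template or scif (e.g. '12 OZ')
        :return: the numeric portion as a float, or None if it cannot be parsed
        """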
        try:
            new_value = float(value.split()[0]) if value not in [None, ''] else None
        except (IndexError, ValueError, AttributeError):
            # value was empty, non-numeric, or not a string (e.g. already numeric or NaN)
            Log.error('Could not convert base size value for {}'.format(value))
            new_value = None
        return new_value

    def get_market_share_target(self, ssd_still=None):  # need to move to external KPI targets
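        """
        looks up the store's market share target in the Market Share sheet by additional
        attribute 4, retailer and branch; falls back to hard-coded defaults when no row matches
        :param ssd_still: optional list whose first value selects the SSD or Still target
        :return: the market share target
        """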
        template = self.templates[Const.MARKET_SHARE]
        relevant_template = template[(template[Const.ADDITIONAL_ATTRIBUTE_4] == self.additional_attribute_4) &
                                     (template[Const.RETAILER] == self.retailer) &
                                     (template[Const.BRANCH] == self.branch)]

        if relevant_template.empty:
            # no matching row for this store - fall back to hard-coded defaults:
            # total 26, SSD only 49, Still only 16
            if ssd_still:
                if ssd_still[0].lower() == Const.SSD.lower():
                    return 49
                elif ssd_still[0].lower() == Const.STILL.lower():
                    return 16
                else:
                    return 0
            else:
                return 26

        if ssd_still:
            if ssd_still[0].lower() == Const.SSD.lower():
                return relevant_template[Const.SSD].iloc[0]
            elif ssd_still[0].lower() == Const.STILL.lower():
                return relevant_template[Const.STILL].iloc[0]

        # no SSD/Still filter requested - return the combined SSD + Still target
        return relevant_template[Const.SSD_AND_STILL].iloc[0]

    def get_body_armor_delivery_status(self):
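        """
        :return: True if additional_attribute_8 marks the store as receiving Body Armor
                 deliveries ('Y'), otherwise False
        """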
        return self.store_info['additional_attribute_8'].iloc[0] == 'Y'

    def get_kpi_function(self, kpi_type):
        """
        transfers every kpi to its own function
        :param kpi_type: value from "sheet" column in the main sheet
        :return: function
        """
        if kpi_type == Const.SOS:
            return self.calculate_sos
        elif kpi_type == Const.AVAILABILITY:
            return self.calculate_availability
        elif kpi_type == Const.COUNT_OF_DISPLAY:
            return self.calculate_count_of_display
        elif kpi_type == Const.SHARE_OF_DISPLAY:
            return self.calculate_share_of_display
        elif kpi_type == Const.SHARE_OF_COOLERS:
            return self.calculate_share_of_coolers
        elif kpi_type == Const.SURVEY:
            return self.calculate_survey
        else:
            Log.warning(
                "The value '{}' in column sheet in the template is not recognized".format(kpi_type))
            return None

    @staticmethod
    def does_exist(kpi_line, column_name):
        """
        checks if kpi_line has values in this column, and if it does - returns a list of these values
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float, np.float64]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return [x.strip() for x in cell.split(",")]
        return None