Example #1
0
 def __init__(self, data_provider, output):
     """Cache session frames, DB connection and template data for KPI runs.

     :param data_provider: session data provider (frames, ids, own manufacturer).
     :param output: output handle passed in by the calculation framework.
     """
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     # Both the new and the legacy common modules are used by this project.
     self.common = Common(self.data_provider)
     self.old_common = oldCommon(self.data_provider)
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.session_fk = self.data_provider.session_id
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.survey = Survey(self.data_provider)
     self.block = Block(self.data_provider)
     self.general_toolbox = GENERALToolBox(self.data_provider)
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     # Project template data: KPI weights sheet, gap data, per-KPI config.
     self.template_path = self.get_relevant_template()
     self.gap_data = self.get_gap_data()
     self.kpi_weights = parse_template(self.template_path,
                                       Consts.KPI_WEIGHT,
                                       lower_headers_row_index=0)
     self.template_data = self.parse_template_data()
     # Accumulators filled while the KPIs are being calculated.
     self.kpis_gaps = list()
     self.passed_availability = list()
     self.kpi_static_data = self.old_common.get_kpi_static_data()
     # param_value arrives as a string series; keep the fk as an int.
     self.own_manufacturer_fk = int(
         self.data_provider.own_manufacturer.param_value.values[0])
     self.parser = Parser
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
Example #2
0
 def __init__(self, data_provider, output):
     """Extend GlobalSessionToolBox with adjacency/block helpers and the
     project's external-target data."""
     GlobalSessionToolBox.__init__(self, data_provider, output)
     # NOTE(review): 'Adjancency' is the helper class's own spelling.
     self.adjacency = Adjancency(data_provider)
     self.block = Block(data_provider)
     self.kpi_static_data = self.common.get_kpi_static_data()
     self.ps_data_provider = PsDataProvider(data_provider)
     # Not resolved here; left as None until set elsewhere.
     self._scene_types = None
     self.external_targets = self.ps_data_provider.get_kpi_external_targets(
     )
Example #3
0
 def __init__(self, scene_toolbox_obj):
     """Scene-level helper built from an existing scene toolbox.

     :param scene_toolbox_obj: parent toolbox whose attributes are adopted.
     """
     # Adopt the parent toolbox state wholesale (data_provider, frames, ...).
     self.__dict__.update(scene_toolbox_obj.__dict__)
     # Assumes single-scene frames here (reads row 0 of each).
     self.current_scene_fk = self.scene_info.iloc[0].scene_fk
     self.store_banner_name = self.store_info.iloc[
         0].additional_attribute_20
     self.template_name = self.templates.iloc[0].template_name
     self.custom_entity_data = self.get_relevant_custom_entity_data()
     # Matches enriched with product attributes.
     self.match_product_data = self.match_product_in_scene.merge(
         self.products, on='product_fk', how='left')
     self.block = Block(self.data_provider, self.output)
Example #4
0
 def calculate(self):
     """Filter scif/matches for the PRODUCT_BLOCKING KPI, score the groups,
     then restore the util's exclusion-all filter state."""
     if not self.util.filtered_matches.empty:
         self.util.filtered_scif, self.util.filtered_matches = \
             self.util.commontools.set_filtered_scif_and_matches_for_specific_kpi(self.util.filtered_scif,
                                                                                  self.util.filtered_matches,
                                                                                  self.util.PRODUCT_BLOCKING)
         filtered_matches = self.util.filtered_matches.copy()
         # Drop sub_category_fk from the copy — presumably it clashes inside
         # Block's internal merge; TODO confirm against the Block helper.
         if 'sub_category_fk' in filtered_matches.columns:
             filtered_matches = filtered_matches.drop(columns=['sub_category_fk'])
         self.block = Block(self.data_provider, custom_scif=self.util.filtered_scif,
                            custom_matches=filtered_matches)
         # Re-check: the KPI-specific filtering above may have emptied matches.
         if not self.util.filtered_matches.empty:
             self.calculate_product_blocking()
         self.util.reset_filtered_scif_and_matches_to_exclusion_all_state()
Example #5
0
class ProductBlockingKpi(UnifiedCalculationsScript):
    """Session KPI script scoring 'product blocking' external-target groups.

    A group passes when its best block's facing ratio meets the row's
    Target; the written score encodes the passing block's orientation.
    """

    def __init__(self, data_provider, config_params=None, **kwargs):
        super(ProductBlockingKpi, self).__init__(data_provider, config_params=config_params, **kwargs)
        # PepsicoUtil carries the filtered scif/matches and common tools.
        self.util = PepsicoUtil(None, data_provider)
        # Rebuilt from the KPI-filtered data on every calculate() run.
        self.block = None

    def kpi_type(self):
        pass

    def calculate(self):
        """Filter scif/matches for PRODUCT_BLOCKING, score the groups, then
        restore the util's exclusion-all filter state."""
        if not self.util.filtered_matches.empty:
            self.util.filtered_scif, self.util.filtered_matches = \
                self.util.commontools.set_filtered_scif_and_matches_for_specific_kpi(self.util.filtered_scif,
                                                                                     self.util.filtered_matches,
                                                                                     self.util.PRODUCT_BLOCKING)
            filtered_matches = self.util.filtered_matches.copy()
            # Drop sub_category_fk from the copy — presumably it clashes
            # inside Block's internal merge; TODO confirm in the Block helper.
            if 'sub_category_fk' in filtered_matches.columns:
                filtered_matches = filtered_matches.drop(columns=['sub_category_fk'])
            self.block = Block(self.data_provider, custom_scif=self.util.filtered_scif,
                               custom_matches=filtered_matches)
            # Re-check: the filtering above may have emptied the matches.
            if not self.util.filtered_matches.empty:
                self.calculate_product_blocking()
            self.util.reset_filtered_scif_and_matches_to_exclusion_all_state()

    def calculate_product_blocking(self):
        """Evaluate every 'Group Name' external-target row and write one DB
        result (plus one util.block_results row) per group."""
        external_targets = self.util.all_targets_unpacked[self.util.all_targets_unpacked['type'] == self.util.PRODUCT_BLOCKING]
        additional_block_params = {'check_vertical_horizontal': True, 'minimum_facing_for_block': 3,
                                   'include_stacking': True,
                                   'allowed_products_filters': {'product_type': ['Empty']}}
        kpi_fk = self.util.common.get_kpi_fk_by_kpi_type(self.util.PRODUCT_BLOCKING)

        for i, row in external_targets.iterrows():
            # print row['Group Name']
            group_fk = self.util.custom_entities[self.util.custom_entities['name'] == row['Group Name']]['pk'].values[0]
            # filters = self.util.get_block_and_adjacency_filters(row)
            filters = self.util.get_block_filters(row)
            target = row['Target']
            # Target is a percentage; the block helper expects a 0-1 ratio.
            additional_block_params.update({'minimum_block_ratio': float(target)/100})

            result_df = self.block.network_x_block_together(filters, additional=additional_block_params)
            score = max_ratio = 0
            result = self.util.commontools.get_yes_no_result(0)
            if not result_df.empty:
                max_ratio = result_df['facing_percentage'].max()
                result_df = result_df[result_df['is_block']==True]
                if not result_df.empty:
                    # Keep only the best block; its orientation becomes the score.
                    max_ratio = result_df['facing_percentage'].max()
                    result_df = result_df[result_df['facing_percentage'] == max_ratio]
                    result = self.util.commontools.get_yes_no_result(1)
                    orientation = result_df['orientation'].values[0]
                    score = self.util.commontools.get_kpi_result_value_pk_by_value(orientation.upper())
            # print score
            self.write_to_db_result(fk=kpi_fk, numerator_id=group_fk, denominator_id=self.util.store_id,
                                    numerator_result=max_ratio * 100,
                                    score=score, result=result, target=target, by_scene=True)
            # NOTE(review): DataFrame.append was removed in pandas>=2.0 —
            # fine on the pandas pinned here; confirm before upgrading.
            self.util.block_results = self.util.block_results.append(pd.DataFrame([{'Group Name': row['Group Name'],
                                                                                    'Score':
                                                                                        result_df['is_block'].values[
                                                                                            0] if not result_df.empty else False}]))
Example #6
0
    def __init__(self, data_provider, output):
        """Cache session frames, DB helpers, external targets and the result
        accumulator used by the KPI calculations.

        :param data_provider: session data provider.
        :param output: framework output handle.
        """
        self.output = output
        self.data_provider = data_provider
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.block = Block(data_provider)
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = self.ps_data_provider.rds_conn
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.session_fk = self.session_info['pk'].values[0]
        self.kpi_results_queries = []
        self.kpi_static_queries = []
        # param_value arrives as a string series; keep the fk as an int.
        self.own_manufacturer_fk = int(
            self.data_provider.own_manufacturer.param_value.values[0])

        self.adjacency = BlockAdjacency(self.data_provider,
                                        ps_data_provider=self.ps_data_provider,
                                        common=self.common,
                                        rds_conn=self.rds_conn)
        self.eyelight = Eyelight(self.data_provider, self.common,
                                 self.ps_data_provider)
        # Matches enriched with scene-item facts (used by position KPIs).
        self.merged_scif_mpis = self.match_product_in_scene.merge(
            self.scif,
            how='left',
            left_on=['scene_fk', 'product_fk'],
            right_on=['scene_fk', 'product_fk'])
        self.targets = self.ps_data_provider.get_kpi_external_targets(
            key_fields=[
                "KPI Type", "Location: JSON", "Config Params: JSON",
                "Dataset 1: JSON", "Dataset 2: JSON"
            ])
        # Accumulates one row per KPI result.
        self.results_df = pd.DataFrame(columns=[
            'kpi_name', 'kpi_fk', 'numerator_id', 'numerator_result',
            'context_id', 'denominator_id', 'denominator_result', 'result',
            'score'
        ])
        self.custom_entity_table = self.get_kpi_custom_entity_table()
Example #7
0
    def __init__(self, data_provider, output):
        """Cache session frames and build the mpis product/scene merge.

        :param data_provider: session data provider.
        :param output: framework output handle.
        """
        self.data_provider = data_provider
        self.common = Common(data_provider)
        self.output = output
        self.ps_data_provider = PsDataProvider(data_provider, output)
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.manufacturer_id = self.get_manufacturer_id_from_manufacturer_name(Const.MANUFACTURER)
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.template_info = self.data_provider[Data.TEMPLATES]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.block = Block(self.data_provider, self.output, common=self.common)

        # Matches + product + scene attributes, trimmed to COLUMNS.
        self.mpis = self.match_product_in_scene \
            .merge(self.products, on='product_fk', suffixes=['', '_p']) \
            .merge(self.scene_info, on='scene_fk', suffixes=['', '_s'])[COLUMNS]
    def __init__(self, data_provider, output):
        """Cache session data, the GSK set-up template and external targets.

        :param data_provider: session data provider.
        :param output: framework output handle.
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        # Set-up template shipped next to this module (../Data/gsk_set_up.xlsx).
        self.set_up_template = pd.read_excel(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
            'gsk_set_up.xlsx'),
                                             sheet_name='Functional KPIs',
                                             keep_default_na=False)

        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)
        self.blocking_generator = Block(self.data_provider)
        self.assortment = self.gsk_generator.get_assortment_data_provider()
        self.store_info = self.data_provider['store_info']
        self.store_fk = self.data_provider[StoreInfoConsts.STORE_FK]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.targets = self.ps_data_provider.get_kpi_external_targets(
            key_fields=Consts.KEY_FIELDS, data_fields=Consts.DATA_FIELDS)
        # NOTE(review): assigns get_manufacturer without calling it —
        # property or deliberate? Confirm before relying on the value.
        self.own_manufacturer = self.get_manufacturer
        # Initial set-up state for each KPI type (NO_INFO placeholders).
        self.set_up_data = {
            (Consts.PLN_BLOCK, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.POSITION_SCORE, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.ECAPS_FILTER_IDENT, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.PLN_MSL, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            ("GSK_PLN_LSOS_SCORE", Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.POSM, Const.KPI_TYPE_COLUMN): Const.NO_INFO
        }
Example #9
0
 def __init__(self, data_provider, output):
     """Extend GlobalSessionToolBox with template data, the scif/matches
     merge and a results accumulator."""
     GlobalSessionToolBox.__init__(self, data_provider, output)
     self.templates = {}
     self.parse_template()
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.match_scene_item_facts = pd.merge(
         self.scif,
         self.match_product_in_scene,
         how='right',
         left_on=['item_id', 'scene_id'],
         right_on=['product_fk',
                   'scene_fk'])  # Merges scif with mpis on product_fk
     self.block = Block(data_provider)
     # param_value arrives as a string series; keep the fk as an int.
     self.own_manufacturer_fk = int(
         self.data_provider.own_manufacturer.param_value.values[0])
     # Accumulates one row per KPI result.
     self.results_df = pd.DataFrame(columns=[
         'kpi_name', 'kpi_fk', 'numerator_id', 'numerator_result',
         'context_id', 'denominator_id', 'denominator_result', 'result',
         'score'
     ])
Example #10
0
    def __init__(self, output, data_provider):
        """Scene util: caches session/scene frames, template-driven targets
        and the product-enriched matches for the block KPIs.

        :param output: framework output handle.
        :param data_provider: scene data provider.
        """
        super(PNGJP_SAND2BlockGoldenUtil, self).__init__(data_provider)
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)

        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.templates = self.data_provider[Data.TEMPLATES]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.scif = self.data_provider.scene_item_facts
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_id = self.store_info.iloc[0].store_fk
        self.store_type = self.data_provider.store_type
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.template_parser = PNGJPTemplateParser(self.data_provider,
                                                   self.rds_conn)

        # Template-driven configuration: targets and custom entities.
        self.targets_from_template = self.template_parser.get_targets()
        self.custom_entity_data = self.template_parser.get_custom_entity()
        self.external_targets = self.template_parser.get_external_targets()

        self.match_display_in_scene = self.data_provider.match_display_in_scene
        # Assumes single-scene frames here (reads row 0).
        self.current_scene_fk = self.scene_info.iloc[0].scene_fk
        self.template_name = self.templates.iloc[0].template_name
        # Matches enriched with product attributes.
        self.match_product_data = self.match_product_in_scene.merge(
            self.products, on='product_fk', how='left')
        self.block = Block(self.data_provider, self.output)
Example #11
0
class ColdCutToolBox:
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    def __init__(self, data_provider, output):
        """Cache session frames, DB helpers, external targets and the result
        accumulator used by the cold-cut KPI calculations.

        :param data_provider: session data provider.
        :param output: framework output handle.
        """
        self.output = output
        self.data_provider = data_provider
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.block = Block(data_provider)
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = self.ps_data_provider.rds_conn
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.session_fk = self.session_info['pk'].values[0]
        self.kpi_results_queries = []
        self.kpi_static_queries = []
        # param_value arrives as a string series; keep the fk as an int.
        self.own_manufacturer_fk = int(
            self.data_provider.own_manufacturer.param_value.values[0])

        self.adjacency = BlockAdjacency(self.data_provider,
                                        ps_data_provider=self.ps_data_provider,
                                        common=self.common,
                                        rds_conn=self.rds_conn)
        self.eyelight = Eyelight(self.data_provider, self.common,
                                 self.ps_data_provider)
        # Matches enriched with scene-item facts (used by position KPIs).
        self.merged_scif_mpis = self.match_product_in_scene.merge(
            self.scif,
            how='left',
            left_on=['scene_fk', 'product_fk'],
            right_on=['scene_fk', 'product_fk'])
        self.targets = self.ps_data_provider.get_kpi_external_targets(
            key_fields=[
                "KPI Type", "Location: JSON", "Config Params: JSON",
                "Dataset 1: JSON", "Dataset 2: JSON"
            ])
        # Accumulates one row per KPI result; saved in main_calculation.
        self.results_df = pd.DataFrame(columns=[
            'kpi_name', 'kpi_fk', 'numerator_id', 'numerator_result',
            'context_id', 'denominator_id', 'denominator_result', 'result',
            'score'
        ])
        self.custom_entity_table = self.get_kpi_custom_entity_table()

    def main_calculation(self):
        """Entry point: run every supported KPI type found in the external
        targets and persist the accumulated results."""
        supported_types = [
            Consts.SOS, Consts.HORIZONTAL_SHELF_POSITION,
            Consts.VERTICAL_SHELF_POSITION, Consts.BLOCKING, Consts.BLOCK_ADJ,
            Consts.BLOCKING_ORIENTATION
        ]
        # Only the target rows whose actual type is one we can calculate.
        type_mask = self.targets[Consts.ACTUAL_TYPE].isin(supported_types)
        self._calculate_kpis_from_template(self.targets[type_mask])
        self.save_results_to_db()
        return

    def calculate_blocking(self, row, df):
        """Evaluate one blocking KPI target row.

        :param row: external-target row (Config/Location/Dataset JSONs).
        :param df: pre-filtered matches frame for this KPI.
        :return: list of result dicts, or None when df is empty.
        """
        if df.empty:
            return None
        config_params = row['Config Params: JSON']
        location_json = row['Location: JSON']
        kpi_fk = row['kpi_fk']
        population = row['Dataset 1: JSON']['include'][0]
        return self._logic_for_blocking(kpi_fk, population, location_json,
                                        config_params)

    def calculate_blocking_adj(self, row, df):
        """Blocking-adjacency KPI: anchor block vs target block.

        When the config declares a context_type, the anchor/target filters
        are evaluated once per context value shared by df and the target
        filter; otherwise a single adjacency check is run.

        :param row: external-target row (Config/Location/Dataset JSONs).
        :param df: pre-filtered matches frame for this KPI.
        :return: list of result dicts for the DB writer.
        """
        result_dict_list = []
        additional_data = row['Config Params: JSON']
        location_data = row['Location: JSON']
        kpi_fk = row['kpi_fk']
        anchor_data = row['Dataset 1: JSON']['include'][0]
        target_data = row['Dataset 2: JSON']['include'][0]

        context_type = additional_data.get('context_type')
        if context_type:
            # Only contexts present in BOTH df and the target filter count.
            target_df = ParseInputKPI.filter_df(target_data, self.scif)
            target_values = target_df[context_type].unique().tolist()
            context_values = [
                v for v in df[context_type].unique().tolist()
                if v and pd.notna(v) and v in target_values
            ]
            for context_value in context_values:
                # Restrict both populations to the current context value.
                anchor_data.update({context_type: [context_value]})
                target_data.update({context_type: [context_value]})
                result_dict = self._logic_for_adj(
                    kpi_fk,
                    anchor_data,
                    target_data,
                    location_data,
                    additional_data,
                    eyelight_prefix='{}-'.format(context_value),
                    custom_entity=context_value)
                result_dict_list.append(result_dict)
        else:
            result_dict = self._logic_for_adj(kpi_fk, anchor_data, target_data,
                                              location_data, additional_data)
            result_dict_list.append(result_dict)
        return result_dict_list

    def _logic_for_adj(self,
                       kpi_fk,
                       anchor_data,
                       target_data,
                       location_data,
                       additional_data,
                       custom_entity=None,
                       eyelight_prefix=None):
        """Run one block-adjacency evaluation and shape the DB result dict.

        The result is 'Yes' only for a truthy, non-NaN return from
        evaluate_block_adjacency; context_id is attached only when a custom
        entity is given.
        """
        result = self.adjacency.evaluate_block_adjacency(
            anchor_data,
            target_data,
            location=location_data,
            additional=additional_data,
            kpi_fk=kpi_fk,
            eyelight_prefix=eyelight_prefix)
        # Truthy and non-NaN → 'Yes' custom result.
        result_type_fk = Consts.CUSTOM_RESULT['Yes'] if result and pd.notna(
            result) else Consts.CUSTOM_RESULT['No']
        result_dict = {
            'kpi_fk': kpi_fk,
            'numerator_id': self.own_manufacturer_fk,
            'denominator_id': self.store_id,
            'numerator_result': 1 if result else 0,
            'denominator_result': 1,
            'result': result_type_fk
        }
        if custom_entity:
            result_dict.update(
                {'context_id': self.get_custom_entity_value(custom_entity)})

        return result_dict

    def _logic_for_blocking(self, kpi_fk, population_data, location_data,
                            additional_data):
        """Blocking KPI core: 'Yes' when any cluster qualifies as a block.

        Writes an eyelight row for every cluster found (passing or not).
        Returns a single-element list holding the result dict.
        """
        result_dict_list = []
        # NOTE: mutates the caller's additional_data dict.
        additional_data.update({'use_masking_only': True})
        block = self.block.network_x_block_together(population=population_data,
                                                    location=location_data,
                                                    additional=additional_data)

        for row in block.itertuples():
            # Take the scene_match_fks of one node of the cluster and write
            # them to the eyelight report.
            scene_match_fks = list(row.cluster.nodes[list(
                row.cluster.nodes())[0]]['scene_match_fk'])
            self.eyelight.write_eyelight_result(scene_match_fks, kpi_fk)
        passed_block = block[block['is_block']]

        if passed_block.empty:
            numerator_result = 0
            result_value = "No"
        else:
            numerator_result = 1
            result_value = "Yes"

        result_type_fk = Consts.CUSTOM_RESULT[result_value]
        # numerator_id = df.custom_entity_fk.iloc[0]

        result_dict = {
            'kpi_fk': kpi_fk,
            'numerator_id': self.own_manufacturer_fk,
            'numerator_result': numerator_result,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'result': result_type_fk
        }

        result_dict_list.append(result_dict)
        return result_dict_list

    def calculate_blocking_orientation(self, row, df):
        """Blocking-orientation KPI for one external-target row.

        Runs once per distinct numerator value when 'numerator_type' is
        configured, otherwise once for the whole population.

        :param row: external-target row (Config/Location/Dataset JSONs).
        :param df: pre-filtered matches frame for this KPI.
        :return: list of result dicts, or None when df is empty.
        """
        if df.empty:
            return
        result_dict_list = []
        additional_data = row['Config Params: JSON']
        location_data = row['Location: JSON']
        kpi_fk = row['kpi_fk']
        population_data = row['Dataset 1: JSON']
        if population_data:
            population_data = population_data['include'][0]
        else:
            population_data = {}

        # Orientation-detection settings. shelves_required_for_vertical=.8
        # — presumably a share of shelves; TODO confirm in the block helper.
        additional_data.update({
            'vertical_horizontal_methodology':
            ['bucketing', 'percentage_of_shelves'],
            'shelves_required_for_vertical':
            .8,
            'check_vertical_horizontal':
            True
        })

        numerator_type = additional_data.get('numerator_type')
        if numerator_type:
            # One evaluation per non-null numerator value found in df.
            numerator_values = [
                v for v in df[numerator_type].unique().tolist()
                if v and pd.notna(v)
            ]
            for numerator_value in numerator_values:
                population_data.update({numerator_type: [numerator_value]})
                result_dict = self._logic_for_blocking_orientation(
                    kpi_fk, population_data, location_data, additional_data,
                    numerator_value)
                result_dict_list.append(result_dict)
        else:
            result_dict = self._logic_for_blocking_orientation(
                kpi_fk, population_data, location_data, additional_data)
            result_dict_list.append(result_dict)

        return result_dict_list

    def _logic_for_blocking_orientation(self,
                                        kpi_fk,
                                        population_data,
                                        location_data,
                                        additional_data,
                                        custom_entity=None):
        """Run one orientation evaluation and shape the DB result dict.

        The result is the orientation of the first passing block, or
        'Not Blocked'. Eyelight rows are written for every cluster found.
        """
        # NOTE: mutates the caller's additional_data dict.
        additional_data.update({'use_masking_only': True})
        block = self.block.network_x_block_together(population=population_data,
                                                    location=location_data,
                                                    additional=additional_data)
        if custom_entity:
            prefix = '{}-'.format(custom_entity)
            numerator_id = self.get_custom_entity_value(custom_entity)
        else:
            prefix = None
            numerator_id = self.own_manufacturer_fk
        for row in block.itertuples():

            # Take the scene_match_fks of one node of the cluster and write
            # them to the eyelight report.
            scene_match_fks = list(row.cluster.nodes[list(
                row.cluster.nodes())[0]]['scene_match_fk'])
            self.eyelight.write_eyelight_result(scene_match_fks,
                                                kpi_fk,
                                                prefix=prefix)
        passed_block = block[block['is_block']]

        if passed_block.empty:
            result_value = "Not Blocked"
        else:
            result_value = passed_block.orientation.iloc[0]

        result = Consts.CUSTOM_RESULT[result_value]
        result_dict = {
            'kpi_fk': kpi_fk,
            'numerator_id': numerator_id,
            'numerator_result': 1 if result_value != 'Not Blocked' else 0,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'result': result
        }

        return result_dict

    def calculate_vertical_position(self, row, df):
        """Vertical shelf-position KPI: modal position label per product.

        Per scene/product keeps the row with the most scene facings,
        attaches each scene/bay's shelf count, maps shelf-from-bottom to a
        position label via Consts.shelf_map, then reports the modal result
        fk per product.
        """
        result_dict_list = []
        mpis = df  # get this from the external target filter_df method thingy
        # Max facings per scene/product → picks a representative row below.
        scene_facings_df = mpis.groupby(['scene_fk', 'product_fk'],
                                        as_index=False)['facings'].max()
        scene_facings_df.rename(columns={'facings': 'scene_facings'},
                                inplace=True)
        # Shelf count per scene/bay (highest shelf number from bottom).
        shelf_df = self.merged_scif_mpis.groupby(
            ['scene_fk', 'bay_number'],
            as_index=False)['shelf_number_from_bottom'].max()
        shelf_df.rename(columns={'shelf_number_from_bottom': 'shelf_count'},
                        inplace=True)

        pre_sort_mpis = pd.merge(mpis,
                                 scene_facings_df,
                                 how='left',
                                 on=['scene_fk', 'product_fk'])
        # Keep the highest-scene_facings row per scene/product.
        scene_facings_df_sorted = pre_sort_mpis.sort_values('scene_facings')
        mpis = scene_facings_df_sorted.drop_duplicates(
            ['scene_fk', 'product_fk'], keep="last")

        mpis = pd.merge(mpis,
                        shelf_df,
                        how='left',
                        on=['scene_fk', 'bay_number'])

        mpis['position'] = mpis.apply(self._calculate_vertical_position,
                                      axis=1)
        mpis['result_type_fk'] = mpis['position'].apply(
            lambda x: Consts.CUSTOM_RESULT.get(x, 0))
        # Modal result per product (first mode on ties).
        mpis = mpis.groupby(['product_fk'],
                            as_index=False)['result_type_fk'].agg(
                                lambda x: pd.Series.mode(x).iat[0])

        for result in mpis.itertuples():
            custom_fk_result = result.result_type_fk

            # mode() can hand back an array; keep its first element.
            if type(custom_fk_result) == numpy.ndarray:
                custom_fk_result = result.result_type_fk[0]

            result_item = {
                'kpi_fk': row.kpi_fk,
                'numerator_id': result.product_fk,
                'numerator_result': 1,
                'denominator_id': self.store_id,
                'denominator_result': 1,
                'result': custom_fk_result,
                'score': 0
            }

            result_dict_list.append(result_item)
        return result_dict_list

    def calculate_horizontal_position(self, row, df):
        """Horizontal shelf-position KPI: modal Left/Center/Right per product.

        Attaches scene facings and the scene's bay count, classifies each
        row's bay into thirds, then reports the modal result fk per product.
        """
        result_dict_list = []
        mpis = df  # get this from the external target filter_df method thingy

        # Max facings per scene/product.
        scene_facings_df = mpis.groupby(['scene_fk', 'product_fk'],
                                        as_index=False)['facings'].max()
        scene_facings_df.rename(columns={'facings': 'scene_facings'},
                                inplace=True)
        pre_sort_mpis = pd.merge(mpis,
                                 scene_facings_df,
                                 how='left',
                                 on=['scene_fk', 'product_fk'])

        # Bay count per scene (highest bay number).
        bay_df = pre_sort_mpis.groupby('scene_fk',
                                       as_index=False)['bay_number'].max()
        bay_df.rename(columns={'bay_number': 'bay_count'}, inplace=True)
        mpis = pd.merge(pre_sort_mpis, bay_df, how='left', on='scene_fk')
        mpis['position'] = mpis.apply(self._calculate_horizontal_position,
                                      axis=1)
        mpis['result_type_fk'] = mpis['position'].apply(
            lambda x: Consts.CUSTOM_RESULT.get(x, 0))
        # Modal result per product (first mode on ties).
        mpis = mpis.groupby(['product_fk'],
                            as_index=False)['result_type_fk'].agg(
                                lambda x: pd.Series.mode(x).iat[0])

        for result in mpis.itertuples():
            custom_fk_result = result.result_type_fk

            # mode() can hand back an array; keep its first element.
            if type(custom_fk_result) == numpy.ndarray:
                custom_fk_result = result.result_type_fk[0]

            result_item = {
                'kpi_fk': row.kpi_fk,
                'numerator_id': result.product_fk,
                'numerator_result': 1,
                'denominator_id': self.store_id,
                'denominator_result': 1,
                'result': custom_fk_result,
                'score': 0
            }

            result_dict_list.append(result_item)
        return result_dict_list

    @staticmethod
    def _calculate_horizontal_position(row):
        bay_count = row.bay_count
        if bay_count == 1:
            return 'Center'
        factor = round(bay_count / float(3))
        if row.bay_number <= factor:
            return 'Left'
        elif row.bay_number > (bay_count - factor):
            return 'Right'
        return 'Center'

    @staticmethod
    def _calculate_vertical_position(row):
        """Look up the shelf-position label for this match row.

        Consts.shelf_map is keyed first by total shelf count and then by
        the shelf number counted from the bottom; both keys are strings.
        """
        position_map = Consts.shelf_map[str(row.shelf_count)]
        return position_map[str(row.shelf_number_from_bottom)]

    def calculate_facings_sos(self, row, df):
        """Build population/location filters from the template row and
        delegate the facings-SOS computation to _logic_for_sos.

        NOTE(review): the incoming ``df`` argument is ignored — the filter
        is always applied to self.scif; confirm this is intentional.
        """
        population = row['Dataset 2: JSON']
        data_filter = {'population': population}
        # Default the population to the current session when the template
        # does not provide an explicit include clause.
        if 'include' not in population.keys():
            population.update({'include': [{'session_id': self.session_fk}]})
        data_filter.update({'location': row['Location: JSON']})
        numerator_type = row['Config Params: JSON']['numerator_type']
        filtered_scif = ParseInputKPI.filter_df(data_filter, self.scif)
        return self._logic_for_sos(row, filtered_scif, numerator_type)

    def _logic_for_sos(self, row, df, numerator_type):
        """Compute facings share-of-shelf per unique value of numerator_type.

        Returns a list of result dicts (one per numerator value) ready to
        be appended to the results dataframe.

        NOTE(review): when 'include_stacking' is truthy the *ignore-stack*
        facings column is used, which is the opposite of the convention in
        _parse_json_filters_to_df — confirm this inversion is intentional.
        """
        config_json = row['Config Params: JSON']
        facing_type = 'facings'
        if 'include_stacking' in config_json and config_json['include_stacking']:
            facing_type = 'facings_ign_stack'

        results = []
        # The denominator does not change per numerator value, so it is
        # computed once for the whole filtered dataframe.
        denominator_result = df[facing_type].sum()
        for num_item in df[numerator_type].unique().tolist():
            if num_item:
                numerator_scif = df[df[numerator_type] == num_item]
            else:
                # Null numerator values are grouped and reported as 'None'.
                numerator_scif = df[df[numerator_type].isnull()]
                num_item = 'None'

            numerator_result = numerator_scif[facing_type].sum()
            sos_value = self.calculate_percentage_from_numerator_denominator(
                numerator_result, denominator_result)

            results.append({
                'kpi_fk': row.kpi_fk,
                'numerator_id': self.get_custom_entity_value(num_item),
                'numerator_result': numerator_result,
                'denominator_id': self.store_id,
                'denominator_result': denominator_result,
                'result': sos_value
            })
        return results

    def _get_calculation_function_by_kpi_type(self, kpi_type):
        """Map a template KPI type to its calculation method.

        Returns None for unrecognised KPI types, matching the original
        fall-through behaviour.
        """
        dispatch = {
            Consts.SOS: self.calculate_facings_sos,
            Consts.HORIZONTAL_SHELF_POSITION: self.calculate_horizontal_position,
            Consts.VERTICAL_SHELF_POSITION: self.calculate_vertical_position,
            Consts.BLOCKING: self.calculate_blocking,
            Consts.BLOCK_ADJ: self.calculate_blocking_adj,
            Consts.BLOCKING_ORIENTATION: self.calculate_blocking_orientation,
        }
        return dispatch.get(kpi_type)

    def _calculate_kpis_from_template(self, template_df):
        """Run every KPI row of the template and collect its results.

        Each row resolves to a calculation method by its KPI type; results
        (a dict or a list of dicts) are appended row-wise to
        self.results_df.  Failures are logged per-KPI so one broken row
        does not stop the run.
        """
        for _, row in template_df.iterrows():
            try:
                calc = self._get_calculation_function_by_kpi_type(
                    row[Consts.ACTUAL_TYPE])
                row = self.apply_json_parser(row)
                merged_scif_mpis = self._parse_json_filters_to_df(row)
                result_data = calc(row, merged_scif_mpis)
                if not result_data:
                    continue
                # Normalise a single result dict to a one-element list.
                if isinstance(result_data, dict):
                    result_data = [result_data]
                if isinstance(result_data, list):
                    for result in result_data:
                        self.results_df.loc[len(self.results_df),
                                            result.keys()] = result
            except Exception as e:
                Log.error('Unable to calculate {}: {}'.format(
                    row[Consts.KPI_NAME], e))

    def _parse_json_filters_to_df(self, row):
        """Apply every auxiliary JSON filter column of the template row to
        the merged scif/mpis dataframe and return the filtered frame.

        Columns named like '... JSON' (excluding 'Config Params' and
        'Dataset 2') each hold a filter; population-style filters (those
        carrying an include/exclude clause) are wrapped under a
        'population' key before being handed to ParseInputKPI.filter_df.
        When the config requests it, a FINAL_FACINGS column is added and
        stacked facings are dropped.
        """
        jsonv = row[(row.index.str.contains('JSON'))
                    & (~row.index.str.contains('Config Params')) &
                    (~row.index.str.contains('Dataset 2'))]
        filter_json = jsonv[~jsonv.isnull()]
        filtered_scif_mpis = self.merged_scif_mpis
        for each_json in filter_json:
            # BUGFIX: the original tested ``('include' or 'exclude') in
            # each_json`` which short-circuits to 'include' only, so
            # exclude-only filters were never wrapped as a population.
            if 'include' in each_json or 'exclude' in each_json:
                final_json = {'population': each_json}
            else:
                final_json = each_json
            filtered_scif_mpis = ParseInputKPI.filter_df(
                final_json, filtered_scif_mpis)
        if 'include_stacking' in row['Config Params: JSON'].keys():
            including_stacking = row['Config Params: JSON'][
                'include_stacking'][0]
            # Keep total facings when stacking is included, otherwise the
            # ignore-stacking facings; then restrict to the top layer.
            filtered_scif_mpis[Consts.FINAL_FACINGS] = \
                filtered_scif_mpis.facings if including_stacking == 'True' else filtered_scif_mpis.facings_ign_stack
            filtered_scif_mpis = filtered_scif_mpis[
                filtered_scif_mpis.stacking_layer == 1]
        return filtered_scif_mpis

    def apply_json_parser(self, row):
        """Parse every non-null '... JSON' cell of the template row.

        Returns the row with each JSON column replaced by its parsed
        dictionary; all other columns are carried over untouched.
        """
        json_mask = (row.index.str.contains('JSON')) & (row.notnull())
        parsed_jsons = row[json_mask].apply(self.parse_json_row)
        untouched = row[~row.index.isin(parsed_jsons.index)]
        return untouched.append(parsed_jsons)

    def parse_json_row(self, item):
        """Safely parse one improperly formatted JSON cell.

        Intended for use with ``apply`` over a template row: returns the
        parsed dictionary, or None when the cell is empty or cannot be
        parsed (a warning is logged in the latter case).
        """
        if not item:
            return None
        try:
            return self.prereq_parse_json_row(item)
        except Exception as e:
            Log.warning('{}: Unable to parse json for: {}'.format(e, item))
            return None

    def save_results_to_db(self):
        """Flush the accumulated results dataframe to the DB.

        kpi_name is internal-only and dropped; kpi_fk is renamed to the
        'fk' column the writer expects; NaN results/scores become 0, and
        any remaining NaNs are serialised to null via simplejson's
        ignore_nan before each record is written.
        """
        self.results_df.drop(columns=['kpi_name'], inplace=True)
        self.results_df.rename(columns={'kpi_fk': 'fk'}, inplace=True)
        for column in ('result', 'score'):
            self.results_df[column].fillna(0, inplace=True)
        for record in self.results_df.to_dict('records'):
            # Round-trip through simplejson to turn NaN into None.
            cleaned = simplejson.loads(
                simplejson.dumps(record, ignore_nan=True))
            self.common.write_to_db_result(**cleaned)

    @staticmethod
    def prereq_parse_json_row(item):
        '''
        primarly logic for formatting the value of the json
        '''

        container = dict()
        try:
            container = ast.literal_eval(item)
        except:
            json_str = ",".join(item)
            json_str_fixed = json_str.replace("'", '"')
            container = json.loads(json_str_fixed)

        return container

    @staticmethod
    def _get_numerator_and_denominator_type(config_param,
                                            context_relevant=False):
        numerator_type = config_param['numerator_type'][0]
        denominator_type = config_param['denominator_type'][0]
        if context_relevant:
            context_type = config_param['context_type'][0]
            return numerator_type, denominator_type, context_type
        return numerator_type, denominator_type

    @staticmethod
    def calculate_percentage_from_numerator_denominator(
            numerator_result, denominator_result):
        try:
            ratio = numerator_result / denominator_result
        except Exception as e:
            Log.error(e.message)
            ratio = 0
        if not isinstance(ratio, (float, int)):
            ratio = 0
        return round(ratio * 100, 2)

    def get_kpi_custom_entity_table(self):
        """Fetch the full static.custom_entity table.

        :return: DataFrame with pk, name and entity_type_fk columns.
        """
        query = "SELECT pk, name, entity_type_fk FROM static.custom_entity;"
        return pd.read_sql_query(query, self.rds_conn.db)

    def get_custom_entity_value(self, value):
        """Return the static.custom_entity pk whose name equals *value*.

        Logs an error and returns None when no such entity exists.
        """
        matches = self.custom_entity_table[
            self.custom_entity_table['name'] == value]
        try:
            return matches['pk'].iloc[0]
        except IndexError:
            Log.error('No custom entity found for: {}'.format(value))
            return None

    def commit_results(self):
        """Commit all queued results through the common results handler."""
        self.common.commit_results_data()
# Example #12
# 0
class SceneLayoutComplianceCalc(object):
    def __init__(self, scene_toolbox_obj):
        # Adopt the full state of the scene toolbox (data provider, scif,
        # store/scene info, targets, common, etc.) onto this calculator.
        self.__dict__.update(vars(scene_toolbox_obj))
        self.current_scene_fk = self.scene_info.iloc[0].scene_fk
        # Store banner name lives in additional_attribute_20 of the store.
        self.store_banner_name = (
            self.store_info.iloc[0].additional_attribute_20)
        self.template_name = self.templates.iloc[0].template_name
        self.custom_entity_data = self.get_relevant_custom_entity_data()
        # Enrich scene matches with product attributes for brand lookups.
        self.match_product_data = self.match_product_in_scene.merge(
            self.products, on='product_fk', how='left')
        self.block = Block(self.data_provider, self.output)

    def parse_include_config_from_setup(self, kpi_details):
        """Read the 'Include Empty' / 'Include Others' flags for this KPI
        from the setup template.

        :return: (include_empty, include_other) booleans; both default to
            False when the KPI has no row in the setup template.
        """
        include_empty = False
        include_other = False
        relev_setup_template = self.set_up_template[
            self.set_up_template['KPI Type'] == kpi_details.iloc[0]
            [KPI_TYPE_COL]]
        if relev_setup_template.empty:
            # BUGFIX: this "Missing KPI Config" message used to fire on the
            # NOT-empty branch, i.e. exactly when the config WAS present;
            # it belongs on the missing-config branch.
            Log.info("Missing KPI Config in setup template: "
                     "KPI {kpi} for session: {sess} and scene: {scene}".format(
                         kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                         sess=self.session_uid,
                         scene=self.current_scene_fk,
                     ))
            return include_empty, include_other

        empty_value = relev_setup_template['Include Empty'].iloc[0]
        if not pd.isnull(empty_value) and empty_value.lower().strip(
        ) == "include":
            include_empty = True

        others_value = relev_setup_template["Include Others"].iloc[0]
        if not pd.isnull(others_value) and others_value.lower().strip(
        ) == "include":
            include_other = True

        return include_empty, include_other

    def ensure_as_list(self, template_fks):
        """Wrap a scalar template fk in a list; pass lists through as-is."""
        if isinstance(template_fks, list):
            return template_fks
        return [template_fks]

    def check_if_all_kpis_available(self):
        """Verify every layout-compliance KPI exists in static.kpi_level_2.

        Logs a warning per missing KPI; returns True only when all five
        KPIs are present.
        """
        layout_compliance_kpis = [
            GSK_LAYOUT_COMPLIANCE_BLOCK, GSK_LAYOUT_COMPLIANCE_BRAND_FSOS,
            GSK_LAYOUT_COMPLIANCE_POSITION, GSK_LAYOUT_COMPLIANCE_SEQUENCE,
            GSK_LAYOUT_COMPLIANCE_SBRAND_FSOS
        ]
        missing_kpis = []
        for kpi_name in layout_compliance_kpis:
            if self.kpi_static_data[
                    self.kpi_static_data[KPI_TYPE_COL] == kpi_name].empty:
                missing_kpis.append(kpi_name)
                Log.warning(
                    "Error: KPI {} not found in static.kpi_level_2 table.".
                    format(kpi_name))

        return not missing_kpis

    def get_relevant_custom_entity_data(self):
        """Load static.custom_entity rows for every store-banner and
        super-brand pk referenced by the external targets.

        :return: DataFrame of matching custom_entity rows; empty DataFrame
            when the expected pk columns are absent from the targets or no
            pks are present at all.
        """
        Log.info(
            "Getting custom entity data for the present super brands and store banner..."
        )
        columns_to_check = ["store_banner_pk", "super_brand_pk"]
        missing_columns = [
            column for column in columns_to_check
            if column not in self.targets.columns
        ]
        for column in missing_columns:
            Log.error(
                "Error: {} not found in external targets".format(column))
        if missing_columns:
            return pd.DataFrame()

        custom_entity_pks = np.concatenate(
            (self.targets['store_banner_pk'].dropna().unique().astype('int'),
             self.targets['super_brand_pk'].dropna().unique().astype('int')))
        if custom_entity_pks.size == 0:
            # BUGFIX: formatting an empty tuple rendered "IN ()", which is
            # invalid SQL; short-circuit instead of querying.
            return pd.DataFrame()
        # BUGFIX: str(tuple([pk])) renders "(pk,)" whose trailing comma is
        # a MySQL syntax error for a single pk; build the IN-list
        # explicitly instead of relying on tuple repr.
        pk_list = ", ".join(str(pk) for pk in custom_entity_pks)
        query = """ select * from static.custom_entity where pk in ({custom_entity_pks});"""
        custom_entity_data = pd.read_sql_query(
            query.format(custom_entity_pks=pk_list), self.rds_conn.db)
        return custom_entity_data

    def calculate_all(self):
        """Run every GSK layout-compliance KPI for the current scene.

        Bails out early when the KPIs are missing from kpi_level_2 or when
        there are no external targets; each KPI calculation is isolated in
        its own try/except so one failure does not stop the rest.
        """
        if not self.check_if_all_kpis_available():
            Log.warning(
                'Unable to calculate GSK_LAYOUT_COMPLIANCE_KPIs: KPIs are not in kpi_level_2'
            )
            return

        if self.targets.empty:
            Log.warning(
                'Unable to calculate GSK_LAYOUT_COMPLIANCE_KPIs: external targets are empty'
            )
            return

        def active_kpi(kpi_type):
            # kpi_level_2 rows of this type that are not soft-deleted.
            return self.kpi_static_data[
                (self.kpi_static_data[KPI_TYPE_COL] == kpi_type)
                & (self.kpi_static_data['delete_time'].isnull())]

        # Same calculation order as before: brand fsos, block, sequence,
        # super-brand fsos, position.
        calculations = (
            (self.calculate_gsk_layout_compliance_brand_fsos,
             GSK_LAYOUT_COMPLIANCE_BRAND_FSOS),
            (self.calculate_gsk_layout_compliance_block,
             GSK_LAYOUT_COMPLIANCE_BLOCK),
            (self.calculate_gsk_layout_compliance_sequence,
             GSK_LAYOUT_COMPLIANCE_SEQUENCE),
            (self.calculate_gsk_layout_compliance_super_brand_fsos,
             GSK_LAYOUT_COMPLIANCE_SBRAND_FSOS),
            (self.calculate_gsk_layout_compliance_position,
             GSK_LAYOUT_COMPLIANCE_POSITION),
        )
        for calculate, kpi_type in calculations:
            try:
                calculate(kpi_details=active_kpi(kpi_type))
            except Exception as e:
                Log.error("Error : {}".format(e))

    def calculate_gsk_layout_compliance_block(self, kpi_details):
        """Check, per external target, that the super brand forms a block
        (cluster) within its sub category on the current scene.

        A target applies only when its template_fks contain the scene's
        template and its store banner matches this store. The result is the
        biggest block's facings as a percentage of total facings; score is
        1 when that percentage meets block_threshold_perc. One result row
        is written per applicable target (by scene), with numerator=store
        banner, denominator=super brand, context=sub category.
        """
        Log.info(
            "Calculating {kpi} for session: {sess} and scene: {scene}".format(
                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                sess=self.session_uid,
                scene=self.current_scene_fk,
            ))
        include_empty, include_other = self.parse_include_config_from_setup(
            kpi_details)
        block_targets = self.targets[self.targets['kpi_fk'] ==
                                     kpi_details['pk'].iloc[0]]
        # if no targets return
        if block_targets.empty:
            Log.warning('There is no target policy for calculating {}'.format(
                kpi_details.iloc[0][KPI_TYPE_COL]))
            return False
        else:
            for idx, each_target in block_targets.iterrows():
                # check for banner and template match in target
                target_banner_name = self.custom_entity_data[
                    self.custom_entity_data['pk'] ==
                    each_target.store_banner_pk].name.iloc[0]
                if self.templates.iloc[0].template_fk not in self.ensure_as_list(each_target.template_fks) or \
                        target_banner_name != self.store_banner_name:
                    Log.info(
                        """Session: {sess}; Scene:{scene}. Scene Type not matching [{k} not in {v}] 
                    or banner of current store -> {store_b} != target banner -> {targ_b}
                    target for calculating {kpi}.""".format(
                            sess=self.session_uid,
                            scene=self.current_scene_fk,
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            store_b=self.store_banner_name,
                            targ_b=target_banner_name,
                            k=self.templates.iloc[0].template_fk,
                            v=each_target.template_fks,
                        ))
                    continue
                else:
                    result = score = 0
                    total_facings_count = biggest_block_facings_count = 0
                    block_threshold_perc = each_target.block_threshold_perc
                    super_brand_pk = each_target.super_brand_pk
                    store_banner_pk = each_target.store_banner_pk
                    super_brand_custom_entity = self.custom_entity_data[
                        self.custom_entity_data['pk'] == super_brand_pk]
                    sub_category_pk = each_target.sub_category_fk
                    Log.info(
                        "Calculating brand blocked for super brand: {super_b} [super_id] & sub category: {scat}"
                        .format(
                            super_b=super_brand_custom_entity.name.iloc[0],
                            super_id=super_brand_pk,
                            scat=sub_category_pk,
                        ))
                    stacking_include = bool(int(each_target.stacking_include))
                    # able to pass sub cat and super brand[?] // or get the prods and pass
                    population_filters = {
                        'sub_category_fk': [float(sub_category_pk)],
                        'Super Brand':
                        [super_brand_custom_entity.name.iloc[0]]
                    }

                    # Empty/Other products may join a block when the setup
                    # template asks for them to be included.
                    allowed_products = defaultdict(list)
                    if include_empty:
                        allowed_products['product_type'].append('Empty')
                    if include_other:
                        allowed_products['product_type'].append("Other")
                    allowed_products = dict(allowed_products)

                    location_filters = {'scene_fk': [self.current_scene_fk]}
                    additional_filters = {
                        'minimum_facing_for_block': 1,
                        'minimum_block_ratio': 0,
                        'include_stacking': stacking_include,
                        'check_vertical_horizontal': True,
                        'allowed_products_filters': allowed_products
                    }
                    # Run the graph-based block detection over the filtered
                    # population on this scene.
                    block_res = self.block.network_x_block_together(
                        population_filters, location_filters,
                        additional_filters)
                    block_res.dropna(subset=['total_facings'], inplace=True)
                    block_res = block_res.query('is_block==True')
                    if block_res.empty:
                        Log.info(
                            "Fail: Cannot find brand blocked for super brand: {super_b} "
                            "[super_id] & sub category: {scat}. Save as a Fail."
                            .format(
                                super_b=super_brand_custom_entity.name.iloc[0],
                                super_id=super_brand_pk,
                                scat=sub_category_pk,
                            ))
                        # NOTE(review): despite "Save as a Fail" in the log,
                        # no result row is written here — the target is
                        # skipped entirely. Confirm this is intended.
                        continue
                    else:
                        Log.info(
                            "Found brand blocked for super brand: {super_b} "
                            "[super_id] & sub category: {scat}. Check and save."
                            .format(
                                super_b=super_brand_custom_entity.name.iloc[0],
                                super_id=super_brand_pk,
                                scat=sub_category_pk,
                            ))
                        # Score against the single largest block found.
                        biggest_cluster = block_res.sort_values(
                            by='block_facings', ascending=False).head(1)
                        biggest_block_facings_count = float(
                            biggest_cluster['block_facings'])
                        total_facings_count = float(
                            biggest_cluster['total_facings'])
                        if total_facings_count:
                            result = round((biggest_block_facings_count /
                                            total_facings_count) * 100, 2)
                        if result >= block_threshold_perc:
                            score = 1
                    self.common.write_to_db_result(
                        fk=kpi_details.iloc[0].pk,
                        numerator_id=store_banner_pk,
                        denominator_id=super_brand_pk,
                        context_id=sub_category_pk,
                        numerator_result=biggest_block_facings_count,
                        denominator_result=total_facings_count,
                        target=block_threshold_perc,
                        result=result,
                        score=score,
                        by_scene=True,
                    )

    def calculate_gsk_layout_compliance_sequence(self, kpi_details):
        """Check, per external target, that a brand sits at or below the
        shelf level of its predecessor brands in a defined brand sequence.

        For each applicable target (scene type + store banner must match),
        the brand passes (result=1) when none of its predecessor brands in
        the sequence are present, or when its minimum shelf number is >=
        the maximum shelf number of the nearest present predecessor. One
        result row is written per target (by scene), with numerator=store
        banner, denominator=the checked brand, context=sub category.
        """
        Log.info(
            "Calculating {kpi} for session: {sess} and scene: {scene}".format(
                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                sess=self.session_uid,
                scene=self.current_scene_fk,
            ))
        sequence_targets = self.targets[self.targets['kpi_fk'] ==
                                        kpi_details['pk'].iloc[0]]
        # if no targets return
        if sequence_targets.empty:
            Log.warning('There is no target policy for calculating {}'.format(
                kpi_details.iloc[0][KPI_TYPE_COL]))
            return False
        else:
            for idx, each_target in sequence_targets.iterrows():
                # check for template and banner match in target
                target_banner_name = self.custom_entity_data[
                    self.custom_entity_data['pk'] ==
                    each_target.store_banner_pk].name.iloc[0]
                if self.templates.iloc[0].template_fk not in self.ensure_as_list(each_target.template_fks) or \
                        target_banner_name != self.store_banner_name:
                    Log.info(
                        """Session: {sess}; Scene:{scene}. Scene Type not matching [{k} not in {v}] 
                                or banner of current store -> {store_b} != target banner -> {targ_b}
                             target for calculating {kpi}.""".format(
                            sess=self.session_uid,
                            scene=self.current_scene_fk,
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            store_b=self.store_banner_name,
                            targ_b=target_banner_name,
                            k=self.templates.iloc[0].template_fk,
                            v=each_target.template_fks,
                        ))
                    continue
                store_banner_pk = each_target.store_banner_pk
                brand_pk_to_check = each_target.brand_pk
                sequence_brand_pks = each_target.sequence_brand_pks
                condition = each_target.condition
                sub_category_pk = each_target.sub_category_fk
                stacking_include = bool(int(each_target.stacking_include))
                stack_filtered_mpis = self.match_product_data
                Log.info(
                    "Checking if brand: {br} is present {condition} as in sequence : {seq}."
                    .format(br=brand_pk_to_check,
                            condition=condition,
                            seq=sequence_brand_pks))
                # The checked brand must itself be part of the sequence;
                # otherwise the target is mis-configured and skipped.
                if brand_pk_to_check not in sequence_brand_pks:
                    Log.error(
                        """ KPI:{kpi}. Session: {sess}; Scene:{scene}. brand to check {brand} not in list {br_lst}."""
                        .format(sess=self.session_uid,
                                scene=self.current_scene_fk,
                                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                                brand=brand_pk_to_check,
                                br_lst=sequence_brand_pks))
                    continue
                if not stacking_include:
                    # consider only stacking layer 1 products
                    stack_filtered_mpis = self.match_product_data[
                        self.match_product_data['stacking_layer'] == 1]
                # The brand must be physically present in the sub category
                # on this scene for the sequence check to make sense.
                interested_brand_prod_data = stack_filtered_mpis[
                    (stack_filtered_mpis['brand_fk'] == brand_pk_to_check)
                    & (stack_filtered_mpis['sub_category_fk'] ==
                       sub_category_pk)]
                if interested_brand_prod_data.empty:
                    Log.error(
                        """ KPI:{kpi}. Session: {sess}; Scene:{scene}. brand to check {brand} is not present."""
                        .format(
                            sess=self.session_uid,
                            scene=self.current_scene_fk,
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            brand=brand_pk_to_check,
                        ))
                    continue
                # Check only in predecessor brands
                predecessor_brands = sequence_brand_pks[:sequence_brand_pks.
                                                        index(brand_pk_to_check
                                                              )]
                predecessor_brand_shelf_sorted = stack_filtered_mpis[
                    (stack_filtered_mpis['brand_fk'].isin(predecessor_brands))
                    & (stack_filtered_mpis['sub_category_fk'] ==
                       sub_category_pk)][['brand_fk', 'shelf_number'
                                          ]].sort_values(['shelf_number'])
                result = 0  # initialize as fail
                if predecessor_brand_shelf_sorted.empty:
                    # predecessor brands are not present; PASS
                    Log.info(
                        "No Predecessor Brands {} are present. PASS".format(
                            predecessor_brands))
                    result = 1
                else:
                    Log.info(
                        "Starting to checking> brand: {br} is present below or same level as in sequence : {seq}."
                        .format(br=brand_pk_to_check, seq=sequence_brand_pks))
                    min_shelf_of_brand = stack_filtered_mpis[
                        (stack_filtered_mpis['brand_fk'] == brand_pk_to_check)
                        & (stack_filtered_mpis['sub_category_fk'] ==
                           sub_category_pk)]['shelf_number'].min()
                    # Walk the sequence backwards from the checked brand
                    # until a predecessor that is present is found; that
                    # nearest present predecessor decides pass/fail.
                    idx_brand_start_check = sequence_brand_pks.index(
                        brand_pk_to_check) - 1
                    while idx_brand_start_check >= 0:
                        predecessor_brand_to_check = sequence_brand_pks[
                            idx_brand_start_check]
                        Log.info(
                            "Checking if predecessor brand: {} is present.".
                            format(predecessor_brand_to_check))
                        if predecessor_brand_shelf_sorted[
                                predecessor_brand_shelf_sorted['brand_fk'] ==
                                predecessor_brand_to_check].empty:
                            # This is the logic to reduce index and continue while loop
                            Log.info(
                                "Brand {} is not present. Check next brand in predecessor"
                                .format(predecessor_brand_to_check))
                            result = 1  # PASS unless proved otherwise.
                            idx_brand_start_check -= 1
                            continue
                        # check for `predecessor_brand_to_check` present below the level of every brand_pk_to_check
                        Log.info(
                            "Check if Brand to check {br_c} is present below the level of brand in sequence {seq}"
                            .format(br_c=brand_pk_to_check,
                                    seq=predecessor_brand_to_check))
                        max_shelf_of_predecessor_brand = predecessor_brand_shelf_sorted[
                            predecessor_brand_shelf_sorted['brand_fk'] ==
                            predecessor_brand_to_check]['shelf_number'].max()
                        # NOTE(review): per the log text, a larger
                        # shelf_number appears to mean lower on the fixture,
                        # so ">=" reads as at-or-below — confirm convention.
                        if min_shelf_of_brand >= max_shelf_of_predecessor_brand:
                            Log.info(
                                "PASS: brand: {brand} is below or same level as brand: {predecessor}."
                                .format(
                                    brand=brand_pk_to_check,
                                    predecessor=predecessor_brand_to_check))
                            result = 1
                            break
                        else:
                            Log.info(
                                "FAIL: brand: {brand} is above level of brand: {predecessor}."
                                .format(
                                    brand=brand_pk_to_check,
                                    predecessor=predecessor_brand_to_check))
                            result = 0
                            break
                        # end of check
                self.common.write_to_db_result(
                    fk=kpi_details.iloc[0].pk,
                    numerator_id=store_banner_pk,
                    denominator_id=brand_pk_to_check,
                    context_id=sub_category_pk,
                    result=result,
                    numerator_result=result,
                    denominator_result=1,
                    score=result,
                    target=1,
                    by_scene=True,
                )

    def calculate_gsk_layout_compliance_super_brand_fsos(self, kpi_details):
        """Calculate the super-brand facings share-of-shelf (FSOS) KPI.

        For every external target whose scene template and store banner match
        the current scene, the result is the percentage of the sub category's
        facings that belong to the target super brand; the score is 1 when
        that percentage meets the target threshold. One row per matching
        target is written to the DB (scene level).

        :param kpi_details: one-row DataFrame holding the KPI static data
            (``pk`` and the kpi-type column).
        :return: False when there is no target policy for this KPI,
            otherwise None.
        """
        Log.info(
            "Calculating {kpi} for session: {sess} and scene: {scene}".format(
                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                sess=self.session_uid,
                scene=self.current_scene_fk,
            ))
        brand_fsos_targets = self.targets[self.targets['kpi_fk'] ==
                                          kpi_details['pk'].iloc[0]]
        # if no targets return
        if brand_fsos_targets.empty:
            Log.warning('There is no target policy for calculating {}'.format(
                kpi_details.iloc[0][KPI_TYPE_COL]))
            return False
        # scif enriched with product attributes (source of the 'Super Brand'
        # column); loop-invariant, so merge once instead of per target.
        scif_with_products = self.scif.merge(self.products,
                                             on='product_fk',
                                             how='left',
                                             suffixes=('', '_prod'))
        for idx, each_target in brand_fsos_targets.iterrows():
            # check for template and banner match in target
            target_banner_name = self.custom_entity_data[
                self.custom_entity_data['pk'] ==
                each_target.store_banner_pk].name.iloc[0]
            if self.templates.iloc[0].template_fk not in self.ensure_as_list(each_target.template_fks) or \
                    target_banner_name != self.store_banner_name:
                Log.info(
                    """Session: {sess}; Scene:{scene}. Scene Type not matching [{k} not in {v}] 
                                or banner of current store -> {store_b} != target banner -> {targ_b}
                             target for calculating {kpi}.""".format(
                        sess=self.session_uid,
                        scene=self.current_scene_fk,
                        kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                        store_b=self.store_banner_name,
                        targ_b=target_banner_name,
                        k=self.templates.iloc[0].template_fk,
                        v=each_target.template_fks,
                    ))
                continue
            numerator = result = score = 0
            store_banner_pk = each_target.store_banner_pk
            super_brand_pk = each_target.super_brand_pk
            sub_category_pk = each_target.sub_category_fk
            stacking_include = bool(int(each_target.stacking_include))
            facings_field = 'facings'
            if not stacking_include:
                # consider only stacking layer 1 products
                facings_field = 'facings_ign_stack'
            super_brand_custom_entity = self.custom_entity_data[
                self.custom_entity_data['pk'] == super_brand_pk]
            if super_brand_custom_entity.empty:
                # should never happen
                Log.error(
                    'Super Brand not found. Custom Entity Not loaded with a recent template update.'
                )
                continue
            # denominator: every facing of the sub category in the scene
            denominator = scif_with_products[(
                scif_with_products['sub_category_fk'] == sub_category_pk
            )][facings_field].sum()
            if denominator:
                super_brand_name = super_brand_custom_entity.name.iloc[0]
                # numerator: the super brand's facings within the sub category
                numerator = scif_with_products[
                    (scif_with_products['Super Brand'] == super_brand_name)
                    & (scif_with_products['sub_category_fk'] ==
                       sub_category_pk)][facings_field].sum()
                result = round((numerator / float(denominator)) * 100, 2)
            else:
                # NOTE(review): the message says "Save zero" but no result
                # row is written in this branch (original behavior kept).
                Log.info(
                    "{kpi}: No products with sub cat: {scat} found. Save zero."
                    .format(kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            scat=sub_category_pk))
                continue
            if result >= each_target.threshold:
                score = 1
            self.common.write_to_db_result(
                fk=kpi_details.iloc[0].pk,
                numerator_id=store_banner_pk,
                denominator_id=super_brand_pk,
                context_id=sub_category_pk,
                numerator_result=numerator,
                denominator_result=denominator,
                result=result,
                score=score,
                target=each_target.threshold,
                by_scene=True,
            )

    def calculate_gsk_layout_compliance_position(self, kpi_details):
        """Calculate the brand shelf-position compliance KPI for this scene.

        For each external target matching the scene template and store
        banner: per bay, the shelf configuration covering the bay's highest
        shelf number is selected and the target brand/sub-category facings
        on the allowed shelves are counted. The result is that count as a
        percentage of all brand/sub-category facings in the scene; score is
        1 when the percentage meets ``target_perc``.

        NOTE: relies on Python 2 built-ins (list-returning ``map`` and
        ``xrange``) inside ``_get_shelf_range``.

        :param kpi_details: one-row DataFrame holding the KPI static data.
        :return: False when no target or shelf policy applies, else None.
        """
        Log.info(
            "Calculating {kpi} for session: {sess} and scene: {scene}".format(
                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                sess=self.session_uid,
                scene=self.current_scene_fk,
            ))
        position_targets = self.targets[self.targets['kpi_fk'] ==
                                        kpi_details['pk'].iloc[0]]

        def _get_shelf_range(sh):
            """Translate a shelf-config key into its shelf-number range.

            Input  => string like '1_2_shelf'
            Output => xrange(1, 3)  (upper bound made inclusive)
            """
            split_sh = sh.split('_')[:2]
            int_sh = map(int, split_sh)
            int_sh[1] = int_sh[1] + 1
            return xrange(*int_sh)

        # if no targets return
        if position_targets.empty:
            Log.warning('There is no target policy for calculating {}'.format(
                kpi_details.iloc[0][KPI_TYPE_COL]))
            return False
        else:
            # target_shelf_config_keys => '1_5_shelf, 6_7_shelf, above_12_shelf etc'
            for idx, each_target in position_targets.iterrows():
                result = score = 0
                # the target columns whose names end with '_shelf' hold the
                # allowed shelf numbers for each shelf-count band
                target_shelf_config_keys = each_target[each_target.keys().map(
                    lambda x: x.endswith('_shelf'))]
                store_banner_pk = each_target.store_banner_pk
                sub_category_pk = each_target.sub_category_fk
                brand_pk = each_target.brand_pk
                stacking_include = bool(int(each_target.stacking_include))
                # numerator - Cumulative no of Facings "of the brand and sub category" available at desired shelf
                numerator = 0  # number of facings available in desired shelf
                # check for banner and template match in target
                target_banner_name = self.custom_entity_data[
                    self.custom_entity_data['pk'] ==
                    each_target.store_banner_pk].name.iloc[0]
                if self.templates.iloc[0].template_fk not in self.ensure_as_list(each_target.template_fks) or \
                        target_banner_name != self.store_banner_name:
                    Log.info(
                        """Session: {sess}; Scene:{scene}. Scene Type not matching [{k} not in {v}] 
                                                or banner of current store -> {store_b} != target banner -> {targ_b}
                                             target for calculating {kpi}.""".
                        format(
                            sess=self.session_uid,
                            scene=self.current_scene_fk,
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            store_b=self.store_banner_name,
                            targ_b=target_banner_name,
                            k=self.templates.iloc[0].template_fk,
                            v=each_target.template_fks,
                        ))
                    continue
                stack_filtered_mpis = self.match_product_data
                if not stacking_include:
                    # consider only stacking layer 1 products
                    stack_filtered_mpis = self.match_product_data[
                        self.match_product_data['stacking_layer'] == 1]
                # denominator - total number of facings "of the brand and sub category" available in whole scene
                denominator = len(stack_filtered_mpis[
                    (stack_filtered_mpis['brand_fk'] == brand_pk)
                    & (stack_filtered_mpis['sub_category_fk'] ==
                       sub_category_pk)])
                if denominator:
                    for bay_number, grouped_bay_data in stack_filtered_mpis.groupby(
                            'bay_number'):
                        Log.info("Running {kpi} for bay {bay}".format(
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            bay=bay_number))
                        # min_shelf = self.match_product_data['shelf_number'].unique().min()
                        max_shelf = grouped_bay_data['shelf_number'].unique(
                        ).max()
                        # pick the shelf configuration whose band covers this
                        # bay's highest shelf number
                        shelf_config_key = None
                        for each_shelf_conf in target_shelf_config_keys.keys():
                            if each_shelf_conf.startswith('above'):
                                above_shelf = int(
                                    each_shelf_conf.split('_')[1])
                                if max_shelf > above_shelf:
                                    # if strictly greater ~ above_12_shelf
                                    # satisfies when shelf is 13 and above
                                    shelf_config_key = each_shelf_conf
                                    break
                            elif max_shelf in _get_shelf_range(
                                    each_shelf_conf):
                                shelf_config_key = each_shelf_conf
                                break
                        if not shelf_config_key:
                            # no shelf band covers this bay: aborts the whole
                            # KPI -- nothing is written for this target
                            Log.error(
                                """ Session: {sess}; Scene:{scene}. 
                                      There is no shelf policy for calculating {kpi}."""
                                .format(sess=self.session_uid,
                                        scene=self.current_scene_fk,
                                        kpi=kpi_details.iloc[0][KPI_TYPE_COL]))
                            return False

                        # find the brand products in the shelf_config_key
                        interested_shelves = each_target[shelf_config_key]
                        Log.info(
                            "Using {shelf_config} => shelves to check {shelves} for bay: {bay}"
                            .format(shelf_config=shelf_config_key,
                                    shelves=interested_shelves,
                                    bay=bay_number))
                        per_bay_numerator = len(grouped_bay_data[
                            (grouped_bay_data['shelf_number'].isin(
                                interested_shelves))
                            & (grouped_bay_data['brand_fk'] == brand_pk) &
                            (grouped_bay_data['sub_category_fk']
                             == sub_category_pk)])
                        numerator = numerator + per_bay_numerator
                    if denominator:
                        result = round((numerator / float(denominator)) * 100,
                                       2)
                    if result >= each_target.target_perc:
                        score = 1
                else:
                    # brand/sub-category absent from the scene: skip without
                    # writing a row (despite the "Save zero" wording)
                    Log.info(
                        "{kpi}: No products with sub cat: {scat} brand: {brand} found. Save zero."
                        .format(
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            scat=sub_category_pk,
                            brand=brand_pk,
                        ))
                    continue

                self.common.write_to_db_result(
                    fk=kpi_details.iloc[0].pk,
                    numerator_id=store_banner_pk,
                    denominator_id=brand_pk,
                    context_id=sub_category_pk,
                    numerator_result=numerator,
                    denominator_result=denominator,
                    result=result,
                    score=score,
                    target=each_target.target_perc,
                    by_scene=True,
                )

    def calculate_gsk_layout_compliance_brand_fsos(self, kpi_details):
        """Calculate the brand facings share within its super brand (FSOS).

        For each external target matching the scene template and store
        banner, the result is the brand's facings as a percentage of the
        super brand's facings in the same sub category; score is 1 when the
        percentage meets ``threshold``. One row per matching target is
        written to the DB (scene level).

        :param kpi_details: one-row DataFrame holding the KPI static data.
        :return: False when there is no target policy, otherwise None.
        """
        Log.info(
            "Calculating {kpi} for session: {sess} and scene: {scene}".format(
                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                sess=self.session_uid,
                scene=self.current_scene_fk,
            ))
        brand_fsos_targets = self.targets[self.targets['kpi_fk'] ==
                                          kpi_details['pk'].iloc[0]]
        # if no targets return
        if brand_fsos_targets.empty:
            Log.warning('There is no target policy for calculating {}'.format(
                kpi_details.iloc[0][KPI_TYPE_COL]))
            return False
        else:
            for idx, each_target in brand_fsos_targets.iterrows():
                # check for banner and template match in target
                target_banner_name = self.custom_entity_data[
                    self.custom_entity_data['pk'] ==
                    each_target.store_banner_pk].name.iloc[0]
                if self.templates.iloc[0].template_fk not in self.ensure_as_list(each_target.template_fks) or \
                        target_banner_name != self.store_banner_name:
                    Log.info(
                        """Session: {sess}; Scene:{scene}. Scene Type not matching [{k} not in {v}] 
                                                or banner of current store -> {store_b} != target banner -> {targ_b}
                                             target for calculating {kpi}.""".
                        format(
                            sess=self.session_uid,
                            scene=self.current_scene_fk,
                            kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                            store_b=self.store_banner_name,
                            targ_b=target_banner_name,
                            k=self.templates.iloc[0].template_fk,
                            v=each_target.template_fks,
                        ))
                    continue
                numerator = result = score = 0
                store_banner_pk = each_target.store_banner_pk
                super_brand_pk = each_target.super_brand_pk
                brand_pk = each_target.brand_pk
                sub_category_pk = each_target.sub_category_fk
                stacking_include = bool(int(each_target.stacking_include))
                facings_field = 'facings'
                if not stacking_include:
                    # consider only stacking layer 1 products
                    facings_field = 'facings_ign_stack'
                super_brand_custom_entity = self.custom_entity_data[
                    self.custom_entity_data['pk'] == super_brand_pk]
                if super_brand_custom_entity.empty:
                    # should never happen
                    Log.error(
                        'Super Brand not found. Custom Entity Not loaded with a recent template update.'
                    )
                    continue
                super_brand_name = super_brand_custom_entity.name.iloc[0]
                # scif joined with product attributes for the 'Super Brand'
                # column used in the denominator filter
                scif_with_products = self.scif.merge(self.products,
                                                     on='product_fk',
                                                     how='left',
                                                     suffixes=('', '_prod'))
                # denominator: the super brand's facings in the sub category
                denominator = scif_with_products[
                    (scif_with_products['Super Brand'] == super_brand_name)
                    & (scif_with_products['sub_category_fk'] == sub_category_pk
                       )][facings_field].sum()
                # numerator: the specific brand's facings in the sub category
                numerator = self.scif[(self.scif['brand_fk'] == brand_pk) & (
                    self.scif['sub_category_fk'] == sub_category_pk
                )][facings_field].sum()
                if denominator:
                    result = round((numerator / float(denominator)) * 100, 2)
                else:
                    # when the super brand is absent but the brand itself has
                    # facings, fall through and write a zero-result row;
                    # if neither is present, skip without writing
                    if numerator == 0:
                        Log.info(
                            "{kpi}: No products with sub cat: {scat} brand: {brand} found. Save zero."
                            .format(
                                kpi=kpi_details.iloc[0][KPI_TYPE_COL],
                                scat=sub_category_pk,
                                brand=brand_pk,
                            ))
                        continue

                if result >= each_target.threshold:
                    score = 1
                self.common.write_to_db_result(
                    fk=kpi_details.iloc[0].pk,
                    numerator_id=store_banner_pk,
                    denominator_id=brand_pk,
                    context_id=sub_category_pk,
                    numerator_result=numerator,
                    denominator_result=denominator,
                    result=result,
                    score=score,
                    target=each_target.threshold,
                    by_scene=True,
                )
Example #13
0
class ToolBox(GlobalSessionToolBox):
    """Session-level toolbox running the custom (calculation stage 3) KPIs:
    presence, adjacency, blocking, shelf location and brand facings.

    Each KPI is configured through KPI external targets and is executed once
    per scene type via the ``run_for_every_scene_type`` decorator.
    """

    def __init__(self, data_provider, output):
        GlobalSessionToolBox.__init__(self, data_provider, output)
        self.adjacency = Adjancency(data_provider)
        self.block = Block(data_provider)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.ps_data_provider = PsDataProvider(data_provider)
        # lazy cache backing the scene_types property
        self._scene_types = None
        self.external_targets = self.ps_data_provider.get_kpi_external_targets(
        )

    @property
    def scene_types(self):
        """Unique template fks present in the session scif (computed once)."""
        if not self._scene_types:
            self._scene_types = self.scif['template_fk'].unique().tolist()
        return self._scene_types

    def main_calculation(self):
        """Run every stage-3 KPI whose validity window covers the visit date."""
        custom_kpis = self.kpi_static_data[
            (self.kpi_static_data['kpi_calculation_stage_fk'] == 3)
            & (self.kpi_static_data['valid_from'] <= self.visit_date) &
            ((self.kpi_static_data['valid_until']).isnull() |
             (self.kpi_static_data['valid_until'] >= self.visit_date))]

        for kpi in custom_kpis.itertuples():
            kpi_function = self.get_kpi_function_by_family_fk(
                kpi.kpi_family_fk)
            # skip KPI families with no registered calculation
            # (previously raised TypeError on an unknown family fk)
            if kpi_function is not None:
                kpi_function(kpi.pk)
        return

    @run_for_every_scene_type
    def calculate_presence(self, kpi_fk, template_fk=None):
        """Binary KPI: 1 when any product matching the target's numerator
        filter appears in the given scene type, else 0."""
        config = self.get_external_target_data_by_kpi_fk(kpi_fk)
        if config.empty or (template_fk is None):
            return

        result_df = self.scif[
            self.scif[config.numerator_param].isin(config.numerator_value)
            & (self.scif['template_fk'] == template_fk)]
        numerator_id = self.get_brand_fk_from_brand_name(
            config.numerator_value[0])
        result = 0 if result_df.empty else 1
        self.write_to_db(kpi_fk,
                         numerator_id=numerator_id,
                         denominator_id=template_fk,
                         result=result)
        return

    @run_for_every_scene_type
    def calculate_shelf_location(self, kpi_fk, template_fk=None):
        """Binary KPI: 1 when target products are found in the configured
        shelf band ('top' = shelf numbers up to a third of the average
        per-bay maximum, 'middle_bottom' = the rest), else 0."""
        config = self.get_external_target_data_by_kpi_fk(kpi_fk)
        # Bug fix: guard BEFORE reading config attributes — previously
        # config.shelf_location was accessed ahead of the empty check,
        # defeating the guard (cf. calculate_presence).
        if config.empty or (template_fk is None):
            return
        shelf_location = config.shelf_location

        relevant_scene_fks = self.scif[
            self.scif['template_fk'] ==
            template_fk]['scene_fk'].unique().tolist()
        relevant_matches = self.matches[self.matches['scene_fk'].isin(
            relevant_scene_fks)]

        # average of the per-bay maximum shelf number ~ typical bay height
        shelves = relevant_matches.groupby(
            'bay_number',
            as_index=False)['shelf_number'].max()['shelf_number'].mean()

        products_df = self.scif[
            (self.scif[config.numerator_param].isin(config.numerator_value))
            & (self.scif['template_fk'] == template_fk)]

        products_list = products_df['product_fk'].unique().tolist()

        if shelf_location == 'top':
            shelf_matches = relevant_matches[
                (relevant_matches['product_fk'].isin(products_list))
                & (relevant_matches['shelf_number'] <= (shelves / 3))]
        elif shelf_location == 'middle_bottom':
            shelf_matches = relevant_matches[
                (relevant_matches['product_fk'].isin(products_list))
                & (relevant_matches['shelf_number'] > (shelves / 3))]
        else:
            # unknown shelf_location value -> no matches -> result 0
            shelf_matches = pd.DataFrame()

        numerator_id = self.get_brand_fk_from_brand_name(
            config.numerator_value[0])
        result = 0 if shelf_matches.empty else 1
        self.write_to_db(kpi_fk,
                         numerator_id=numerator_id,
                         denominator_id=template_fk,
                         result=result)

    @run_for_every_scene_type
    def calculate_blocking(self, kpi_fk, template_fk=None):
        """Binary KPI: 1 when the target products form a block in the scene
        type (optionally with the configured orientation), else 0."""
        config = self.get_external_target_data_by_kpi_fk(kpi_fk)
        if config.empty or (template_fk is None):
            return
        location = {'template_fk': template_fk}
        blocks = self.block.network_x_block_together(
            {config.numerator_param: config.numerator_value},
            location,
            additional={'check_vertical_horizontal': True})
        if not blocks.empty:
            blocks = blocks[blocks['is_block']]
            orientation = config.orientation
            # orientation is optional in the target; NaN means "any"
            if orientation and orientation is not pd.np.nan:
                blocks = blocks[blocks['orientation'] == orientation]

        numerator_id = self.get_brand_fk_from_brand_name(
            config.numerator_value[0])
        result = 0 if blocks.empty else 1
        self.write_to_db(kpi_fk,
                         numerator_id=numerator_id,
                         denominator_id=template_fk,
                         result=result)

    @run_for_every_scene_type
    def calculate_adjacency(self, kpi_fk, template_fk=None):
        """Binary KPI: 1 when anchor and tested product groups are adjacent
        in the scene type, else 0."""
        config = self.get_external_target_data_by_kpi_fk(kpi_fk)
        if config.empty or (template_fk is None):
            return
        location = {'template_fk': template_fk}
        anchor_pks = \
            self.scif[self.scif[config.anchor_param].isin(config.anchor_value)]['product_fk'].unique().tolist()
        tested_pks = \
            self.scif[self.scif[config.tested_param].isin(config.tested_value)]['product_fk'].unique().tolist()
        # handle populations that are not mutually exclusive
        tested_pks = [x for x in tested_pks if x not in anchor_pks]

        population = {
            'anchor_products': {
                'product_fk': anchor_pks
            },
            'tested_products': {
                'product_fk': tested_pks
            }
        }

        # this function is only needed until the adjacency function is enhanced to not crash when an empty population
        # is provided
        if self.check_population_exists(population, template_fk):
            try:
                adj_df = self.adjacency.network_x_adjacency_calculation(
                    population, location, {
                        'minimum_facings_adjacent': 1,
                        'minimum_block_ratio': 0,
                        'minimum_facing_for_block': 1,
                        'include_stacking': True
                    })
            except AttributeError:
                Log.info(
                    "Error calculating adjacency for kpi_fk {} template_fk {}".
                    format(kpi_fk, template_fk))
                return
            if adj_df.empty:
                result = 0
            else:
                result = 1 if not adj_df[adj_df['is_adj']].empty else 0
        else:
            result = 0
        numerator_id = self.get_brand_fk_from_brand_name(
            config.anchor_value[0])
        self.write_to_db(kpi_fk,
                         numerator_id=numerator_id,
                         denominator_id=template_fk,
                         result=result)
        return

    @run_for_every_scene_type
    def calculate_brand_facings(self, kpi_fk, template_fk=None):
        """Write, per brand within each 'Customer Category', the brand's
        facings share of the category's total facings in the scene type."""
        relevant_scif = self.scif[self.scif['template_fk'] == template_fk]

        # total facings per customer category (denominator)
        denominator_results = relevant_scif.groupby(
            'Customer Category', as_index=False)[[
                'facings'
            ]].sum().rename(columns={'facings': 'denominator_result'})

        # facings per brand within each customer category (numerator)
        numerator_result = relevant_scif.groupby(
            ['brand_fk', 'Customer Category'], as_index=False)[[
                'facings'
            ]].sum().rename(columns={'facings': 'numerator_result'})

        results = numerator_result.merge(denominator_results)
        results['result'] = (results['numerator_result'] /
                             results['denominator_result'])
        results['result'].fillna(0, inplace=True)

        for index, row in results.iterrows():
            relevant_perfetti_product_fk = self.get_product_fk_from_perfetti_category(
                row['Customer Category'])
            self.write_to_db(fk=kpi_fk,
                             numerator_id=row['brand_fk'],
                             denominator_id=relevant_perfetti_product_fk,
                             numerator_result=row['numerator_result'],
                             denominator_result=row['denominator_result'],
                             context_id=template_fk,
                             result=row['result'],
                             score=row['result'])

    def get_kpi_function_by_family_fk(self, kpi_family_fk):
        """Map a kpi_family_fk to its calculation method; None if unknown."""
        if kpi_family_fk == 19:
            return self.calculate_presence
        elif kpi_family_fk == 20:
            return self.calculate_adjacency
        elif kpi_family_fk == 21:
            return self.calculate_blocking
        elif kpi_family_fk == 22:
            return self.calculate_shelf_location
        elif kpi_family_fk == 23:
            return self.calculate_brand_facings

    def get_external_target_data_by_kpi_fk(self, kpi_fk):
        """Return the first external-target row for *kpi_fk* as a Series.

        NOTE(review): raises IndexError when the KPI has no target row —
        callers assume at least one target exists; confirm upstream data.
        """
        return self.external_targets[self.external_targets['kpi_fk'] ==
                                     kpi_fk].iloc[0]

    def get_brand_fk_from_brand_name(self, brand_name):
        """Return the brand_fk for *brand_name* from the products master."""
        return self.all_products[self.all_products['brand_name'] ==
                                 brand_name]['brand_fk'].iloc[0]

    def get_product_fk_from_perfetti_category(self, perfetti_category):
        """Return a product_fk for the given 'Customer Category' value,
        or None when no product carries that category."""
        try:
            return self.all_products[self.all_products['Customer Category'] ==
                                     perfetti_category]['product_fk'].iloc[0]
        except IndexError:
            return None

    def check_population_exists(self, population, template_fk):
        """Return True when both anchor and tested products appear in the
        scene type's scif (the adjacency helper crashes on empty input)."""
        relevant_scif = self.scif[self.scif['template_fk'] == template_fk]
        anchor_scif = relevant_scif[relevant_scif['product_fk'].isin(
            population['anchor_products']['product_fk'])]
        tested_scif = relevant_scif[relevant_scif['product_fk'].isin(
            population['tested_products']['product_fk'])]
        return not (anchor_scif.empty or tested_scif.empty)
Example #14
0
class ToolBox(GlobalSessionToolBox):
    def __init__(self, data_provider, output):
        """Set up session state: template sheets, the merged scif/mpis frame
        and the results accumulator consumed by main_calculation."""
        GlobalSessionToolBox.__init__(self, data_provider, output)
        self.templates = {}
        self.parse_template()
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        # Right-merge keeps every mpis row and attaches its scif facts.
        self.match_scene_item_facts = pd.merge(
            self.scif,
            self.match_product_in_scene,
            how='right',
            left_on=['item_id', 'scene_id'],
            right_on=['product_fk', 'scene_fk'])
        self.block = Block(data_provider)
        self.own_manufacturer_fk = int(
            self.data_provider.own_manufacturer.param_value.values[0])
        result_columns = ['kpi_name', 'kpi_fk', 'numerator_id',
                          'numerator_result', 'context_id', 'denominator_id',
                          'denominator_result', 'result', 'score']
        self.results_df = pd.DataFrame(columns=result_columns)

    def parse_template(self):
        """Load every configured Excel sheet into self.templates, keyed by
        sheet name."""
        for sheet_name in SHEETS:
            self.templates[sheet_name] = pd.read_excel(TEMPLATE_PATH,
                                                       sheet_name=sheet_name)
        # Shelf-position sheets carry their row labels in the first column.
        for position_sheet in SHELF_POSITION_SHEET:
            self.templates[position_sheet] = pd.read_excel(
                SHELF_POSITION_PATH,
                sheet_name=position_sheet,
                index_col=0)

    def main_calculation(self):
        """Run every foundation KPI listed in the KPIS template sheet, then
        persist the accumulated results."""
        kpi_template = self.templates[Consts.KPIS]
        foundation_types = [
            Consts.SOS, Consts.DISTRIBUTION, Consts.ADJACENCY_BRAND_WITHIN_BAY,
            Consts.ADJACENCY_CATEGORY_WITHIN_BAY, Consts.SHELF_POSITION,
            Consts.LEAD_ANCHOR_BY_BAY
        ]
        foundation_template = kpi_template[
            kpi_template[Consts.KPI_TYPE].isin(foundation_types)]
        self._calculate_kpis_from_template(foundation_template)
        self.save_results_to_db()

    def save_results_to_db(self):
        """Write each accumulated result row to the DB.

        NaNs in 'result' become 0; remaining NaNs are converted to None via
        a simplejson round-trip (ignore_nan) so the DB layer receives nulls
        rather than NaN floats.
        """
        self.results_df.drop(columns=['kpi_name'], inplace=True)
        self.results_df.rename(columns={'kpi_fk': 'fk'}, inplace=True)
        # Bug fix: `self.results_df[['result']].fillna(0, inplace=True)`
        # filled a temporary two-bracket copy and never touched the real
        # frame; assign the filled column back instead.
        self.results_df['result'] = self.results_df['result'].fillna(0)
        results = self.results_df.to_dict('records')
        for result in results:
            # Round-trip through simplejson to turn NaN into null/None.
            result = simplejson.loads(simplejson.dumps(result,
                                                       ignore_nan=True))
            self.write_to_db(**result)

    def _calculate_kpis_from_template(self, template_df):
        """Run the matching calculation for every KPI row in *template_df*
        and append each returned result dict to self.results_df.
        """
        for i, row in template_df.iterrows():
            calculation_function = self._get_calculation_function_by_kpi_type(
                row[Consts.KPI_TYPE])
            if calculation_function is None:
                # Robustness: unknown KPI type — nothing to calculate.
                continue
            try:
                kpi_rows = self.templates[row[Consts.KPI_TYPE]][self.templates[
                    row[Consts.KPI_TYPE]][Consts.KPI_NAME].str.encode(
                        'utf-8') == row[Consts.KPI_NAME].encode('utf-8')]
            except IndexError:
                # Bug fix: the original `pass` fell through to the loop below
                # with `kpi_rows` either unbound (NameError on the first
                # iteration) or stale from a previous iteration.
                continue
            for index, kpi_row in kpi_rows.iterrows():
                result_data = calculation_function(kpi_row)
                if result_data:
                    for result in result_data:
                        self.results_df.loc[len(self.results_df),
                                            result.keys()] = result

    def _get_calculation_function_by_kpi_type(self, kpi_type):
        """Map a KPI-type string from the template to its calculation method.

        Returns None for unrecognized types, matching the original if/elif
        fall-through.
        """
        dispatch = {
            Consts.SOS: self.calculate_sos,
            Consts.DISTRIBUTION: self.calculate_distribution,
            Consts.ADJACENCY_BRAND_WITHIN_BAY: self.calculate_adjacency_brand,
            Consts.ADJACENCY_CATEGORY_WITHIN_BAY:
                self.calculate_adjacency_category,
            Consts.SHELF_POSITION: self.calculate_shelf_position,
            Consts.LEAD_ANCHOR_BY_BAY: self.calculate_lead_anchor,
        }
        return dispatch.get(kpi_type)

    def calculate_adjacency_brand(self, row):
        """Per scene/bay, find the largest block of the template's brand and
        count the BRAND fks of products immediately adjacent to that block,
        grouped by direction (UP/DOWN/LEFT/RIGHT).

        Returns a list of result dicts when the brand appears in the merged
        scif/mpis frame; otherwise falls through and returns None (the
        caller treats None and [] identically).
        """
        kpi_name = row[Consts.KPI_NAME]
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        relevant_brands = self._sanitize_values(
            row[Consts.BRAND_NAME])  # Gets the brand name from the template
        relevant_brand_fk_of_the_relevant_brand = \
            self.all_products.loc[self.all_products.brand_name.isin(relevant_brands), 'brand_fk'].iat[
                0]  # used to save as the denominator id
        # Direction label -> fk saved in numerator_result.
        direction = {'UP': 0, 'DOWN': 2, 'RIGHT': 1, 'LEFT': 3}
        result_dict_list = []

        relevant_match_scene_item_facts = self._filter_df(
            self.match_scene_item_facts, {Consts.BRAND_NAME: relevant_brands}
        )  # Filter the merged data frame with the brand to get the relevant dataframe

        if not relevant_match_scene_item_facts.empty:
            for relevant_scene in set(
                    relevant_match_scene_item_facts.scene_id
            ):  # Iterating through the unique scenes existin the merged dataframe
                # NOTE(review): the scene fk is stored in the result's score
                # column — confirm this is intended downstream.
                score = relevant_scene
                mcif = relevant_match_scene_item_facts[
                    relevant_match_scene_item_facts.scene_id.isin(
                        [relevant_scene])]
                unique_bay_numbers = set(
                    mcif.bay_number
                )  # Getting the unique bay numbers in the the scene
                location = {Consts.SCENE_FK: relevant_scene}
                for bay in unique_bay_numbers:
                    context_id = bay  # bay number
                    #  Consts.BRAND_FK: relevant_brands,, Consts.BAY_NUMBER:bay
                    # relevant_mpis = self.match_product_in_scene[self.match_product_in_scene.bay_number.isin([bay])]
                    relevant_filters = {
                        Consts.BRAND_NAME: relevant_brands,
                        Consts.BAY_NUMBER: [bay]
                    }
                    # block = self.block.network_x_block_together(relevant_filters,
                    #                                             location=location_filters,
                    #                                             additional={'minimum_facing_for_block': 1,
                    #                                                         'use_masking_only': True})
                    # Block detection restricted to this brand on this bay;
                    # empty products are allowed inside the block.
                    block = self.block.network_x_block_together(
                        relevant_filters,
                        location=location,
                        additional={
                            'allowed_edge_type': ['encapsulated'],
                            'calculate_all_scenes': True,
                            'minimum_facing_for_block': 1,
                            'use_masking_only': True,
                            'allowed_products_filters': {
                                'product_type': 'Empty'
                            },
                        })
                    # Keep only clusters that qualified as a block.
                    passed_block = block[block.is_block == True]
                    if not passed_block.empty:
                        passed_block = passed_block.iloc[
                            passed_block.block_facings.astype(int).idxmax(
                            )]  # logic to get the block with the largest number of facings in the block

                        valid_cluster_for_block = passed_block.cluster.nodes.values(
                        )
                        relevant_scene_match_fks_for_block = [
                            scene_match_fk for item in valid_cluster_for_block
                            for scene_match_fk in item['scene_match_fk']
                        ]
                        relevant_probe_match_fks_for_block = [
                            probe_match_fk for item in valid_cluster_for_block
                            for probe_match_fk in item['probe_match_fk']
                        ]

                        # {1: 'block_relevant_brand', 2: 'adjacency_relevant_brand', 3:'block_relecant_category',4:'adjacency_relevant_category'}
                        self._save_in_match_product_in_probe_state_reporting_for_explorer_filter(
                            relevant_probe_match_fks_for_block, 1)
                        '''The below line is used to filter the adjacency graph by the closest edges. Specifically since
                         the edges are determined by masking only, there's a chance that there will be two edges that come
                         out from a single node.'''
                        # NOTE(review): indexing dict .values()[0] works on
                        # Python 2 only; on Python 3 this raises TypeError.
                        adj_graph = self.block.adj_graphs_by_scene.values(
                        )[0].edge_subgraph(
                            self._filter_redundant_edges(
                                self.block.adj_graphs_by_scene.values()[0]))

                        adj_items = {
                        }  # will contain the scene match fks that are adjacent to the block. key: scene_match_fk | values:[direction, brand_fk]
                        for match in relevant_scene_match_fks_for_block:
                            for node, node_data in adj_graph.adj[match].items(
                            ):
                                if node not in relevant_scene_match_fks_for_block:
                                    product_fk = self.match_scene_item_facts.loc[
                                        self.match_scene_item_facts.
                                        scene_match_fk == node,
                                        'item_id'].iat[0]
                                    if product_fk == 0:  # general empty
                                        # we need visibility in to seeing if the product is empty
                                        # since we are saving the brand. If the product is empty, then it will save
                                        # as General. That is why we are saving brand fk 3431 which is brand: empty
                                        important_brand = 3431
                                    elif product_fk == 23211:  # irrelevant
                                        # we need visibility in to seeing if the product is irrelevant
                                        # since we are saving the brand. If the product is irrelevant, then it will save
                                        # as General. That is why we are saving brand fk 3432 which is brand: irrelevant
                                        important_brand = 3432
                                    else:
                                        important_brand = self.match_scene_item_facts.loc[
                                            self.match_scene_item_facts.
                                            scene_match_fk == node,
                                            'brand_fk'].iat[0]

                                    adj_items[node] = [
                                        node_data, important_brand
                                    ]

                        brand_fks_for_adj_items = np.array(
                            [nd[1] for nd in adj_items.values()])
                        node_direction = np.array(
                            [nd[0]['direction'] for nd in adj_items.values()])

                        relevant_probe_match_fks_for_adjacent_products = self.match_product_in_scene[
                            self.match_product_in_scene.scene_match_fk.isin(
                                adj_items.keys())].probe_match_fk.values
                        # {1: 'block_relevant_brand', 2: 'adjacency_relevant_brand', 3:'block_relecant_category',4:'adjacency_relevant_category'}
                        self._save_in_match_product_in_probe_state_reporting_for_explorer_filter(
                            relevant_probe_match_fks_for_adjacent_products, 2)

                        # Count adjacent brand fks per direction and emit one
                        # result per (direction, adjacent brand).
                        for dir, dir_fk in direction.items():
                            if dir in node_direction:
                                index_of_revant_brand_fk = np.where(
                                    node_direction == dir)
                                relevant_brand_fks_for_adj_items = brand_fks_for_adj_items[
                                    index_of_revant_brand_fk]
                                values, counts = np.unique(
                                    relevant_brand_fks_for_adj_items,
                                    return_counts=True)
                                for j in range(len(values)):
                                    denominator_id = relevant_brand_fk_of_the_relevant_brand  # brand fk of the relevant brand
                                    numerator_id = values[
                                        j]  # brand_fk of the adjacency product
                                    numerator_result = dir_fk  # the direction of the adjacency
                                    result = counts[j]
                                    result_dict = {
                                        'kpi_fk': kpi_fk,
                                        'numerator_id': numerator_id,
                                        'denominator_id': denominator_id,
                                        'context_id': context_id,
                                        'numerator_result': numerator_result,
                                        'result': result,
                                        'score': score
                                    }
                                    result_dict_list.append(result_dict)
            return result_dict_list

    def calculate_adjacency_category(self, row):
        """Per scene/bay, find the largest block of the template's brand and
        count the CATEGORY fks of products immediately adjacent to that
        block, grouped by direction (UP/DOWN/LEFT/RIGHT).

        Mirrors calculate_adjacency_brand but aggregates by category_fk.
        Returns a list of result dicts when the brand appears in the merged
        scif/mpis frame; otherwise falls through and returns None (the
        caller treats None and [] identically).
        """
        kpi_name = row[Consts.KPI_NAME]
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        relevant_brands = self._sanitize_values(
            row[Consts.BRAND_NAME])  # Gets the brand name from the template
        relevant_brand_fk_of_the_relevant_brand = \
            self.all_products.loc[self.all_products.brand_name.isin(relevant_brands), 'brand_fk'].iat[
                0]  # used to save as the denominator id
        # Direction label -> fk saved in numerator_result.
        direction = {'UP': 0, 'DOWN': 2, 'RIGHT': 1, 'LEFT': 3}
        result_dict_list = []

        relevant_match_scene_item_facts = self._filter_df(
            self.match_scene_item_facts, {Consts.BRAND_NAME: relevant_brands}
        )  # Filter the merged data frame with the brand to get the relevant dataframe

        if not relevant_match_scene_item_facts.empty:
            for relevant_scene in set(
                    relevant_match_scene_item_facts.scene_id
            ):  # Iterating through the unique scenes in the merged dataframe
                mcif = relevant_match_scene_item_facts[
                    relevant_match_scene_item_facts.scene_id.isin(
                        [relevant_scene])]
                unique_bay_numbers = set(
                    mcif.bay_number
                )  # Getting the unique bay numbers in the the scene
                location = {Consts.SCENE_FK: relevant_scene}
                for bay in unique_bay_numbers:
                    #  Consts.BRAND_FK: relevant_brands,, Consts.BAY_NUMBER:bay
                    # relevant_mpis = self.match_product_in_scene[self.match_product_in_scene.bay_number.isin([bay])]
                    relevant_filters = {
                        Consts.BRAND_NAME: relevant_brands,
                        Consts.BAY_NUMBER: [bay]
                    }
                    # block = self.block.network_x_block_together(relevant_filters,
                    #                                             location=location_filters,
                    #                                             additional={'minimum_facing_for_block': 1,
                    #                                                         'use_masking_only': True})
                    # Block detection restricted to this brand on this bay;
                    # empty products are allowed inside the block.
                    block = self.block.network_x_block_together(
                        relevant_filters,
                        location=location,
                        additional={
                            'allowed_edge_type': ['encapsulated'],
                            'calculate_all_scenes': True,
                            'minimum_facing_for_block': 1,
                            'use_masking_only': True,
                            'allowed_products_filters': {
                                'product_type': 'Empty'
                            },
                        })
                    # Keep only clusters that qualified as a block.
                    passed_block = block[block.is_block == True]
                    if not passed_block.empty:
                        passed_block = passed_block.iloc[
                            passed_block.block_facings.astype(int).idxmax(
                            )]  # logic to get the block with the largest number of facings in the block

                        valid_cluster_for_block = passed_block.cluster.nodes.values(
                        )
                        relevant_scene_match_fks_for_block = [
                            scene_match_fk for item in valid_cluster_for_block
                            for scene_match_fk in item['scene_match_fk']
                        ]

                        relevant_probe_match_fks_for_block = [
                            probe_match_fk for item in valid_cluster_for_block
                            for probe_match_fk in item['probe_match_fk']
                        ]

                        # {1: 'block_relevant_brand', 2: 'adjacency_relevant_brand', 3:'block_relecant_category',4:'adjacency_relevant_category'}
                        self._save_in_match_product_in_probe_state_reporting_for_explorer_filter(
                            relevant_probe_match_fks_for_block, 3)
                        '''The below line is used to filter the adjacency graph by the closest edges. Specifically since
                         the edges are determined by masking only, there's a chance that there will be two edges that come
                         out from a single node.'''
                        # NOTE(review): indexing dict .values()[0] works on
                        # Python 2 only; on Python 3 this raises TypeError.
                        adj_graph = self.block.adj_graphs_by_scene.values(
                        )[0].edge_subgraph(
                            self._filter_redundant_edges(
                                self.block.adj_graphs_by_scene.values()[0]))

                        adj_items = {
                        }  # will contain the scene match fks that are adjacent to the block
                        for match in relevant_scene_match_fks_for_block:
                            for node, node_data in adj_graph.adj[match].items(
                            ):
                                if node not in relevant_scene_match_fks_for_block:
                                    product_fk = self.match_scene_item_facts.loc[
                                        self.match_scene_item_facts.
                                        scene_match_fk == node,
                                        'item_id'].iat[0]
                                    if product_fk == 0:  # general empty
                                        # we need visibility in to seeing if the product is empty
                                        # since we are saving the category. If the product is empty, then it will save
                                        # as General. That is why we are saving category fk 26 which is category: empty
                                        important_category = 26
                                    elif product_fk == 23211:  # irrelevant
                                        # we need visibility in to seeing if the product is irrelevant
                                        # since we are saving the category. If the product is irrelevant, then it will save
                                        # as General. That is why we are saving brand fk 27 which is category: irrelevant
                                        important_category = 27
                                    else:
                                        important_category = self.match_scene_item_facts.loc[
                                            self.match_scene_item_facts.
                                            scene_match_fk == node,
                                            'category_fk'].iat[0]
                                    adj_items[node] = [
                                        node_data, important_category
                                    ]

                        category_fks_for_adj_items = np.array(
                            [nd[1] for nd in adj_items.values()])
                        node_direction = np.array(
                            [nd[0]['direction'] for nd in adj_items.values()])

                        relevant_probe_match_fks_for_adjacent_products = self.match_product_in_scene[
                            self.match_product_in_scene.scene_match_fk.isin(
                                adj_items.keys())].probe_match_fk.values
                        # {1: 'block_relevant_brand', 2: 'adjacency_relevant_brand', 3:'block_relecant_category',4:'adjacency_relevant_category'}
                        self._save_in_match_product_in_probe_state_reporting_for_explorer_filter(
                            relevant_probe_match_fks_for_adjacent_products, 4)

                        # Count adjacent category fks per direction and emit
                        # one result per (direction, adjacent category).
                        for dir, dir_fk in direction.items():
                            if dir in node_direction:
                                index_of_revant_brand_fk = np.where(
                                    node_direction == dir)
                                relevant_category_fks_for_adj_items = category_fks_for_adj_items[
                                    index_of_revant_brand_fk]
                                values, counts = np.unique(
                                    relevant_category_fks_for_adj_items,
                                    return_counts=True)
                                for j in range(len(values)):
                                    numerator_id = values[
                                        j]  # brand_fk of the adjacency product
                                    denominator_id = relevant_brand_fk_of_the_relevant_brand  # brand fk of the relevant brand
                                    numerator_result = dir_fk  # the direction of the adjacency
                                    context_id = bay  # bay number
                                    result = counts[
                                        j]  # grouped by category, count
                                    score = relevant_scene
                                    result_dict = {
                                        'kpi_fk': kpi_fk,
                                        'numerator_id': numerator_id,
                                        'denominator_id': denominator_id,
                                        'context_id': context_id,
                                        'numerator_result': numerator_result,
                                        'result': result,
                                        'score': score
                                    }
                                    result_dict_list.append(result_dict)
            return result_dict_list

    def calculate_distribution(self, row):
        """Distribution KPI: save facings per product for every product fk
        listed in the template row.

        Products present in the session get their scif facings value;
        absent products are explicitly saved with result 0.
        """
        kpi_name = row[Consts.KPI_NAME]
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        relevant_product_fk = self._sanitize_values(row.product_list_pk)

        # Bug fix: `pd.np` was removed in pandas >= 1.0; use numpy directly
        # (np is already used elsewhere in this module).
        session_product_fks = self.scif.product_fk.unique().tolist()
        present_mask = np.in1d(relevant_product_fk, session_product_fks)
        all_product_fks = np.array(relevant_product_fk).ravel()
        present_products_fk_in_session = all_product_fks[
            np.flatnonzero(present_mask)]
        absent_products_fk_in_session = all_product_fks[
            np.flatnonzero(~present_mask)]

        result_dict_list = []
        for present_product_fk in present_products_fk_in_session:
            result = self.scif[self.scif.product_fk.isin(
                [present_product_fk])].facings.iat[0]
            result_dict_list.append({
                'kpi_name': kpi_name,
                'kpi_fk': kpi_fk,
                'numerator_id': present_product_fk,
                'denominator_id': self.store_id,
                'result': result
            })

        # Bug fix: the original loop variable shadowed the iterable it was
        # iterating over; use a distinct name.
        for absent_product_fk in absent_products_fk_in_session:
            result_dict_list.append({
                'kpi_name': kpi_name,
                'kpi_fk': kpi_fk,
                'numerator_id': absent_product_fk,
                'denominator_id': self.store_id,
                'result': 0
            })
        return result_dict_list

    def calculate_sos(self, row):
        """Share-of-shelf per SKU: each SKU listed in the template's
        iterate_by column is measured against the total of its own category
        on the configured OUTPUT column (e.g. facings)."""
        kpi_name = row[Consts.KPI_NAME]
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        iterate_by = self._sanitize_values(row.iterate_by)

        # Only SKUs named in the template get a result.
        sku_scif = self.scif[self.scif.product_fk.isin(iterate_by)]

        results = []
        for product_fk in set(sku_scif.product_fk):
            # Denominator: the scif slice sharing this SKU's category fk(s).
            sku_category_fks = self.scif.category_fk[
                self.scif.product_fk == product_fk].to_numpy()
            denominator_scif = self.scif[
                self.scif.category_fk.isin(sku_category_fks)]
            denominator_id = denominator_scif.category_fk.iat[0]
            if denominator_scif.empty:
                denominator_result = 1
            else:
                denominator_result = denominator_scif[row[Consts.OUTPUT]].sum()

            numerator_scif = self.scif[self.scif.product_fk.isin([product_fk])]
            if numerator_scif.empty:
                numerator_result = 0
            else:
                numerator_result = numerator_scif[row[Consts.OUTPUT]].sum()
            numerator_id = numerator_scif.product_fk.iat[0]

            results.append({
                'kpi_name': kpi_name,
                'kpi_fk': kpi_fk,
                'numerator_id': numerator_id,
                'denominator_id': denominator_id,
                'numerator_result': numerator_result,
                'denominator_result': denominator_result,
                'result': (float(numerator_result) / denominator_result) * 100
            })
        return results

    def calculate_shelf_position(self, row):
        """Shelf-position KPI: per scene and bay, save each relevant
        product's shelf-position id (Bottom/Middle/Eye/Top) together with
        its facings on that bay.

        Returns None (not []) when no relevant product is in the session,
        matching the original control flow.
        """
        kpi_name = row[Consts.KPI_NAME]
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        target_product_fks = self._sanitize_values(
            row[Consts.NUMERATOR_VALUE_1])
        results = []

        mcif = self.match_scene_item_facts[
            self.match_scene_item_facts.item_id.isin(target_product_fks)]
        if mcif.empty:
            return None
        # Position label -> result id saved to the DB.
        shelf_position_ids = {'Bottom': 22, 'Middle': 23, 'Eye': 24, 'Top': 25}
        for scene_fk in set(mcif.scene_id):
            scene_mcif = mcif[mcif.scene_id.isin([scene_fk])]
            for bay_number in set(scene_mcif.bay_number):
                bay_mcif = scene_mcif[scene_mcif.bay_number.isin([bay_number])]
                # One row per (product, shelf); .first() keeps a single
                # representative row per pair.
                grouped = bay_mcif.groupby(['item_id', 'shelf_number']).first()
                shelf_facings = grouped.reset_index('shelf_number')[
                    ['shelf_number', 'facings']]
                for product_fk in set(shelf_facings.index):
                    product_shelves = shelf_facings[
                        shelf_facings.index.isin([product_fk])]
                    # Translate the raw shelf numbers into a position id
                    # using the scene/bay shelf count.
                    position_id = self._get_shelf_position_id(
                        unique_scene=scene_fk,
                        unique_bay=bay_number,
                        grouped_shelf=product_shelves.shelf_number,
                        shelf_position_dict=shelf_position_ids)
                    facings = product_shelves.loc[
                        product_shelves.index == product_fk,
                        'facings'].values[0]
                    results.append({
                        'kpi_fk': kpi_fk,
                        'numerator_id': product_fk,
                        'denominator_id': bay_number,
                        'context_id': scene_fk,
                        'numerator_result': facings,
                        'result': position_id
                    })
        return results

    def calculate_lead_anchor(self, row):
        """Lead-anchor KPI: per scene and bay, the share of facings of each
        leading (door-side) product relative to all lead products on the bay.

        The door-handle side (right-most vs left-most) is decided by the
        scene type inside the helper. Returns None (not []) when the
        template name matches no scene, preserving the original behavior.
        """
        kpi_name = row[Consts.KPI_NAME]
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        relevant_template_name = row[Consts.TEMPLATE_NAME]
        result_dict_list = []

        relevant_mcif = self.match_scene_item_facts[
            self.match_scene_item_facts.template_name.isin(
                [relevant_template_name])]
        if relevant_mcif.empty:
            return None
        for relevant_scene in set(relevant_mcif.scene_id):
            scene_mcif = relevant_mcif[relevant_mcif.scene_id.isin(
                [relevant_scene])]
            for bay in set(scene_mcif.bay_number):
                bay_mcif = scene_mcif[scene_mcif.bay_number.isin([bay])]

                # Right- or left-most product fks on the bay, one entry per
                # facing.
                lead_product_fks = \
                    self._get_the_most_right_or_most_left_product_fks_on_bay(
                        bay_mcif, relevant_template_name)
                facing_counts = Counter(lead_product_fks)
                denominator_result = sum(
                    facing_counts.values())  # total lead-product facings
                # Bug fix: the original indexed Counter.keys()/.values() by
                # position, which raises TypeError on Python 3 dict views;
                # iterate the (product_fk, count) pairs instead.
                for product_fk, facings in facing_counts.items():
                    result = (float(facings) / denominator_result) * 100
                    result_dict_list.append({
                        'kpi_fk': kpi_fk,
                        'numerator_id': product_fk,  # product fk
                        'denominator_id': bay,  # bay number
                        'context_id': relevant_scene,  # scene fk
                        'numerator_result': facings,
                        'denominator_result': denominator_result,
                        'result': result
                    })

        return result_dict_list

    @staticmethod
    def _sanitize_values(item):
        """Split a comma-separated string into a list of stripped tokens.

        NaN values are passed through untouched so callers can keep
        treating them as 'no value'.
        """
        if pd.isna(item):
            return item
        return [token.strip() for token in item.split(',')]

    @staticmethod
    def _filter_df(df, filters, exclude=0):
        """Filter *df* by a {column: value(s)} mapping.

        Each value may be a scalar or a list; when *exclude* is truthy the
        matching rows are dropped instead of kept.
        """
        for column, values in filters.items():
            if not isinstance(values, list):
                values = [values]
            mask = df[column].isin(values)
            df = df[~mask] if exclude else df[mask]
        return df

    def _filter_redundant_edges(self, adj_g):
        """Drop redundant adjacency edges created by the masking.

        Because the edges are determined by the masking only, a node can end
        up with two edges in the same direction. Only the edge to the closest
        adjacent node is kept; the ones that skip over it are filtered out.
        """
        edges_filter = []
        for node_fk, node_data in adj_g.nodes(data=True):
            for direction in ['UP', 'DOWN', 'LEFT', 'RIGHT']:
                directional_edges = [
                    (source, target)
                    for source, target, attrs in list(adj_g.edges(node_fk, data=True))
                    if attrs['direction'] == direction]
                if len(directional_edges) > 1:
                    # Several candidates - keep only the nearest neighbor.
                    edges_filter.append(
                        self._get_shortest_path(adj_g, directional_edges))
                else:
                    edges_filter.extend(directional_edges)
        return edges_filter

    def _get_shortest_path(self, adj_g, edges_to_check):
        """Return the edge from *edges_to_check* with the minimal distance
        between its two nodes."""
        return min(edges_to_check,
                   key=lambda edge: self._get_edge_distance(adj_g, edge))

    def _get_edge_distance(self, adj_g, edge):
        """
        Return the Euclidean distance between the two nodes of *edge*,
        based on their display coordinates.
        """
        start = np.array(self._get_node_display_coordinates(adj_g, edge[0]))
        end = np.array(self._get_node_display_coordinates(adj_g, edge[1]))
        return np.linalg.norm(start - end)

    def _get_node_display_coordinates(self, adj_g, node_fk):
        """
        Return the (x, y) display coordinates of a node.
        Those attributes were added to each node because they are the
        attributes the graph was condensed by.
        """
        node_attributes = adj_g.nodes[node_fk]
        x_coordinate = float(list(node_attributes['rect_x'])[0])
        y_coordinate = float(list(node_attributes['rect_y'])[0])
        return x_coordinate, y_coordinate

    @staticmethod
    def _get_the_most_right_or_most_left_product_fks_on_bay(
            bay_filtered_mcif, relevant_template_name):
        '''
        Collect, per shelf, the product that sits closest to the door handle.

        :param bay_filtered_mcif: the relevant merge dataframe of mpis and scif
        :param relevant_template_name: the template name (Right door handle or Left door handle)
        :return: A list with one product fk per shelf - the right-most product
            of each shelf for 'Right Door Handle', the left-most one otherwise.
        '''
        take_right_most = relevant_template_name == 'Right Door Handle'
        product_fks = []
        for shelf in set(bay_filtered_mcif.shelf_number):
            shelf_mcif = bay_filtered_mcif[
                bay_filtered_mcif.shelf_number.isin([shelf])]
            # rect_x grows to the right, so idxmax() points at the right-most
            # item of the shelf and idxmin() at the left-most one.
            edge_row_index = shelf_mcif.rect_x.idxmax() if take_right_most \
                else shelf_mcif.rect_x.idxmin()
            product_fks.append(shelf_mcif.loc[edge_row_index, 'item_id'])
        return product_fks

    def _get_shelf_position_id(self, unique_scene, unique_bay, grouped_shelf,
                               shelf_position_dict):
        '''
        Translate a shelf's position within a scene/bay into a position id.
        :return: The id from *shelf_position_dict* matching the position read
            from the 'Shelf Map' template sheet.
        '''
        scene_bay_mask = (
            self.match_scene_item_facts.scene_id.isin([unique_scene]) &
            self.match_scene_item_facts.bay_number.isin([unique_bay]))
        max_shelf_position = self.match_scene_item_facts[
            scene_bay_mask].shelf_number.max()
        shelf_position = self.templates['Shelf Map'].loc[
            max_shelf_position, grouped_shelf.iat[0]]
        return shelf_position_dict[shelf_position]

    def _save_in_match_product_in_probe_state_reporting_for_explorer_filter(
            self, relevant_probe_match_fks,
            match_product_in_probe_state_reporting_fk):
        """
        Queue probe-state reporting rows on the common object so the explorer
        filter can use them.

        :param relevant_probe_match_fks: The probe match fks to report.
        :param match_product_in_probe_state_reporting_fk: The reporting state
            fk assigned to every row.
        """
        df_for_common = pd.DataFrame({
            self.common.MATCH_PRODUCT_IN_PROBE_FK:
            relevant_probe_match_fks,
            self.common.MATCH_PRODUCT_IN_PROBE_STATE_REPORTING_FK:
            match_product_in_probe_state_reporting_fk
        })  # this is for block_relevant_brand
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the forward-compatible equivalent and behaves
        # identically here (indexes are preserved, no re-sorting).
        self.common.match_product_in_probe_state_values = pd.concat([
            self.common.match_product_in_probe_state_values, df_for_common
        ])
# --- Example #15 ---
class CBCDAIRYILToolBox:
    def __init__(self, data_provider, output):
        """
        Initialize session-level data, DB connections and template data.

        :param data_provider: The engine's data provider for the session.
        :param output: The engine's output object.
        """
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        # Both the new (Common) and legacy (oldCommon) KPI writers are kept,
        # since results are written to both the new and the old tables.
        self.common = Common(self.data_provider)
        self.old_common = oldCommon(self.data_provider)
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.session_fk = self.data_provider.session_id
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.survey = Survey(self.data_provider)
        self.block = Block(self.data_provider)
        self.general_toolbox = GENERALToolBox(self.data_provider)
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        # The template path depends on the visit date, so it must be resolved
        # before any template sheet is parsed below.
        self.template_path = self.get_relevant_template()
        self.gap_data = self.get_gap_data()
        self.kpi_weights = parse_template(self.template_path,
                                          Consts.KPI_WEIGHT,
                                          lower_headers_row_index=0)
        self.template_data = self.parse_template_data()
        self.kpis_gaps = list()
        self.passed_availability = list()
        self.kpi_static_data = self.old_common.get_kpi_static_data()
        self.own_manufacturer_fk = int(
            self.data_provider.own_manufacturer.param_value.values[0])
        self.parser = Parser
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]

    def get_relevant_template(self):
        """
        Return the template path matching the session's visit date.
        Because of a logic change, several templates exist and each one
        matches a different date range.
        :return: Full template path
        """
        cutoff_date = datetime.date(datetime(2019, 12, 31))
        if self.visit_date <= cutoff_date:
            return "{}/{}/{}".format(
                Consts.TEMPLATE_PATH, Consts.PREVIOUS_TEMPLATES,
                Consts.PROJECT_TEMPLATE_NAME_UNTIL_2019_12_31)
        return "{}/{}".format(Consts.TEMPLATE_PATH, Consts.CURRENT_TEMPLATE)

    def get_gap_data(self):
        """
        Parse the gap sheet of the template and map each KPI to its priority.
        :return: A dict of priorities by kpi_name. E.g: {kpi_name1: 1, ...}
        """
        gap_sheet = parse_template(self.template_path,
                                   Consts.KPI_GAP,
                                   lower_headers_row_index=0)
        kpi_names = gap_sheet[Consts.KPI_NAME]
        orders = gap_sheet[Consts.ORDER]
        return {name: int(order) for name, order in zip(kpi_names, orders)}

    def main_calculation(self):
        """
        Calculate all of the session's KPI results.

        Fetches the relevant KPI set (according to the store's attributes),
        calculates every relevant Atomic KPI based on the project's template,
        aggregates the results per KPI using the weights, and finally
        aggregates the set-level score. Results are written both via the new
        common writer and the legacy old_common writer.
        """
        # SOS and OOS are calculated regardless of the template's KPI set.
        self.calculate_hierarchy_sos()
        self.calculate_oos()
        if self.template_data.empty:
            Log.warning(Consts.EMPTY_TEMPLATE_DATA_LOG.format(self.store_id))
            return
        kpi_set, kpis = self.get_relevant_kpis_for_calculation()
        kpi_set_fk = self.common.get_kpi_fk_by_kpi_type(Consts.TOTAL_SCORE)
        # Legacy (old static table) fk for the level-1 total score.
        old_kpi_set_fk = self.get_kpi_fk_by_kpi_name(Consts.TOTAL_SCORE, 1)
        total_set_scores = list()
        for kpi_name in kpis:
            kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name)
            old_kpi_fk = self.get_kpi_fk_by_kpi_name(kpi_name, 2)
            kpi_weight = self.get_kpi_weight(kpi_name, kpi_set)
            atomics_df = self.get_atomics_to_calculate(kpi_name)
            atomic_results = self.calculate_atomic_results(
                kpi_fk, atomics_df)  # Atomic level
            kpi_results = self.calculate_kpis_and_save_to_db(
                atomic_results, kpi_fk, kpi_weight, kpi_set_fk)  # KPI lvl
            # Mirror the KPI score into the legacy level-2 table.
            self.old_common.old_write_to_db_result(fk=old_kpi_fk,
                                                   level=2,
                                                   score=format(
                                                       kpi_results, '.2f'))
            total_set_scores.append(kpi_results)
        kpi_set_score = self.calculate_kpis_and_save_to_db(
            total_set_scores, kpi_set_fk)  # Set level
        self.old_common.write_to_db_result(fk=old_kpi_set_fk,
                                           level=1,
                                           score=kpi_set_score)
        # Persist the top gaps gathered during the atomic calculations.
        self.handle_gaps()

    def calculate_oos(self):
        """
        Calculate out-of-stock results for the leading products.
        A leading SKU with no rows in scif is counted as OOS and written on
        SKU level (result=1); the store level result is the OOS ratio among
        all leading SKUs.
        """
        numerator = total_facings = 0
        store_kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_type=Consts.OOS)
        sku_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.OOS_SKU)
        leading_skus_df = self.template_data[self.template_data[
            Consts.KPI_NAME].str.encode(
                "utf8") == Consts.LEADING_PRODUCTS.encode("utf8")]
        # The template stores comma-separated EAN codes per row.
        skus_ean_set = {
            ean_code.strip()
            for values in leading_skus_df[Consts.PARAMS_VALUE_1].tolist()
            for ean_code in values.split(",")
        }
        product_fks = self.all_products[self.all_products[
            'product_ean_code'].isin(skus_ean_set)]['product_fk'].tolist()
        # sku level oos
        for sku in product_fks:
            # 2 for distributed and 1 for oos
            if self.scif[self.scif['product_fk'] == sku].empty:
                numerator += 1
                self.common.write_to_db_result(fk=sku_kpi_fk,
                                               numerator_id=sku,
                                               denominator_id=self.store_id,
                                               result=1,
                                               numerator_result=1,
                                               denominator_result=1,
                                               score=0,
                                               identifier_parent="OOS",
                                               should_enter=True)

        # store level oos
        denominator = len(product_fks)
        if denominator:
            result = round(numerator / float(denominator), 4)
        else:
            numerator = result = 0
        self.common.write_to_db_result(fk=store_kpi_fk,
                                       numerator_id=self.own_manufacturer_fk,
                                       denominator_id=self.store_id,
                                       result=result,
                                       numerator_result=numerator,
                                       denominator_result=denominator,
                                       score=total_facings,
                                       identifier_result="OOS")

    def calculate_hierarchy_sos(self):
        """
        Calculate the own manufacturer's share of shelf (by facings) on four
        linked levels: store, category, brand-within-category and
        SKU-within-brand. Each level is written to the DB and attached to its
        parent level through identifier_parent / identifier_result.
        """
        store_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN)
        category_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN_CAT)
        brand_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN_CAT_BRAND)
        sku_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_type=Consts.SOS_BY_OWN_MAN_CAT_BRAND_SKU)
        # Only rows flagged as SOS-relevant take part in the calculation.
        sos_df = self.scif[self.scif['rlv_sos_sc'] == 1]
        # store level sos
        store_res, store_num, store_den = self.calculate_own_manufacturer_sos(
            filters={}, df=sos_df)
        self.common.write_to_db_result(fk=store_kpi_fk,
                                       numerator_id=self.own_manufacturer_fk,
                                       denominator_id=self.store_id,
                                       result=store_res,
                                       numerator_result=store_num,
                                       denominator_result=store_den,
                                       score=store_res,
                                       identifier_result="OWN_SOS")
        # category level sos - only categories where the own manufacturer is
        # present in the session.
        session_categories = set(
            self.parser.filter_df(
                conditions={'manufacturer_fk': self.own_manufacturer_fk},
                data_frame_to_filter=self.scif)['category_fk'])
        # NOTE: 'filters' is shared and mutated across the nested loops below;
        # each level removes its own key ('product_fk', 'brand_fk') when done.
        for category_fk in session_categories:
            filters = {'category_fk': category_fk}
            cat_res, cat_num, cat_den = self.calculate_own_manufacturer_sos(
                filters=filters, df=sos_df)
            self.common.write_to_db_result(
                fk=category_kpi_fk,
                numerator_id=category_fk,
                denominator_id=self.store_id,
                result=cat_res,
                numerator_result=cat_num,
                denominator_result=cat_den,
                score=cat_res,
                identifier_parent="OWN_SOS",
                should_enter=True,
                identifier_result="OWN_SOS_cat_{}".format(str(category_fk)))
            # brand-category level sos
            filters['manufacturer_fk'] = self.own_manufacturer_fk
            cat_brands = set(
                self.parser.filter_df(conditions=filters,
                                      data_frame_to_filter=sos_df)['brand_fk'])
            for brand_fk in cat_brands:
                filters['brand_fk'] = brand_fk
                brand_df = self.parser.filter_df(conditions=filters,
                                                 data_frame_to_filter=sos_df)
                brand_num = brand_df['facings'].sum()
                # NOTE(review): calculate_sos_res returns the denominator it
                # was given, so cat_num is re-bound to the same value here -
                # unless it was 0, in which case all three become 0.
                brand_res, brand_num, cat_num = self.calculate_sos_res(
                    brand_num, cat_num)
                self.common.write_to_db_result(
                    fk=brand_kpi_fk,
                    numerator_id=brand_fk,
                    denominator_id=category_fk,
                    result=brand_res,
                    numerator_result=brand_num,
                    should_enter=True,
                    denominator_result=cat_num,
                    score=brand_res,
                    identifier_parent="OWN_SOS_cat_{}".format(
                        str(category_fk)),
                    identifier_result="OWN_SOS_cat_{}_brand_{}".format(
                        str(category_fk), str(brand_fk)))
                # sku level sos - the brand's facings act as the denominator.
                product_fks = set(
                    self.parser.filter_df(
                        conditions=filters,
                        data_frame_to_filter=sos_df)['product_fk'])
                for sku in product_fks:
                    filters['product_fk'] = sku
                    product_df = self.parser.filter_df(
                        conditions=filters, data_frame_to_filter=sos_df)
                    sku_facings = product_df['facings'].sum()
                    sku_result, sku_num, sku_den = self.calculate_sos_res(
                        sku_facings, brand_num)
                    self.common.write_to_db_result(
                        fk=sku_kpi_fk,
                        numerator_id=sku,
                        denominator_id=brand_fk,
                        result=sku_result,
                        numerator_result=sku_facings,
                        should_enter=True,
                        denominator_result=brand_num,
                        score=sku_facings,
                        identifier_parent="OWN_SOS_cat_{}_brand_{}".format(
                            str(category_fk), str(brand_fk)))
                del filters['product_fk']
            del filters['brand_fk']

    def calculate_own_manufacturer_sos(self, filters, df):
        filters['manufacturer_fk'] = self.own_manufacturer_fk
        numerator_df = self.parser.filter_df(conditions=filters,
                                             data_frame_to_filter=df)
        del filters['manufacturer_fk']
        denominator_df = self.parser.filter_df(conditions=filters,
                                               data_frame_to_filter=df)
        if denominator_df.empty:
            return 0, 0, 0
        denominator = denominator_df['facings'].sum()
        if numerator_df.empty:
            numerator = 0
        else:
            numerator = numerator_df['facings'].sum()
        return self.calculate_sos_res(numerator, denominator)

    @staticmethod
    def calculate_sos_res(numerator, denominator):
        if denominator == 0:
            return 0, 0, 0
        result = round(numerator / float(denominator), 3)
        return result, numerator, denominator

    def add_gap(self, atomic_kpi, score, atomic_weight):
        """
        Register a gap for an Atomic KPI whose score is not perfect.
        :param atomic_kpi: A Series with data about the Atomic KPI.
        :param score: Atomic KPI score.
        :param atomic_weight: The Atomic KPI's weight.
        """
        parent_kpi_name = atomic_kpi[Consts.KPI_NAME]
        atomic_name = atomic_kpi[Consts.KPI_ATOMIC_NAME]
        self.kpis_gaps.append({
            Consts.ATOMIC_FK: self.common.get_kpi_fk_by_kpi_type(atomic_name),
            Consts.PRIORITY: self.gap_data[parent_kpi_name],
            Consts.SCORE: score,
            Consts.WEIGHT: atomic_weight
        })

    @staticmethod
    def sort_by_priority(gap_dict):
        """Sort key for gap dicts: by priority first, then by score."""
        priority = gap_dict[Consts.PRIORITY]
        score = gap_dict[Consts.SCORE]
        return priority, score

    def handle_gaps(self):
        """Save the five highest-priority gaps to the DB
        (pservice.custom_gaps table)."""
        self.kpis_gaps.sort(key=self.sort_by_priority)
        top_gaps = self.kpis_gaps[:5]
        gaps_per_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GAP_PER_ATOMIC_KPI)
        gaps_total_score_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GAPS_TOTAL_SCORE_KPI)
        gaps_total_score = 0
        for gap in top_gaps:
            weight = gap[Consts.WEIGHT]
            # The lost portion of the weight: weight * (1 - score / 100).
            current_gap_score = weight - (gap[Consts.SCORE] / 100 * weight)
            gaps_total_score += current_gap_score
            self.insert_gap_results(gaps_per_kpi_fk,
                                    current_gap_score,
                                    weight,
                                    numerator_id=gap[Consts.ATOMIC_FK],
                                    parent_fk=gaps_total_score_kpi_fk)
        total_weight = sum(gap[Consts.WEIGHT] for gap in top_gaps)
        self.insert_gap_results(gaps_total_score_kpi_fk, gaps_total_score,
                                total_weight)

    def insert_gap_results(self,
                           gap_kpi_fk,
                           score,
                           weight,
                           numerator_id=Consts.CBC_MANU,
                           parent_fk=None):
        """Write a single GAP result row; score and weight are stored as
        percentages."""
        should_enter = bool(parent_fk)
        score = score * 100
        weight = round(weight * 100, 2)
        self.common.write_to_db_result(fk=gap_kpi_fk,
                                       numerator_id=numerator_id,
                                       numerator_result=score,
                                       denominator_id=self.store_id,
                                       denominator_result=weight,
                                       weight=weight,
                                       identifier_result=gap_kpi_fk,
                                       identifier_parent=parent_fk,
                                       result=score,
                                       score=score,
                                       should_enter=should_enter)

    def calculate_kpis_and_save_to_db(self,
                                      kpi_results,
                                      kpi_fk,
                                      parent_kpi_weight=1.0,
                                      parent_fk=None):
        """
        Aggregate a KPI's score from its children results by weights and save
        it to the DB.
        :param kpi_results: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        :param kpi_fk: The relevant KPI fk.
        :param parent_kpi_weight: The parent's KPI total weight.
        :param parent_fk: The KPI SET FK that the KPI "belongs" too if exist.
        :return: The aggregated KPI score.
        """
        should_enter = True if parent_fk else False
        ignore_weight = not should_enter  # Weights should be ignored only in the set level!
        kpi_score = self.calculate_kpi_result_by_weight(
            kpi_results, parent_kpi_weight, ignore_weights=ignore_weight)
        total_weight = round(parent_kpi_weight * 100, 2)
        target = None if parent_fk else round(80,
                                              2)  # Requested for visualization
        # Both writes below share every argument except the fk and the
        # identifier_result; factored out to keep them in sync.
        shared_kwargs = dict(numerator_id=Consts.CBC_MANU,
                             numerator_result=kpi_score,
                             denominator_id=self.store_id,
                             denominator_result=total_weight,
                             target=target,
                             identifier_parent=parent_fk,
                             should_enter=should_enter,
                             weight=total_weight,
                             result=kpi_score,
                             score=kpi_score)
        self.common.write_to_db_result(fk=kpi_fk,
                                       identifier_result=kpi_fk,
                                       **shared_kwargs)

        if not parent_fk:
            # The set score is also written under a dedicated KPI that the
            # dashboard reads from.
            dashboard_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
                Consts.TOTAL_SCORE_FOR_DASHBOARD)
            self.common.write_to_db_result(fk=dashboard_kpi_fk,
                                           identifier_result=dashboard_kpi_fk,
                                           **shared_kwargs)

        return kpi_score

    def calculate_kpi_result_by_weight(self,
                                       kpi_results,
                                       parent_kpi_weight,
                                       ignore_weights=False):
        """
        This function aggregates the KPI results by scores and weights.
        :param ignore_weights: If True the function just sums the results.
        :param parent_kpi_weight: The parent's KPI total weight.
        :param kpi_results: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        :return: The aggregated KPI score.
        """
        if ignore_weights or len(kpi_results) == 0:
            return sum(kpi_results)
        weights_list = map(lambda res: res[1], kpi_results)
        if None in weights_list:  # Ignoring weights and dividing equally by length!
            kpi_score = sum(map(lambda res: res[0], kpi_results)) / float(
                len(kpi_results))
        elif round(
                sum(weights_list), 2
        ) < parent_kpi_weight:  # Missing weights needs to be divided among the kpis
            kpi_score = self.divide_missing_percentage(kpi_results,
                                                       parent_kpi_weight,
                                                       sum(weights_list))
        else:
            kpi_score = sum([score * weight for score, weight in kpi_results])
        return kpi_score

    @staticmethod
    def divide_missing_percentage(kpi_results, parent_weight, total_weights):
        """
        This function is been activated in case the total number of KPI weights doesn't equal to 100%.
        It divides the missing percentage among the other KPI and calculates the score.
        :param parent_weight: Parent KPI's weight.
        :param total_weights: The total number of weights that were calculated earlier.
        :param kpi_results: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        :return: KPI aggregated score.
        """
        missing_weight = parent_weight - total_weights
        weight_addition = missing_weight / float(
            len(kpi_results)) if kpi_results else 0
        kpi_score = sum([
            score * (weight + weight_addition) for score, weight in kpi_results
        ])
        return kpi_score

    def calculate_atomic_results(self, kpi_fk, atomics_df):
        """
        Calculate the result of every Atomic KPI (the lowest level) that is
        relevant for the given kpi_fk, writing each one to both the new and
        the legacy DB tables.
        :param kpi_fk: The KPI FK that the atomics "belong" to.
        :param atomics_df: The relevant Atomic KPIs from the project's template.
        :return: A list of results and weights tuples: [(score1, weight1), (score2, weight2) ... ].
        """
        total_scores = list()
        for i in atomics_df.index:
            current_atomic = atomics_df.loc[i]
            kpi_type, atomic_weight, general_filters = self.get_relevant_data_per_atomic(
                current_atomic)
            if general_filters is None:
                continue
            num_result, den_result, atomic_score = self.calculate_atomic_kpi_by_type(
                kpi_type, **general_filters)
            # Handling Atomic KPIs results
            if atomic_score is None:  # In cases that we need to ignore the KPI and divide it's weight
                continue
            elif atomic_score < 100:
                # Imperfect score -> register the gap for the gaps report.
                self.add_gap(current_atomic, atomic_score, atomic_weight)
            total_scores.append((atomic_score, atomic_weight))
            # Write to the new KPI tables (level-2 atomic fk) ...
            atomic_fk_lvl_2 = self.common.get_kpi_fk_by_kpi_type(
                current_atomic[Consts.KPI_ATOMIC_NAME].strip())
            old_atomic_fk = self.get_kpi_fk_by_kpi_name(
                current_atomic[Consts.KPI_ATOMIC_NAME].strip(), 3)
            self.common.write_to_db_result(fk=atomic_fk_lvl_2,
                                           numerator_id=Consts.CBC_MANU,
                                           numerator_result=num_result,
                                           denominator_id=self.store_id,
                                           weight=round(
                                               atomic_weight * 100, 2),
                                           denominator_result=den_result,
                                           should_enter=True,
                                           identifier_parent=kpi_fk,
                                           result=atomic_score,
                                           score=atomic_score * atomic_weight)
            # ... and mirror the weighted score into the legacy tables.
            self.old_common.old_write_to_db_result(
                fk=old_atomic_fk,
                level=3,
                result=str(format(atomic_score * atomic_weight, '.2f')),
                score=atomic_score)
        return total_scores

    def get_kpi_fk_by_kpi_name(self, kpi_name, kpi_level):
        if kpi_level == 1:
            column_key = 'kpi_set_fk'
            column_value = 'kpi_set_name'
        elif kpi_level == 2:
            column_key = 'kpi_fk'
            column_value = 'kpi_name'
        elif kpi_level == 3:
            column_key = 'atomic_kpi_fk'
            column_value = 'atomic_kpi_name'
        else:
            raise ValueError('invalid level')

        try:
            if column_key and column_value:
                return self.kpi_static_data[
                    self.kpi_static_data[column_value].str.encode('utf-8') ==
                    kpi_name.encode('utf-8')][column_key].values[0]

        except IndexError:
            Log.error(
                'Kpi name: {}, isnt equal to any kpi name in static table'.
                format(kpi_name))
            return None

    def get_relevant_data_per_atomic(self, atomic_series):
        """
        Extract the calculation data of a single Atomic KPI.
        :param atomic_series: The Atomic row from the Template.
        :return: A tuple with data: (atomic_type, atomic_weight, general_filters)
        """
        kpi_type = atomic_series.get(Consts.KPI_TYPE)
        raw_weight = atomic_series.get(Consts.WEIGHT)
        atomic_weight = float(raw_weight) if raw_weight else None
        general_filters = self.get_general_filters(atomic_series)
        return kpi_type, atomic_weight, general_filters

    def calculate_atomic_kpi_by_type(self, atomic_type, **general_filters):
        """
        Route an Atomic KPI to the calculation that matches its type.
        :param atomic_type: KPI Family from the template.
        :param general_filters: Relevant attributes and values to calculate by.
        :return: A tuple with results: (numerator_result, denominator_result, total_score).
        """
        numerator, denominator = 0, 0
        score = None
        if atomic_type in [Consts.AVAILABILITY]:
            score = self.calculate_availability(**general_filters)
        elif atomic_type == Consts.AVAILABILITY_FROM_BOTTOM:
            score = self.calculate_availability_from_bottom(**general_filters)
        elif atomic_type == Consts.MIN_2_AVAILABILITY:
            numerator, denominator, score = self.calculate_min_2_availability(
                **general_filters)
        elif atomic_type == Consts.SURVEY:
            score = self.calculate_survey(**general_filters)
        elif atomic_type == Consts.BRAND_BLOCK:
            score = self.calculate_brand_block(**general_filters)
        elif atomic_type == Consts.EYE_LEVEL:
            numerator, denominator, score = self.calculate_eye_level(
                **general_filters)
        else:
            # Unknown KPI family - warn and return None so the caller skips it.
            Log.warning(Consts.UNSUPPORTED_KPI_LOG.format(atomic_type))
        return numerator, denominator, score

    def get_relevant_kpis_for_calculation(self):
        """
        Retrieve the KPI set and its ordered KPI names from the template.
        :return: A tuple: (set_name, [kpi1, kpi2, kpi3...]) to calculate.
        """
        kpi_set = self.template_data[Consts.KPI_SET].values[0]
        set_rows = self.template_data[self.template_data[
            Consts.KPI_SET].str.encode('utf-8') == kpi_set.encode('utf-8')]
        kpis = set_rows[Consts.KPI_NAME].unique().tolist()
        # Planogram KPI should be calculated last because of the MINIMUM 2 FACINGS KPI.
        if Consts.PLANOGRAM_KPI in kpis:
            planogram_index = kpis.index(Consts.PLANOGRAM_KPI)
            if planogram_index != len(kpis) - 1:
                kpis.append(kpis.pop(planogram_index))
        return kpi_set, kpis

    def get_atomics_to_calculate(self, kpi_name):
        """
        Filter the template data down to the Atomic rows of the given KPI.
        :param kpi_name: The hebrew KPI name from the template.
        :return: A DataFrame that contains data about the relevant Atomic KPIs.
        """
        # utf-8 comparison so hebrew KPI names match correctly.
        encoded_names = self.template_data[Consts.KPI_NAME].str.encode('utf-8')
        return self.template_data[encoded_names == kpi_name.encode('utf-8')]

    def get_store_attributes(self, attributes_names):
        """
        Return the requested attributes of the visit's store.
        :param attributes_names: List of requested store attributes to return.
        :return: A dictionary with the requested attributes, E.g: {attr_name: attr_val, ...}
        """
        store_info_dict = self.store_info.iloc[0].to_dict()
        filtered_store_info = dict()
        for store_att in attributes_names:
            filtered_store_info[store_att] = store_info_dict[store_att]
        return filtered_store_info

    def parse_template_data(self):
        """
        Load the KPI sheet of the template and keep only the rows matching
        this store's attributes.
        :return: A DataFrame with the template data, filtered by store attributes.
        """
        kpis_template = parse_template(self.template_path,
                                       Consts.KPI_SHEET,
                                       lower_headers_row_index=1)
        relevant_store_info = self.get_store_attributes(
            Consts.STORE_ATTRIBUTES_TO_FILTER_BY)
        return self.filter_template_by_store_att(kpis_template,
                                                 relevant_store_info)

    @staticmethod
    def filter_template_by_store_att(kpis_template, store_attributes):
        """
        This function gets a dictionary with store type, additional attribute 1, 2 and 3 and filters the template by it.
        A template row passes for an attribute when it either equals the
        store's value or is left empty (empty == applies to every store).
        :param kpis_template: KPI sheet of the project's template.
        :param store_attributes: {store_type: X, additional_attribute_1: Y, ... }.
        :return: A filtered DataFrame.
        """
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior on Py2 and keeps the code importable under Py3.
        for store_att, store_val in store_attributes.items():
            if store_val is None:
                store_val = ""
            kpis_template = kpis_template[(
                kpis_template[store_att].str.encode('utf-8') ==
                store_val.encode('utf-8')) | (kpis_template[store_att] == "")]
        return kpis_template

    def get_relevant_scenes_by_params(self, params):
        """
        Resolve the scene fks to calculate on, by the row's template filters.
        :param params: The Atomic KPI row filters from the template.
        :return: List of scene fks.
        """
        template_names = params[Consts.TEMPLATE_NAME].split(Consts.SEPARATOR)
        template_groups = params[Consts.TEMPLATE_GROUP].split(Consts.SEPARATOR)
        scenes_df = self.scif[[
            Consts.SCENE_ID, 'template_name', 'template_group'
        ]]
        # A filter made only of empty strings means "no restriction".
        if any(template_names):
            scenes_df = scenes_df[scenes_df['template_name'].isin(
                template_names)]
        if any(template_groups):
            scenes_df = scenes_df[scenes_df['template_group'].isin(
                template_groups)]
        return scenes_df[Consts.SCENE_ID].unique().tolist()

    def get_general_filters(self, params):
        """
        Build the KPI filters dictionary for an Atomic KPI row.
        Filter params 1 & 2 are included and param 3 is for exclusion.
        :param params: The Atomic KPI row in the template.
        :return: A dictionary with the relevant filters, or None when no scene matches.
        """
        relevant_scenes = self.get_relevant_scenes_by_params(params)
        if not relevant_scenes:
            return None
        general_filters = {
            Consts.TARGET: params[Consts.TARGET],
            Consts.SPLIT_SCORE: params[Consts.SPLIT_SCORE],
            Consts.KPI_FILTERS: {Consts.SCENE_ID: relevant_scenes}
        }
        for type_col, value_col in Consts.KPI_FILTER_VALUE_LIST:
            if not params[value_col]:
                continue
            # Param 3 is an exclusion filter; params 1 & 2 are inclusions.
            should_included = Consts.EXCLUDE_VAL if value_col == Consts.PARAMS_VALUE_3 else Consts.INCLUDE_VAL
            param_type, param_value = params[type_col], params[value_col]
            filter_param = self.handle_param_values(param_type, param_value)
            general_filters[Consts.KPI_FILTERS][param_type] = (filter_param,
                                                               should_included)
        return general_filters

    @staticmethod
    def handle_param_values(param_type, param_value):
        """
        Split a template cell into a list of filter values, casting numeric
        values to float — except EAN codes, which must stay textual.
        :param param_type: The param type to filter by. E.g: product_ean code or brand_name
        :param param_value: The value to filter by.
        :return: list of param values.
        """
        values_list = param_value.split(Consts.SEPARATOR)
        params = []
        for val in values_list:
            # val.isdigit() works for both str and unicode (the former
            # unicode.isdigit(val) raised TypeError on str input and is not
            # available on Python 3); an explicit list keeps Py3 callers
            # getting a list rather than a map iterator.
            if val.isdigit() and param_type != Consts.EAN_CODE:
                params.append(float(val))
            else:
                params.append(val.strip())
        return params

    def get_kpi_weight(self, kpi, kpi_set):
        """
        Look up a KPI's weight in the project's weights template.
        :param kpi: The KPI name.
        :param kpi_set: Set KPI name.
        :return: The kpi weight (Float), or None when it isn't defined.
        """
        set_match = self.kpi_weights[Consts.KPI_SET].str.encode(
            'utf-8') == kpi_set.encode('utf-8')
        kpi_match = self.kpi_weights[Consts.KPI_NAME].str.encode(
            'utf-8') == kpi.encode('utf-8')
        weight = self.kpi_weights[set_match & kpi_match].get(Consts.WEIGHT)
        return None if weight.empty else float(weight.values[0])

    def merge_and_filter_scif_and_matches_for_eye_level(self, **kpi_filters):
        """
        Merge scene_item_facts with match_product_in_scene and filter the
        merged DataFrame by the given KPI filters.
        :param kpi_filters: Dictionary with attributes and values to filter the DataFrame by.
        :return: The merged and filtered DataFrame.
        """
        # Bring over only the matches columns scif doesn't already have,
        # plus the join keys.
        matches_only_columns = list(
            self.match_product_in_scene.keys().difference(self.scif.keys()))
        scif_matches_diff = self.match_product_in_scene[
            ['scene_fk', 'product_fk'] + matches_only_columns]
        merged_df = pd.merge(self.scif[self.scif.facings != 0],
                             scif_matches_diff,
                             how='outer',
                             left_on=['scene_id', 'item_id'],
                             right_on=[Consts.SCENE_FK, Consts.PRODUCT_FK])
        filter_condition = self.general_toolbox.get_filter_condition(
            merged_df, **kpi_filters)
        return merged_df[filter_condition]

    @kpi_runtime()
    def calculate_eye_level(self, **general_filters):
        """
        Calculate the Eye Level KPI: the share of the filtered facings that
        sit on the eye-level shelves (per Consts.EYE_LEVEL_PER_SHELF).
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: (eye_level_facings, total_facings, score). A ratio of 75% or
                 more is rounded up to a full score of 100.
        """
        merged_df = self.merge_and_filter_scif_and_matches_for_eye_level(
            **general_filters[Consts.KPI_FILTERS])
        total_number_of_facings = 0
        eye_level_facings = 0
        for scene in merged_df['scene_id'].unique().tolist():
            scene_merged_df = merged_df[merged_df['scene_id'] == scene]
            scene_matches = self.match_product_in_scene[
                self.match_product_in_scene['scene_fk'] == scene]
            total_number_of_facings += len(scene_merged_df)
            eye_level_df = self.filter_df_by_shelves(
                scene_merged_df, scene_matches, Consts.EYE_LEVEL_PER_SHELF)
            eye_level_facings += len(eye_level_df)
        if total_number_of_facings:
            total_score = eye_level_facings / float(total_number_of_facings)
        else:
            total_score = 0
        total_score = 100 if total_score >= 0.75 else total_score * 100
        return eye_level_facings, total_number_of_facings, total_score

    @staticmethod
    def filter_df_by_shelves(df, scene_matches, eye_level_definition):
        """
        Keep only the rows of df that fall on eye-level shelves: strictly
        below the configured top margin and strictly above the bottom margin.
        :param df: data frame to filter
        :param scene_matches: match_product_in_scene for particular scene
        :param eye_level_definition: definition for eye level shelves
        :return: filtered data frame
        """
        number_of_shelves = max(scene_matches.shelf_number_from_bottom.max(),
                                scene_matches.shelf_number.max())
        top = bottom = 0
        # Pick the top/bottom margins of the bucket matching the shelf count.
        for json_def in eye_level_definition:
            if json_def[Consts.MIN] <= number_of_shelves <= json_def[
                    Consts.MAX]:
                top = json_def[Consts.TOP]
                bottom = json_def[Consts.BOTTOM]
        below_top = df.shelf_number > top
        above_bottom = df.shelf_number_from_bottom > bottom
        return df[below_top & above_bottom]

    @kpi_runtime()
    def calculate_availability_from_bottom(self, **general_filters):
        """
        Pass only when *all* of the relevant products sit exclusively on the
        lowest shelf.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: 100 when the bottom-shelf condition holds, 0 otherwise.
        """
        allowed_products_dict = self.get_allowed_product_by_params(
            **general_filters)
        filtered_matches = self.match_product_in_scene[
            self.match_product_in_scene[Consts.PRODUCT_FK].isin(
                allowed_products_dict[Consts.PRODUCT_FK])]
        relevant_shelves_to_check = set(
            filtered_matches[Consts.SHELF_NUM_FROM_BOTTOM].unique().tolist())
        # Check bottom shelf condition: the only occupied shelf is the lowest one.
        only_lowest_shelf = (len(relevant_shelves_to_check) == 1 and
                             Consts.LOWEST_SHELF in relevant_shelves_to_check)
        return 100 if only_lowest_shelf else 0

    @kpi_runtime()
    def calculate_brand_block(self, **general_filters):
        """
        Calculate the brand block KPI: filter products by the template and
        check whether at least one scene contains a block of them.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: 100 if at least one scene has a block, 0 otherwise.
        """
        products_dict = self.get_allowed_product_by_params(**general_filters)
        additional_params = {
            'minimum_block_ratio': Consts.MIN_BLOCK_RATIO,
            'minimum_facing_for_block': Consts.MIN_FACINGS_IN_BLOCK,
            'allowed_products_filters': {
                'product_type': ['Empty']
            },
            'calculate_all_scenes': False,
            'include_stacking': True,
            'check_vertical_horizontal': False
        }
        block_result = self.block.network_x_block_together(
            population=products_dict, additional=additional_params)
        if block_result.empty:
            return 0
        return 0 if block_result[block_result.is_block].empty else 100

    def get_allowed_product_by_params(self, **filters):
        """
        Collect the product fks that pass the Atomic KPI filters (used by the
        block-together KPIs), excluding whatever the template excludes.
        :param filters: Atomic KPI filters.
        :return: A Dictionary with the relevant products. E.g: {'product_fk': [1,2,3,4,5]}.
        """
        filtered_scif = self.calculate_availability(return_df=True, **filters)
        relevant_fks = filtered_scif[Consts.PRODUCT_FK].unique().tolist()
        return {Consts.PRODUCT_FK: relevant_fks}

    @kpi_runtime()
    def calculate_survey(self, **general_filters):
        """
        Calculate the result for the Survey KPI.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: 100 if the answer matches the target, 0 otherwise, or None
                 for answers that should be ignored.
        """
        kpi_filters = general_filters[Consts.KPI_FILTERS]
        if Consts.QUESTION_ID not in kpi_filters.keys():
            Log.warning(Consts.MISSING_QUESTION_LOG)
            return 0
        survey_question_id = kpi_filters.get(Consts.QUESTION_ID)
        # General filters returns output for filter_df basically so we need to adjust it here.
        if isinstance(survey_question_id, tuple):
            survey_question_id = survey_question_id[0]  # Get rid of the tuple
        if isinstance(survey_question_id, list):
            survey_question_id = int(
                survey_question_id[0])  # Get rid of the list
        target_answer = general_filters[Consts.TARGET]
        survey_answer = self.survey.get_survey_answer(
            (Consts.QUESTION_FK, survey_question_id))
        if survey_answer in Consts.SURVEY_ANSWERS_TO_IGNORE:
            return None
        if survey_answer:
            return 100 if survey_answer.strip() == target_answer else 0
        return 0

    @kpi_runtime()
    def calculate_availability(self, return_df=False, **general_filters):
        """
        Check availability by the given filters. When the KPI passes, the
        tested EAN codes are stored for the later "MIN 2 AVAILABILITY" KPI.
        :param return_df: If True, the function returns the filtered scene item facts, else, returns the score.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: See @param return_df.
        """
        condition = self.general_toolbox.get_filter_condition(
            self.scif, **general_filters[Consts.KPI_FILTERS])
        filtered_scif = self.scif[condition]
        if return_df:
            return filtered_scif
        if filtered_scif.empty:
            return 0
        tested_products = general_filters[Consts.KPI_FILTERS][
            Consts.EAN_CODE][0]
        self.passed_availability.append(tested_products)
        return 100

    @staticmethod
    def get_number_of_facings_per_product_dict(df, ignore_stack=False):
        """
        Map each product ean code to its (positive) number of facings.
        :param df: Pandas.DataFrame with 'product_ean_code' and 'facings' / 'facings_ign_stack' fields.
        :param ignore_stack: If True will use 'facings_ign_stack' field, else 'facings' field.
        :return: E.g: {ean_code1: 10, ean_code2: 5, ean_code3: 1...}
        """
        facings_column = Consts.FACINGS_IGN_STACK if ignore_stack else Consts.FACINGS
        relevant_df = df[[Consts.EAN_CODE, facings_column]].dropna()
        relevant_df = relevant_df[relevant_df[facings_column] > 0]
        return dict(
            zip(relevant_df[Consts.EAN_CODE], relevant_df[facings_column]))

    @kpi_runtime()
    def calculate_min_2_availability(self, **general_filters):
        """
        For every Availability Atomic that passed, check that its tested
        products have at least 2 facings in case of IGNORE STACKING.
        :param general_filters: A dictionary with the relevant KPI filters.
        :return: numerator result, denominator result and total_score
        """
        filtered_df = self.calculate_availability(return_df=True,
                                                  **general_filters)
        facings_counter = self.get_number_of_facings_per_product_dict(
            filtered_df, ignore_stack=True)
        score = 0
        for products in self.passed_availability:
            total_facings = sum([
                facings_counter[product] for product in products
                if product in facings_counter
            ])
            if total_facings > 1:
                score += 1
        denominator = len(self.passed_availability)
        total_score = (score /
                       float(denominator)) * 100 if self.passed_availability else 0
        return score, denominator, total_score
class PSAPAC_SAND3ToolBox:
    """Session-level toolbox calculating the Gsk Japan KPIs (global + local)."""
    # Gsk Japan kpis

    # DEFAULT_TARGET = {ProductsConsts.BRAND_FK: [-1], 'shelves': ["1,2,3"], 'block_target': [80], 'brand_target': [100], 'position_target': [80]}

    def __init__(self, data_provider, output):
        """
        :param data_provider: Session data provider (scif, matches, store data, ...).
        :param output: Output object used by the generator/common for results.
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        # Session-level identifiers and data frames pulled from the data provider.
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        # Set-up workbook shipped next to this module; configures the
        # functional KPIs' parameters (scene types, stacking, includes...).
        self.set_up_template = pd.read_excel(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
            'gsk_set_up.xlsx'),
                                             sheet_name='Functional KPIs',
                                             keep_default_na=False)

        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)
        self.blocking_generator = Block(self.data_provider)
        self.assortment = self.gsk_generator.get_assortment_data_provider()
        self.store_info = self.data_provider['store_info']
        self.store_fk = self.data_provider[StoreInfoConsts.STORE_FK]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        # External targets (policies) that may apply to this store.
        self.targets = self.ps_data_provider.get_kpi_external_targets(
            key_fields=Consts.KEY_FIELDS, data_fields=Consts.DATA_FIELDS)
        self.own_manufacturer = self.get_manufacturer
        # Per-KPI set-up info; starts as NO_INFO and is filled from the
        # set-up template during main_calculation.
        self.set_up_data = {
            (Consts.PLN_BLOCK, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.POSITION_SCORE, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.ECAPS_FILTER_IDENT, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.PLN_MSL, Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            ("GSK_PLN_LSOS_SCORE", Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            (Consts.POSM, Const.KPI_TYPE_COLUMN): Const.NO_INFO
        }

    @property
    def get_manufacturer(self):
        return int(self.data_provider.own_manufacturer[
            self.data_provider.own_manufacturer['param_name'] ==
            'manufacturer_id']['param_value'].iloc[0])

    def main_calculation(self, *args, **kwargs):
        """
        Calculate and save all KPI results: first the global (Gsk generator)
        functions, then the local KPIs, then commit everything.
        """
        # global kpis: availability + linear SOS at several aggregation
        # levels, executed and saved in this fixed order.
        global_kpi_functions = (
            self.gsk_generator.availability_store_function,
            self.gsk_generator.availability_category_function,
            self.gsk_generator.availability_subcategory_function,
            self.gsk_generator.gsk_global_linear_sos_by_sub_category_function,
            self.gsk_generator.gsk_global_linear_sos_by_category_function,
            self.gsk_generator.gsk_global_linear_sos_whole_store_function,
        )
        for kpi_function in global_kpi_functions:
            self.common.save_json_to_new_tables(kpi_function())

        # # local kpis - read each KPI's configuration from the set-up file first.
        for kpi in Consts.KPI_DICT.keys():
            self.gsk_generator.tool_box.extract_data_set_up_file(
                kpi, self.set_up_data, Consts.KPI_DICT)

        self.common.save_json_to_new_tables(self.gsk_ecaps_kpis())

        self.get_store_target()  # choosing the policy
        if self.targets.empty:
            Log.warning('There is no target policy matching this store')
        else:
            self.common.save_json_to_new_tables(self.gsk_compliance())

        self.common.save_json_to_new_tables(self.gsk_pos_kpis())

        self.common.commit_results_data()
        return

    def position_shelf(self, brand_fk, policy, df):
        """
        Score the share of a brand's products that sit on the target shelves.
        :param brand_fk: pk of the brand to check.
        :param policy: dictionary that contains {
                       'shelves': "1 ,2 ,4 ,5" (string of shelf numbers separated
                                  by ',') or an already-split list of numbers,
                       'position_target': 80 (or any other percentage you want
                                          the score to reach)
                       }
        :param df: data frame that contains columns MatchesConsts.SHELF_NUMBER
                   and ProductsConsts.BRAND_FK.
        :return: tuple of (result, score, numerator, denominator, threshold)
                 result = number of the brand's products on the target shelves /
                          number of the brand's products,
                 score = 1 if result reaches the position target else 0,
                 numerator = number of the brand's products on the target shelves,
                 denominator = number of the brand's products,
                 threshold = the policy's position target.
        """
        if (Consts.SHELVES
                not in policy.keys()) or policy[Consts.SHELVES].empty:
            Log.warning(
                'This sessions have external targets but doesnt have value for shelves position'
            )
            return 0, 0, 0, 0, 0
        if isinstance(policy[Consts.SHELVES].iloc[0], list):
            shelf_from_bottom = [
                int(shelf) for shelf in policy[Consts.SHELVES].iloc[0]
            ]
        else:
            shelf_from_bottom = [
                int(shelf)
                for shelf in policy[Consts.SHELVES].iloc[0].split(",")
            ]

        threshold = policy[Consts.POSITION_TARGET].iloc[0]
        brand_df = df[df[ProductsConsts.BRAND_FK] == brand_fk]
        shelf_df = brand_df[brand_df[MatchesConsts.SHELF_NUMBER].isin(
            shelf_from_bottom)]
        numerator = shelf_df.shape[0]
        denominator = brand_df.shape[0]
        # Guard against ZeroDivisionError when the brand has no products in df.
        if not denominator:
            return 0, 0, 0, 0, threshold
        result = float(numerator) / float(denominator)
        score = 1 if (result * 100) >= threshold else 0
        return result, score, numerator, denominator, threshold

    def lsos_score(self, brand, policy):
        """
        Score a brand's linear share-of-shelf against the policy target,
        reusing the global whole-store linear SOS calculation.
        :param brand: pk of brand.
        :param policy: dictionary of {'brand_target': lsos number you want to reach}.
        :return: (result, score, target)
                 result: this brand's lsos,
                 score: result / brand_target,
                 target: the brand_target from the policy.
        """
        matches_with_products = pd.merge(
            self.match_product_in_scene,
            self.all_products[Const.PRODUCTS_COLUMNS],
            how='left',
            on=[MatchesConsts.PRODUCT_FK])
        df = pd.merge(self.scif[Const.SCIF_COLUMNS],
                      matches_with_products,
                      how='right',
                      right_on=[ScifConsts.SCENE_FK, ScifConsts.PRODUCT_FK],
                      left_on=[ScifConsts.SCENE_ID, ScifConsts.PRODUCT_FK])
        if df.empty:
            Log.warning('match_product_in_scene is empty ')
            return 0, 0, 0
        df = self.gsk_generator.tool_box.tests_by_template(
            'GSK_PLN_LSOS_SCORE', df, self.set_up_data)
        if df is None:
            Log.warning('match_product_in_scene is empty ')
            return 0, 0, 0
        result = self.gsk_generator.tool_box.calculate_sos(
            df, {ProductsConsts.BRAND_FK: brand}, {}, Const.LINEAR)[0]
        target = policy['brand_target'].iloc[0]
        score = float(result) / float(target)
        return result, score, target

    def brand_blocking(self, brand, policy):
        """
        Check whether the brand forms a block that satisfies the set-up file
        conditions and the policy's block target.
        :param brand: pk of brand.
        :param policy: dictionary of {'block_target': number you want to reach}.
        :return: (score, target, numerator, denominator); score is 1 when a
                 qualifying block exists, 0 otherwise.
        """
        templates = self.set_up_data[(Const.SCENE_TYPE, Consts.PLN_BLOCK)]
        # figure out which template name should I use
        template_name = {
            ScifConsts.TEMPLATE_NAME: templates
        } if templates else None
        # taking from params from set up  info
        stacking_param = bool(self.set_up_data[(Const.INCLUDE_STACKING,
                                                Consts.PLN_BLOCK)])
        population_parameters = {
            ProductsConsts.BRAND_FK: [brand],
            ProductsConsts.PRODUCT_TYPE: [ProductTypeConsts.SKU]
        }

        if self.set_up_data[(Const.INCLUDE_OTHERS, Consts.PLN_BLOCK)]:
            population_parameters[ProductsConsts.PRODUCT_TYPE].append(
                Const.OTHER)
        if self.set_up_data[(Const.INCLUDE_IRRELEVANT, Consts.PLN_BLOCK)]:
            population_parameters[ProductsConsts.PRODUCT_TYPE].append(
                Const.IRRELEVANT)
        if self.set_up_data[(Const.INCLUDE_EMPTY, Consts.PLN_BLOCK)]:
            population_parameters[ProductsConsts.PRODUCT_TYPE].append(
                Const.EMPTY)
            ignore_empty = False
        else:
            ignore_empty = True

        category_include = self.set_up_data[(Const.CATEGORY_INCLUDE,
                                             Consts.PLN_BLOCK)]
        if category_include:  # category_name
            population_parameters[ProductsConsts.CATEGORY] = category_include

        sub_category_include = self.set_up_data[(Const.SUB_CATEGORY_INCLUDE,
                                                 Consts.PLN_BLOCK)]
        if sub_category_include:  # sub_category_name
            population_parameters[
                ProductsConsts.SUB_CATEGORY] = sub_category_include

        # from Data file
        target = float(policy['block_target'].iloc[0]) / float(100)
        result = self.blocking_generator.network_x_block_together(
            location=template_name,
            population=population_parameters,
            additional={
                'minimum_block_ratio': target,
                'calculate_all_scenes': True,
                'ignore_empty': ignore_empty,
                'include_stacking': stacking_param,
                'check_vertical_horizontal': True,
                'minimum_facing_for_block': 1
            })
        result.sort_values('facing_percentage', ascending=False, inplace=True)
        score = 0 if result[result['is_block']].empty else 1
        numerator = 0 if result.empty else result['block_facings'].iloc[0]
        denominator = 0 if result.empty else result['total_facings'].iloc[0]

        return score, target, numerator, denominator

    def msl_assortment(self, kpi_fk, kpi_name):
        """
        Build the assortment product list for the given assortment KPI,
        enriched with product details and filtered by the set-up file.
        :param kpi_fk: name of level 3 assortment kpi.
        :param kpi_name: GSK_PLN_MSL_SCORE assortment, or GSK_ECAPS assortment.
        :return: DataFrame of the KPI's assortment products (availability +
                 product details), or None when there is no assortment.
        """
        lvl3_assort, filter_scif = self.gsk_generator.tool_box.get_assortment_filtered(
            self.set_up_data, kpi_name)
        if lvl3_assort is None or lvl3_assort.empty:
            return None
        kpi_assortment_fk = self.common.get_kpi_fk_by_kpi_type(kpi_fk)
        # general assortment
        kpi_results = lvl3_assort[lvl3_assort['kpi_fk_lvl3'] ==
                                  kpi_assortment_fk]
        kpi_results = pd.merge(kpi_results,
                               self.all_products[Const.PRODUCTS_COLUMNS],
                               how='left',
                               on=ProductsConsts.PRODUCT_FK)
        # Keep only "real" products - drop substitution products.
        return kpi_results[kpi_results[
            ProductsConsts.SUBSTITUTION_PRODUCT_FK].isnull()]

    def pln_ecaps_score(self, brand, assortment):
        """
        Calculate the ECAPS score of a brand and collect the per-product
        presence results.
        :param brand: pk of desired brand.
        :param assortment: data frame of assortment products of the kpi,
               product's availability and product details, filtered by set up.

        Besides the lvl2_assortment result, this also prepares level 3
        assortment product-presence result rows (returned in `results`).

        :return: numerator: how many products are available out of the granular groups,
                 denominator: how many products are in the assortment groups,
                 result: numerator / denominator (rounded to 4 digits),
                 results: list of dictionaries, each with one product's result details.
        """
        identifier_parent = self.common.get_dictionary(
            brand_fk=brand,
            kpi_fk=self.common.get_kpi_fk_by_kpi_type(Consts.ECAP_ALL_BRAND))
        results = []
        kpi_ecaps_product = self.common.get_kpi_fk_by_kpi_type(
            Consts.PRODUCT_PRESENCE)
        ecaps_assortment_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.PLN_ASSORTMENT_KPI)
        if assortment.empty:
            return 0, 0, 0, results
        # only assortment of desired brand
        brand_results = assortment[assortment[ProductsConsts.BRAND_FK] ==
                                   brand]
        for result in brand_results.itertuples():
            # A NaN in_store value or a row belonging to another assortment
            # KPI is reported as EXTRA; otherwise OOS/DISTRIBUTED by in_store.
            if (math.isnan(result.in_store)) | (result.kpi_fk_lvl3 !=
                                                ecaps_assortment_fk):
                score = self.gsk_generator.tool_box.result_value_pk(
                    Const.EXTRA)
                result_num = 1
            else:
                score = self.gsk_generator.tool_box.result_value_pk(Const.OOS) if result.in_store == 0 else \
                    self.gsk_generator.tool_box.result_value_pk(Const.DISTRIBUTED)
                result_num = result.in_store
            last_status = self.gsk_generator.tool_box.get_last_status(
                kpi_ecaps_product, result.product_fk)
            # score = result.in_store * 100
            results.append({
                'fk': kpi_ecaps_product,
                SessionResultsConsts.NUMERATOR_ID: result.product_fk,
                SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                SessionResultsConsts.DENOMINATOR_RESULT: 1,
                SessionResultsConsts.NUMERATOR_RESULT: result_num,
                SessionResultsConsts.RESULT: score,
                SessionResultsConsts.SCORE: last_status,
                'identifier_parent': identifier_parent,
                'identifier_result': 1,
                'should_enter': True
            })

        # The lvl2 aggregation needs 'total'/'passes' headers; add them once.
        if 'total' not in self.assortment.LVL2_HEADERS or 'passes' not in self.assortment.LVL2_HEADERS:
            self.assortment.LVL2_HEADERS.extend(['total', 'passes'])
        lvl2 = self.assortment.calculate_lvl2_assortment(brand_results)
        if lvl2.empty:
            return 0, 0, 0, results  # in case of no assortment return 0
        result = round(
            np.divide(float(lvl2.iloc[0].passes), float(lvl2.iloc[0].total)),
            4)
        return lvl2.iloc[0].passes, lvl2.iloc[0].total, result, results

    def pln_msl_summary(self, brand, assortment):
        """
        Calculate the MSL (assortment) summary for a single brand.

        :param brand: pk of the desired brand.
        :param assortment: DataFrame of the KPI's assortment products with
            availability and product details, already filtered by set up.
        :return: tuple (numerator, denominator, result, assortment_group_fk):
            how many assortment products were found, how many were expected,
            their ratio rounded to 4 digits, and the assortment group pk.
            All zeros when there is no assortment.
        """
        if assortment is None or assortment.empty:
            return 0, 0, 0, 0
        # Keep only the desired brand's assortment rows.
        brand_results = assortment[assortment[ProductsConsts.BRAND_FK] == brand]

        # Make sure the lvl2 aggregation exposes 'total' and 'passes' columns.
        headers = self.assortment.LVL2_HEADERS
        if 'total' not in headers or 'passes' not in headers:
            headers.extend(['total', 'passes'])

        lvl2 = self.assortment.calculate_lvl2_assortment(brand_results)
        if lvl2.empty:
            return 0, 0, 0, 0  # in case of no assortment return 0
        summary = lvl2.iloc[0]
        ratio = round(np.divide(float(summary.passes), float(summary.total)), 4)
        return summary.passes, summary.total, ratio, summary.assortment_group_fk

    def get_store_target(self):
        """
        Filter self.targets down to the policies relevant to this store visit,
        based on store attributes.

        For each (store attribute, target column) pair, keep only target rows
        whose column value matches the store's attribute or is empty.
        """
        parameters_dict = {StoreInfoConsts.STORE_NUMBER_1: 'store_number'}
        for store_param, target_param in parameters_dict.items():
            if target_param not in self.targets.columns:
                continue
            store_value = self.store_info[store_param][0]
            if store_value is None:
                # The store has no value for this attribute: policies that
                # demand a specific value can never match, so drop everything
                # unless all remaining policies leave the column empty.
                if not (self.targets.empty or
                        self.targets[self.targets[target_param] != ''].empty):
                    self.targets.drop(self.targets.index, inplace=True)
                # Bug fix: the original fell through here and called
                # .encode() on None, raising AttributeError.
                continue
            self.targets = self.targets[
                (self.targets[target_param] ==
                 store_value.encode(HelperConsts.UTF8))
                | (self.targets[target_param] == '')]

    def gsk_compliance(self):
        """
        Calculate a compliance score per brand, composed of four equally
        weighted (25% each) components: block score, position score, LSOS
        score and MSL (assortment) score.

        Also writes a compliance summary KPI - the average of the brands'
        compliance scores.

        :return: list of KPI result dictionaries.
        """
        results_df = []
        df = self.scif
        # kpis
        kpi_block_fk = self.common.get_kpi_fk_by_kpi_type(Consts.PLN_BLOCK)
        kpi_position_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.POSITION_SCORE)
        kpi_lsos_fk = self.common.get_kpi_fk_by_kpi_type(Consts.PLN_LSOS)
        kpi_msl_fk = self.common.get_kpi_fk_by_kpi_type(Consts.PLN_MSL)
        kpi_compliance_brands_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.COMPLIANCE_ALL_BRANDS)
        kpi_compliance_summary_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.COMPLIANCE_SUMMARY)
        identifier_compliance_summary = self.common.get_dictionary(
            kpi_fk=kpi_compliance_summary_fk)

        # Component weights: each of the four components contributes 25%.
        block_target = 0.25
        posit_target = 0.25
        lsos_target = 0.25
        msl_target = 0.25

        total_brand_score = 0
        counter_brands = 0

        # assortment_lvl3 msl df initialize
        self.gsk_generator.tool_box.extract_data_set_up_file(
            Consts.PLN_MSL, self.set_up_data, Consts.KPI_DICT)
        assortment_msl = self.msl_assortment(Const.DISTRIBUTION,
                                             Consts.PLN_MSL)

        # Build the data frame used to find shelf position: scene matches
        # joined with product details, enriched with scif columns.
        df_position_score = pd.merge(self.match_product_in_scene,
                                     self.all_products,
                                     on=ProductsConsts.PRODUCT_FK)
        df_position_score = pd.merge(
            self.scif[Const.SCIF_COLUMNS],
            df_position_score,
            how='right',
            right_on=[ScifConsts.SCENE_FK, ProductsConsts.PRODUCT_FK],
            left_on=[ScifConsts.SCENE_ID, ScifConsts.PRODUCT_FK])
        df_position_score = self.gsk_generator.tool_box.tests_by_template(
            Consts.POSITION_SCORE, df_position_score, self.set_up_data)

        # Drop stacked facings unless the template asks to include stacking.
        if not self.set_up_data[(Const.INCLUDE_STACKING,
                                 Consts.POSITION_SCORE)]:
            df_position_score = df_position_score if df_position_score is None else df_position_score[
                df_position_score[MatchesConsts.STACKING_LAYER] == 1]

        # calculate all brands if template doesnt require specific brand else only for specific brands
        template_brands = self.set_up_data[(Const.BRANDS_INCLUDE,
                                            Consts.PLN_BLOCK)]
        brands = df[df[ProductsConsts.BRAND_NAME].isin(template_brands)][ProductsConsts.BRAND_FK].unique() if \
            template_brands else df[ProductsConsts.BRAND_FK].dropna().unique()

        for brand in brands:
            policy = self.targets[self.targets[ProductsConsts.BRAND_FK] ==
                                  brand]
            if policy.empty:
                # NOTE(review): this aborts ALL remaining brands rather than
                # skipping just this one (`continue`) - verify intentional.
                Log.warning('There is no target policy matching brand'
                            )  # adding brand name
                return results_df
            identifier_parent = self.common.get_dictionary(
                brand_fk=brand, kpi_fk=kpi_compliance_brands_fk)
            # msl_kpi
            msl_numerator, msl_denominator, msl_result, msl_assortment_group = self.pln_msl_summary(
                brand, assortment_msl)
            msl_score = msl_result * msl_target
            results_df.append({
                'fk': kpi_msl_fk,
                SessionResultsConsts.NUMERATOR_ID: brand,
                SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                SessionResultsConsts.DENOMINATOR_RESULT: msl_denominator,
                SessionResultsConsts.NUMERATOR_RESULT: msl_numerator,
                SessionResultsConsts.RESULT: msl_result,
                SessionResultsConsts.SCORE: msl_score,
                SessionResultsConsts.TARGET: msl_target,
                SessionResultsConsts.CONTEXT_ID: msl_assortment_group,
                'identifier_parent': identifier_parent,
                'should_enter': True
            })
            # lsos kpi
            lsos_numerator, lsos_result, lsos_denominator = self.lsos_score(
                brand, policy)
            # LSOS result is capped at 1 (100%).
            lsos_result = 1 if lsos_result > 1 else lsos_result
            lsos_score = lsos_result * lsos_target
            results_df.append({
                'fk': kpi_lsos_fk,
                SessionResultsConsts.NUMERATOR_ID: brand,
                SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                SessionResultsConsts.DENOMINATOR_RESULT: lsos_denominator,
                SessionResultsConsts.NUMERATOR_RESULT: lsos_numerator,
                SessionResultsConsts.RESULT: lsos_result,
                SessionResultsConsts.SCORE: lsos_score,
                SessionResultsConsts.TARGET: lsos_target,
                'identifier_parent': identifier_parent,
                SessionResultsConsts.WEIGHT: lsos_denominator,
                'should_enter': True
            })
            # block_score
            block_result, block_benchmark, numerator_block, block_denominator = self.brand_blocking(
                brand, policy)
            block_score = round(block_result * block_target, 4)
            results_df.append({
                'fk':
                kpi_block_fk,
                SessionResultsConsts.NUMERATOR_ID:
                brand,
                SessionResultsConsts.DENOMINATOR_ID:
                self.store_fk,
                SessionResultsConsts.DENOMINATOR_RESULT:
                block_denominator,
                SessionResultsConsts.NUMERATOR_RESULT:
                numerator_block,
                SessionResultsConsts.RESULT:
                block_result,
                SessionResultsConsts.SCORE:
                block_score,
                SessionResultsConsts.TARGET:
                block_target,
                'identifier_parent':
                identifier_parent,
                'should_enter':
                True,
                SessionResultsConsts.WEIGHT: (block_benchmark * 100)
            })

            # position score (only when the position data frame exists)
            if df_position_score is not None:
                position_result, position_score, position_num, position_den, position_benchmark = self.position_shelf(
                    brand, policy, df_position_score)
            else:
                position_result, position_score, position_num, position_den, position_benchmark = 0, 0, 0, 0, 0
            position_score = round(position_score * posit_target, 4)
            results_df.append({
                'fk': kpi_position_fk,
                SessionResultsConsts.NUMERATOR_ID: brand,
                SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                SessionResultsConsts.DENOMINATOR_RESULT: position_den,
                SessionResultsConsts.NUMERATOR_RESULT: position_num,
                SessionResultsConsts.RESULT: position_result,
                SessionResultsConsts.SCORE: position_score,
                SessionResultsConsts.TARGET: posit_target,
                'identifier_parent': identifier_parent,
                'should_enter': True,
                SessionResultsConsts.WEIGHT: position_benchmark
            })

            # compliance score per brand: sum of the four weighted components
            compliance_score = round(
                position_score + block_score + lsos_score + msl_score, 4)
            results_df.append({
                'fk': kpi_compliance_brands_fk,
                SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer,
                SessionResultsConsts.DENOMINATOR_ID: brand,
                SessionResultsConsts.DENOMINATOR_RESULT: 1,
                SessionResultsConsts.NUMERATOR_RESULT: compliance_score,
                SessionResultsConsts.RESULT: compliance_score,
                SessionResultsConsts.SCORE: compliance_score,
                'identifier_parent': identifier_compliance_summary,
                'identifier_result': identifier_parent,
                'should_enter': True
            })

            # counter and sum updates
            total_brand_score = round(total_brand_score + compliance_score, 4)
            counter_brands = counter_brands + 1
        if counter_brands == 0:
            return results_df
        # compliance summary: average of the brand compliance scores
        average_brand_score = round(total_brand_score / counter_brands, 4)
        results_df.append({
            'fk': kpi_compliance_summary_fk,
            SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer,
            SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
            SessionResultsConsts.DENOMINATOR_RESULT: counter_brands,
            SessionResultsConsts.NUMERATOR_RESULT: total_brand_score,
            SessionResultsConsts.RESULT: average_brand_score,
            SessionResultsConsts.SCORE: average_brand_score,
            'identifier_result': identifier_compliance_summary
        })

        return results_df

    def gsk_ecaps_kpis(self):
        """
        Calculate the ECAPS score for each brand, plus an all-brand summary
        score (the average of the brand scores).

        :return: list of KPI result dictionaries.
        """
        results = []
        kpi_ecaps_brands_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.ECAP_ALL_BRAND)
        kpi_ecaps_summary_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.ECAP_SUMMARY)
        identifier_ecaps_summary = self.common.get_dictionary(
            kpi_fk=kpi_ecaps_summary_fk)
        total_brand_score = 0
        assortment_display = self.msl_assortment(Consts.PLN_ASSORTMENT_KPI,
                                                 Consts.ECAPS_FILTER_IDENT)
        if assortment_display is None or assortment_display.empty:
            return results

        # Restrict to template brands when the template names any; otherwise
        # calculate for every brand present in the assortment.
        template_brands = self.set_up_data[(Const.BRANDS_INCLUDE,
                                            Consts.ECAPS_FILTER_IDENT)]
        if template_brands:
            brands = assortment_display[
                assortment_display[ProductsConsts.BRAND_NAME].isin(template_brands)][
                ProductsConsts.BRAND_FK].unique()
        else:
            brands = assortment_display[ProductsConsts.BRAND_FK].dropna().unique()

        for brand in brands:
            numerator_res, denominator_res, result, product_presence = \
                self.pln_ecaps_score(brand, assortment_display)
            results.extend(product_presence)
            identifier_all_brand = self.common.get_dictionary(
                brand_fk=brand,
                kpi_fk=self.common.get_kpi_fk_by_kpi_type(
                    Consts.ECAP_ALL_BRAND))
            results.append({
                'fk': kpi_ecaps_brands_fk,
                SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer,
                SessionResultsConsts.DENOMINATOR_ID: brand,
                SessionResultsConsts.DENOMINATOR_RESULT: denominator_res,
                SessionResultsConsts.NUMERATOR_RESULT: numerator_res,
                SessionResultsConsts.RESULT: result,
                SessionResultsConsts.SCORE: result,
                'identifier_parent': identifier_ecaps_summary,
                'identifier_result': identifier_all_brand,
                'should_enter': True
            })
            total_brand_score += result

        # Don't write a summary when no brands were relevant to the template.
        if len(brands) > 0:
            result_summary = round(total_brand_score / len(brands), 4)
            results.append({
                'fk': kpi_ecaps_summary_fk,
                SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer,
                SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                SessionResultsConsts.DENOMINATOR_RESULT: len(brands),
                SessionResultsConsts.NUMERATOR_RESULT: total_brand_score,
                SessionResultsConsts.RESULT: result_summary,
                SessionResultsConsts.SCORE: result_summary,
                'identifier_result': identifier_ecaps_summary
            })
        return results

    def gsk_pos_kpis(self):
        """
        Calculate POSM distribution on three levels: store, brand and SKU.

        :return: list of KPI result dictionaries.
        """
        results = []
        OOS = 1
        DISTRIBUTED = 2

        self.gsk_generator.tool_box.extract_data_set_up_file(
            Consts.POSM, self.set_up_data, Consts.KPI_DICT)
        assortment_pos = self.msl_assortment(Consts.POSM_SKU, Consts.POSM)

        store_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GSK_POS_DISTRIBUTION_STORE)
        brand_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GSK_POS_DISTRIBUTION_BRAND)
        sku_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.GSK_POS_DISTRIBUTION_SKU)

        if assortment_pos is None or assortment_pos.empty:
            Log.info(
                "Assortment df is empty. GSK_POS_DISTRIBUTION Kpis are not calculated"
            )
            return results

        # Store-level distribution: share of assortment POSM items in store.
        assortment_pos['in_store'] = assortment_pos['in_store'].astype('int')
        Log.info(
            "Dropping duplicate product_fks accros multiple-granular groups")
        Log.info("Before : {}".format(len(assortment_pos)))
        assortment_pos = assortment_pos.drop_duplicates(
            subset=[ProductsConsts.PRODUCT_FK])
        Log.info("After : {}".format(len(assortment_pos)))

        store_num = len(assortment_pos[assortment_pos['in_store'] == 1])
        store_den = len(assortment_pos)
        store_result = round(store_num / float(store_den),
                             4) if store_den != 0 else 0

        results.append({
            'fk': store_kpi_fk,
            SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer,
            SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
            SessionResultsConsts.NUMERATOR_RESULT: store_num,
            SessionResultsConsts.DENOMINATOR_RESULT: store_den,
            SessionResultsConsts.RESULT: store_result,
            SessionResultsConsts.SCORE: store_result,
            'identifier_result': "Gsk_Pos_Distribution_Store",
            'should_enter': True
        })

        # Brand-level distribution, with SKU-level rows nested under each brand.
        for brand, brand_rows in assortment_pos.groupby(
                [ProductsConsts.BRAND_FK]):
            brand_num = len(brand_rows[brand_rows['in_store'] == 1])
            brand_den = len(brand_rows)
            brand_result = round(brand_num / float(brand_den),
                                 4) if brand_den != 0 else 0
            brand_identifier = "Gsk_Pos_Distribution_Brand_" + str(int(brand))

            results.append({
                'fk': brand_kpi_fk,
                SessionResultsConsts.NUMERATOR_ID: int(brand),
                SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                SessionResultsConsts.NUMERATOR_RESULT: brand_num,
                SessionResultsConsts.DENOMINATOR_RESULT: brand_den,
                SessionResultsConsts.RESULT: brand_result,
                SessionResultsConsts.SCORE: brand_result,
                'identifier_parent': "Gsk_Pos_Distribution_Store",
                'identifier_result': brand_identifier,
                'should_enter': True
            })

            for _, sku_row in brand_rows.iterrows():
                product_fk = sku_row[ProductsConsts.PRODUCT_FK]
                sku_result = 1 if int(sku_row['in_store']) == 1 else 0
                sku_status = DISTRIBUTED if sku_result == 1 else OOS
                # Previous session's status for this product; stored in SCORE.
                last_status = self.gsk_generator.tool_box.get_last_status(
                    sku_kpi_fk, product_fk)

                results.append({
                    'fk': sku_kpi_fk,
                    SessionResultsConsts.NUMERATOR_ID: product_fk,
                    SessionResultsConsts.DENOMINATOR_ID: self.store_fk,
                    SessionResultsConsts.NUMERATOR_RESULT: sku_result,
                    SessionResultsConsts.DENOMINATOR_RESULT: 1,
                    SessionResultsConsts.RESULT: sku_status,
                    SessionResultsConsts.SCORE: last_status,
                    'identifier_parent': brand_identifier,
                    'identifier_result':
                    "Gsk_Pos_Distribution_SKU_" + str(int(product_fk)),
                    'should_enter': True
                })

        return results
Example #17
0
class TysonToolBox:
    def __init__(self, data_provider, output):
        """Set up session data, providers and the merged scene-product frame."""
        self.data_provider = data_provider
        self.output = output
        self.common = Common(data_provider)
        self.ps_data_provider = PsDataProvider(data_provider, output)
        # Session / store context
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        # Product data (all_products must exist before manufacturer lookup)
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.manufacturer_id = self.get_manufacturer_id_from_manufacturer_name(Const.MANUFACTURER)
        # Scene data
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.template_info = self.data_provider[Data.TEMPLATES]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.block = Block(self.data_provider, self.output, common=self.common)

        # Matches enriched with product and scene details, trimmed to COLUMNS.
        with_products = self.match_product_in_scene.merge(
            self.products, on='product_fk', suffixes=['', '_p'])
        self.mpis = with_products.merge(
            self.scene_info, on='scene_fk', suffixes=['', '_s'])[COLUMNS]

    def main_calculation(self):
        """Run all Tyson KPI calculations for the session."""
        for anchor, target in (('scrambles', 'breakfast meat'),
                               ('scrambles', 'irrelevant'),
                               ('ore ida', 'breakfast meat'),
                               ('ore ida', 'irrelevant')):
            self.calculate_shelf_neighbors(anchor, target)
        self.calculate_max_block_adjacency('scrambles', 'ore ida')
        self.calculate_adjacent_bay('breakfast meat', 'irrelevant')

    def calculate_shelf_neighbors(self, anchor, target):
        """
        Check whether any `anchor` products share a scene and bay with any
        `target` products, and write the result to the DB.

        :param anchor: Key referencing the anchor product.
        :param target: Key referencing the products being compared.
        """
        kpi_fk = self.common.get_kpi_fk_by_kpi_name(Const.KPIs[(anchor, target)])
        brand_id = self.get_brand_id_from_brand_name(Const.BRANDs.get(anchor))

        # 'irrelevant' targets are matched product-to-product; others by category.
        if target == 'irrelevant':
            result = self.neighbors(anchor, target, target_type='product')
        else:
            result = self.neighbors(anchor, target)

        self.common.write_to_db_result(
            fk=kpi_fk,
            numerator_id=brand_id,
            numerator_result=result,
            denominator_id=self.store_id,
            denominator_result=1,
            result=result
        )

    def calculate_max_block_adjacency(self, anchor, target):
        """
        Calculates the Max Block Adjacency for `anchor` and `target` products.

        Writes 1 when the two max blocks sit in the same scene and any pair of
        their nodes is connected in that scene's adjacency graph, either
        directly or through a shared empty facing; otherwise writes 0.

        :param anchor: Key for customer products.
        :param target: Key for competitor products.
        """

        # NOTE(review): KPI key is hard-coded to ('scrambles', 'ore ida') and
        # ignores the `anchor`/`target` arguments - verify intentional.
        kpi = Const.KPIs[('scrambles', 'ore ida')]
        kpi_id = self.common.get_kpi_fk_by_kpi_name(kpi)
        anchor_max_block, anchor_scene = self.get_max_block_from_products(Const.PRODUCTS[anchor])
        target_max_block, target_scene = self.get_max_block_from_products(Const.PRODUCTS[target])

        result = 0
        if anchor_max_block and target_max_block and (anchor_scene == target_scene):
            # Candidate adjacencies: every (anchor node, target node) pair.
            possible_adjacencies = itertools.product(anchor_max_block.nodes, target_max_block.nodes)
            adj_graph = self.block.adj_graphs_by_scene
            # Edges of the adjacency graph for the anchor scene; graph keys are
            # assumed to embed the scene id as a substring - TODO confirm.
            directed_edges = [list(val.edges) for key, val in adj_graph.items() if str(anchor_scene) in key][0]
            # Treat the graph as undirected by adding any missing reverse edges.
            complimentary_edges = [edge[::-1] for edge in directed_edges if edge[::-1] not in directed_edges]
            all_edges = directed_edges + complimentary_edges
            result = int(any(True for edge in possible_adjacencies if edge in all_edges))

            if not result:
                # Fallback: the blocks may be separated only by empty facings
                # (product_fk == 0).  Count them adjacent when some empty facing
                # neighbors both an anchor node and a target node.
                empty_matches = self.match_product_in_scene[
                    (self.match_product_in_scene['product_fk'] == 0)
                    & (self.match_product_in_scene['scene_fk'] == anchor_scene)]['scene_match_fk']
                empty_edges = {match: [edge[1] for edge in all_edges if edge[0] == match] for match in empty_matches}
                result = int(any([True for _, val in empty_edges.items()
                                  if any([True for product in anchor_max_block.nodes if product in val])
                                  and any([True for product in target_max_block.nodes if product in val])]))

        self.common.write_to_db_result(
            fk=kpi_id,
            numerator_id=self.manufacturer_id,
            numerator_result=result,
            denominator_id=self.store_id,
            denominator_result=1,
            result=result
        )

    def calculate_adjacent_bay(self, anchor, target):
        """
        Check whether any `anchor` (category) products share a scene and the
        same or an adjacent bay with any `target` products; write the result.

        :param anchor: Key referencing the anchor category.
        :param target: Key referencing the products being compared.
        """
        kpi_fk = self.common.get_kpi_fk_by_kpi_name(Const.KPIs.get((anchor, target)))
        result = self.neighbors(anchor, target, anchor_type='category',
                                target_type='product', same_bay=False)
        self.common.write_to_db_result(
            fk=kpi_fk,
            numerator_id=self.manufacturer_id,
            numerator_result=result,
            denominator_id=self.store_id,
            denominator_result=1,
            result=result
        )

    def neighbors(self, anchor, target, anchor_type='product', target_type='category', same_bay=True):
        """
        Determine whether any of the products in `anchor` and `target` are
            in the same scene and same bay (if `same_bay` is True)
        or  in the same scene and same or adjacent bay (if `same_bay` is False).

        :param anchor: Key into Const.PRODUCTS / Const.CATEGORIES for the anchor side.
        :param target: Key into Const.PRODUCTS / Const.CATEGORIES for the target side.
        :param anchor_type: 'product' or 'category' - id space `anchor` refers to.
        :param target_type: 'category' or 'product' - id space `target` refers to.
        :param same_bay: Indicates whether to count the same bay or also adjacent bays.
        :return: Returns 1 if any products are neighbors, else 0.
        """

        anchor_ids = None
        target_ids = None

        # Resolve keys to fk id lists in the requested id space.
        if anchor_type == 'product':
            anchor_ids = self.get_product_id_from_product_name(Const.PRODUCTS[anchor])
        elif anchor_type == 'category':
            anchor_ids = self.get_category_id_from_category_name(Const.CATEGORIES[anchor])

        if target_type == 'category':
            target_ids = self.get_category_id_from_category_name(Const.CATEGORIES[target])
        elif target_type == 'product':
            target_ids = self.get_product_id_from_product_name(Const.PRODUCTS[target])

        anchor_products = self.filter_df(self.mpis, anchor_type + '_fk', anchor_ids).drop_duplicates()
        target_products = self.filter_df(self.mpis, target_type + '_fk', target_ids).drop_duplicates()

        if same_bay:
            # Same scene AND same bay: the inner join is non-empty exactly
            # when such a pair exists.
            neighbors = anchor_products.merge(target_products, how='inner', on=['scene_fk', 'bay_number'])
        else:
            # Best effort: attach stitched probe groups so bay adjacency is
            # only counted within the same stitched photo group.
            try:
                scenes = pd.concat([anchor_products['scene_fk'], target_products['scene_fk']]).unique()
                probe_groups = pd.DataFrame()
                for scene in scenes:
                    probe_groups = pd.concat([probe_groups, self.get_probe_groups(scene)])
                anchor_products = anchor_products.merge(probe_groups, on=['scene_fk', 'product_fk'])
                target_products = target_products.merge(probe_groups, on=['scene_fk', 'product_fk'])
            except Exception:
                # Deliberately non-fatal: without probe groups, bays are still compared.
                Log.warning("Probe Group query failed.")

            # Pair every anchor row with every target row in the same scene,
            # then keep pairs whose bays are identical or adjacent (diff < 2)
            # and, when the group_id column exists, share a probe group.
            scene_neighbors = anchor_products.merge(target_products, on=['scene_fk'], suffixes=['', '_y'])
            neighbors = scene_neighbors.apply(
                lambda row: 1 if abs(int(row['bay_number']) - int(row['bay_number_y'])) < 2
                and ('group_id' not in scene_neighbors.columns or row['group_id'] == row['group_id_y'])
                else np.nan,
                axis='columns'
            ).dropna()

        return int(not neighbors.empty)

    def get_brand_id_from_brand_name(self, brand_name):
        # return self.all_products.set_index(['brand_name']).loc[brand_name, 'brand_fk'].iloc[0]
        # return self.all_products.loc[self.all_products['brand_name'] == brand_name, 'brand_fk'].values[0]
        return self.all_products[self.all_products['brand_name'] == brand_name].iloc[0].at['brand_fk']

    def get_product_id_from_product_name(self, product_name):
        return self.all_products.set_index(['product_english_name']).loc[product_name, 'product_fk'].unique()

    def get_category_id_from_category_name(self, category_name):
        return self.all_products.set_index(['category']).loc[category_name, 'category_fk'].unique()

    def get_manufacturer_id_from_manufacturer_name(self, manufacturer_name):
        return self.all_products.loc[self.all_products['manufacturer_name'] == manufacturer_name,
                                     'manufacturer_fk'].values[0]

    @staticmethod
    def filter_df(df, column, values):
        """
        Returns a subset of `df` whose values are in `values`.

        :param df: DataFrame to filter
        :param column: Column name to filter on
        :param values: Values list to filter by
        :return: The filtered DataFrame
        """

        filtered = None
        if hasattr(values, '__iter__'):
            filtered = df[df[column].isin(values)]
        elif isinstance(values, str):
            filtered = df[df[column] == values]
        return filtered

    def get_max_block_from_products(self, products):
        """
        Get the max block based on a list of product names.

        :param products: List of product names.
        :return: (Max block graph or None, scene_fk or None).
        """
        product_fks = self.get_product_id_from_product_name(products)
        blocks = self.block.network_x_block_together(
            {'product_fk': product_fks},
            additional={
                'calculate_all_scenes': True,
                'use_masking_only': True,
                'minimum_facing_for_block': 2,
            }
        )
        # Best block first: most facings, then highest facing percentage.
        blocks.sort_values(by=['block_facings', 'facing_percentage'],
                           ascending=False, inplace=True)

        if blocks.empty or not any(blocks['is_block']):
            return None, None
        return blocks['cluster'].iloc[0], blocks['scene_fk'].iloc[0]

    def get_probe_groups(self, scene):
        """
        Retrieves from probedata.stitching_probe_info the probe groups for all
        products in `scene`.

        :param scene: scene_fk to query.
        :return: DataFrame with product_fk, group_id and scene_fk columns.
        """
        sql = """
            SELECT DISTINCT mpip.product_fk, spi.group_id, ssi.scene_fk
                FROM probedata.stitching_probe_info AS spi
                LEFT JOIN probedata.stitching_scene_info AS ssi ON ssi.pk = spi.stitching_scene_info_fk
                LEFT JOIN probedata.match_product_in_probe AS mpip ON mpip.probe_fk = spi.probe_fk
                WHERE ssi.scene_fk = {};
        """.format(scene)
        connection = self.ps_data_provider.rds_conn.db
        return pd.read_sql_query(sql, connection)

    def commit_results(self):
        """Flush all accumulated KPI results to the database."""
        self.common.commit_results_data()