示例#1
0
    def __init__(self, data_provider, output, common=None):
        """Cache session dataframes and helper toolboxes used by the KPI calculations.

        :param data_provider: engine data provider exposing the session's dataframes.
        :param output: output handler supplied by the calculation engine.
        :param common: accepted for interface compatibility but ignored; a fresh
            Common instance is always built from the data provider.
        """
        self.output = output
        self.data_provider = data_provider
        # self.common = common
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.templates = self.data_provider[Data.TEMPLATES]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        # Fall back to the session-info store_fk when the provider has none.
        self.store_id = self.data_provider[Data.STORE_FK] if self.data_provider[Data.STORE_FK] is not None \
                                                            else self.session_info['store_fk'].values[0]
        self.all_templates = self.data_provider[Data.ALL_TEMPLATES]
        self.store_type = self.data_provider.store_type
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]

        # Attach each probe match to its probe group (left merge keeps all matches).
        self.probe_groups = self.get_probe_group()
        self.match_product_in_scene = self.match_product_in_scene.merge(
            self.probe_groups, on='probe_match_fk', how='left')
        # A scene is considered "solid" when there is at most one probe group.
        self.is_solid_scene = True if len(self.probe_groups['probe_group_id'].
                                          unique().tolist()) <= 1 else False
        self.toolbox = GENERALToolBox(self.data_provider)
        self.commontools = PEPSICOUKCommonToolBox(self.data_provider,
                                                  self.rds_conn)

        self.custom_entities = self.commontools.custom_entities
        self.on_display_products = self.commontools.on_display_products
        self.exclusion_template = self.commontools.exclusion_template
        self.filtered_scif = self.commontools.filtered_scif
        self.filtered_matches = self.commontools.filtered_matches
        # NOTE: excluded matches are computed BEFORE probe groups are merged below.
        self.excluded_matches = self.compare_matches()
        self.filtered_matches = self.filtered_matches.merge(
            self.probe_groups, on='probe_match_fk', how='left')

        self.scene_bay_shelf_product = self.commontools.scene_bay_shelf_product
        self.external_targets = self.commontools.external_targets
        # manufacturer_fk of the first product whose manufacturer matches self.PEPSICO.
        self.own_manuf_fk = self.all_products[
            self.all_products['manufacturer_name'] ==
            self.PEPSICO]['manufacturer_fk'].values[0]
        self.block = Block(self.data_provider,
                           custom_scif=self.filtered_scif,
                           custom_matches=self.filtered_matches)
        self.adjacency = Adjancency(self.data_provider,
                                    custom_scif=self.filtered_scif,
                                    custom_matches=self.filtered_matches)
        # Accumulators filled in by the individual KPI calculations.
        self.block_results = pd.DataFrame(columns=['Group Name', 'Score'])
        self.kpi_results = pd.DataFrame(
            columns=['kpi_fk', 'numerator', 'denominator', 'result', 'score'])
        self.passed_blocks = {}
示例#2
0
 def __init__(self, data_provider, output):
     """Cache session dataframes, load the KPI template workbook and build helpers.

     :param data_provider: engine data provider exposing the session's dataframes.
     :param output: output handler supplied by the calculation engine.
     """
     self.output = output
     self.data_provider = data_provider
     self.common = Common(self.data_provider)
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.template_info = self.data_provider.all_templates
     self.store_id = self.data_provider[Data.STORE_FK]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     # NOTE(review): duplicate of the Data.MATCHES assignment above.
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     # Matches enriched with product, scene and template attributes.
     self.mpis = self.match_product_in_scene.merge(self.products, on='product_fk', suffixes=['', '_p']) \
         .merge(self.scene_info, on='scene_fk', suffixes=['', '_s']) \
         .merge(self.template_info, on='template_fk', suffixes=['', '_t'])
     self.rds_conn = PSProjectConnector(self.project_name,
                                        DbUsers.CalculationEng)
     self.custom_entity_data = self.get_custom_entity_data()
     self.kpi_static_data = self.common.get_kpi_static_data()
     self.kpi_results_queries = []
     self.templates = {}
     # Load every configured sheet of the template workbook.
     # NOTE(review): `sheetname` was replaced by `sheet_name` in pandas 0.21+
     # and removed later - confirm the pinned pandas version before upgrading.
     for sheet in Const.SHEETS:
         self.templates[sheet] = pd.read_excel(TEMPLATE_PATH,
                                               sheetname=sheet).fillna('')
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.store_type = self.store_info['store_type'].iloc[0]
     # main_template = self.templates[Const.KPIS]
     # self.templates[Const.KPIS] = main_template[main_template[Const.STORE_TYPE] == self.store_type]
     self.block = Block(self.data_provider, self.output, common=self.common)
     self.adjacency = Adjancency(self.data_provider,
                                 self.output,
                                 common=self.common)
     self.ignore_stacking = False
     self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
 def __init__(self, data_provider, output):
     """Cache session dataframes and parse the per-KPI-family template sheets.

     :param data_provider: engine data provider exposing the session's dataframes.
     :param output: output handler supplied by the calculation engine.
     """
     self.output = output
     self.data_provider = data_provider
     self.common = Common(self.data_provider)
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     # NOTE(review): duplicate of the Data.PRODUCTS assignment above.
     self.products = self.data_provider[Data.PRODUCTS]
     self.templates = self.data_provider.all_templates
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.rds_conn = ProjectConnector(self.project_name,
                                      DbUsers.CalculationEng)
     self.kpi_static_data = self.common.get_kpi_static_data()
     self.kpi_sub_brand_data = pd.read_sql_query(self.get_sub_brand_data(),
                                                 self.rds_conn.db)
     self.kpi_results_queries = []
     # One parsed sheet per KPI family from the template workbook.
     self.Presence_template = parse_template(TEMPLATE_PATH, "Presence")
     self.BaseMeasure_template = parse_template(TEMPLATE_PATH,
                                                "Base Measurement")
     self.Anchor_template = parse_template(TEMPLATE_PATH, "Anchor")
     self.Blocking_template = parse_template(TEMPLATE_PATH, "Blocking")
     self.Adjaceny_template = parse_template(TEMPLATE_PATH, "Adjacency")
     self.Eye_Level_template = parse_template(TEMPLATE_PATH, "Eye Level")
     self.eye_level_definition = parse_template(TEMPLATE_PATH, "Shelves")
     self.ignore_stacking = False
     self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
     self.availability = Availability(self.data_provider)
     self.blocking_calc = Block(self.data_provider)
     # Matches enriched with product, scene and template attributes.
     self.mpis = self.match_product_in_scene.merge(self.products, on='product_fk', suffixes=['', '_p']) \
         .merge(self.scene_info, on='scene_fk', suffixes=['', '_s']) \
         .merge(self.templates, on='template_fk', suffixes=['', '_t'])
示例#4
0
class ProductBlockingKpi(UnifiedCalculationsScript):
    """Scene-level KPI: checks whether each configured product group forms a
    block, and records the block's orientation as the score."""

    def __init__(self, data_provider, config_params=None, **kwargs):
        super(ProductBlockingKpi, self).__init__(data_provider, config_params=config_params, **kwargs)
        self.util = PepsicoUtil(None, data_provider)
        # Block calculator restricted to the util's filtered scif/matches.
        self.block = Block(self.data_provider, custom_scif=self.util.filtered_scif, custom_matches=self.util.filtered_matches)

    def kpi_type(self):
        pass

    def calculate(self):
        # Nothing to calculate when every match was filtered out.
        if not self.util.filtered_matches.empty:
            self.calculate_product_blocking()

    def calculate_product_blocking(self):
        """Evaluate one block KPI per PRODUCT_BLOCKING external-target row.

        For each target group: run the networkx block calculation with the
        group's filters, write a per-scene result (yes/no result plus the
        orientation of the best block as the score), and stash the outcome
        for the dependent adjacency KPI.
        """
        external_targets = self.util.all_targets_unpacked[self.util.all_targets_unpacked['type'] == self.util.PRODUCT_BLOCKING]
        additional_block_params = {'check_vertical_horizontal': True, 'minimum_facing_for_block': 3,
                                   'include_stacking': True,
                                   'allowed_products_filters': {'product_type': ['Empty']}}
        kpi_fk = self.util.common.get_kpi_fk_by_kpi_type(self.util.PRODUCT_BLOCKING)

        for i, row in external_targets.iterrows():
            group_fk = self.util.custom_entities[self.util.custom_entities['name'] == row['Group Name']]['pk'].values[0]
            filters = self.util.get_block_and_adjacency_filters(row)
            target = row['Target']
            # The block must cover at least Target% of facings to pass.
            additional_block_params.update({'minimum_block_ratio': float(target)/100})

            result_df = self.block.network_x_block_together(filters, additional=additional_block_params)
            score = max_ratio = 0
            result = self.util.commontools.get_yes_no_result(0)
            if not result_df.empty:
                max_ratio = result_df['facing_percentage'].max()
                result_df = result_df[result_df['is_block']==True]
                if not result_df.empty:
                    # Keep only passing blocks; report the best ratio and the
                    # orientation of the strongest block as the score.
                    max_ratio = result_df['facing_percentage'].max()
                    result_df = result_df[result_df['facing_percentage'] == max_ratio]
                    result = self.util.commontools.get_yes_no_result(1)
                    orientation = result_df['orientation'].values[0]
                    score = self.util.commontools.get_kpi_result_value_pk_by_value(orientation.upper())
            self.write_to_db_result(fk=kpi_fk, numerator_id=group_fk, denominator_id=self.util.store_id,
                                    numerator_result=max_ratio * 100,
                                    score=score, result=result, target=target, by_scene=True)

            # connection with adjacency kpi
            # TODO: setup the dependency
            # NOTE(review): DataFrame.append was removed in pandas 2.x; confirm
            # the pinned pandas version before upgrading.
            self.util.block_results = self.util.block_results.append(pd.DataFrame([{'Group Name': row['Group Name'],
                                                                     'Score': result_df['is_block'].values[0] if not result_df.empty else False}]))
示例#5
0
 def __init__(self, data_provider, output):
     """Cache session data, parse the summary-KPIs template and build helper toolboxes.

     :param data_provider: engine data provider exposing the session's dataframes.
     :param output: output handler supplied by the calculation engine.
     """
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.adjacency = Adjancency(self.data_provider)
     self.block = Block(self.data_provider)
     self.template_name = 'summary_kpis.xlsx'
     # Template workbook lives in the Data directory one level above this file.
     self.TEMPLATE_PATH = os.path.join(
         os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
         'Data', self.template_name)
     self.template_data = parse_template(self.TEMPLATE_PATH, "KPIs")
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.tools = GENERALToolBox(self.data_provider)
     self.common = Common(self.data_provider)
     self.kpi_results_queries = []
     self.cub_tools = CUBAUCUBAUGENERALToolBox(self.data_provider,
                                               self.output)
     self.store_type = self.data_provider.store_type
     self.store_info = self.data_provider[Data.STORE_INFO]
     self.session_uid = self.data_provider.session_uid
     self.visit_date = self.data_provider.visit_date
 def __init__(self, data_provider, output):
     """Cache session dataframes and parse the qualitative template sheets.

     :param data_provider: engine data provider exposing the session's dataframes.
     :param output: output handler supplied by the calculation engine.
     """
     self.k_engine = BaseCalculationsScript(data_provider, output)
     self.output = output
     self.data_provider = data_provider
     self.project_name = self.data_provider.project_name
     self.session_uid = self.data_provider.session_uid
     self.products = self.data_provider[Data.PRODUCTS]
     self.all_products = self.data_provider[Data.ALL_PRODUCTS]
     self.all_templates = self.data_provider[Data.ALL_TEMPLATES]
     self.match_product_in_scene = self.data_provider[Data.MATCHES]
     self.visit_date = self.data_provider[Data.VISIT_DATE]
     self.session_info = self.data_provider[Data.SESSION_INFO]
     self.scene_info = self.data_provider[Data.SCENES_INFO]
     self.store_id = self.data_provider[Data.STORE_FK]
     self.store_type = self.data_provider[Data.STORE_INFO][
         StoreInfoConsts.STORE_TYPE].values[0]
     self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
     self.match_display_in_scene = self.get_match_display()
     self.data_provider.probe_groups = self.get_probe_group(
         self.data_provider.session_uid)
     # NOTE(review): self.rds_conn is used here (and for Block/Adjancency
     # below) but never assigned in this __init__ - it must come from a
     # subclass or class attribute, otherwise this raises AttributeError.
     self.tools = PNGJP_SAND2GENERALToolBox(self.data_provider,
                                            self.output,
                                            rds_conn=self.rds_conn)
     self.template_name = 'TemplateQualitative.xlsx'
     # Template workbook lives in the sibling Data directory.
     self.TEMPLATE_PATH = os.path.join(
         os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
         self.template_name)
     # One parsed sheet per KPI family.
     self.template_data = parse_template(self.TEMPLATE_PATH, self.HIERARCHY)
     self.golden_zone_data = parse_template(self.TEMPLATE_PATH,
                                            self.GOLDEN_ZONE)
     self.golden_zone_data_criteria = parse_template(
         self.TEMPLATE_PATH, self.GOLDEN_ZONE_CRITERIA)
     self.block_data = parse_template(self.TEMPLATE_PATH, self.BLOCK)
     self.adjacency_data = parse_template(self.TEMPLATE_PATH,
                                          self.ADJACENCY)
     self.anchor_data = parse_template(self.TEMPLATE_PATH, self.ANCHOR)
     self.perfect_execution_data = parse_template(self.TEMPLATE_PATH,
                                                  self.PERFECT_EXECUTION)
     self.category_list_data = parse_template(self.TEMPLATE_PATH,
                                              self.CATEGORY_LIST)
     self.product_groups_data = parse_template(self.TEMPLATE_PATH,
                                               self.PRODUCT_GROUP)
     self._custom_templates = {}
     self.scenes_types_for_categories = {}
     self.kpi_static_data = self.get_kpi_static_data()
     self.kpi_results_queries = []
     # Result accumulators keyed per KPI / atomic KPI.
     self.kpi_results = {}
     self.atomic_results = {}
     self.categories = self.all_products[
         ProductsConsts.CATEGORY_FK].unique().tolist()
     self.display_types = [
         'Aisle', 'Casher', 'End-shelf', 'Entrance', 'Island', 'Side-End',
         'Side-net'
     ]
     self.custom_scif_queries = []
     self.session_fk = self.data_provider[Data.SESSION_INFO][
         BasicConsts.PK].iloc[0]
     self.block = Block(data_provider=self.data_provider,
                        rds_conn=self.rds_conn)
     self.adjacency = Adjancency(data_provider=self.data_provider,
                                 rds_conn=self.rds_conn)
     self.fix_utf_space_problem()
     self.kpi_scores = {}
示例#7
0
class MILLERCOORSToolBox:
    """Session-level KPI toolbox for the MILLERCOORS project."""

    # KPI hierarchy level identifiers.
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    # Filter-mode constants used when building dataframe conditions.
    EXCLUDE_FILTER = 0
    INCLUDE_FILTER = 1
    CONTAIN_FILTER = 2
    # Whether empty products take part in a calculation.
    EXCLUDE_EMPTY = False
    INCLUDE_EMPTY = True

    def __init__(self, data_provider, output):
        """Cache session dataframes, load the KPI template workbook and build helpers.

        :param data_provider: engine data provider exposing the session's dataframes.
        :param output: output handler supplied by the calculation engine.
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.template_info = self.data_provider.all_templates
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        # NOTE(review): duplicate of the Data.MATCHES assignment above.
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        # Matches enriched with product, scene and template attributes.
        self.mpis = self.match_product_in_scene.merge(self.products, on='product_fk', suffixes=['', '_p']) \
            .merge(self.scene_info, on='scene_fk', suffixes=['', '_s']) \
            .merge(self.template_info, on='template_fk', suffixes=['', '_t'])
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.custom_entity_data = self.get_custom_entity_data()
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        self.templates = {}
        # Load every configured sheet of the template workbook.
        # NOTE(review): `sheetname` was replaced by `sheet_name` in pandas
        # 0.21+ and removed later - confirm the pinned pandas version.
        for sheet in Const.SHEETS:
            self.templates[sheet] = pd.read_excel(TEMPLATE_PATH,
                                                  sheetname=sheet).fillna('')
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_type = self.store_info['store_type'].iloc[0]
        # main_template = self.templates[Const.KPIS]
        # self.templates[Const.KPIS] = main_template[main_template[Const.STORE_TYPE] == self.store_type]
        self.block = Block(self.data_provider, self.output, common=self.common)
        self.adjacency = Adjancency(self.data_provider,
                                    self.output,
                                    common=self.common)
        self.ignore_stacking = False
        self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'

    def main_calculation(self, *args, **kwargs):
        """Run every KPI row of the main template sheet.

        For each KPI the scene item facts are narrowed to the configured
        store locations (or to non-Empty products by default), the KPI-type
        sheet is joined onto the main sheet, and the matching calculation
        function is invoked once per template row.
        """
        main_template = self.templates[Const.KPIS]
        for _, main_line in main_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            kpi_type = main_line[Const.KPI_TYPE]
            # Default population: everything except Empty products.
            relevant_scif = self.scif[self.scif['product_type'] != 'Empty']
            scene_types = self.does_exist(main_line, Const.STORE_LOCATION)
            if scene_types:
                # Location restriction replaces the default population.
                relevant_scif = self.scif[
                    self.scif['template_name'].isin(scene_types)]
                if relevant_scif.empty:
                    continue

            type_template = self.templates[kpi_type]
            type_template = type_template[
                type_template[Const.KPI_NAME] == kpi_name]
            # Carry the main-sheet columns onto each type-sheet row.
            type_template = type_template.merge(main_template,
                                                how='left',
                                                left_on=Const.KPI_NAME,
                                                right_on=Const.KPI_NAME)
            kpi_function = self.get_kpi_function(kpi_type)
            for _, kpi_line in type_template.iterrows():
                kpi_function(kpi_line, relevant_scif)
        return

    def calculate_anchor(self, kpi_line, relevant_scif):
        """Score an anchor KPI: pass (1) when at least one scene has the
        target products on a shelf edge, fail (0) otherwise, and persist the
        result against the scene's template."""
        edge_filters = {kpi_line[Const.PARAM]: kpi_line[Const.VALUE],
                        'template_name': kpi_line[Const.STORE_LOCATION]}
        passed_scenes, _ = self.calculate_products_on_edge(
            min_number_of_facings=1, min_number_of_shelves=1, **edge_filters)

        kpi_score = 1 if passed_scenes > 0 else 0
        template_fk = relevant_scif['template_fk'].values[0]
        # 999 is the hard-coded numerator entity used throughout this class.
        self.common.write_to_db_result(**self.build_dictionary_for_db_insert(
            kpi_name=kpi_line[Const.KPI_NAME],
            numerator_id=999,
            numerator_result=kpi_score,
            result=kpi_score,
            denominator_id=template_fk,
            denominator_result=1))
        return

    def calculate_block(self, kpi_line, relevant_scif):
        """Pass (1) if the filtered products form a block in any relevant
        scene, fail (0) otherwise, and write the result to the DB.

        :param kpi_line: template row with the block parameter and value.
        :param relevant_scif: scene item facts limited to the KPI's scenes.
        """
        kpi_result = 0
        for scene in relevant_scif.scene_fk.unique():
            scene_filter = {'scene_fk': scene}
            location_filter = {'scene_id': scene}
            mpis = self.filter_df(self.mpis, scene_filter)
            filters = {kpi_line[Const.PARAM]: kpi_line[Const.VALUE]}
            items = set(self.filter_df(mpis, filters)['scene_match_fk'].values)
            additional = {'minimum_facing_for_block': 2}
            if not items:
                # BUGFIX: was `break`, which aborted the whole scene loop the
                # first time a scene had no matching products, so later scenes
                # were never evaluated. Skip only this scene instead.
                continue

            block_result = self.block.network_x_block_together(
                filters, location=location_filter, additional=additional)

            passed_blocks = block_result[block_result['is_block'] ==
                                         True].cluster.tolist()

            # Scenes are OR-ed: one passing block is enough.
            if passed_blocks:
                kpi_result = 1
                break

        template_fk = relevant_scif['template_fk'].values[0]
        result_dict = self.build_dictionary_for_db_insert(
            kpi_name=kpi_line[Const.KPI_NAME],
            numerator_id=999,
            numerator_result=kpi_result,
            result=kpi_result,
            denominator_id=template_fk,
            denominator_result=1)
        self.common.write_to_db_result(**result_dict)

        return

    def calculate_adjacency(self, kpi_line, relevant_scif):
        """Check what sits next to the anchor products in each scene.

        For 'list attribute' KPIs, write one passing result per distinct
        adjacent value (brand or custom entity) found in the first scene that
        contains anchor products. Otherwise pass (1) when the tested value is
        adjacent to the anchor products in any scene, fail (0) if not.

        :param kpi_line: template row with anchor/tested params and values.
        :param relevant_scif: scene item facts limited to the KPI's scenes.
        """
        kpi_result = 0
        for scene in relevant_scif.scene_fk.unique():
            scene_filter = {'scene_fk': scene}
            mpis = self.filter_df(self.mpis, scene_filter)
            filters = {
                kpi_line[Const.ANCHOR_PARAM]: kpi_line[Const.ANCHOR_VALUE]
            }
            # determine if there are any matching products in the scene
            items = set(self.filter_df(mpis, filters)['scene_match_fk'].values)
            if not items:
                # BUGFIX: was `break`, which abandoned all remaining scenes as
                # soon as one scene had no anchor products; skip just this one.
                continue

            # Adjacency graph of the scene built from product coordinates.
            all_graph = AdjacencyGraph(mpis,
                                       None,
                                       self.products,
                                       product_attributes=['rect_x', 'rect_y'],
                                       name=None,
                                       adjacency_overlap_ratio=.4)

            # match_fk -> graph node id, plus the reverse mapping.
            match_to_node = {
                int(node['match_fk']): i
                for i, node in all_graph.base_adjacency_graph.nodes(data=True)
            }
            node_to_match = {val: key for key, val in match_to_node.items()}
            # All matches sharing a graph edge with any anchor item.
            edge_matches = set(
                sum([[
                    node_to_match[i] for i in all_graph.base_adjacency_graph[
                        match_to_node[item]].keys()
                ] for item in items], []))
            adjacent_items = edge_matches - items
            # Real products only - drop placeholder/irrelevant types.
            adj_mpis = mpis[(mpis['scene_match_fk'].isin(adjacent_items))
                            & (~mpis['product_type'].isin(
                                ['Empty', 'Irrelevant', 'Other', 'POS']))]

            if kpi_line[Const.LIST_ATTRIBUTE]:
                # Report every distinct adjacent value of the listed attribute.
                for value in adj_mpis[kpi_line[
                        Const.LIST_ATTRIBUTE]].unique().tolist():
                    if kpi_line[Const.LIST_ATTRIBUTE] == 'brand_name':
                        numerator_fk = adj_mpis[adj_mpis['brand_name'] ==
                                                value].brand_fk.values[0]
                    else:
                        if value is not None:
                            try:
                                numerator_fk = \
                                self.custom_entity_data[self.custom_entity_data['name'] == value].pk.values[0]
                            except IndexError:
                                Log.warning(
                                    'Custom entity "{}" does not exist'.format(
                                        value))
                                continue
                        else:
                            continue

                    result_dict = self.build_dictionary_for_db_insert(
                        kpi_name=kpi_line[Const.KPI_NAME],
                        numerator_id=numerator_fk,
                        numerator_result=1,
                        result=1,
                        denominator_id=scene,
                        denominator_result=1)
                    self.common.write_to_db_result(**result_dict)
                return
            else:
                if kpi_line[Const.TESTED_VALUE] in adj_mpis[kpi_line[
                        Const.TESTED_PARAM]].unique().tolist():
                    kpi_result = 1
                    break

        # List-attribute KPIs report per-value results above; nothing more to
        # write when no scene had relevant products.
        if kpi_line[Const.LIST_ATTRIBUTE]:
            return
        template_fk = relevant_scif['template_fk'].values[0]
        result_dict = self.build_dictionary_for_db_insert(
            kpi_name=kpi_line[Const.KPI_NAME],
            numerator_id=999,
            numerator_result=kpi_result,
            result=kpi_result,
            denominator_id=template_fk,
            denominator_result=1)
        self.common.write_to_db_result(**result_dict)

    def calculate_block_adjacency(self, kpi_line, relevant_scif):
        """Pass when the anchor (and tested) products form a block in a scene;
        for 'list attribute' KPIs, instead report each distinct value (brand /
        custom entity) found adjacent to the passing block.

        :param kpi_line: template row with anchor/tested params and values.
        :param relevant_scif: scene item facts limited to the KPI's scenes.
        """
        kpi_result = 0
        for scene in relevant_scif.scene_fk.unique():
            scene_filter = {'scene_fk': scene}
            location_filter = {'scene_id': scene}
            mpis = self.filter_df(self.mpis, scene_filter)
            # allowed = {'product_type': ['Other', 'Empty']}
            # Build the block population from the anchor/tested parameters.
            if kpi_line[Const.TESTED_PARAM] == kpi_line[Const.ANCHOR_PARAM]:
                filters = {
                    kpi_line[Const.ANCHOR_PARAM]: [
                        kpi_line[Const.ANCHOR_VALUE],
                        kpi_line[Const.TESTED_VALUE]
                    ]
                }
            elif kpi_line[Const.TESTED_PARAM] == '':
                filters = {
                    kpi_line[Const.ANCHOR_PARAM]: kpi_line[Const.ANCHOR_VALUE]
                }
            else:
                filters = {
                    kpi_line[Const.ANCHOR_PARAM]: kpi_line[Const.ANCHOR_VALUE],
                    kpi_line[Const.TESTED_PARAM]: kpi_line[Const.TESTED_VALUE]
                }
            items = set(self.filter_df(mpis, filters)['scene_match_fk'].values)
            additional = {'minimum_facing_for_block': 2}
            # allowed_items = set(self.filter_df(mpis, allowed)['scene_match_fk'].values)
            # NOTE(review): `break` abandons ALL remaining scenes the first
            # time one scene has no matching items; `continue` looks intended
            # (as in calculate_block) - confirm before changing.
            if not (items):
                break

            block_result = self.block.network_x_block_together(
                filters, location=location_filter, additional=additional)

            passed_blocks = block_result[block_result['is_block'] ==
                                         True].cluster.tolist()

            if passed_blocks and kpi_line[Const.LIST_ATTRIBUTE]:
                # All match_fks belonging to any passing block cluster.
                match_fk_list = set(match for cluster in passed_blocks
                                    for node in cluster.nodes()
                                    for match in cluster.node[node]
                                    ['group_attributes']['match_fk_list'])

                # Scene-wide adjacency graph built from product coordinates.
                all_graph = AdjacencyGraph(
                    mpis,
                    None,
                    self.products,
                    product_attributes=['rect_x', 'rect_y'],
                    name=None,
                    adjacency_overlap_ratio=.4)
                # associate all nodes in the master graph to their associated match_fks
                match_to_node = {
                    int(node['match_fk']): i
                    for i, node in all_graph.base_adjacency_graph.nodes(
                        data=True)
                }
                # create a dict of all match_fks to their corresponding nodes
                node_to_match = {
                    val: key
                    for key, val in match_to_node.items()
                }
                # Matches sharing a graph edge with any block member.
                edge_matches = set(
                    sum([[
                        node_to_match[i]
                        for i in all_graph.base_adjacency_graph[
                            match_to_node[match]].keys()
                    ] for match in match_fk_list], []))
                adjacent_matches = edge_matches - match_fk_list
                # Real products only - drop placeholder/irrelevant types.
                adj_mpis = mpis[(mpis['scene_match_fk'].isin(adjacent_matches))
                                & (~mpis['product_type'].isin(
                                    ['Empty', 'Irrelevant', 'Other', 'POS']))]

                # One passing DB row per distinct adjacent attribute value.
                for value in adj_mpis[kpi_line[
                        Const.LIST_ATTRIBUTE]].unique().tolist():
                    if kpi_line[Const.LIST_ATTRIBUTE] == 'brand_name':
                        numerator_fk = adj_mpis[adj_mpis['brand_name'] ==
                                                value].brand_fk.values[0]
                    else:
                        if value is not None:
                            try:
                                numerator_fk = \
                                self.custom_entity_data[self.custom_entity_data['name'] == value].pk.values[0]
                            except IndexError:
                                Log.warning(
                                    'Custom entity "{}" does not exist'.format(
                                        value))
                                continue
                        else:
                            continue

                    result_dict = self.build_dictionary_for_db_insert(
                        kpi_name=kpi_line[Const.KPI_NAME],
                        numerator_id=numerator_fk,
                        numerator_result=1,
                        result=1,
                        denominator_id=scene,
                        denominator_result=1)
                    self.common.write_to_db_result(**result_dict)
                return
            elif kpi_line[
                    Const.
                    LIST_ATTRIBUTE]:  # return if this is a list_attribute KPI with no passing blocks
                return
            if passed_blocks:  # exit loop if this isn't a list_attribute KPI, but has passing blocks
                kpi_result = 1
                break
        if kpi_line[
                Const.
                LIST_ATTRIBUTE]:  # handle cases where there are no relevant products,
            return  # so we miss the other check above
        template_fk = relevant_scif['template_fk'].values[0]
        result_dict = self.build_dictionary_for_db_insert(
            kpi_name=kpi_line[Const.KPI_NAME],
            numerator_id=999,
            numerator_result=kpi_result,
            result=kpi_result,
            denominator_id=template_fk,
            denominator_result=1)
        self.common.write_to_db_result(**result_dict)
        return

    def calculate_products_on_edge(self,
                                   min_number_of_facings=1,
                                   min_number_of_shelves=1,
                                   **filters):
        """
        :param min_number_of_facings: Minimum number of edge facings for KPI to pass.
        :param min_number_of_shelves: Minimum number of different shelves with edge facings for KPI to pass.
        :param filters: This are the parameters which dictate the relevant SKUs for the edge calculation.
        :return: A tuple: (Number of scenes which pass, Total number of relevant scenes)
        """
        # Split out location filters and resolve the scenes they select.
        filters, relevant_scenes = self.separate_location_filters_from_product_filters(
            **filters)
        if len(relevant_scenes) == 0:
            return 0, 0
        number_of_edge_scenes = 0
        for scene in relevant_scenes:
            edge_facings = pd.DataFrame(columns=self.mpis.columns)
            matches = self.mpis[self.mpis['scene_fk'] == scene]
            for shelf in matches['shelf_number'].unique():
                shelf_matches = matches[matches['shelf_number'] == shelf]
                if not shelf_matches.empty:
                    # After sorting, iloc[0]/iloc[-1] are the shelf's two edges.
                    shelf_matches = shelf_matches.sort_values(
                        by=['bay_number', 'facing_sequence_number'])
                    edge_facings = edge_facings.append(shelf_matches.iloc[0])
                    # NOTE(review): this tests the ACCUMULATED frame's length,
                    # so the first shelf never contributes its right-most
                    # facing; `len(shelf_matches) > 1` looks intended - confirm.
                    if len(edge_facings) > 1:
                        edge_facings = edge_facings.append(
                            shelf_matches.iloc[-1])
            # Keep only edge facings that satisfy the product filters.
            edge_facings = edge_facings[self.get_filter_condition(
                edge_facings, **filters)]
            if len(edge_facings) >= min_number_of_facings \
                    and len(edge_facings['shelf_number'].unique()) >= min_number_of_shelves:
                number_of_edge_scenes += 1
        return number_of_edge_scenes, len(relevant_scenes)

    def separate_location_filters_from_product_filters(self, **filters):
        """
        This function gets scene-item-facts filters of all kinds, extracts the relevant scenes by the location filters,
        and returns them along with the product filters only.

        A field counts as a location filter when it exists in scif but not in
        the products master data.

        :return: (remaining product filters dict, array of relevant scene ids)
        """
        location_filters = {}
        # Iterate over a snapshot of the keys: popping from the dict while
        # iterating the live .keys() view raises RuntimeError on Python 3.
        for field in list(filters.keys()):
            if field not in self.all_products.columns and field in self.scif.columns:
                location_filters[field] = filters.pop(field)
        relevant_scenes = self.scif[self.get_filter_condition(
            self.scif, **location_filters)]['scene_id'].unique()
        return filters, relevant_scenes

    @staticmethod
    def filter_df(df, filters, exclude=0):
        for key, val in filters.items():
            if not isinstance(val, list):
                val = [val]
            if exclude:
                df = df[~df[key].isin(val)]
            else:
                df = df[df[key].isin(val)]
        return df

    def get_filter_condition(self, df, **filters):
        """
        Build a boolean mask over *df* from keyword filters.

        :param df: The data frame to be filtered.
        :param filters: These are the parameters which the data frame is filtered by.
                       Every parameter would be a tuple of the value and an include/exclude flag.
                       INPUT EXAMPLE (1):   manufacturer_name = ('Diageo', DIAGEOAUJTIUAGENERALToolBox.INCLUDE_FILTER)
                       INPUT EXAMPLE (2):   manufacturer_name = 'Diageo'
        :return: a boolean Series mask for df (None when no condition could be
                 built); fields missing from df are warned about and skipped.
        """
        # No filters at all: everything passes (all-True mask built from 'pk').
        if not filters:
            return df['pk'].apply(bool)
        # Seed the mask with "has facings" when that column exists.
        if self.facings_field in df.keys():
            filter_condition = (df[self.facings_field] > 0)
        else:
            filter_condition = None
        for field in filters.keys():
            if field in df.keys():
                if isinstance(filters[field], tuple):
                    value, exclude_or_include = filters[field]
                else:
                    # Bare values default to an include filter.
                    value, exclude_or_include = filters[
                        field], self.INCLUDE_FILTER
                # Falsy values (None, '', empty list) are skipped entirely.
                if not value:
                    continue
                if not isinstance(value, list):
                    value = [value]
                if exclude_or_include == self.INCLUDE_FILTER:
                    condition = (df[field].isin(value))
                elif exclude_or_include == self.EXCLUDE_FILTER:
                    condition = (~df[field].isin(value))
                elif exclude_or_include == self.CONTAIN_FILTER:
                    # Substring match: OR together one contains() per value.
                    condition = (df[field].str.contains(value[0], regex=False))
                    for v in value[1:]:
                        condition |= df[field].str.contains(v, regex=False)
                else:
                    continue
                # AND each field's condition into the accumulated mask.
                if filter_condition is None:
                    filter_condition = condition
                else:
                    filter_condition &= condition
            else:
                Log.warning('field {} is not in the Data Frame'.format(field))

        return filter_condition

    def get_kpi_function(self, kpi_type):
        """
        Map a KPI sheet name to the method that calculates it.

        :param kpi_type: value from "sheet" column in the main sheet
        :return: the bound calculation method, or None if unrecognized
        """
        dispatch = {
            Const.ANCHOR: 'calculate_anchor',
            Const.BLOCK: 'calculate_block',
            Const.BLOCK_ADJACENCY: 'calculate_block_adjacency',
            Const.ADJACENCY: 'calculate_adjacency',
        }
        method_name = dispatch.get(kpi_type)
        if method_name is not None:
            # getattr keeps the lookup lazy, matching the original if/elif.
            return getattr(self, method_name)
        Log.warning(
            "The value '{}' in column sheet in the template is not recognized"
            .format(kpi_type))
        return None

    @staticmethod
    def does_exist(kpi_line, column_name):
        """
        checks if kpi_line has values in this column, and if it does - returns a list of these values
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return [cell]
        return None

    def build_dictionary_for_db_insert(self,
                                       fk=None,
                                       kpi_name=None,
                                       numerator_id=0,
                                       numerator_result=0,
                                       result=0,
                                       denominator_id=0,
                                       denominator_result=0,
                                       score=0,
                                       score_after_actions=0,
                                       denominator_result_after_actions=None,
                                       numerator_result_after_actions=0,
                                       weight=None,
                                       kpi_level_2_target_fk=None,
                                       context_id=None,
                                       parent_fk=None,
                                       target=None,
                                       identifier_parent=None,
                                       identifier_result=None):
        """
        Assemble the kwargs dict for common.write_to_db_result.

        Either ``fk`` or ``kpi_name`` must be supplied; with only a name the
        fk is resolved via the common KPI static data.  Parameters whose
        default is None are written whenever they were explicitly passed,
        including an explicit 0 (the old truthiness checks dropped zeros).
        Several parameters (score_after_actions, weight, parent_fk, ...) are
        accepted but currently unused; kept for interface compatibility.

        :return: dict of insert parameters, or None when neither fk nor
                 kpi_name is given or the fk lookup fails.
        """
        try:
            insert_params = dict()
            if not fk:
                if not kpi_name:
                    return None
                insert_params['fk'] = self.common.get_kpi_fk_by_kpi_name(
                    kpi_name)
            else:
                insert_params['fk'] = fk
            insert_params['numerator_id'] = numerator_id
            insert_params['numerator_result'] = numerator_result
            insert_params['denominator_id'] = denominator_id
            insert_params['denominator_result'] = denominator_result
            insert_params['result'] = result
            insert_params['score'] = score
            # 'is not None' (not truthiness) so an explicit 0 is still kept.
            if target is not None:
                insert_params['target'] = target
            if denominator_result_after_actions is not None:
                insert_params[
                    'denominator_result_after_actions'] = denominator_result_after_actions
            if context_id is not None:
                insert_params['context_id'] = context_id
            if identifier_parent:
                insert_params['identifier_parent'] = identifier_parent
                insert_params['should_enter'] = True
            if identifier_result:
                insert_params['identifier_result'] = identifier_result
            return insert_params
        except IndexError:
            # The fk lookup raises IndexError for an unknown KPI name.
            Log.error('error in build_dictionary_for_db_insert')
            return None

    def get_custom_entity_data(self):
        """Read the full static.custom_entity table into a DataFrame."""
        query = """
                select *
                from static.custom_entity
                """
        return pd.read_sql_query(query, self.rds_conn.db)

    def commit_results(self):
        """Flush all accumulated KPI results to the database via Common."""
        self.common.commit_results_data()
class PERNODUSToolBox:
    # Hierarchy levels of the legacy KPI static-data tables.
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3
    # When True, 'Empty' product types are dropped from space calculations.
    EXCLUDE_EMPTY = True
    # Flag value meaning "keep rows matching the filter" (see get_filter_condition).
    INCLUDE_FILTER = 1
    DEFAULT = 'Default'
    TOP = 'Top'
    BOTTOM = 'Bottom'
    # Sentinel meaning "require all products" in assortment checks.
    STRICT_MODE = ALL = 1000

    def __init__(self, data_provider, output):
        """
        Capture the session data frames, open the DB connection and parse
        every KPI template sheet used by the calculations.

        :param data_provider: Trax data provider for the current session.
        :param output: output object (kept for interface compatibility).
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        # (the original assigned self.products twice; kept once)
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.templates = self.data_provider.all_templates
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = ProjectConnector(self.project_name,
                                         DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_sub_brand_data = pd.read_sql_query(self.get_sub_brand_data(),
                                                    self.rds_conn.db)
        self.kpi_results_queries = []
        # One parsed sheet per KPI family.
        self.Presence_template = parse_template(TEMPLATE_PATH, "Presence")
        self.BaseMeasure_template = parse_template(TEMPLATE_PATH,
                                                   "Base Measurement")
        self.Anchor_template = parse_template(TEMPLATE_PATH, "Anchor")
        self.Blocking_template = parse_template(TEMPLATE_PATH, "Blocking")
        self.Adjaceny_template = parse_template(TEMPLATE_PATH, "Adjacency")
        self.Eye_Level_template = parse_template(TEMPLATE_PATH, "Eye Level")
        self.eye_level_definition = parse_template(TEMPLATE_PATH, "Shelves")
        self.ignore_stacking = False
        self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
        self.availability = Availability(self.data_provider)
        self.blocking_calc = Block(self.data_provider)
        # Matches enriched with product, scene and template attributes.
        self.mpis = self.match_product_in_scene.merge(self.products, on='product_fk', suffixes=['', '_p']) \
            .merge(self.scene_info, on='scene_fk', suffixes=['', '_s']) \
            .merge(self.templates, on='template_fk', suffixes=['', '_t'])

    def main_calculation(self, *args, **kwargs):
        """
        Run every KPI family for the session — Base Measurement, Anchor,
        Presence, Blocking, Eye Level and Adjacency — then commit results.

        Template rows are calculated independently: a failing row is logged
        and skipped so the remaining rows still run.
        """
        # Base Measurement
        for _, row in self.BaseMeasure_template.iterrows():
            # kpi_name is bound BEFORE the try so the except clause can
            # always format it (the Adjacency loop previously looked up the
            # fk first and could hit an unbound kpi_name in its handler).
            kpi_name = row['KPI']
            try:
                value = row['value']
                location = row['Store Location']
                kpi_set_fk = self._get_kpi_set_fk(row)
                self.calculate_category_space(kpi_set_fk, kpi_name, value,
                                              location)
            except Exception as e:
                Log.info('KPI {} calculation failed due to {}'.format(
                    kpi_name.encode('utf-8'), e))
                continue

        # Anchor
        for _, row in self.Anchor_template.iterrows():
            kpi_name = row['KPI']
            try:
                kpi_set_fk = self._get_kpi_set_fk(row)
                self.calculate_anchor(kpi_set_fk, kpi_name)
            except Exception as e:
                Log.info('KPI {} calculation failed due to {}'.format(
                    kpi_name.encode('utf-8'), e))
                continue

        # Presence
        self.calculate_presence()

        # Blocking
        for _, row in self.Blocking_template.iterrows():
            kpi_name = row['KPI']
            try:
                kpi_set_fk = self._get_kpi_set_fk(row)
                self.calculate_blocking(kpi_set_fk, kpi_name)
            except Exception as e:
                Log.info('KPI {} calculation failed due to {}'.format(
                    kpi_name.encode('utf-8'), e))
                continue

        # Eye Level
        for _, row in self.Eye_Level_template.iterrows():
            kpi_name = row['KPI']
            try:
                kpi_set_fk = self._get_kpi_set_fk(row)
                self.calculate_eye_level(kpi_set_fk, kpi_name)
            except Exception as e:
                Log.info('KPI {} calculation failed due to {}'.format(
                    kpi_name.encode('utf-8'), e))
                continue

        # Adjacency
        for _, row in self.Adjaceny_template.iterrows():
            kpi_name = row['KPI']
            try:
                kpi_set_fk = self._get_kpi_set_fk(row)
                self.adjacency(kpi_set_fk, kpi_name)
            except Exception as e:
                Log.info('KPI {} calculation failed due to {}'.format(
                    kpi_name.encode('utf-8'), e))
                continue

        self.common.commit_results_data()

        return

    def _get_kpi_set_fk(self, row):
        """Resolve the static KPI pk for the row's 'KPI LEVEL 2' type."""
        return self.kpi_static_data['pk'][
            self.kpi_static_data['type'] == row['KPI LEVEL 2']].iloc[0]

    def get_templates(self):
        """Load every main template sheet into self.templates, keyed by sheet name."""

        for sheet in Const.SHEETS_MAIN:
            # NOTE(review): 'sheetname' is the pre-0.21 pandas spelling; newer
            # pandas only accepts 'sheet_name' — confirm the pinned version.
            self.templates[sheet] = pd.read_excel(Const.TEMPLATE_PATH,
                                                  sheetname=sheet,
                                                  keep_default_na=False)

    def calculate_blocking(self, kpi_set_fk, kpi_name):
        """
        Check whether the KPI's products form a block on 'Shelf' scenes and
        write a 1/0 score keyed by the entity named in the template's
        'param' column (brand_name / sub_category / size).
        """
        # The template row is already selected by KPI name here; the
        # original filtered by the same condition twice.
        kpi_template = self.Blocking_template.loc[
            self.Blocking_template['KPI'] == kpi_name]
        if kpi_template.empty:
            return None
        kpi_template = kpi_template.iloc[0]

        relevant_filter = {kpi_template['param']: kpi_template['value']}

        result = self.blocking_calc.network_x_block_together(
            relevant_filter,
            location={'template_name': 'Shelf'},
            additional={'minimum_facing_for_block': 2})
        # Any block found -> pass.
        score = 0 if result.empty else 1

        # The param values are mutually exclusive, so use elif (the original
        # re-tested each one).
        param = kpi_template['param']
        if param == "brand_name":
            brand_fk = self.all_products['brand_fk'][
                self.all_products["brand_name"] ==
                kpi_template['value']].iloc[0]
            self.common.write_to_db_result(fk=kpi_set_fk,
                                           numerator_id=brand_fk,
                                           denominator_id=self.store_id,
                                           result=score,
                                           score=score)
        elif param == "sub_category":
            sub_category_fk = self.all_products["sub_category_fk"][
                self.all_products["sub_category"] ==
                kpi_template['value']].iloc[0]
            self.common.write_to_db_result(fk=kpi_set_fk,
                                           numerator_id=sub_category_fk,
                                           denominator_id=self.store_id,
                                           result=score,
                                           score=score)
        elif param == "size":
            # NOTE(review): 375 looks like a hard-coded size identifier —
            # confirm its meaning against the KPI definition.
            self.common.write_to_db_result(fk=kpi_set_fk,
                                           numerator_id=self.store_id,
                                           numerator_result=375,
                                           denominator_id=self.store_id,
                                           result=score,
                                           score=score)

    def calculate_presence(self):
        """
        Use facings to determine presence of specific UPCs, brands, or segments - INBEVBE

        Each Presence template row filters scif by its param type/values and
        writes either one row per found brand ('list' mode) or a single
        pass/fail result.
        """
        for _, row in self.Presence_template.iterrows():
            param_type = row[Const.PARAM_TYPE]
            param_values = [item.strip()
                            for item in str(row[Const.PARAM_VALUES]).split(',')]

            general_filters = {param_type: param_values}
            filtered_df = self.scif[self.get_filter_condition(
                self.scif, **general_filters)]
            kpi_set_fk = self.kpi_static_data['pk'][
                self.kpi_static_data['type'] == row['KPI LEVEL 2']].iloc[0]

            if row['list']:
                # Guard: .iloc[0] on an empty frame raises IndexError; with
                # no matches there is nothing to report for this row.
                if filtered_df.empty:
                    continue
                template_fk = filtered_df['template_fk'].iloc[0]
                for brand_fk in filtered_df['brand_fk'].unique().tolist():
                    self.common.write_to_db_result(fk=kpi_set_fk,
                                                   numerator_id=brand_fk,
                                                   denominator_id=template_fk,
                                                   result=1,
                                                   score=1)
            else:
                # Pass only when every requested value was found.
                result = len(filtered_df[param_type].unique())
                score = 1 if result == len(param_values) else 0

                if param_type == 'sub_brand':
                    brand_fk = self.all_products['brand_fk'][
                        self.all_products['sub_brand'] ==
                        param_values[0]].iloc[0]
                    self.common.write_to_db_result(
                        fk=kpi_set_fk,
                        numerator_id=brand_fk,
                        denominator_id=self.store_id,
                        result=score,
                        score=score)
                elif param_type == 'template_name':
                    if filtered_df.empty:
                        continue
                    template_fk = filtered_df['template_fk'].iloc[0]
                    # NOTE(review): writes a constant 1 rather than the
                    # computed score — confirm this is intended.
                    self.common.write_to_db_result(
                        fk=kpi_set_fk,
                        numerator_id=template_fk,
                        denominator_id=self.store_id,
                        result=1,
                        score=1)
        return

    def adjacency(self, kpi_set_fk, kpi_name):
        """
        For every 'Shelf' scene, find products graph-adjacent to the KPI's
        anchor products and write one result per adjacent sub-category /
        brand found.
        """
        relevant_scif = self.filter_df(self.scif.copy(),
                                       {'template_name': 'Shelf'})
        kpi_template = self.Adjaceny_template.loc[
            self.Adjaceny_template['KPI'] == kpi_name]
        if kpi_template.empty:
            return None
        kpi_template = kpi_template.iloc[0]
        param = kpi_template['param']
        # Anchor values come comma-separated from the template.
        anchor_values = str(kpi_template['Product Att']).replace(', ',
                                                                 ',').split(',')
        # (renamed from 'filter' — was shadowing the builtin)
        anchor_filter = {param: anchor_values}

        for scene in relevant_scif.scene_fk.unique():
            mpis = self.filter_df(self.mpis, {'scene_fk': scene})
            items = set(
                self.filter_df(mpis, anchor_filter)['scene_match_fk'].values)
            if not items:
                # NOTE(review): this aborts ALL remaining scenes as soon as
                # one scene has no anchor products; 'continue' may be the
                # intended behavior — confirm before changing.
                return

            all_graph = AdjacencyGraph(mpis,
                                       None,
                                       self.products,
                                       product_attributes=['rect_x', 'rect_y'],
                                       name=None,
                                       adjacency_overlap_ratio=.4)

            match_to_node = {
                int(node['match_fk']): i
                for i, node in all_graph.base_adjacency_graph.nodes(data=True)
            }
            node_to_match = {val: key for key, val in match_to_node.items()}
            # Every graph neighbour of every anchor item, minus the anchors
            # themselves, gives the adjacent products.
            edge_matches = set()
            for item in items:
                neighbours = all_graph.base_adjacency_graph[
                    match_to_node[item]].keys()
                edge_matches.update(node_to_match[i] for i in neighbours)
            adjacent_items = edge_matches - items
            adj_mpis = mpis[mpis['scene_match_fk'].isin(adjacent_items)]

            if param == 'sub_category':
                # Count adjacent facings per sub_category, dropping the
                # catch-all 'General.' bucket. (The original compared the
                # *count* to 'General.', which never matched, and deleted
                # from the dict while iterating it.)
                counts = {
                    k: v
                    for k, v in dict(
                        adj_mpis['sub_category'].value_counts()).items()
                    if k != 'General.'
                }
                numerator_id = self.all_products['sub_category_fk'][
                    self.all_products['sub_category'] ==
                    anchor_values[0]].iloc[0]
                for adjacent_sub_category in counts.keys():
                    denominator_id = self.all_products['sub_category_fk'][
                        self.all_products['sub_category'] ==
                        adjacent_sub_category].iloc[0]
                    self.common.write_to_db_result(
                        fk=kpi_set_fk,
                        numerator_id=numerator_id,
                        denominator_id=denominator_id,
                        result=1,
                        score=1)

            if param in ['brand_name', 'sub_brand']:
                # NOTE(review): groups by sub_category even for brand KPIs,
                # then looks the names up as brands below — confirm intended.
                counts = {
                    k: v
                    for k, v in dict(
                        adj_mpis['sub_category'].value_counts()).items()
                    if k != 'General.'
                }
                for adjacent_brand in counts.keys():
                    if param == 'sub_brand':
                        numerator_id = self.kpi_sub_brand_data['pk'][
                            self.kpi_sub_brand_data['name'] ==
                            anchor_values[0]].iloc[0]
                    else:  # brand_name
                        numerator_id = self.all_products['brand_fk'][
                            self.all_products['brand_name'] ==
                            anchor_values[0]].iloc[0]
                    denominator_id = self.all_products['brand_fk'][
                        self.all_products['brand_name'] ==
                        adjacent_brand].iloc[0]
                    self.common.write_to_db_result(
                        fk=kpi_set_fk,
                        numerator_id=numerator_id,
                        denominator_id=denominator_id,
                        result=1,
                        score=1)

    def calculate_category_space(self,
                                 kpi_set_fk,
                                 kpi_name,
                                 category,
                                 scene_types=None):
        """
        Compute the linear-space score for a Base Measurement row and write
        one result per checked value (or one per scene type when the row has
        no param).

        :param kpi_set_fk: pk of the static KPI to write against.
        :param kpi_name: KPI name as it appears in the template.
        :param category: template 'value' — passed by the caller but the
            values are re-read from the template row below.
        :param scene_types: 'Store Location' value used as template_name filter.
        """
        template = self.BaseMeasure_template.loc[
            (self.BaseMeasure_template['KPI'] == kpi_name)
            & (self.BaseMeasure_template['Store Location'] == scene_types)]
        kpi_template = template.loc[template['KPI'] == kpi_name]
        if kpi_template.empty:
            return None
        kpi_template = kpi_template.iloc[0]
        values_to_check = []

        if kpi_template['param']:
            values_to_check = str(kpi_template['value']).split(',')

        filters = {'template_name': scene_types}

        if values_to_check:
            for primary_filter in values_to_check:
                filters[kpi_template['param']] = primary_filter
                result = self.calculate_category_space_length(**filters)
                score = result
                # NOTE(review): the fk lookup is hard-coded to the 'category'
                # column, so this assumes param values are categories and
                # that scif contains them — .iloc[0] raises otherwise.
                category_fk = self.scif[self.scif['category'] ==
                                        primary_filter]['category_fk'].iloc[0]
                self.common.write_to_db_result(fk=kpi_set_fk,
                                               numerator_id=category_fk,
                                               numerator_result=0,
                                               denominator_result=0,
                                               denominator_id=self.store_id,
                                               result=result,
                                               score=score)
        else:
            result = self.calculate_category_space_length(**filters)
            score = result
            template_fk = self.scif[self.scif['template_name'] ==
                                    scene_types]['template_fk'].iloc[0]
            self.common.write_to_db_result(fk=kpi_set_fk,
                                           numerator_id=template_fk,
                                           numerator_result=0,
                                           denominator_result=0,
                                           denominator_id=self.store_id,
                                           result=result,
                                           score=score)

    def calculate_category_space_length(self, threshold=0.5, **filters):
        """
        :param threshold: minimum share of a bay's bottom-layer linear space
                          that the filtered products must occupy for the bay
                          to count.
        :param filters: These are the parameters which the data frame is filtered by.
        :return: total space score — each qualifying bay contributes 4 points
                 (note: NOT millimetres, despite the old doc).

        Any exception is logged and collapses the result to 0 (best-effort,
        as in the rest of this toolbox).
        """

        try:
            filtered_scif = self.scif[self.get_filter_condition(
                self.scif, **filters)]
            # Idiomatic truthiness check (was '== True').
            if self.EXCLUDE_EMPTY:
                filtered_scif = filtered_scif[
                    filtered_scif['product_type'] != 'Empty']

            bay_values = []
            product_fk_list = filtered_scif['product_fk'].unique().tolist()
            for scene in filtered_scif['scene_fk'].unique().tolist():
                scene_matches = self.match_product_in_scene[
                    self.match_product_in_scene['scene_fk'] == scene]
                # Copy so we never mutate the caller's kwargs dict.
                scene_filters = dict(filters)
                scene_filters['scene_fk'] = scene
                scene_filters['product_fk'] = product_fk_list

                for bay in scene_matches['bay_number'].unique().tolist():
                    # Bottom-layer, status==1 linear width of the whole bay.
                    bay_total_linear = scene_matches.loc[
                        (scene_matches['bay_number'] == bay)
                        & (scene_matches['stacking_layer'] == 1) &
                        (scene_matches['status']
                         == 1)]['width_mm_advance'].sum()
                    scene_filters['bay_number'] = bay
                    tested_group = scene_matches[self.get_filter_condition(
                        scene_matches, **scene_filters)]
                    tested_linear = tested_group['width_mm_advance'].loc[
                        tested_group['stacking_layer'] == 1].sum()

                    if tested_linear:
                        bay_ratio = tested_linear / float(bay_total_linear)
                    else:
                        bay_ratio = 0
                    bay_values.append(4 if bay_ratio >= threshold else 0)
            # (the unused max_linear_of_bays accumulator was removed)
            space_length = sum(bay_values)

        except Exception as e:
            Log.info('Linear Feet calculation failed due to {}'.format(e))
            space_length = 0

        return space_length

    def calculate_anchor(self, kpi_set_fk, kpi_name):
        """
        Score whether the KPI's products sit on shelf edges and write one
        result per checked value.
        """
        template = self.Anchor_template.loc[self.Anchor_template['KPI'] ==
                                            kpi_name]
        kpi_template = template.loc[template['KPI'] == kpi_name]
        if kpi_template.empty:
            return None
        kpi_template = kpi_template.iloc[0]

        values_to_check = []

        if kpi_template['param']:
            values_to_check = str(kpi_template['value']).split(',')

        filters = {kpi_template['param']: values_to_check}
        # NOTE(review): this comparison assumes calculate_products_on_edge
        # returns a single number — verify its return type, as non-numeric
        # returns would make 'result >= 1' unreliable.
        result = self.calculate_products_on_edge(**filters)
        score = 1 if result >= 1 else 0

        for value in values_to_check:
            # NOTE(review): assumes every checked value is a sub_category
            # present in all_products — .iloc[0] raises otherwise.
            sub_category_fk = self.all_products['sub_category_fk'][
                self.all_products['sub_category'] == value].iloc[0]

            self.common.write_to_db_result(fk=kpi_set_fk,
                                           numerator_id=sub_category_fk,
                                           numerator_result=0,
                                           denominator_result=0,
                                           denominator_id=self.store_id,
                                           result=result,
                                           score=score)

    def calculate_products_on_edge(self,
                                   min_number_of_facings=1,
                                   min_number_of_shelves=1,
                                   **filters):
        """
        Check whether any relevant scene has enough shelf-edge (leftmost /
        rightmost per shelf) facings matching the product filters.

        :param min_number_of_facings: minimum matching edge facings for a
                                      scene to pass.
        :param min_number_of_shelves: kept for interface compatibility; not
                                      currently enforced.
        :param filters: This are the parameters which dictate the relevant SKUs for the edge calculation.
        :return: 1 if some scene passes, otherwise 0. The original could
                 return a tuple or a pandas object, which broke the caller's
                 'result >= 1' comparison ('Series == None' is also not a
                 valid emptiness test).
        """
        filters, relevant_scenes = self.separate_location_filters_from_product_filters(
            **filters)
        if len(relevant_scenes) == 0:
            return 0
        for scene in relevant_scenes:
            edge_facings = pd.DataFrame(
                columns=self.match_product_in_scene.columns)
            matches = self.match_product_in_scene[
                self.match_product_in_scene['scene_fk'] == scene]
            for shelf in matches['shelf_number'].unique():
                shelf_matches = matches[matches['shelf_number'] == shelf]
                if shelf_matches.empty:
                    continue
                shelf_matches = shelf_matches.sort_values(
                    by=['bay_number', 'facing_sequence_number'])
                edge_facings = edge_facings.append(shelf_matches.iloc[0])
                if len(edge_facings) > 1:
                    edge_facings = edge_facings.append(shelf_matches.iloc[-1])

            condition = self.get_filter_condition(edge_facings, **filters)
            if condition is None:
                continue
            if len(edge_facings[condition]) >= min_number_of_facings:
                return 1
        return 0

    def separate_location_filters_from_product_filters(self, **filters):
        """
        This function gets scene-item-facts filters of all kinds, extracts the relevant scenes by the location filters,
        and returns them along with the product filters only.

        A field counts as a location filter when it exists in scif but not in
        the products master data.

        :return: (remaining product filters dict, array of relevant scene ids)
        """
        location_filters = {}
        # Iterate over a snapshot of the keys: popping from the dict while
        # iterating the live .keys() view raises RuntimeError on Python 3.
        for field in list(filters.keys()):
            if field not in self.all_products.columns and field in self.scif.columns:
                location_filters[field] = filters.pop(field)
        relevant_scenes = self.scif[self.get_filter_condition(
            self.scif, **location_filters)]['scene_id'].unique()
        return filters, relevant_scenes

    def calculate_eye_level(self, kpi_set_fk, kpi_name):
        """
        Calculate eye-level presence for the KPI's values and write a single
        pass/fail result against the store.
        """
        template = self.Eye_Level_template.loc[self.Eye_Level_template['KPI']
                                               == kpi_name]
        kpi_template = template.loc[template['KPI'] == kpi_name]
        if kpi_template.empty:
            return None
        kpi_template = kpi_template.iloc[0]

        values_to_check = []

        if kpi_template['param']:
            # Template values are comma-separated; normalize ', ' to ','.
            values_to_check = str(kpi_template['value']).replace(
                ', ', ',').split(',')

        filters = {kpi_template['param']: values_to_check}
        result = self.calculate_eye_level_assortment(
            eye_level_configurations=self.eye_level_definition,
            min_number_of_products=1,
            percentage_result=True,
            requested_attribute='facings',
            **filters)
        # Only an exact boolean True counts as a pass (any other return —
        # e.g. a tuple — scores 0).
        score = 1 if result == True else 0

        self.common.write_to_db_result(fk=kpi_set_fk,
                                       numerator_id=self.store_id,
                                       numerator_result=0,
                                       denominator_result=0,
                                       denominator_id=self.store_id,
                                       result=score,
                                       score=score)

    def calculate_eye_level_assortment(self,
                                       eye_level_configurations=DEFAULT,
                                       min_number_of_products=ALL,
                                       **filters):
        """
        Check whether the requested filter values are all found on eye-level shelves.

        :param eye_level_configurations: A data frame containing information about shelves to ignore (==not eye level)
                                         for every number of shelves in each bay.
        :param min_number_of_products: Minimum number of eye level unique SKUs for KPI to pass.
                                       NOTE(review): currently unused by the body.
        :param filters: This are the parameters which dictate the relevant SKUs for the eye-level calculation
                        (location filters are split off and used to pick the relevant scenes).
        :return: (0, 0) when no scene matches the location filters; otherwise a bool which is True
                 when the requested values form a strict subset of the eye-level assortment.
                 NOTE(review): the mixed return types look unintentional - confirm with callers.
        """
        filters, relevant_scenes = self.separate_location_filters_from_product_filters(
            **filters)
        if len(relevant_scenes) == 0:
            return 0, 0
        # if eye_level_configurations == self.DEFAULT:
        #     if hasattr(self, 'eye_level_configurations'):
        #         eye_level_configurations = self.eye_level_configurations
        #     else:
        #         Log.error('Eye-level configurations are not set up')
        #         return False
        # NOTE(review): number_of_products is computed but never used below.
        number_of_products = len(self.all_products[self.get_filter_condition(
            self.all_products, **filters)]['product_ean_code'])
        # Configuration columns: min/max shelf count of the fixture, and how
        # many shelves to ignore from the bottom/top for that size.
        min_shelf, max_shelf, min_ignore, max_ignore = eye_level_configurations.columns
        number_of_eye_level_scenes = 0  # NOTE(review): never incremented or returned.
        products_on_eye_level = []  # NOTE(review): never appended to.
        for scene in relevant_scenes:
            # Re-initialised per scene, so only the LAST scene's facings reach
            # the code after the loop - NOTE(review): confirm this is intended.
            eye_level_facings = pd.DataFrame(
                columns=self.match_product_in_scene.columns)
            matches = self.match_product_in_scene[
                self.match_product_in_scene['scene_fk'] == scene]
            for bay in matches['bay_number'].unique():
                bay_matches = matches[matches['bay_number'] == bay]
                number_of_shelves = bay_matches['shelf_number'].max()
                configuration = eye_level_configurations[
                    (eye_level_configurations[min_shelf] <= number_of_shelves)
                    &
                    (eye_level_configurations[max_shelf] >= number_of_shelves)]
                if not configuration.empty:
                    configuration = configuration.iloc[0]
                else:
                    # No configuration for this shelf count: treat every shelf
                    # as eye level (ignore nothing at either end).
                    configuration = {min_ignore: 0, max_ignore: 0}
                min_include = configuration[min_ignore] + 1
                max_include = number_of_shelves - configuration[max_ignore]
                eye_level_shelves = bay_matches[
                    bay_matches['shelf_number'].between(
                        min_include, max_include)]
                eye_level_facings = eye_level_facings.append(eye_level_shelves)

                # eye_level_facings = pd.concat([eye_level_facings, self.all_products])
        # NOTE(review): the boolean mask below is computed on self.all_products
        # but applied to eye_level_facings; the indexes are unlikely to align.
        found_pks = eye_level_facings['product_fk'][self.get_filter_condition(
            self.all_products, **filters)].unique().tolist()
        # NOTE(review): filters.keys()[0] / filters.values()[0] below only work
        # on Python 2 - py3 dict views are not indexable.
        eye_level_assortment = self.all_products[filters.keys()[0]][
            self.all_products['product_fk'].isin(found_pks)].unique()

        # '<' on sets is a STRICT subset test: every requested value must be
        # present and the assortment must hold at least one extra value.
        result = set(filters.values()[0]) < set(eye_level_assortment)

        return result

    def kpi_name_builder(self, kpi_name, **filters):
        """
        Build a KPI name by substituting '{key}' placeholders with the matching
        filter values, per the project naming convention.

        The 'template_name' filter is intentionally left as a placeholder.
        """
        for key in filters:
            if key == 'template_name':
                continue
            kpi_name = kpi_name.replace('{%s}' % key, str(filters[key]))
            # NOTE: "\'" is the same string as "'", so this replace is a no-op;
            # kept verbatim to mirror the original behaviour exactly.
            kpi_name = kpi_name.replace("'", "\'")
        return kpi_name

    def get_filter_condition(self, df, **filters):
        """
        Build a boolean mask over *df* from keyword filters.

        Each filter value is either a plain value / list (include semantics) or
        a (value, flag) tuple where the flag is one of the INCLUDE_FILTER /
        EXCLUDE_FILTER / CONTAIN_FILTER constants. Rows with zero facings are
        always masked out when the facings column is present.

        :param df: The data frame to be filters.
        :param filters: These are the parameters which the data frame is filtered by.
                       Every parameter would be a tuple of the value and an include/exclude flag.
                       INPUT EXAMPLE (1):   manufacturer_name = ('Diageo', DIAGEOAUPNGROGENERALToolBox.INCLUDE_FILTER)
                       INPUT EXAMPLE (2):   manufacturer_name = 'Diageo'
        :return: a boolean Series aligned with *df* (None when no applicable
                 filter was found and there is no facings column).
        """
        if not filters:
            # No filters at all: every row with a pk passes.
            return df['pk'].apply(bool)

        mask = (df[self.facings_field] > 0) if self.facings_field in df.keys() else None

        for field, raw in filters.items():
            if field not in df.keys():
                Log.warning('field {} is not in the Data Frame'.format(field))
                continue
            if isinstance(raw, tuple):
                value, flag = raw
            else:
                value, flag = raw, self.INCLUDE_FILTER
            if not value:
                # Empty filter value: nothing to constrain on for this field.
                continue
            values = value if isinstance(value, list) else [value]
            if flag == self.INCLUDE_FILTER:
                condition = df[field].isin(values)
            elif flag == self.EXCLUDE_FILTER:
                condition = ~df[field].isin(values)
            elif flag == self.CONTAIN_FILTER:
                # Substring match; additional values are OR-ed together.
                condition = df[field].str.contains(values[0], regex=False)
                for extra in values[1:]:
                    condition |= df[field].str.contains(extra, regex=False)
            else:
                continue
            mask = condition if mask is None else mask & condition

        return mask

    @staticmethod
    def filter_df(df, filters, exclude=0):
        for key, val in filters.items():
            if not isinstance(val, list):
                val = [val]
            if exclude:
                df = df[~df[key].isin(val)]
            else:
                df = df[df[key].isin(val)]
        return df

    @staticmethod
    def get_sub_brand_data():
        return """
示例#9
0
class PEPSICOUKSceneToolBox:
    """Scene-level KPI calculations for the PEPSICO UK project."""
    # Legacy static-KPI hierarchy levels.
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    # KPI type names as registered in the KPI static data.
    NUMBER_OF_FACINGS = 'Number of Facings'
    TOTAL_LINEAR_SPACE = 'Total Linear Space'
    NUMBER_OF_BAYS = 'Number of bays'
    NUMBER_OF_SHELVES = 'Number of shelves'
    PEPSICO = 'PEPSICO'
    PLACEMENT_BY_SHELF_NUMBERS_TOP = 'Placement by shelf numbers_Top'
    SHELF_PLACEMENT = 'Shelf Placement'
    SHELF_PLACEMENT_VERTICAL_LEFT = 'Shelf Placement Vertical_Left'
    SHELF_PLACEMENT_VERTICAL_CENTER = 'Shelf Placement Vertical_Center'
    SHELF_PLACEMENT_VERTICAL_RIGHT = 'Shelf Placement Vertical_Right'
    PRODUCT_BLOCKING = 'Product Blocking'
    PRODUCT_BLOCKING_ADJACENCY = 'Product Blocking Adjacency'
    PRIMARY_SHELF = 'Primary Shelf'
    # Column names used by the shelf-placement external-targets template.
    NUMBER_OF_SHELVES_TEMPL_COLUMN = 'No of Shelves in Fixture (per bay) (key)'
    RELEVANT_SHELVES_TEMPL_COLUMN = 'Shelves From Bottom To Include (data)'
    SHELF_PLC_TARGETS_COLUMNS = [
        'kpi_operation_type_fk', 'operation_type', 'kpi_level_2_fk', 'type',
        NUMBER_OF_SHELVES_TEMPL_COLUMN, RELEVANT_SHELVES_TEMPL_COLUMN,
        'KPI Parent'
    ]
    # Rename map for merges that suffix duplicated columns with '_x'.
    SHELF_PLC_TARGET_COL_RENAME = {
        'kpi_operation_type_fk_x': 'kpi_operation_type_fk',
        'operation_type_x': 'operation_type',
        'kpi_level_2_fk_x': 'kpi_level_2_fk',
        'type_x': 'type',
        NUMBER_OF_SHELVES_TEMPL_COLUMN + '_x': NUMBER_OF_SHELVES_TEMPL_COLUMN,
        RELEVANT_SHELVES_TEMPL_COLUMN + '_x': RELEVANT_SHELVES_TEMPL_COLUMN,
        'KPI Parent_x': 'KPI Parent'
    }

    def __init__(self, data_provider, output, common=None):
        """
        Cache the session/scene data frames from the data provider and build
        the helper toolboxes used by the scene KPI calculations.

        :param data_provider: project data provider holding session/scene data.
        :param output: output handle (stored as-is).
        :param common: accepted for interface compatibility but ignored - a new
                       Common instance is always created (see commented line).
        """
        self.output = output
        self.data_provider = data_provider
        # self.common = common
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.templates = self.data_provider[Data.TEMPLATES]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        # Fall back to the session-info store_fk when the provider has none.
        self.store_id = self.data_provider[Data.STORE_FK] if self.data_provider[Data.STORE_FK] is not None \
                                                            else self.session_info['store_fk'].values[0]
        self.all_templates = self.data_provider[Data.ALL_TEMPLATES]
        self.store_type = self.data_provider.store_type
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]

        # Attach the probe group id to every match; several KPIs are computed
        # per probe group.
        self.probe_groups = self.get_probe_group()
        self.match_product_in_scene = self.match_product_in_scene.merge(
            self.probe_groups, on='probe_match_fk', how='left')
        # A "solid" scene is one stitched from at most one probe group.
        self.is_solid_scene = True if len(self.probe_groups['probe_group_id'].
                                          unique().tolist()) <= 1 else False
        self.toolbox = GENERALToolBox(self.data_provider)
        self.commontools = PEPSICOUKCommonToolBox(self.data_provider,
                                                  self.rds_conn)

        self.custom_entities = self.commontools.custom_entities
        self.on_display_products = self.commontools.on_display_products
        self.exclusion_template = self.commontools.exclusion_template
        self.filtered_scif = self.commontools.filtered_scif
        self.filtered_matches = self.commontools.filtered_matches
        # Matches removed by the common-toolbox filtering (set of probe_match_fk).
        self.excluded_matches = self.compare_matches()
        self.filtered_matches = self.filtered_matches.merge(
            self.probe_groups, on='probe_match_fk', how='left')

        self.scene_bay_shelf_product = self.commontools.scene_bay_shelf_product
        self.external_targets = self.commontools.external_targets
        # PepsiCo's own manufacturer fk, resolved from the products master data.
        self.own_manuf_fk = self.all_products[
            self.all_products['manufacturer_name'] ==
            self.PEPSICO]['manufacturer_fk'].values[0]
        # Block / adjacency engines operate on the filtered scene data.
        self.block = Block(self.data_provider,
                           custom_scif=self.filtered_scif,
                           custom_matches=self.filtered_matches)
        self.adjacency = Adjancency(self.data_provider,
                                    custom_scif=self.filtered_scif,
                                    custom_matches=self.filtered_matches)
        self.block_results = pd.DataFrame(columns=['Group Name', 'Score'])
        # Accumulates every written KPI result (see add_kpi_result_to_kpi_results_df).
        self.kpi_results = pd.DataFrame(
            columns=['kpi_fk', 'numerator', 'denominator', 'result', 'score'])
        # Blocks that passed the Product Blocking KPI, reused by adjacency.
        self.passed_blocks = {}

    def get_probe_group(self):
        """Fetch the probe-group mapping for this session from the DB."""
        sql = PEPSICOUK_Queries.get_probe_group(self.session_uid)
        return pd.read_sql_query(sql, self.rds_conn.db)

    def compare_matches(self):
        initial_matches = set(
            self.match_product_in_scene['probe_match_fk'].values.tolist())
        filtered_matches = set(
            self.filtered_matches['probe_match_fk'].values.tolist())
        excluded_matches = initial_matches.difference(filtered_matches)
        return excluded_matches

    def main_function(self):
        if not self.filtered_matches.empty:
            self.calculate_internal_kpis()
            self.calculate_external_kpis()

    def calculate_external_kpis(self):
        """Run the KPIs driven by the external-targets template."""
        # Legacy implementations kept for reference:
        # self.calculate_product_blocking()
        # self.calculate_adjacency()
        for kpi in (self.calculate_product_blocking_new,
                    self.calculate_adjacency_new):
            kpi()

    def calculate_internal_kpis(self):
        """Run the scene KPIs that need no external targets, in order."""
        for kpi in (self.calculate_number_of_facings_and_linear_space,
                    self.calculate_number_of_bays_and_shelves,
                    self.calculate_shelf_placement_horizontal,
                    self.calculate_shelf_placement_vertical_mm):
            kpi()
        # self.calculate_shelf_placement_vertical()

    def calculate_shelf_placement_vertical_mm(self):
        """
        Calculate vertical placement (Left/Center/Right thirds) per product
        using millimetre coordinates, one probe group at a time, and write the
        per-product ratios to the DB.
        """
        probe_groups_list = self.probe_groups['probe_group_id'].unique(
        ).tolist()
        resulting_matches = pd.DataFrame()

        for probe_group in probe_groups_list:
            # Positions are derived from the FULL matches of the probe group,
            # then attached to the filtered matches via probe_match_fk.
            matches = self.match_product_in_scene[
                self.match_product_in_scene['probe_group_id'] == probe_group]
            filtered_matches = self.filtered_matches[
                self.filtered_matches['probe_group_id'] == probe_group]
            left_edge = self.get_left_edge_mm(matches)
            right_edge = self.get_right_edge_mm(matches)
            shelf_length = float(right_edge - left_edge)
            matches = self.define_product_position_mm(matches, shelf_length,
                                                      left_edge, right_edge)
            matches_position = matches[['probe_match_fk', 'position']]
            filtered_matches = filtered_matches.merge(matches_position,
                                                      on='probe_match_fk',
                                                      how='left')
            if resulting_matches.empty:
                resulting_matches = filtered_matches
            else:
                resulting_matches = resulting_matches.append(filtered_matches)

        # Aggregate per product/position and write ratio results.
        result_df = self.get_vertical_placement_kpi_result_df(
            resulting_matches)
        for i, row in result_df.iterrows():
            self.common.write_to_db_result(
                fk=row['kpi_fk'],
                numerator_id=row['product_fk'],
                denominator_id=row['product_fk'],
                numerator_result=row['count'],
                denominator_result=row['total_facings'],
                result=row['ratio'],
                score=row['ratio'],
                by_scene=True)
            self.add_kpi_result_to_kpi_results_df([
                row['kpi_fk'], row['product_fk'], row['product_fk'],
                row['ratio'], row['ratio']
            ])

    @staticmethod
    def get_left_edge_mm(matches):
        matches[
            'left_edge_mm'] = matches['x_mm'] - matches['width_mm_advance'] / 2
        left_edge = matches['left_edge_mm'].min()
        return left_edge

    @staticmethod
    def get_right_edge_mm(matches):
        matches['right_edge_mm'] = matches[
            'x_mm'] + matches['width_mm_advance'] / 2
        right_edge = matches['right_edge_mm'].max()
        return right_edge

    def define_product_position_mm(self, matches, shelf_length, left_edge,
                                   right_edge):
        matches['position'] = ''
        matches.loc[(matches['x_mm'] >= left_edge) &
                    (matches['x_mm'] <= (left_edge + shelf_length / 3)),
                    'position'] = self.SHELF_PLACEMENT_VERTICAL_LEFT
        matches.loc[(matches['x_mm'] > (left_edge + shelf_length / 3)) &
                    (matches['x_mm'] <= (left_edge + shelf_length * 2 / 3)),
                    'position'] = self.SHELF_PLACEMENT_VERTICAL_CENTER
        matches.loc[(matches['x_mm'] > (left_edge + shelf_length * 2 / 3)) &
                    (matches['x_mm'] <= right_edge),
                    'position'] = self.SHELF_PLACEMENT_VERTICAL_RIGHT
        return matches

    def calculate_number_of_facings_and_linear_space(self):
        """Write per-product facings and gross linear space results for the scene."""
        facings_fk = self.common.get_kpi_fk_by_kpi_type(self.NUMBER_OF_FACINGS)
        linear_fk = self.common.get_kpi_fk_by_kpi_type(self.TOTAL_LINEAR_SPACE)
        scif = self.filtered_scif.copy()
        # Cardboard display boxes count as three facings each.
        scif['facings'] = scif.apply(self.update_facings_for_cardboard_boxes,
                                     axis=1)
        for _, line in scif.iterrows():
            self.common.write_to_db_result(fk=facings_fk,
                                           numerator_id=line['product_fk'],
                                           result=line['facings'],
                                           denominator_id=self.store_id,
                                           by_scene=True)
            self.common.write_to_db_result(fk=linear_fk,
                                           numerator_id=line['product_fk'],
                                           denominator_id=self.store_id,
                                           result=line['gross_len_add_stack'],
                                           by_scene=True)
            self.add_kpi_result_to_kpi_results_df([
                facings_fk, line['product_fk'], self.store_id,
                line['facings'], None
            ])
            self.add_kpi_result_to_kpi_results_df([
                linear_fk, line['product_fk'], self.store_id,
                line['gross_len_add_stack'], None
            ])

    @staticmethod
    def update_facings_for_cardboard_boxes(row):
        facings = row['facings'] * 3 if row[
            'att1'] == 'display cardboard box' else row['facings']
        return facings

    def calculate_number_of_bays_and_shelves(self):
        """Write the bay count and the highest shelf number of the scene to the DB."""
        bays_kpi_fk = self.common.get_kpi_fk_by_kpi_type(self.NUMBER_OF_BAYS)
        shelves_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            self.NUMBER_OF_SHELVES)
        # bay_number == -1 marks matches not attributed to any bay; drop them.
        relevant = self.match_product_in_scene[~(
            self.match_product_in_scene['bay_number'] == -1)]

        bays_num = len(relevant['bay_number'].unique())
        # bay_shelf = matches.drop_duplicates(subset=['bay_number', 'shelf_number'])
        # shelf_num = len(bay_shelf)
        shelf_num = relevant['shelf_number'].max()
        self.common.write_to_db_result(fk=bays_kpi_fk,
                                       numerator_id=self.own_manuf_fk,
                                       result=bays_num,
                                       denominator_id=self.store_id,
                                       by_scene=True)
        self.common.write_to_db_result(fk=shelves_kpi_fk,
                                       numerator_id=self.own_manuf_fk,
                                       result=shelf_num,
                                       denominator_id=self.store_id,
                                       by_scene=True)
        self.add_kpi_result_to_kpi_results_df(
            [bays_kpi_fk, self.own_manuf_fk, self.store_id, bays_num, None])
        self.add_kpi_result_to_kpi_results_df([
            shelves_kpi_fk, self.own_manuf_fk, self.store_id, shelf_num, None
        ])

    def calculate_shelf_placement_horizontal(self):
        """
        Calculate the shelf-placement KPIs driven by the 'Shelf Placement'
        external targets: tag each relevant match with a placement type based
        on its shelf number, aggregate per product and write ratio results.
        """
        # shelf_placement_targets = self.commontools.get_shelf_placement_kpi_targets_data()
        external_targets = self.commontools.all_targets_unpacked
        shelf_placmnt_targets = external_targets[
            external_targets['operation_type'] == self.SHELF_PLACEMENT]
        if not shelf_placmnt_targets.empty:
            bay_max_shelves = self.get_scene_bay_max_shelves(
                shelf_placmnt_targets)
            # One row per (bay, shelves-set) combination.
            bay_all_shelves = bay_max_shelves.drop_duplicates(
                subset=['bay_number', 'shelves_all_placements'], keep='first')
            relevant_matches = self.filter_out_irrelevant_matches(
                bay_all_shelves)
            if not relevant_matches.empty:
                for i, row in bay_max_shelves.iterrows():
                    # The template lists the relevant shelves as a comma
                    # separated string; 'map' is consumed exactly once below.
                    shelf_list = map(
                        lambda x: float(x),
                        row['Shelves From Bottom To Include (data)'].split(
                            ','))
                    # Tag matches on those shelves with the target's placement type.
                    relevant_matches.loc[
                        (relevant_matches['bay_number'] == row['bay_number']) &
                        (relevant_matches['shelf_number_from_bottom'].
                         isin(shelf_list)), 'position'] = row['type']
                kpi_results = self.get_kpi_results_df(relevant_matches,
                                                      bay_max_shelves)
                for i, result in kpi_results.iterrows():
                    self.common.write_to_db_result(
                        fk=result['kpi_level_2_fk'],
                        numerator_id=result['product_fk'],
                        denominator_id=result['product_fk'],
                        denominator_result=result['total_facings'],
                        numerator_result=result['count'],
                        result=result['ratio'],
                        score=result['ratio'],
                        by_scene=True)
                    self.add_kpi_result_to_kpi_results_df([
                        result['kpi_level_2_fk'], result['product_fk'],
                        result['product_fk'], result['ratio'], result['ratio']
                    ])

    def calculate_shelf_placement_vertical(self):
        """
        Pixel-based variant of the vertical placement KPI (uses 'rect_x'
        instead of millimetre coordinates). Not invoked by
        calculate_internal_kpis, which uses the _mm variant instead.
        """
        probe_groups_list = self.probe_groups['probe_group_id'].unique(
        ).tolist()
        resulting_matches = pd.DataFrame()

        for probe_group in probe_groups_list:
            # Positions come from the FULL matches of the probe group, then
            # are attached to the filtered matches via probe_match_fk.
            matches = self.match_product_in_scene[
                self.match_product_in_scene['probe_group_id'] == probe_group]
            filtered_matches = self.filtered_matches[
                self.filtered_matches['probe_group_id'] == probe_group]
            left_edge = matches['rect_x'].min()
            right_edge = matches['rect_x'].max()
            shelf_length = float(right_edge - left_edge)
            matches = self.define_product_position_px(matches, shelf_length,
                                                      left_edge, right_edge)
            matches_position = matches[['probe_match_fk', 'position']]
            filtered_matches = filtered_matches.merge(matches_position,
                                                      on='probe_match_fk',
                                                      how='left')
            if resulting_matches.empty:
                resulting_matches = filtered_matches
            else:
                resulting_matches = resulting_matches.append(filtered_matches)

        # Aggregate per product/position and write ratio results.
        result_df = self.get_vertical_placement_kpi_result_df(
            resulting_matches)
        for i, row in result_df.iterrows():
            self.common.write_to_db_result(
                fk=row['kpi_fk'],
                numerator_id=row['product_fk'],
                denominator_id=row['product_fk'],
                numerator_result=row['count'],
                denominator_result=row['total_facings'],
                result=row['ratio'],
                score=row['ratio'],
                by_scene=True)
            self.add_kpi_result_to_kpi_results_df([
                row['kpi_fk'], row['product_fk'], row['product_fk'],
                row['ratio'], row['ratio']
            ])

    def define_product_position_px(self, matches, shelf_length, left_edge,
                                   right_edge):
        matches['position'] = ''
        matches.loc[(matches['rect_x'] >= left_edge) &
                    (matches['rect_x'] <= (left_edge + shelf_length / 3)),
                    'position'] = self.SHELF_PLACEMENT_VERTICAL_LEFT
        matches.loc[(matches['rect_x'] > (left_edge + shelf_length / 3)) &
                    (matches['rect_x'] <= (left_edge + shelf_length * 2 / 3)),
                    'position'] = self.SHELF_PLACEMENT_VERTICAL_CENTER
        matches.loc[(matches['rect_x'] > (left_edge + shelf_length * 2 / 3)) &
                    (matches['rect_x'] <= right_edge),
                    'position'] = self.SHELF_PLACEMENT_VERTICAL_RIGHT
        return matches

    def get_vertical_placement_kpi_result_df(self, filtered_matches):
        all_products_df = filtered_matches.groupby(['product_fk'],
                                                   as_index=False).agg(
                                                       {'count': np.sum})
        all_products_df.rename(columns={'count': 'total_facings'},
                               inplace=True)
        result_df = filtered_matches.groupby(['product_fk', 'position'],
                                             as_index=False).agg(
                                                 {'count': np.sum})
        result_df = result_df.merge(all_products_df,
                                    on='product_fk',
                                    how='left')
        result_df[
            'ratio'] = result_df['count'] / result_df['total_facings'] * 100
        result_df['kpi_fk'] = result_df['position'].apply(
            lambda x: self.common.get_kpi_fk_by_kpi_type(x))
        return result_df

    def calculate_product_blocking_new(self):
        """
        Run the 'Product Blocking' KPI for every target row: check whether the
        group's products form a block (via the network-x block engine), write
        a yes/no result plus an orientation-based score, and cache passing
        blocks for the adjacency KPI.
        """
        external_targets = self.commontools.all_targets_unpacked[
            self.commontools.all_targets_unpacked['type'] ==
            self.PRODUCT_BLOCKING]
        # Engine options; Empty facings are allowed inside a block.
        additional_block_params = {
            'check_vertical_horizontal': True,
            'minimum_facing_for_block': 3,
            'include_stacking': True,
            'allowed_products_filters': {
                'product_type': ['Empty']
            }
        }
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(self.PRODUCT_BLOCKING)

        for i, row in external_targets.iterrows():
            # group_fk = self.custom_entities[self.custom_entities['name'] == row['Group Name']]
            group_fk = self.custom_entities[self.custom_entities['name'] ==
                                            row['Group Name']]['pk'].values[0]
            filters = self.get_block_and_adjacency_filters(row)
            target = row['Target']
            # Target is a percentage in the template; engine expects a ratio.
            additional_block_params.update(
                {'minimum_block_ratio': float(target) / 100})

            result_df = self.block.network_x_block_together(
                filters, additional=additional_block_params)
            score = max_ratio = 0
            result = self.commontools.get_yes_no_result(0)
            if not result_df.empty:
                max_ratio = result_df['facing_percentage'].max()
                # Keep only rows that actually form a block.
                result_df = result_df[result_df['is_block'] == True]
                if not result_df.empty:
                    # Cache the passing blocks for calculate_adjacency_new.
                    self.passed_blocks[row['Group Name']] = result_df

                    max_ratio = result_df['facing_percentage'].max()
                    result_df = result_df[result_df['facing_percentage'] ==
                                          max_ratio]
                    result = self.commontools.get_yes_no_result(1)
                    # Score encodes the block orientation as a result-value pk.
                    orientation = result_df['orientation'].values[0]
                    score = self.commontools.get_kpi_result_value_pk_by_value(
                        orientation.upper())
            self.common.write_to_db_result(fk=kpi_fk,
                                           numerator_id=group_fk,
                                           denominator_id=self.store_id,
                                           numerator_result=max_ratio * 100,
                                           score=score,
                                           result=result,
                                           target=target,
                                           by_scene=True)

            self.block_results = self.block_results.append(
                pd.DataFrame([{
                    'Group Name':
                    row['Group Name'],
                    'Score':
                    result_df['is_block'].values[0]
                    if not result_df.empty else False
                }]))
            self.add_kpi_result_to_kpi_results_df(
                [kpi_fk, group_fk, self.store_id, result, score])

    def calculate_adjacency_new(self):
        """
        Run the 'Product Blocking Adjacency' KPI for every pair of block
        groups, using the blocks cached by calculate_product_blocking_new.

        NOTE(review): self.passed_blocks[pair[...]] assumes every pair from
        get_group_pairs passed blocking - confirm get_group_pairs guarantees it.
        """
        block_pairs = self.get_group_pairs()
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            self.PRODUCT_BLOCKING_ADJACENCY)
        block_pairs = [list(pair) for pair in block_pairs]
        for pair in block_pairs:
            group_1_fk = self.custom_entities[self.custom_entities['name'] ==
                                              pair[0]]['pk'].values[0]
            group_2_fk = self.custom_entities[self.custom_entities['name'] ==
                                              pair[1]]['pk'].values[0]

            adjacency_results = pd.DataFrame(columns=[
                'anchor_block', 'tested_block', 'anchor_facing_percentage',
                'tested_facing_percentage', 'scene_fk', 'is_adj'
            ])
            blocks = {
                'anchor_products': self.passed_blocks[pair[0]],
                'tested_products': self.passed_blocks[pair[1]]
            }
            # Merge the two groups' blocks and test adjacency between them.
            merged_blocks = self.adjacency._union_anchor_tested_blocks(blocks)
            adjacency_results = self.adjacency._is_block_adjacent(
                adjacency_results, merged_blocks)
            score = 0
            result = self.commontools.get_yes_no_result(0)
            if not adjacency_results.empty:
                adjacency_results = adjacency_results[
                    adjacency_results['is_adj'] == True]
            if not adjacency_results.empty:
                score = 1 if adjacency_results['is_adj'].values[0] else 0
                result = self.commontools.get_yes_no_result(score)
            self.common.write_to_db_result(fk=kpi_fk,
                                           numerator_id=group_1_fk,
                                           denominator_id=group_2_fk,
                                           result=result,
                                           score=score,
                                           by_scene=True)
            self.add_kpi_result_to_kpi_results_df(
                [kpi_fk, group_1_fk, group_2_fk, result, score])

    def calculate_product_blocking(self):
        """
        Legacy variant of the Product Blocking KPI (superseded by
        calculate_product_blocking_new and no longer called from
        calculate_external_kpis). Unlike the new variant it does not cache
        passing blocks in self.passed_blocks and does not append to
        self.kpi_results.
        """
        external_targets = self.commontools.all_targets_unpacked[
            self.commontools.all_targets_unpacked['type'] ==
            self.PRODUCT_BLOCKING]
        # Engine options; Empty facings are allowed inside a block.
        additional_block_params = {
            'check_vertical_horizontal': True,
            'minimum_facing_for_block': 3,
            'include_stacking': True,
            'allowed_products_filters': {
                'product_type': ['Empty']
            }
        }
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(self.PRODUCT_BLOCKING)

        for i, row in external_targets.iterrows():
            # group_fk = self.custom_entities[self.custom_entities['name'] == row['Group Name']]
            group_fk = self.custom_entities[self.custom_entities['name'] ==
                                            row['Group Name']]['pk'].values[0]
            filters = self.get_block_and_adjacency_filters(row)
            target = row['Target']
            # Target is a percentage in the template; engine expects a ratio.
            additional_block_params.update(
                {'minimum_block_ratio': float(target) / 100})

            result_df = self.block.network_x_block_together(
                filters, additional=additional_block_params)
            score = max_ratio = 0
            result = self.commontools.get_yes_no_result(0)
            if not result_df.empty:
                max_ratio = result_df['facing_percentage'].max()
                # Keep only rows that actually form a block.
                result_df = result_df[result_df['is_block'] == True]
                if not result_df.empty:
                    max_ratio = result_df['facing_percentage'].max()
                    result_df = result_df[result_df['facing_percentage'] ==
                                          max_ratio]
                    result = self.commontools.get_yes_no_result(1)
                    # Score encodes the block orientation as a result-value pk.
                    orientation = result_df['orientation'].values[0]
                    score = self.commontools.get_kpi_result_value_pk_by_value(
                        orientation.upper())
            self.common.write_to_db_result(fk=kpi_fk,
                                           numerator_id=group_fk,
                                           denominator_id=self.store_id,
                                           numerator_result=max_ratio * 100,
                                           score=score,
                                           result=result,
                                           target=target,
                                           by_scene=True)

            self.block_results = self.block_results.append(
                pd.DataFrame([{
                    'Group Name':
                    row['Group Name'],
                    'Score':
                    result_df['is_block'].values[0]
                    if not result_df.empty else False
                }]))

    @staticmethod
    def get_block_and_adjacency_filters(target_series):
        filters = {target_series['Parameter 1']: target_series['Value 1']}
        if target_series['Parameter 2']:
            filters.update(
                {target_series['Parameter 2']: target_series['Value 2']})
        if target_series['Parameter 3']:
            filters.update(
                {target_series['Parameter 3']: target_series['Value 3']})
        return filters

    def calculate_adjacency(self):
        """Calculate the Product Blocking Adjacency KPI for each pair of passing groups.

        Builds all unordered pairs of groups that passed the blocking KPI
        (get_group_pairs), resolves each group's filters from the
        PRODUCT_BLOCKING external targets, runs the network-x adjacency
        calculation per pair and writes a scene-level yes/no result to the DB.
        """
        block_pairs = self.get_group_pairs()
        if block_pairs:
            # Group filter definitions are shared with the blocking KPI targets.
            external_targets = self.commontools.all_targets_unpacked[
                self.commontools.all_targets_unpacked['type'] ==
                self.PRODUCT_BLOCKING]
            additional_block_params = {
                'check_vertical_horizontal': True,
                'minimum_facing_for_block': 3,
                'minimum_block_ratio': 0.9,
                'include_stacking': True,
                'allowed_products_filters': {
                    'product_type': ['Empty']
                }
            }
            kpi_fk = self.common.get_kpi_fk_by_kpi_type(
                self.PRODUCT_BLOCKING_ADJACENCY)

            for pair in block_pairs:
                pair = list(pair)  # frozenset -> list for positional access
                group_1_fk = self.custom_entities[self.custom_entities['name']
                                                  == pair[0]]['pk'].values[0]
                group_2_fk = self.custom_entities[self.custom_entities['name']
                                                  == pair[1]]['pk'].values[0]

                # Each group's filters come from its (first) target row.
                group_1_targets = external_targets[
                    external_targets['Group Name'] == pair[0]].iloc[0]
                group_1_filters = self.get_block_and_adjacency_filters(
                    group_1_targets)

                group_2_targets = external_targets[
                    external_targets['Group Name'] == pair[1]].iloc[0]
                group_2_filters = self.get_block_and_adjacency_filters(
                    group_2_targets)

                result_df = self.adjacency.network_x_adjacency_calculation(
                    {
                        'anchor_products': group_1_filters,
                        'tested_products': group_2_filters
                    },
                    location=None,
                    additional=additional_block_params)
                # Defaults: failing result unless an adjacency was found below.
                score = 0
                result = self.commontools.get_yes_no_result(0)
                if not result_df.empty:
                    result_df = result_df[result_df['is_adj'] == True]
                if not result_df.empty:
                    score = 1 if result_df['is_adj'].values[0] else 0
                    result = self.commontools.get_yes_no_result(score)
                self.common.write_to_db_result(fk=kpi_fk,
                                               numerator_id=group_1_fk,
                                               denominator_id=group_2_fk,
                                               result=result,
                                               score=score,
                                               by_scene=True)

                # if self.excluded_matches:
                #     self.adjacency.data_provider._set_matches(self.match_product_in_scene)

    def get_group_pairs(self):
        """Return all unordered pairs of groups that passed the blocking KPI.

        Reads self.block_results (populated by calculate_product_blocking) and
        returns a list of 2-element frozensets, one per unique unordered pair
        of passing group names.
        """
        valid_groups = self.block_results[self.block_results['Score'] ==
                                          1]['Group Name'].values.tolist()
        result_set = set()
        # Explicit nested loops instead of a side-effect-only list
        # comprehension; frozensets deduplicate unordered pairs.
        for i, group in enumerate(valid_groups):
            for other_group in valid_groups[i + 1:]:
                result_set.add(frozenset([group, other_group]))
        return list(result_set)

    def get_scene_bay_max_shelves(self, shelf_placement_targets):
        """Build the per-bay shelf-placement target df for the current scene.

        Determines each bay's shelf count, matches it to the shelf-placement
        external targets (synthesizing target rows for bays taller than the
        template maximum), aggregates all relevant shelves per bay, and keeps
        only real bays that appear in the filtered matches.

        Args:
            shelf_placement_targets: unpacked external targets for the
                shelf-placement KPIs.

        Returns:
            df with one row per (target kpi, bay), carrying 'bay_number',
            'shelves_in_bay' and the aggregated 'shelves_all_placements'.
        """
        # Highest shelf number per bay == number of shelves in that bay.
        scene_bay_max_shelves = self.match_product_in_scene.groupby(
            ['bay_number'],
            as_index=False).agg({'shelf_number_from_bottom': np.max})
        scene_bay_max_shelves.rename(
            columns={'shelf_number_from_bottom': 'shelves_in_bay'},
            inplace=True)
        # Ignore bays with fewer shelves than the smallest template definition.
        min_shelf_in_template = shelf_placement_targets[
            self.NUMBER_OF_SHELVES_TEMPL_COLUMN].min()  #added
        scene_bay_max_shelves = scene_bay_max_shelves[
            scene_bay_max_shelves['shelves_in_bay'] >=
            min_shelf_in_template]  #added

        # Bays taller than the template maximum get synthesized target rows.
        max_shelf_in_template = shelf_placement_targets[
            self.NUMBER_OF_SHELVES_TEMPL_COLUMN].max()
        shelf_placement_targets = self.complete_missing_target_shelves(
            scene_bay_max_shelves, max_shelf_in_template,
            shelf_placement_targets)

        # Attach the target rows matching each bay's shelf count.
        scene_bay_max_shelves = scene_bay_max_shelves.merge(
            shelf_placement_targets,
            left_on='shelves_in_bay',
            right_on=self.NUMBER_OF_SHELVES_TEMPL_COLUMN)
        scene_bay_max_shelves.rename(columns=self.SHELF_PLC_TARGET_COL_RENAME,
                                     inplace=True)
        scene_bay_max_shelves = scene_bay_max_shelves[
            self.SHELF_PLC_TARGETS_COLUMNS + ['bay_number', 'shelves_in_bay']]
        scene_bay_max_shelves = scene_bay_max_shelves.drop_duplicates()

        # Per bay, collect the union of relevant shelves across all placements.
        bay_all_relevant_shelves = self.get_bay_relevant_shelves_df(
            scene_bay_max_shelves)
        scene_bay_max_shelves = scene_bay_max_shelves.merge(
            bay_all_relevant_shelves, on='bay_number', how='left')

        # Bay -1 holds products that were not assigned to a real bay.
        scene_bay_max_shelves = scene_bay_max_shelves[~(
            scene_bay_max_shelves['bay_number'] == -1)]

        # Keep only bays that actually appear in the filtered matches.
        relevant_bays = self.filtered_matches['bay_number'].unique().tolist()
        final_df = scene_bay_max_shelves[
            scene_bay_max_shelves['bay_number'].isin(relevant_bays)]
        return final_df

    def get_bay_relevant_shelves_df(self, scene_bay_max_shelves):
        """Collapse the per-placement relevant shelves into one row per bay.

        NOTE(review): mutates the input df - the relevant-shelves column is
        cast to str in place.

        Args:
            scene_bay_max_shelves: df with 'bay_number' and the relevant
                shelves column (one row per placement target).

        Returns:
            df with 'bay_number' and 'shelves_all_placements' - a
            comma-separated concatenation of all placements' shelves.
        """
        scene_bay_max_shelves[
            self.RELEVANT_SHELVES_TEMPL_COLUMN] = scene_bay_max_shelves[
                self.RELEVANT_SHELVES_TEMPL_COLUMN].astype(str)
        bay_all_relevant_shelves = scene_bay_max_shelves[[
            'bay_number', self.RELEVANT_SHELVES_TEMPL_COLUMN
        ]].drop_duplicates()
        # Cumulative string concatenation per bay: the last row of each bay
        # ends up holding all of that bay's shelves joined by commas.
        bay_all_relevant_shelves['shelves_all_placements'] = bay_all_relevant_shelves.groupby('bay_number') \
            [self.RELEVANT_SHELVES_TEMPL_COLUMN].apply(lambda x: (x + ',').cumsum().str.strip())
        # Presumably a pandas-version workaround: groupby-apply can return an
        # index named 'bay_number', which would clash with the column on reset.
        if 'bay_number' in bay_all_relevant_shelves.index.names:
            bay_all_relevant_shelves.index.names = ['custom_ind']
        # Keep only the last (fully accumulated) row of each bay.
        bay_all_relevant_shelves = bay_all_relevant_shelves.drop_duplicates(subset=['bay_number'], keep='last') \
            [['bay_number', 'shelves_all_placements']]
        # Strip the trailing comma left by the cumulative concatenation.
        bay_all_relevant_shelves['shelves_all_placements'] = bay_all_relevant_shelves['shelves_all_placements']. \
            apply(lambda x: x[0:-1])
        return bay_all_relevant_shelves

    def complete_missing_target_shelves(self, scene_bay_df, max_shelf_template,
                                        shelf_placement_targets):
        """Extend the shelf-placement targets to cover bays taller than the template.

        For every bay whose shelf count exceeds the template maximum and has no
        target row yet, duplicates the max-shelf template rows, relabels them
        with the bay's shelf count, and widens the top placement's relevant
        shelves to span from the template maximum up to the bay's top shelf.

        Args:
            scene_bay_df: df with 'bay_number' / 'shelves_in_bay' per bay.
            max_shelf_template: highest shelf count defined in the template.
            shelf_placement_targets: the external shelf-placement targets.

        Returns:
            The targets df (SHELF_PLC_TARGETS_COLUMNS only), with synthesized
            rows appended for oversized bays.
        """
        shelf_placement_targets = shelf_placement_targets[
            self.SHELF_PLC_TARGETS_COLUMNS]
        shelf_placement_targets = shelf_placement_targets.reset_index(
            drop=True)
        for i, row in scene_bay_df.iterrows():
            if row['shelves_in_bay'] > max_shelf_template:
                # Skip bays whose shelf count was already covered (either by the
                # template or by a previously synthesized row).
                if row['shelves_in_bay'] not in shelf_placement_targets[
                        self.NUMBER_OF_SHELVES_TEMPL_COLUMN].values.tolist():
                    # .copy() so the assignments below modify an independent
                    # frame rather than a slice of the targets df
                    # (SettingWithCopy hazard in the original).
                    rows_to_add = shelf_placement_targets[
                        shelf_placement_targets[
                            self.NUMBER_OF_SHELVES_TEMPL_COLUMN] ==
                        max_shelf_template].copy()
                    rows_to_add[self.NUMBER_OF_SHELVES_TEMPL_COLUMN] = row[
                        'shelves_in_bay']
                    # Top placement covers every shelf from the template max
                    # up to (and including) this bay's top shelf.
                    top_shelf_range = ','.join(
                        map(str,
                            range(int(float(max_shelf_template)),
                                  int(float(row['shelves_in_bay'] + 1)))))
                    rows_to_add.loc[
                        rows_to_add['type'] ==
                        self.PLACEMENT_BY_SHELF_NUMBERS_TOP,
                        self.RELEVANT_SHELVES_TEMPL_COLUMN] = top_shelf_range
                    # pd.concat: DataFrame.append is deprecated/removed in
                    # modern pandas; behavior here is identical.
                    shelf_placement_targets = pd.concat(
                        [shelf_placement_targets, rows_to_add],
                        ignore_index=True)
        return shelf_placement_targets

    def filter_out_irrelevant_matches(self, target_kpis_df):
        """Reduce the bay/shelf product matches to those covered by the placement targets.

        Drops matches in bay -1 (unassigned), keeps only bays present in the
        targets, and within each target bay keeps only the shelves listed in
        its 'shelves_all_placements'. Adds an empty 'position' column to be
        filled later by the placement calculation.

        Args:
            target_kpis_df: df with 'bay_number' and comma-separated
                'shelves_all_placements' per bay.

        Returns:
            The filtered matches df (a copy; self.scene_bay_shelf_product is
            left untouched).
        """
        # .copy() so the in-place drop / column assignment below operate on an
        # independent frame, not a slice of self.scene_bay_shelf_product
        # (SettingWithCopy hazard in the original).
        relevant_matches = self.scene_bay_shelf_product[~(
            self.scene_bay_shelf_product['bay_number'] == -1)].copy()
        relevant_matches = relevant_matches[
            relevant_matches['bay_number'].isin(
                target_kpis_df['bay_number'].unique().tolist())]  # added
        for i, row in target_kpis_df.iterrows():
            # List comprehension instead of a one-shot map object: safe to
            # reuse and identical for the single .isin() consumer.
            all_shelves = [float(shelf) for shelf in
                           row['shelves_all_placements'].split(',')]
            rows_to_remove = relevant_matches[
                (relevant_matches['bay_number'] == row['bay_number'])
                & (~(relevant_matches['shelf_number_from_bottom'].isin(
                    all_shelves)))].index
            relevant_matches.drop(rows_to_remove, inplace=True)
        relevant_matches['position'] = ''
        return relevant_matches

    def get_kpi_results_df(self, relevant_matches, kpi_targets_df):
        """Aggregate facing counts per product/position and compute the SKU ratios.

        Args:
            relevant_matches: filtered matches with 'product_fk', 'position'
                and 'count' columns.
            kpi_targets_df: targets carrying 'kpi_level_2_fk', 'type' and
                'KPI Parent' per position.

        Returns:
            df with one row per (product, position): the position's facing
            count, the product's total facings, the matching kpi target
            columns, and the facings ratio in percent.
        """
        # Total facings per product, across all positions.
        totals = relevant_matches.groupby(['product_fk'],
                                          as_index=False).agg(
                                              {'count': np.sum})
        totals.rename(columns={'count': 'total_facings'}, inplace=True)

        # Facings per product within each position, joined with the totals.
        per_position = relevant_matches.groupby(['product_fk', 'position'],
                                                as_index=False).agg(
                                                    {'count': np.sum})
        per_position = per_position.merge(totals,
                                          on='product_fk',
                                          how='left')

        # Attach the kpi definition matching each position.
        unique_kpis = kpi_targets_df.drop_duplicates(
            subset=['kpi_level_2_fk', 'type', 'KPI Parent'])
        per_position = per_position.merge(unique_kpis,
                                          left_on='position',
                                          right_on='type',
                                          how='left')
        per_position['ratio'] = per_position.apply(self.get_sku_ratio, axis=1)
        return per_position

    def get_sku_ratio(self, row):
        """Return the row's facing count as a percentage of the product's total facings."""
        facings = float(row['count'])
        total_facings = row['total_facings']
        return facings / total_facings * 100

    def add_kpi_result_to_kpi_results_df(self, result_list):
        """Append one result row (list of column values) to self.kpi_results."""
        next_row_index = len(self.kpi_results)
        self.kpi_results.loc[next_row_index] = result_list
示例#10
0
 def __init__(self, data_provider, config_params=None, **kwargs):
     super(ProductBlockingKpi, self).__init__(data_provider, config_params=config_params, **kwargs)
     self.util = PepsicoUtil(None, data_provider)
     self.block = Block(self.data_provider, custom_scif=self.util.filtered_scif, custom_matches=self.util.filtered_matches)