Exemplo n.º 1
0
    def __init__(self, data_provider, output):
        """Collect session-scoped data frames and KPI helpers from the data
        provider.

        :param data_provider: project data provider exposing the session's
            data frames (products, scif, matches, store info, ...).
        :param output: output handle passed through to downstream helpers.
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        # Fetch the own-manufacturer param once (it is a DataFrame lookup)
        # instead of evaluating the same expression twice; keep None when the
        # project defines no value.
        own_manufacturer_param = \
            self.data_provider[Data.OWN_MANUFACTURER]['param_value'].iloc[0]
        self.manufacturer_fk = None if own_manufacturer_param is None \
            else int(own_manufacturer_param)
        self.set_up_template = pd.read_excel(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
            'gsk_set_up.xlsx'),
                                             sheet_name='Functional KPIs',
                                             keep_default_na=False)

        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)
        self.targets = self.ps_data_provider.get_kpi_external_targets()
        self.sequence = Sequence(self.data_provider)
        # Per-local-KPI set-up info; populated later from the set-up template.
        self.set_up_data = {
            ('planogram', Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            ('secondary_display', Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            ('promo', Const.KPI_TYPE_COLUMN): Const.NO_INFO
        }
Exemplo n.º 2
0
    def __init__(self, data_provider, output):
        """Cache the session's data frames, the DB connector and the GSK
        generator used by the calculations.

        :param data_provider: project data provider exposing session data.
        :param output: output handle passed through to downstream helpers.
        """
        self.output = output
        self.data_provider = data_provider
        dp = self.data_provider
        self.common = Common(dp)
        self.project_name = dp.project_name
        self.session_uid = dp.session_uid
        self.products = dp[Data.PRODUCTS]
        self.all_products = dp[Data.ALL_PRODUCTS]
        self.match_product_in_scene = dp[Data.MATCHES]
        self.visit_date = dp[Data.VISIT_DATE]
        self.session_info = dp[Data.SESSION_INFO]
        self.scene_info = dp[Data.SCENES_INFO]
        self.store_id = dp[Data.STORE_FK]
        self.scif = dp[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        # The set-up template drives which KPIs the generator calculates.
        template_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
            'gsk_set_up.xlsx')
        self.set_up_template = pd.read_excel(template_path,
                                             sheet_name='Functional KPIs',
                                             keep_default_na=False)
        self.gsk_generator = GSKGenerator(dp, self.output, self.common,
                                          self.set_up_template)
Exemplo n.º 3
0
    def __init__(self, data_provider, output):
        """Cache session data frames, store attributes and KPI helpers.

        :param data_provider: project data provider exposing session data.
        :param output: output handle passed through to downstream helpers.
        """
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        # Own-manufacturer id read from the provider's param table, keyed by
        # the 'manufacturer_id' param name.
        self.own_manufacturer_id = int(self.data_provider.own_manufacturer[
            self.data_provider.own_manufacturer['param_name'] ==
            'manufacturer_id']['param_value'].iloc[0])

        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)

        self.common = Common(self.data_provider)
        self.kpi_static_data = self.common.get_kpi_static_data()

        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.store_type = self.ps_data_provider.session_info.store_type
        # NOTE(review): .encode('utf-8') assumes these attributes are unicode
        # strings (Python 2) and never None -- confirm upstream guarantees.
        self.store_channel = self.ps_data_provider.session_info.additional_attribute_11.encode(
            'utf-8')
        self.store_format = self.ps_data_provider.session_info.additional_attribute_12.encode(
            'utf-8')
        self.retailer_fk = self.ps_data_provider.session_info.retailer_fk

        # Placeholders; the first two are re-assigned a few lines below.
        self.set_up_template = None
        self.gsk_generator = None
        self.core_range_targets = {}

        self.set_up_data = LocalConsts.SET_UP_DATA
        self.set_up_template = pd.read_excel(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                         'Data', 'gsk_set_up.xlsx'),
            sheet_name='Functional KPIs All Store',
            keep_default_na=False)
        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)
Exemplo n.º 4
0
class GSKNZToolBox:
    """Session-level toolbox that calculates the global GSK KPIs for NZ."""

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    def __init__(self, data_provider, output):
        """Cache the session's data frames, the DB connector and the GSK
        generator used by the calculations.

        :param data_provider: project data provider exposing session data.
        :param output: output handle passed through to downstream helpers.
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        self.set_up_template = pd.read_excel(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
            'gsk_set_up.xlsx'),
                                             sheet_name='Functional KPIs',
                                             keep_default_na=False)

        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)

    def main_calculation(self, *args, **kwargs):
        """
        Calculate every global GSK KPI, save each result set and commit.

        Each generator function returns a JSON-like result set which is
        written via the common saver; the order below matches the original
        call order (availability first, then SOS KPIs).
        """
        kpi_functions = [
            self.gsk_generator.availability_store_function,
            self.gsk_generator.availability_category_function,
            self.gsk_generator.availability_subcategory_function,
            self.gsk_generator.gsk_global_facings_sos_whole_store_function,
            self.gsk_generator.gsk_global_linear_sos_whole_store_function,
            self.gsk_generator.gsk_global_linear_sos_by_sub_category_function,
            self.gsk_generator.gsk_global_facings_by_sub_category_function,
            self.gsk_generator.gsk_global_facings_sos_by_category_function,
            self.gsk_generator.gsk_global_linear_sos_by_category_function,
        ]
        for kpi_function in kpi_functions:
            self.common.save_json_to_new_tables(kpi_function())

        self.common.commit_results_data()

        return
Exemplo n.º 5
0
class GSKSGToolBox:
    KPI_DICT = {
        "planogram": "planogram",
        "secondary_display": "secondary_display",
        "promo": "promo"
    }

    def __init__(self, data_provider, output):
        """Collect session data frames, external targets and KPI helpers.

        :param data_provider: project data provider exposing the session's
            data frames (products, scif, matches, store info, ...).
        :param output: output handle passed through to downstream helpers.
        """
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        # Fetch the own-manufacturer param once (it is a DataFrame lookup)
        # instead of evaluating the same expression twice; keep None when the
        # project defines no value.
        own_manufacturer_param = \
            self.data_provider[Data.OWN_MANUFACTURER]['param_value'].iloc[0]
        self.manufacturer_fk = None if own_manufacturer_param is None \
            else int(own_manufacturer_param)
        self.set_up_template = pd.read_excel(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
            'gsk_set_up.xlsx'),
                                             sheet_name='Functional KPIs',
                                             keep_default_na=False)

        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)
        self.targets = self.ps_data_provider.get_kpi_external_targets()
        self.sequence = Sequence(self.data_provider)
        # Per-local-KPI set-up info; populated later from the set-up template.
        self.set_up_data = {
            ('planogram', Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            ('secondary_display', Const.KPI_TYPE_COLUMN): Const.NO_INFO,
            ('promo', Const.KPI_TYPE_COLUMN): Const.NO_INFO
        }

    def main_calculation(self):
        """
        This function calculates the KPI results.
        """
        # # global kpis in store_level
        assortment_store_dict = self.gsk_generator.availability_store_function(
        )
        self.common.save_json_to_new_tables(assortment_store_dict)
        facings_sos_dict = self.gsk_generator.gsk_global_facings_sos_whole_store_function(
        )
        self.common.save_json_to_new_tables(facings_sos_dict)
        linear_sos_dict = self.gsk_generator.gsk_global_linear_sos_whole_store_function(
        )
        self.common.save_json_to_new_tables(linear_sos_dict)

        # global kpis in category level & kpi results are used for orange score kpi
        assortment_category_dict = self.gsk_generator.availability_category_function(
        )
        self.common.save_json_to_new_tables(assortment_category_dict)
        fsos_category_dict = self.gsk_generator.gsk_global_facings_sos_by_category_function(
        )
        self.common.save_json_to_new_tables(fsos_category_dict)

        # updating the set up dictionary for all local kpis
        for kpi in self.KPI_DICT.keys():
            self.gsk_generator.tool_box.extract_data_set_up_file(
                kpi, self.set_up_data, self.KPI_DICT)

        orange_score_dict = self.orange_score_category(
            assortment_category_dict, fsos_category_dict)

        self.common.save_json_to_new_tables(orange_score_dict)
        self.common.commit_results_data()

        score = 0
        return score

    def msl_compliance_score(self, category, categories_results_json,
                             cat_targets, parent_result_identifier):
        """Score the MSL (distribution) component of the orange score for one
        category: the category's distribution result weighted by msl_weight.

        :param category: pk of the category.
        :param categories_results_json: {category_fk: distribution result}.
        :param cat_targets: targets df for the specific category.
        :param parent_result_identifier: identifier of the parent kpi result.
        :return: (score, list with the single db-result dict)
        """
        msl_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.MSL_ORANGE_SCORE)
        msl_targets = self._filter_targets_by_kpi(cat_targets, msl_kpi_fk)
        dst_result = categories_results_json[category] \
            if category in categories_results_json else 0
        weight = msl_targets['msl_weight'].iloc[0]
        score = dst_result * weight
        result = score / weight
        db_result = {
            'fk': msl_kpi_fk,
            'numerator_id': category,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'numerator_result': result,
            'result': result,
            'target': weight,
            'score': score,
            'identifier_parent': parent_result_identifier,
            'should_enter': True
        }
        return score, [db_result]

    def fsos_compliance_score(self, category, categories_results_json,
                              cat_targets, parent_result_identifier):
        """Score the facings-SOS component of the orange score for one
        category: the full weight when the category result meets the
        benchmark, otherwise 0.

        :param category: pk of the category.
        :param categories_results_json: {category_fk: fsos result}.
        :param cat_targets: targets df for the specific category.
        :param parent_result_identifier: identifier of the parent kpi result.
        :return: (score, list with the single db-result dict)
        """
        fsos_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.FSOS_ORANGE_SCORE)
        fsos_targets = self._filter_targets_by_kpi(cat_targets, fsos_kpi_fk)
        dst_result = categories_results_json[category] \
            if category in categories_results_json else 0
        benchmark = fsos_targets['fsos_benchmark'].iloc[0]
        weight = fsos_targets['fsos_weight'].iloc[0]
        score = weight if dst_result >= benchmark else 0
        result = score / weight
        db_result = {
            'fk': fsos_kpi_fk,
            'numerator_id': category,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'numerator_result': result,
            'result': result,
            'target': weight,
            'score': score,
            'identifier_parent': parent_result_identifier,
            'should_enter': True
        }
        return score, [db_result]

    def extract_json_results_by_kpi(self, general_kpi_results, kpi_type):
        """
            This function return json of keys and values. keys= categories & values = kpi result for category
            :param general_kpi_results: list of json's , each json is a db result
            :param kpi_type: type of the desired kpi
            :return category json :  number-category_fk,number-result
        """
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_type)
        if general_kpi_results is None:
            return {}
        categories_results_json = self.extract_json_results(
            kpi_fk, general_kpi_results)
        return categories_results_json

    @staticmethod
    def extract_json_results(kpi_fk, general_kpi_results):
        """
        This function created json of keys- categories and values -  kpi result for category
        :param kpi_fk: pk of the kpi you want to extract results from.
        :param general_kpi_results: list of json's , each json is the db results
        :return category json :  number-category_fk,number-result
        """
        category_json = {}
        for row in general_kpi_results:
            if row['fk'] == kpi_fk:
                category_json[row[
                    DB.SessionResultsConsts.DENOMINATOR_ID]] = row[
                        DB.SessionResultsConsts.RESULT]
        return category_json

    def store_target(self):
        """
        This function filters the external targets df , to the only df with policy that answer current session's store
        attributes.
        It search which store attributes defined the targets policy.
        In addition it gives the targets flexibility to send "changed variables" , external targets need to save
        store param+_key and store_val + _value , than this function search the store param to look for and which value
        it need to have for this policy.
        """
        target_columns = self.targets.columns
        store_att = ['store_name', 'store_number', 'att']
        store_columns = [
            col for col in target_columns
            if len([att for att in store_att if att in col]) > 0
        ]
        for col in store_columns:
            if self.targets.empty:
                return
            if 'key' in col:
                value = col.replace('_key', '') + '_value'
                if value not in store_columns:
                    continue
                self.target_test(col, value)
                store_columns.remove(value)
            else:
                if 'value' in col:
                    continue
                self.target_test(col)

    def target_test(self, store_param, store_param_val=None):
        """
        :param store_param: string , store attribute . by this attribute will compare between targets policy and
        current session
        :param store_param_val: string , if not None the store attribute value the policy have
               This function filters the targets to the only targets with a attributes that answer the current session's
                store attributes
        """
        store_param_val = store_param_val if store_param_val is not None else store_param
        store_param = [
            store_param
        ] if store_param_val is None else self.targets[store_param].unique()
        for param in store_param:
            if param is None:
                continue
            if self.store_info[param][0] is None:
                if self.targets.empty:
                    return
                else:
                    self.targets.drop(self.targets.index, inplace=True)

            self.targets['target_match'] = self.targets[store_param_val].apply(
                self.checking_param, store_info_col=param)
            self.targets = self.targets[self.targets['target_match']]

    def checking_param(self, df_param, store_info_col):
        """Return True when the store's value for *store_info_col* satisfies
        the policy value *df_param*.

        NOTE: Python 2 only -- relies on the builtin `unicode` type and on
        bytes/unicode comparison semantics.

        :param df_param: one cell of the targets' value column; may be a list
            of allowed values, a single unicode string, or None.
        :param store_info_col: store-info column whose first value is checked.
        """
        # List policy: the store's (utf-8 encoded) value must be a member.
        if isinstance(df_param, list):
            if self.store_info[store_info_col][0].encode(
                    GlobalConsts.HelperConsts.UTF8) in df_param:
                return True
        # Single-string policy: exact match; empty string means "no
        # constraint" and matches every store.
        if isinstance(df_param, unicode):
            if self.store_info[store_info_col][0].encode(
                    GlobalConsts.HelperConsts.UTF8
            ) == df_param or df_param == '':
                return True
        # No policy value at all -> every store matches.
        if isinstance(df_param, type(None)):
            return True
        return False

    def display_distribution(self, display_name, category_fk, category_targets,
                             parent_identifier, kpi_name, parent_kpi_name,
                             scif_df):
        """
        Sum facings of POSM products whose name contains one of the display
        substrings defined in the external targets; award the category weight
        when the total reaches the benchmark.

        :param display_name: display name; the external targets hold the
            relevant substrings under the '<display_name>_name' column.
        :param category_fk: category pk.
        :param category_targets: targets df for the specific category.
        :param parent_identifier: result identifier of this kpi's parent.
        :param kpi_name: kpi name (the compliance-kpi suffix is appended).
        :param parent_kpi_name: parent kpi name (prefix of the weight and
            benchmark target columns).
        :param scif_df: scif filtered by promo-activation settings, or None.
        :return: (kpi score, list of db-result dicts)
        """
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name +
                                                    Consts.COMPLIANCE_KPI)
        results_list = []
        identifier_result = self.common.get_dictionary(category_fk=category_fk,
                                                       kpi_fk=kpi_fk)
        weight = category_targets['{}_weight'.format(parent_kpi_name)].iloc[0]

        # No relevant scenes at all: store a zero result and bail out.
        if scif_df is None:
            results_list.append({
                'fk': kpi_fk,
                'numerator_id': category_fk,
                'denominator_id': self.store_id,
                'denominator_result': 1,
                'numerator_result': 0,
                'result': 0,
                'score': 0,
                'identifier_parent': parent_identifier,
                'identifier_result': identifier_result,
                'target': weight,
                'should_enter': True
            })
            return 0, results_list

        display_products = scif_df[(scif_df['product_type'] == 'POS')
                                   & (scif_df['category_fk'] == category_fk)]

        display_name = "{}_name".format(display_name.lower())
        display_names = category_targets[display_name].iloc[0]
        kpi_result = 0

        # Normalize a single string (str/unicode, Python 2) coming from the
        # external targets into a one-element list; an empty string means no
        # display is configured.
        if isinstance(display_names, str) or isinstance(
                display_names, unicode):
            display_array = []
            if len(display_names) > 0:
                display_array.append(display_names)
            display_names = display_array

        # For each display substring, collect the POSM whose product name
        # contains it and accumulate their facings.
        # NOTE(review): str.contains treats *display* as a regex pattern --
        # confirm the configured substrings never contain metacharacters.
        for display in display_names:
            current_display_prod = display_products[
                display_products['product_name'].str.contains(display)]
            display_sku_level = self.display_sku_results(
                current_display_prod, category_fk, kpi_name)
            kpi_result += current_display_prod['facings'].sum()
            results_list.extend(display_sku_level)

        benchmark = category_targets['{}_benchmark'.format(
            parent_kpi_name)].iloc[0]
        kpi_score = weight if kpi_result >= benchmark else 0
        results_list.append({
            'fk': kpi_fk,
            'numerator_id': category_fk,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'numerator_result': kpi_score,
            'result': kpi_score,
            'score': kpi_score,
            'identifier_parent': parent_identifier,
            'identifier_result': identifier_result,
            'target': weight,
            'should_enter': True
        })
        return kpi_score, results_list

    def display_sku_results(self, display_data, category_fk, kpi_name):
        """Create one db result per POSM item in *display_data*, scored by
        its summed facings divided by 100.

        :param display_data: df of display (POSM) products.
        :param category_fk: category pk (used as denominator and parent key).
        :param kpi_name: base kpi name (sku-level suffix is appended).
        :return: list of db-result dicts, one per distinct item_id.
        """
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name +
                                                    Consts.SKU_LEVEL_LIST)
        parent_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            kpi_name + Consts.COMPLIANCE_KPI)
        identifier_parent = self.common.get_dictionary(category_fk=category_fk,
                                                       kpi_fk=parent_kpi_fk)

        results = []
        for item_id in display_data['item_id'].unique():
            item_facings = display_data[
                display_data['item_id'] == item_id]['facings'].sum()
            count = float(item_facings) / float(100)
            results.append({
                'fk': kpi_fk,
                'numerator_id': item_id,
                'denominator_id': category_fk,
                'denominator_result': 1,
                'numerator_result': count,
                'result': count,
                'score': count,
                'identifier_parent': identifier_parent,
                'should_enter': True
            })
        return results

    def assortment(self):
        """Return (level-3 assortment df, scif filtered by the planogram
        set-up data)."""
        return self.gsk_generator.tool_box.get_assortment_filtered(
            self.set_up_data, "planogram")

    def msl_assortment(self, kpi_name):
        """
        Build the assortment data frame for the given level-3 assortment kpi:
        keep only available, non-substituted products and attach product
        details plus shelf numbers.

        :param kpi_name: name of the level-3 assortment kpi.
        :return: data frame of the kpi's assortment products with their
            availability and product details, or None when there is no
            assortment for this session.
        """
        lvl3_assort, filtered_scif = self.assortment()
        if lvl3_assort is None or lvl3_assort.empty:
            return None
        kpi_assortment_fk = self.common.get_kpi_fk_by_kpi_type(kpi_name)
        kpi_results = lvl3_assort[lvl3_assort['kpi_fk_lvl3'] ==
                                  kpi_assortment_fk]  # general assortment
        kpi_results = pd.merge(kpi_results,
                               self.all_products[Const.PRODUCTS_COLUMNS],
                               how='left',
                               on='product_fk')
        # only distributed (in-store) products
        kpi_results = kpi_results[kpi_results['in_store'] == 1]

        # drop substituted products
        kpi_results = kpi_results[
            kpi_results['substitution_product_fk'].isnull()]

        # NOTE(review): right-merge keeps every filtered-scif row even when it
        # has no match in match_product_in_scene (original author flagged this
        # with "why is this happening?") -- confirm intended.
        shelf_data = pd.merge(self.match_product_in_scene[[
            'scene_fk', 'product_fk', 'shelf_number'
        ]],
                              filtered_scif[['scene_id', 'product_fk']],
                              how='right',
                              left_on=['scene_fk', 'product_fk'],
                              right_on=['scene_id', 'product_fk'
                                        ])

        # Attach shelf_number to the assortment rows.
        # NOTE(review): another right-merge; duplicate product rows in
        # shelf_data can fan out the assortment rows -- confirm intended.
        kpi_results = pd.merge(shelf_data,
                               kpi_results,
                               how='right',
                               on=['product_fk'])
        return kpi_results

    def shelf_compliance(self, category, assortment_df, cat_targets,
                         identifier_parent):
        """
        Count how many assortment products of *category* sit on the target
        shelves and score the ratio against the benchmark.

        :param category: category pk.
        :param assortment_df: relevant assortment based on filtered scif, or
            None when there is no assortment for this session.
        :param cat_targets: targets df for the specific category.
        :param identifier_parent: result identifier of this kpi's parent.
        :return: (score, list with the single db-result dict, shelf weight)
        """
        results_list = []
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(Consts.SHELF_COMPLIANCE)
        category_targets = self._filter_targets_by_kpi(cat_targets, kpi_fk)
        # The configured weight is reported even when there is no assortment
        # (the original assigned 0 and immediately overwrote it).
        shelf_weight = category_targets['shelf_weight'].iloc[0]
        if assortment_df is not None:
            assortment_cat = assortment_df[assortment_df['category_fk'] ==
                                           category]
            benchmark = category_targets['shelf_benchmark'].iloc[0]
            # Target shelves arrive as a comma-separated string, e.g. "1,2".
            shelves = [
                int(shelf) for shelf in
                category_targets['shelf_number'].iloc[0].split(",")
            ]
            shelf_df = assortment_cat[assortment_cat['shelf_number'].isin(
                shelves)]
            numerator = len(shelf_df['product_fk'].unique())
            denominator = len(assortment_cat['product_fk'].unique())
            # Guard both terms explicitly (the original's
            # 'numerator and denominator != 0' relied on truthiness).
            result = float(numerator) / float(denominator) \
                if numerator != 0 and denominator != 0 else 0
            score = shelf_weight if result >= benchmark else 0
        else:
            # No assortment: empty zero result (0/0 was always reported as 0).
            numerator, denominator, result, score = 0, 0, 0, 0
        results_list.append({
            'fk': kpi_fk,
            'numerator_id': category,
            'denominator_id': self.store_id,
            'denominator_result': denominator,
            'numerator_result': numerator,
            'result': result,
            'target': shelf_weight,
            'score': score,
            'identifier_parent': identifier_parent,
            'should_enter': True
        })
        return score, results_list, shelf_weight

    def planogram(self, category_fk, assortment, category_targets,
                  identifier_parent):
        """Combine the shelf-compliance and sequence scores into the
        planogram category KPI.

        :param category_fk: category pk.
        :param assortment: relevant assortment based on filtered scif.
        :param category_targets: targets df for the specific category.
        :param identifier_parent: result identifier of this kpi's parent.
        :return: (planogram score, list of db-result dicts)
        """
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(Consts.PLN_CATEGORY)
        identifier_result = self.common.get_dictionary(category_fk=category_fk,
                                                       kpi_fk=kpi_fk)

        shelf_score, shelf_results, shelf_weight = self.shelf_compliance(
            category_fk, assortment, category_targets, identifier_result)
        results_list = list(shelf_results)
        sequence_score, sequence_weight = self._calculate_sequence(
            category_fk, identifier_result)

        total_score = shelf_score + sequence_score
        total_weight = shelf_weight + sequence_weight
        total_result = total_score / float(total_weight) if total_weight else 0
        results_list.append({
            'fk': kpi_fk,
            'numerator_id': category_fk,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'numerator_result': total_score,
            'result': total_result,
            'target': total_weight,
            'score': total_score,
            'identifier_parent': identifier_parent,
            'identifier_result': identifier_result,
            'should_enter': True
        })
        return total_score, results_list

    def _calculate_sequence(self, cat_fk, planogram_identifier):
        """Run every sequence target of *cat_fk* through the sequence
        calculation algorithm, save each result and the aggregated main
        level, and return (passed score, total weight)."""
        sequence_kpi_fk, sequence_sku_kpi_fk = self._get_sequence_kpi_fks()
        sequence_targets = self._filter_targets_by_kpi(self.targets,
                                                       sequence_kpi_fk)
        sequence_targets = sequence_targets.loc[
            sequence_targets.category_fk == cat_fk]

        total_score = 0
        total_weight = 0
        for _, sequence in sequence_targets.iterrows():
            population, location, attributes = \
                self._prepare_data_for_sequence_calculation(sequence)
            outcome = self.sequence.calculate_sequence(population, location,
                                                       attributes)
            total_score += self._save_sequence_results_to_db(
                sequence_sku_kpi_fk, sequence_kpi_fk, sequence, outcome)
            total_weight += sequence[SessionResultsConsts.WEIGHT]

        self._save_sequence_main_level_to_db(sequence_kpi_fk,
                                             planogram_identifier, cat_fk,
                                             total_score, total_weight)
        return total_score, total_weight

    @staticmethod
    def _prepare_data_for_sequence_calculation(sequence_params):
        """
        Build the population, location and additional-attributes dictionaries
        the sequence algorithm expects, out of a single external-target row.

        :param sequence_params: One row of the sequence external targets.
        :return: Tuple of (population, location, additional_attributes) dicts.
        """
        population = {
            ProductsConsts.PRODUCT_FK:
                sequence_params[ProductsConsts.PRODUCT_FK],
        }
        location = {
            TemplatesConsts.TEMPLATE_GROUP:
                sequence_params[TemplatesConsts.TEMPLATE_GROUP],
        }
        attributes = {
            AdditionalAttr.STRICT_MODE: sequence_params['strict_mode'],
            AdditionalAttr.INCLUDE_STACKING:
                sequence_params['include_stacking'],
            # Every occurrence of the sequence is checked, not just the first.
            AdditionalAttr.CHECK_ALL_SEQUENCES: True,
        }
        return population, location, attributes

    def _extract_target_params(self, sequence_params):
        """
        Pull the relevant category fk and the result-value pk out of a
        sequence external-target row.

        :param sequence_params: One row of the sequence external targets.
        :return: Tuple of (category fk, result value pk).
        """
        result_value = self.ps_data_provider.get_pks_of_result(
            sequence_params['sequence_name'])
        numerator_id = sequence_params[ProductsConsts.CATEGORY_FK]
        return numerator_id, result_value

    def _save_sequence_main_level_to_db(self, kpi_fk, planogram_identifier,
                                        cat_fk, sequence_score, total_weight):
        """
        Save the top sequence level to DB.

        The result is the score/weight ratio rounded to two digits (0 when
        there is no weight); the saved score scales it back by the weight.
        """
        if total_weight:
            result = round(sequence_score / float(total_weight), 2)
        else:
            result = 0
        self.common.write_to_db_result(fk=kpi_fk,
                                       numerator_id=cat_fk,
                                       numerator_result=sequence_score,
                                       result=result,
                                       denominator_id=self.store_id,
                                       denominator_result=total_weight,
                                       score=result * total_weight,
                                       weight=total_weight,
                                       target=total_weight,
                                       should_enter=True,
                                       identifier_result=kpi_fk,
                                       identifier_parent=planogram_identifier)

    def _save_sequence_results_to_db(self, kpi_fk, parent_kpi_fk,
                                     sequence_params, sequence_results):
        """
        Save the SKU-level sequence KPI.

        :param kpi_fk: Sequence SKU kpi fk.
        :param parent_kpi_fk: Total sequence score kpi fk.
        :param sequence_params: A dictionary with sequence params from the external targets.
        :param sequence_results: A DataFrame with the results returned by the sequence algorithm.
        :return: The score that was saved (0, or the sequence's weight).
        """
        category_fk, result_value = self._extract_target_params(
            sequence_params)
        num_of_sequences = len(sequence_results)
        target = sequence_params[SessionResultsConsts.TARGET]
        weight = sequence_params[SessionResultsConsts.WEIGHT]
        # Full weight when enough sequence occurrences were found, else zero.
        score = weight if num_of_sequences >= target else 0
        self.common.write_to_db_result(fk=kpi_fk,
                                       numerator_id=category_fk,
                                       numerator_result=num_of_sequences,
                                       result=result_value,
                                       denominator_id=self.store_id,
                                       denominator_result=None,
                                       score=score,
                                       weight=weight,
                                       parent_fk=parent_kpi_fk,
                                       target=target,
                                       should_enter=True,
                                       identifier_parent=parent_kpi_fk,
                                       identifier_result=(kpi_fk, category_fk))
        return score

    def _get_sequence_kpi_fks(self):
        """Fetch the fks of the main sequence KPI and the sequence SKU KPI."""
        main_fk = self.common.get_kpi_fk_by_kpi_type(Consts.SEQUENCE_KPI)
        sku_fk = self.common.get_kpi_fk_by_kpi_type(Consts.SEQUENCE_SKU_KPI)
        return main_fk, sku_fk

    def secondary_display(self, category_fk, cat_targets, identifier_parent,
                          scif_df):
        """
        Calculate the secondary-display score: 0, or the full weight when at
        least one of the child display KPIs (dispensers, countertop, standee)
        reaches the weight.

        :param category_fk: Category being scored.
        :param cat_targets: Targets df for the specific category.
        :param identifier_parent: Result identifier of the parent KPI.
        :param scif_df: Scif filtered by the secondary-display settings.
        :return: Tuple of (results list, display score).
        """
        parent_kpi_name = 'display'
        total_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.DISPLAY_SUMMARY)
        category_targets = self._filter_targets_by_kpi(cat_targets,
                                                       total_kpi_fk)
        weight = category_targets['display_weight'].iloc[0]
        result_identifier = self.common.get_dictionary(category_fk=category_fk,
                                                       kpi_fk=total_kpi_fk)

        results_list = []
        child_scores = []
        # Each child KPI is the same distribution calculation on a different
        # display target; keep the original dispenser/countertop/standee order.
        for target_name, kpi_name in ((Consts.DISPENSER_TARGET, Consts.DISPENSERS),
                                      (Consts.COUNTER_TOP_TARGET, Consts.COUNTERTOP),
                                      (Consts.STANDEE_TARGET, Consts.STANDEE)):
            child_score, child_results = self.display_distribution(
                target_name, category_fk, category_targets, result_identifier,
                kpi_name, parent_kpi_name, scif_df)
            child_scores.append(child_score)
            results_list.extend(child_results)

        display_score = weight if any(
            score == weight for score in child_scores) else 0
        results_list.append({
            'fk': total_kpi_fk,
            'numerator_id': category_fk,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'numerator_result': display_score,
            'result': display_score,
            'target': weight,
            'score': display_score,
            'identifier_parent': identifier_parent,
            'identifier_result': result_identifier,
            'should_enter': True
        })

        return results_list, display_score

    def promo_activation(self, category_fk, cat_targets, identifier_parent,
                         scif_df):
        """
        Calculate the promo-activation score: 0, or the full weight when at
        least one of the child promo KPIs (hang-sell, top shelf) reaches the
        weight.

        :param category_fk: Category being scored.
        :param cat_targets: Targets df for the specific category.
        :param identifier_parent: Result identifier of the parent KPI.
        :param scif_df: Scif filtered by the promo-activation settings.
        :return: Tuple of (results list, promo score).
        """
        total_kpi_fk = self.common.get_kpi_fk_by_kpi_type(Consts.PROMO_SUMMARY)
        category_targets = self._filter_targets_by_kpi(cat_targets,
                                                       total_kpi_fk)
        result_identifier = self.common.get_dictionary(category_fk=category_fk,
                                                       kpi_fk=total_kpi_fk)
        parent_kpi_name = 'promo'
        weight = category_targets['promo_weight'].iloc[0]

        results_list = []
        child_scores = []
        # Same distribution calculation per promo target; hang-sell first,
        # then top shelf, matching the original result ordering.
        for target_name, kpi_name in ((Consts.HANGSELL, Consts.HANGSELL_KPI),
                                      (Consts.TOP_SHELF, Consts.TOP_SHELF_KPI)):
            child_score, child_results = self.display_distribution(
                target_name, category_fk, category_targets, result_identifier,
                kpi_name, parent_kpi_name, scif_df)
            child_scores.append(child_score)
            results_list.extend(child_results)

        promo_score = weight if any(
            score == weight for score in child_scores) else 0

        results_list.append({
            'fk': total_kpi_fk,
            'numerator_id': category_fk,
            'denominator_id': self.store_id,
            'denominator_result': 1,
            'numerator_result': promo_score,
            'result': promo_score,
            'target': weight,
            'score': promo_score,
            'identifier_parent': identifier_parent,
            'identifier_result': result_identifier,
            'should_enter': True
        })

        return results_list, promo_score

    @staticmethod
    def _filter_targets_by_kpi(targets, kpi_fk):
        """ This function filter all targets but targets which related to relevant kpi"""
        filtered_targets = targets.loc[targets.kpi_fk == kpi_fk]
        return filtered_targets

    def orange_score_category(self, assortment_category_res,
                              fsos_category_res):
        """
        Calculate the orange-score compliance KPI per category.

        Settings are based on the external targets and the set-up file. For
        every category with targets, the compliance score is the sum of the
        MSL, FSOS, planogram, secondary-display and promo-activation scores.

        :param assortment_category_res: Array of assortment results.
        :param fsos_category_res: Array of facing-SOS-by-store results.
        :return: A list of result dictionaries; empty when no targets exist.
        """
        results_list = []
        self.store_target()
        if self.targets.empty:
            # Return an empty list (not None) so callers can always iterate
            # or extend the return value without a None check.
            return results_list

        total_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            Consts.ORANGE_SCORE_COMPLIANCE)
        fsos_json_global_results = self.extract_json_results_by_kpi(
            fsos_category_res, Consts.GLOBAL_FSOS_BY_CATEGORY)
        msl_json_global_results = self.extract_json_results_by_kpi(
            assortment_category_res, Consts.GLOBAL_DST_BY_CATEGORY)

        # Scif after filtering it by the set-up file, once per child KPI type.
        scif_secondary = self.gsk_generator.tool_box.tests_by_template(
            'secondary_display', self.scif, self.set_up_data)
        scif_promo = self.gsk_generator.tool_box.tests_by_template(
            'promo', self.scif, self.set_up_data)
        categories = self.targets[
            DataProviderConsts.ProductsConsts.CATEGORY_FK].unique()
        assortment = self.msl_assortment('Distribution - SKU')

        for cat in categories:
            orange_score_result_identifier = self.common.get_dictionary(
                category_fk=cat, kpi_fk=total_kpi_fk)

            cat_targets = self.targets[self.targets[
                DataProviderConsts.ProductsConsts.CATEGORY_FK] == cat]

            msl_score, msl_results = self.msl_compliance_score(
                cat, msl_json_global_results, cat_targets,
                orange_score_result_identifier)

            fsos_score, fsos_results = self.fsos_compliance_score(
                cat, fsos_json_global_results, cat_targets,
                orange_score_result_identifier)

            planogram_score, planogram_results = self.planogram(
                cat, assortment, cat_targets, orange_score_result_identifier)

            secondary_display_res, secondary_score = self.secondary_display(
                cat, cat_targets, orange_score_result_identifier,
                scif_secondary)

            promo_activation_res, promo_score = self.promo_activation(
                cat, cat_targets, orange_score_result_identifier, scif_promo)

            # Orange score = plain sum of the five child compliance scores.
            compliance_category_score = promo_score + secondary_score + fsos_score + msl_score + planogram_score
            results_list.extend(msl_results + fsos_results +
                                planogram_results + secondary_display_res +
                                promo_activation_res)
            results_list.append({
                'fk': total_kpi_fk,
                'numerator_id': self.manufacturer_fk,
                'denominator_id': cat,
                'denominator_result': 1,
                'numerator_result': compliance_category_score,
                'result': compliance_category_score,
                'score': compliance_category_score,
                'identifier_result': orange_score_result_identifier
            })
        return results_list
# Exemplo n.º 6
# 0
class GSKRUToolBox:
    """Tool box for the GSK RU project: calculates the all-store, main-shelf,
    secondary-shelf and local (SOA/CRA) KPI groups for a single session."""

    def __init__(self, data_provider, output):
        """
        Initialize session data, the DB connection and the GSK KPI generator.

        :param data_provider: Data provider for the current session.
        :param output: Output object the results are written to.
        """
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.own_manufacturer_id = int(self.data_provider.own_manufacturer[
            self.data_provider.own_manufacturer['param_name'] ==
            'manufacturer_id']['param_value'].iloc[0])

        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)

        self.common = Common(self.data_provider)
        self.kpi_static_data = self.common.get_kpi_static_data()

        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.store_type = self.ps_data_provider.session_info.store_type
        # NOTE(review): .encode('utf-8') produces bytes on Python 3 and raises
        # if the attribute is None -- presumably kept for legacy target
        # matching; confirm before changing.
        self.store_channel = self.ps_data_provider.session_info.additional_attribute_11.encode(
            'utf-8')
        self.store_format = self.ps_data_provider.session_info.additional_attribute_12.encode(
            'utf-8')
        self.retailer_fk = self.ps_data_provider.session_info.retailer_fk

        # Filled by gsk_soa_function with the internal SOA target per
        # sub-category; consumed later by gsk_cra_function.
        self.core_range_targets = {}

        self.set_up_data = LocalConsts.SET_UP_DATA
        # The 'All Store' sheet drives the global KPIs; main_calculation
        # reloads the other sheets as it progresses.
        self.set_up_template = pd.read_excel(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                         'Data', 'gsk_set_up.xlsx'),
            sheet_name='Functional KPIs All Store',
            keep_default_na=False)
        self.gsk_generator = GSKGenerator(self.data_provider, self.output,
                                          self.common, self.set_up_template)

    def main_calculation(self, *args, **kwargs):
        """
        Calculate all KPI results for the session.

        Runs the global (all-store) availability and SOS KPIs first, then
        reloads the set-up template for the main-shelf, secondary-shelf and
        local sheets, calculating the relevant KPI group for each, and
        finally commits the results.
        """
        # Global KPIs - All Store (set-up sheet was loaded in __init__).
        for availability_func in (
                self.gsk_generator.availability_store_function,
                self.gsk_generator.availability_category_function,
                self.gsk_generator.availability_subcategory_function):
            assortment_dict = availability_func(
                custom_suffix='_Stacking_Included')
            self.common.save_json_to_new_tables(assortment_dict)
        self._calculate_sos_kpis('_Stacking_Included', linear_suffix=None)

        # Main Shelf KPIs
        self._switch_set_up_sheet('Functional KPIs Main Shelf')
        self._calculate_sos_kpis('_Stacking_Included_Main_Shelf',
                                 '_Main_Shelf')

        # Secondary Shelf KPIs
        self._switch_set_up_sheet('Functional KPIs Secondary Shelf')
        self._calculate_sos_kpis('_Stacking_Included_Secondary_Shelf',
                                 '_Secondary_Shelf')

        # Local KPIs - SOA only; Core Range Assortment (gsk_cra_function) is
        # disabled until phase 2.
        self._switch_set_up_sheet('Functional KPIs Local')
        soa_dict = self.gsk_soa_function()
        self.common.save_json_to_new_tables(soa_dict)

        self.common.commit_results_data()

        return

    def _switch_set_up_sheet(self, sheet_name):
        """Reconnect to the project DB and reload the KPI set-up template from
        *sheet_name*, resetting the generator's set-up file and data."""
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.set_up_template = pd.read_excel(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                         'Data', 'gsk_set_up.xlsx'),
            sheet_name=sheet_name,
            keep_default_na=False)
        self.gsk_generator.set_up_file = self.set_up_template
        self.gsk_generator.tool_box.set_up_file = self.set_up_template
        self.gsk_generator.tool_box.set_up_data = LocalConsts.SET_UP_DATA.copy()

    def _calculate_sos_kpis(self, facings_suffix, linear_suffix):
        """Run the three facings-SOS and three linear-SOS KPI functions for the
        currently loaded set-up sheet and save their results.

        :param facings_suffix: Custom KPI-name suffix for the facings KPIs.
        :param linear_suffix: Custom suffix for the linear KPIs, or None to
            call them without a suffix (all-store variant).
        """
        for facings_func in (
                self.gsk_generator.gsk_global_facings_sos_whole_store_function,
                self.gsk_generator.gsk_global_facings_sos_by_category_function,
                self.gsk_generator.gsk_global_facings_by_sub_category_function):
            facings_sos_dict = facings_func(
                custom_suffix=facings_suffix,
                fractional_facings_parameters=LocalConsts.
                FRACTIONAL_FACINGS_PARAMETERS)
            self.common.save_json_to_new_tables(facings_sos_dict)

        for linear_func in (
                self.gsk_generator.gsk_global_linear_sos_whole_store_function,
                self.gsk_generator.gsk_global_linear_sos_by_category_function,
                self.gsk_generator.gsk_global_linear_sos_by_sub_category_function):
            if linear_suffix is None:
                linear_sos_dict = linear_func()
            else:
                linear_sos_dict = linear_func(custom_suffix=linear_suffix)
            self.common.save_json_to_new_tables(linear_sos_dict)

    def gsk_soa_function(self):
        """
        Calculate the Share-Of-Assortment (SOA) KPIs.

        SOA is the share of the own manufacturer's distinct products out of
        all distinct products in the relevant scenes, scored on sub-category
        and manufacturer level against internal and external percentage
        targets from the external-targets table.

        :return: A list of KPI result dictionaries for the new tables.
        """
        results = []

        kpi_soa_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.SOA_KPI)
        kpi_soa_manufacturer_internal_target_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.SOA_MANUFACTURER_INTERNAL_TARGET_KPI)
        kpi_soa_manufacturer_external_target_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.SOA_MANUFACTURER_EXTERNAL_TARGET_KPI)
        kpi_soa_subcat_internal_target_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.SOA_SUBCAT_INTERNAL_TARGET_KPI)
        kpi_soa_subcat_external_target_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.SOA_SUBCAT_EXTERNAL_TARGET_KPI)

        identifier_internal = self.common.get_dictionary(
            manufacturer_fk=self.own_manufacturer_id,
            kpi_fk=kpi_soa_manufacturer_internal_target_fk)
        identifier_external = self.common.get_dictionary(
            manufacturer_fk=self.own_manufacturer_id,
            kpi_fk=kpi_soa_manufacturer_external_target_fk)

        targets = self.ps_data_provider.get_kpi_external_targets(
            kpi_fks=[kpi_soa_fk],
            key_filters={'additional_attribute_11': self.store_channel,
                         'additional_attribute_12': self.store_format})
        # NOTE(review): there is no guard for empty targets -- presumably the
        # returned frame always carries its columns even when empty; confirm
        # before relying on this in new projects.

        self.gsk_generator.tool_box. \
            extract_data_set_up_file(LocalConsts.SOA, self.set_up_data, LocalConsts.KPI_DICT)
        df = self.gsk_generator.tool_box.tests_by_template(
            LocalConsts.SOA, self.scif, self.set_up_data)
        df, facings_column = self.df_filter_by_stacking(df, LocalConsts.SOA)

        # Sub-Category level: one internal- and one external-target result
        # per sub-category present in the filtered scif.
        for sub_category_fk in df[
                ScifConsts.SUB_CATEGORY_FK].unique().tolist():

            numerator_result = len(
                df[(df[ScifConsts.MANUFACTURER_FK] == self.own_manufacturer_id)
                   & (df[ScifConsts.SUB_CATEGORY_FK] == sub_category_fk)][
                       ScifConsts.PRODUCT_FK].unique().tolist())
            denominator_result = len(
                df[df[ScifConsts.SUB_CATEGORY_FK] == sub_category_fk][
                    ScifConsts.PRODUCT_FK].unique().tolist())
            result = round(float(numerator_result) / float(denominator_result), 4) \
                if numerator_result != 0 and denominator_result != 0 \
                else 0

            subcat_targets = targets[targets['sub_category_fk'] ==
                                     sub_category_fk]

            target, score = self._soa_target_and_score(
                subcat_targets['internal_target'].values, result)
            results.append({
                'fk': kpi_soa_subcat_internal_target_fk,
                SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer_id,
                SessionResultsConsts.NUMERATOR_RESULT: numerator_result,
                SessionResultsConsts.DENOMINATOR_ID: self.store_id,
                SessionResultsConsts.DENOMINATOR_RESULT: denominator_result,
                SessionResultsConsts.CONTEXT_ID: sub_category_fk,
                SessionResultsConsts.RESULT: result,
                SessionResultsConsts.TARGET: target,
                SessionResultsConsts.SCORE: score,
                'identifier_parent': identifier_internal,
                'should_enter': True
            })

            # The internal target doubles as the core-range target for CRA.
            self.core_range_targets.update({sub_category_fk: target})

            target, score = self._soa_target_and_score(
                subcat_targets['external_target'].values, result)
            results.append({
                'fk': kpi_soa_subcat_external_target_fk,
                SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer_id,
                SessionResultsConsts.NUMERATOR_RESULT: numerator_result,
                SessionResultsConsts.DENOMINATOR_ID: self.store_id,
                SessionResultsConsts.DENOMINATOR_RESULT: denominator_result,
                SessionResultsConsts.CONTEXT_ID: sub_category_fk,
                SessionResultsConsts.RESULT: result,
                SessionResultsConsts.TARGET: target,
                SessionResultsConsts.SCORE: score,
                'identifier_parent': identifier_external,
                'should_enter': True
            })

        # Manufacturer level: whole store. Manufacturer targets are the rows
        # with no sub_category_fk.
        numerator_result = len(
            df[df[ScifConsts.MANUFACTURER_FK] == self.own_manufacturer_id][
                ScifConsts.PRODUCT_FK].unique().tolist())
        denominator_result = len(df[ScifConsts.PRODUCT_FK].unique().tolist())
        result = round(float(numerator_result) / float(denominator_result), 4) \
            if numerator_result != 0 and denominator_result != 0 \
            else 0

        manufacturer_targets = targets[targets['sub_category_fk'].isnull()]

        target, score = self._soa_target_and_score(
            manufacturer_targets['internal_target'].values, result)
        results.append({
            'fk': kpi_soa_manufacturer_internal_target_fk,
            SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer_id,
            SessionResultsConsts.NUMERATOR_RESULT: numerator_result,
            SessionResultsConsts.DENOMINATOR_ID: self.store_id,
            SessionResultsConsts.DENOMINATOR_RESULT: denominator_result,
            SessionResultsConsts.RESULT: result,
            SessionResultsConsts.TARGET: target,
            SessionResultsConsts.SCORE: score,
            'identifier_result': identifier_internal,
            'should_enter': True
        })

        target, score = self._soa_target_and_score(
            manufacturer_targets['external_target'].values, result)
        results.append({
            'fk': kpi_soa_manufacturer_external_target_fk,
            SessionResultsConsts.NUMERATOR_ID: self.own_manufacturer_id,
            SessionResultsConsts.NUMERATOR_RESULT: numerator_result,
            SessionResultsConsts.DENOMINATOR_ID: self.store_id,
            SessionResultsConsts.DENOMINATOR_RESULT: denominator_result,
            SessionResultsConsts.RESULT: result,
            SessionResultsConsts.TARGET: target,
            SessionResultsConsts.SCORE: score,
            'identifier_result': identifier_external,
            'should_enter': True
        })

        return results

    @staticmethod
    def _soa_target_and_score(target_values, result):
        """
        Normalize a raw external-target value and score *result* against it.

        :param target_values: Array of matching target values (possibly empty);
            targets are stored as percentages.
        :param result: The calculated SOA ratio (0-1).
        :return: (target, score) where target is a 0-1 fraction or None when
            missing/zero; score is result/target rounded to 4 digits, or 0.
        """
        target = float(target_values[0]) if len(target_values) > 0 else None
        target = target / 100 if target else None
        score = round(result / target, 4) if target else 0
        return target, score

    def gsk_cra_function(self):
        """Calculate the Core Range Assortment (CRA) KPI hierarchy.

        Results are produced on three parent-linked levels:
        product -> sub-category -> manufacturer. For each targeted
        sub-category, products are ranked by the external-target 'priority';
        the cut-off ('cra_priority') is the number of distinct products seen
        in that sub-category scaled by its core-range target. The
        sub-category result is the share of targeted priority ranks actually
        found on shelf, and the manufacturer result aggregates those counts
        across all targeted sub-categories.

        :return: list of result dicts for the common KPI result writer.
        """

        results = []

        # Static KPI fks for every level of the CRA hierarchy.
        kpi_cra_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.CRA_KPI)
        kpi_cra_manufacturer_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.CRA_MANUFACTURER_KPI)
        kpi_cra_subcat_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.CRA_SUBCAT_KPI)
        kpi_cra_subcat_by_product_fk = \
            self.common.get_kpi_fk_by_kpi_type(LocalConsts.CRA_SUBCAT_BY_PRODUCT_KPI)

        # Identifier linking sub-category rows to the manufacturer-level row
        # appended at the end.
        identifier_manufacturer = self.common.get_dictionary(
            manufacturer_fk=self.own_manufacturer_id,
            kpi_fk=kpi_cra_manufacturer_fk)

        # Running totals across sub-categories for the manufacturer level.
        total_cra_size_target = 0
        total_cra_size_actual = 0

        # External CRA targets scoped to this store's format and retailer.
        targets = \
            self.ps_data_provider.get_kpi_external_targets(kpi_fks=[kpi_cra_fk],
                                                           key_filters={'additional_attribute_12': self.store_format,
                                                                        'retailer_fk': self.retailer_fk})

        if targets.empty:
            Log.warning('No CRA targets defined for this session')
        else:

            # Restrict scene item facts to the scene types / filters
            # configured for CRA in the set-up file.
            self.gsk_generator.tool_box. \
                extract_data_set_up_file(LocalConsts.CRA, self.set_up_data, LocalConsts.KPI_DICT)
            df = self.gsk_generator.tool_box.tests_by_template(
                LocalConsts.CRA, self.scif, self.set_up_data)
            # Pick the facings column per the KPI's stacking setting and drop
            # zero-facings rows.
            df, facings_column = self.df_filter_by_stacking(
                df, LocalConsts.CRA)

            # Total facings per (sub-category, product); rows with no
            # sub-category are discarded.
            df = df[df[ScifConsts.SUB_CATEGORY_FK].notnull()][
                [ScifConsts.SUB_CATEGORY_FK, ScifConsts.PRODUCT_FK, facings_column]]\
                .groupby([ScifConsts.SUB_CATEGORY_FK, ScifConsts.PRODUCT_FK]).agg({facings_column: 'sum'})\
                .reset_index()
            # Attach the external-target priority of each product (NaN when
            # the product has no CRA target).
            df = df.merge(
                targets[['sub_category_fk', 'product_fk', 'priority']],
                how='left',
                left_on=[ScifConsts.SUB_CATEGORY_FK, ScifConsts.PRODUCT_FK],
                right_on=['sub_category_fk', 'product_fk'])
            # Unique id per product: 'P<priority>' for targeted products (so
            # products sharing a priority rank collapse to one entry),
            # 'N<product_fk>' for non-targeted ones.
            df['unique_product_id'] = \
                df.apply(lambda r:
                         'P' + str(r['priority']) if pd.notnull(r['priority']) else 'N' + str(r['product_fk']), axis=1)

            # Sub-Category
            # Only sub-categories that both have CRA targets and appear in
            # the store's core-range targets are calculated.
            target_subcat_fks = set(
                targets['sub_category_fk'].unique().tolist()) & set(
                    self.core_range_targets.keys())
            for sub_category_fk in target_subcat_fks:

                identifier_subcat = self.common.get_dictionary(
                    manufacturer_fk=self.own_manufacturer_id,
                    sub_category_fk=sub_category_fk,
                    kpi_fk=kpi_cra_subcat_fk)

                # NOTE(review): target_subcat_fks is already intersected with
                # core_range_targets.keys() above, so this guard appears
                # unreachable - confirm before removing.
                if sub_category_fk not in self.core_range_targets.keys():
                    numerator_result = denominator_result = result = score = 0
                else:
                    # Distinct (priority-collapsed) products seen in this
                    # sub-category.
                    subcat_size = len(
                        df[df[ScifConsts.SUB_CATEGORY_FK] == sub_category_fk]
                        ['unique_product_id'].unique().tolist())
                    core_range_target = self.core_range_targets[
                        sub_category_fk]
                    # Priority cut-off: only targeted ranks up to this value
                    # count towards the CRA.
                    cra_priority = round(
                        subcat_size *
                        core_range_target if core_range_target else 0)

                    # Targeted products within the cut-off vs. products
                    # actually present within the cut-off.
                    cra_products_target = targets[
                        (targets['sub_category_fk'] == sub_category_fk)
                        & (targets['priority'] <= cra_priority)][[
                            'product_fk', 'priority'
                        ]]
                    cra_products_actual = df[
                        (df[ScifConsts.SUB_CATEGORY_FK] == sub_category_fk)
                        & (df['priority'] <= cra_priority)][[
                            ScifConsts.PRODUCT_FK, facings_column
                        ]]

                    # Count of distinct priority ranks targeted vs. actually
                    # present within the cut-off.
                    cra_size_target = len(
                        targets[(targets['sub_category_fk'] == sub_category_fk)
                                & (targets['priority'] <= cra_priority)]
                        ['priority'].unique().tolist())
                    cra_size_actual = len(
                        df[(df[ScifConsts.SUB_CATEGORY_FK] == sub_category_fk)
                           & (df['priority'] <= cra_priority)]
                        ['priority'].unique().tolist())

                    if cra_size_target == 0:
                        # Nothing targeted below the cut-off: zero the
                        # sub-category row and skip product-level results.
                        numerator_result = denominator_result = result = score = 0
                    else:

                        # Product
                        # One binary row per targeted product: present on
                        # shelf (any facings) or not.
                        for i, product in cra_products_target.iterrows():

                            numerator_result = \
                                cra_products_actual[cra_products_actual[ScifConsts.PRODUCT_FK] ==
                                                    product['product_fk']][facings_column].sum()
                            denominator_result = product['priority']
                            result = 1 if numerator_result else 0
                            score = result

                            results.append({
                                'fk':
                                kpi_cra_subcat_by_product_fk,
                                SessionResultsConsts.NUMERATOR_ID:
                                product['product_fk'],
                                SessionResultsConsts.NUMERATOR_RESULT:
                                numerator_result,
                                SessionResultsConsts.DENOMINATOR_ID:
                                self.own_manufacturer_id,
                                SessionResultsConsts.DENOMINATOR_RESULT:
                                denominator_result,
                                SessionResultsConsts.CONTEXT_ID:
                                sub_category_fk,
                                SessionResultsConsts.RESULT:
                                result,
                                SessionResultsConsts.SCORE:
                                score,
                                'identifier_parent':
                                identifier_subcat,
                                'should_enter':
                                True
                            })

                        # Sub-category level: share of targeted priority
                        # ranks actually found on shelf.
                        numerator_result = cra_size_actual
                        denominator_result = cra_size_target
                        result = round(float(numerator_result) / float(denominator_result), 4) \
                            if numerator_result != 0 and denominator_result != 0 \
                            else 0
                        score = result

                        # Accumulate for the manufacturer-level aggregate.
                        total_cra_size_target += cra_size_target
                        total_cra_size_actual += cra_size_actual

                # Sub-category row (uses the values set in whichever branch
                # ran above).
                results.append({
                    'fk': kpi_cra_subcat_fk,
                    SessionResultsConsts.NUMERATOR_ID:
                    self.own_manufacturer_id,
                    SessionResultsConsts.NUMERATOR_RESULT: numerator_result,
                    SessionResultsConsts.DENOMINATOR_ID: self.store_id,
                    SessionResultsConsts.DENOMINATOR_RESULT:
                    denominator_result,
                    SessionResultsConsts.CONTEXT_ID: sub_category_fk,
                    SessionResultsConsts.RESULT: result,
                    SessionResultsConsts.SCORE: score,
                    'identifier_parent': identifier_manufacturer,
                    'identifier_result': identifier_subcat,
                    'should_enter': True
                })

            # Manufacturer
            # Aggregate row over all targeted sub-categories (only when at
            # least one was calculated).
            if target_subcat_fks:
                numerator_result = total_cra_size_actual
                denominator_result = total_cra_size_target
                result = round(float(total_cra_size_actual) / float(total_cra_size_target), 4) \
                    if numerator_result != 0 and denominator_result != 0 \
                    else 0
                score = result

                results.append({
                    'fk': kpi_cra_manufacturer_fk,
                    SessionResultsConsts.NUMERATOR_ID:
                    self.own_manufacturer_id,
                    SessionResultsConsts.NUMERATOR_RESULT: numerator_result,
                    SessionResultsConsts.DENOMINATOR_ID: self.store_id,
                    SessionResultsConsts.DENOMINATOR_RESULT:
                    denominator_result,
                    SessionResultsConsts.RESULT: result,
                    SessionResultsConsts.SCORE: score,
                    'identifier_result': identifier_manufacturer,
                    'should_enter': True
                })

        return results

    def df_filter_by_stacking(self, df, kpi_type):
        """Select the facings column per the KPI's stacking setting and drop
        rows with no facings.

        :param df: scene-item-facts style DataFrame.
        :param kpi_type: KPI type key used to look up the set-up configuration.
        :return: tuple of (filtered DataFrame, name of the facings column used).
        """
        # Stacking is included by default unless the set-up file says otherwise.
        stacking_enabled = self.set_up_data.get(
            (GlobalConsts.INCLUDE_STACKING, kpi_type), True)
        if stacking_enabled:
            facings_field = ScifConsts.FACINGS
        else:
            facings_field = ScifConsts.FACINGS_IGN_STACK
        filtered = df[df[facings_field] > 0]
        return filtered, facings_field
# Exemplo n.º 7
class GSKSGToolBox:
    """Session tool box for the GSK SG project.

    Loads session data from the data provider, runs the shared GSK generator's
    availability and share-of-shelf calculations, and commits the results
    through the common KPI infrastructure.
    """

    # Legacy KPI hierarchy levels (kept for backward compatibility).
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        # Store attribute used to resolve store-level KPI configuration.
        self.store_type = self.store_info[STORE_LVL_1].values[0]
        # Set-up file describing which scene types / filters each KPI uses.
        self.set_up_template = pd.read_excel(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
                                                          'gsk_set_up.xlsx'), sheet_name='Functional KPIs',
                                             keep_default_na=False)
        self.gsk_generator = GSKGenerator(self.data_provider, self.output, self.common, self.set_up_template)

    def main_calculation(self):
        """Calculate and commit all KPI results for the session.

        Runs the availability calculations, then each global SOS calculation
        (skipping, with a warning, any that return None because scene item
        facts is empty), commits everything, and returns a session score of 0.
        """
        # Availability results are saved unconditionally.
        self.common.save_json_to_new_tables(
            self.gsk_generator.availability_store_function())
        self.common.save_json_to_new_tables(
            self.gsk_generator.availability_category_function())

        # Every SOS calculation follows the same contract: a dict of results,
        # or None when scene item facts is empty for the session.
        sos_functions = (
            self.gsk_generator.gsk_global_facings_sos_whole_store_function,
            self.gsk_generator.gsk_global_linear_sos_whole_store_function,
            self.gsk_generator.gsk_global_linear_sos_by_sub_category_function,
            self.gsk_generator.gsk_global_facings_by_sub_category_function,
            self.gsk_generator.gsk_global_facings_sos_by_category_function,
            self.gsk_generator.gsk_global_linear_sos_by_category_function,
        )
        for sos_function in sos_functions:
            sos_dict = sos_function()
            if sos_dict is None:
                Log.warning('Scene item facts is empty for this session')
            else:
                self.common.save_json_to_new_tables(sos_dict)

        self.common.commit_results_data()
        # The session-level score is not computed by this project.
        score = 0
        return score




        # template = self.get_relevant_calculations()
        # self.handle_calculation(template)
        # self.common_old_tables.commit_results_data()
        # self.common.commit_results_data()
        #
        # score = 0





##### old GSK project ###

#     def set_calculation_result_by_score_type(self, row):
#         if row[ATOMIC] == 'Share of Shelf':
#             print ''
#
#         if row['Score Method'] == 'Binary':
#             if row['result'] >= row['Benchmark']:
#                 return 1
#             else:
#                 return 0
#         elif ['Score Method'] == 'Proportional':
#             return row['result'] * 100 / row['Benchmark']
#
#         else:
#             return row['result']
#
#     def check_if_kpi_passed(self, row):
#         return 1 if row['result_bin'] == row['Weight'] and row['Weight']!=0 else 0
#
#     def check_if_sequence_passed(self, row):
#         if row['ATOMIC_TARGET'] != -1:
#             return 1 if row['scenes_passed'] == row['ATOMIC_TARGET'] else 0
#         else:
#             return row['result']
#
#     def is_template_relevant(self, template_names):
#         """
#
#         :param template_names: a string of template names seperated by ','
#         :return: 1 if at least one of the template names given was in session, 0 otherwise.
#         """
#         in_session = set(self.scif['template_name'])
#         in_template = set([val.strip(' \n') for val in str(template_names).split(',')])
#         return 1 if (in_session & in_template) else 0
#
#     def handle_calculation(self, kpis):
#
#         kpi_results = pd.DataFrame(columns=[SET, KPI, ATOMIC, 'valid_template_name',
#                                             'KPI Weight', 'Weight','ATOMIC_TARGET',
#                                             'Score Method', 'Benchmark', 'Conditional Weight', 'result'])
#
#         # calculating each calculation in template.
#         for i in xrange(len(kpis)):
#             current_kpi = kpis.iloc[i]
#
#             # check if relevant template name exists in session.
#             is_template_valid = self.is_template_relevant(current_kpi['template_name'])
#
#             # calculates only if there is at least ont template_name from template in the session.
#             result = self.calculate_atomic(current_kpi) if is_template_valid else (0, 0, 0)
#             if result is None:
#                 continue
#
#             if isinstance(result, tuple):
#                 scenes_passed = result[1]
#                 scenes_total = result[2]
#                 result = result[0]
#
#
#             kpi_results = kpi_results.append({SET: current_kpi[SET],
#                                               KPI: current_kpi[KPI],
#                                               ATOMIC: current_kpi[ATOMIC],
#                                               'kpi_type': current_kpi[KPI_TYPE],
#                                               'KPI Weight': current_kpi['KPI Weight'],
#                                               'Weight': current_kpi['Weight'],
#                                               'Score Method': current_kpi['Score Method'],
#                                               'KPI Score Method': current_kpi['KPI Score Method'],
#                                               'Benchmark': current_kpi['Benchmark'],
#                                               'ATOMIC_TARGET': current_kpi['ATOMIC_TARGET'],
#                                               'Conditional Weight': current_kpi['Conditional Weight'],
#                                               'result': result, 'valid_template_name': is_template_valid,
#                                               'scenes_passed':scenes_passed, 'scenes_total':scenes_total},
#                                              ignore_index=True)
#
#         kpi_results['Conditional Weight'] = kpi_results['Conditional Weight'].fillna(-1)
#         kpi_results['ATOMIC_TARGET'] = kpi_results['ATOMIC_TARGET'].fillna(-1)
#
#         # boolean_res = kpi_results.loc[kpi_results['Score Method'] == 'Binary']
#         # boolean_res['result'] = boolean_res['result'].astype(bool)
#         #
#         # boolean_res = boolean_res.groupby([SET, KPI, ATOMIC, 'KPI Weight', 'Weight', 'KPI Score Method',
#         #                                    'Benchmark', 'Conditional Weight'], as_index=False).agg({'result': 'all',
#         #                                                                                             'valid_template_name': 'sum',
#         #                                                                                             'scenes_passed':'sum' ,
#         #                                                                                             'scenes_total':'sum'})
#         #
#         # kpi_results = kpi_results.loc[kpi_results['Score Method'] != 'Binary']
#         #
#         # aggs_res = kpi_results.groupby([SET, KPI, ATOMIC, 'KPI Weight', 'Weight', 'KPI Score Method',
#         #                                                                           'Benchmark', 'Conditional Weight'],
#         #                                as_index=False).agg({'result': 'sum', 'valid_template_name': 'sum','scenes_passed':'sum' ,
#         #                                                                                             'scenes_total':'sum'})
#         #
#         # aggs_res = aggs_res.append(boolean_res, ignore_index=True)
#
#         kpi_results = kpi_results.groupby([SET, KPI, ATOMIC,'ATOMIC_TARGET', 'KPI Weight', 'Weight', 'KPI Score Method','Score Method',
#                                                            'Benchmark', 'Conditional Weight'], as_index=False).agg({'result': 'sum',
#                                                             'valid_template_name':'sum',
#                                                             'scenes_passed':'sum' ,
#                                                            'scenes_total':'sum'})
#
#         kpi_results['result'] = kpi_results.apply(self.check_if_sequence_passed, axis=1)
#
#         # if not && condinal weight is NA MSL conditional weight + this KPI weight
#
#         ## if method binary change result to 100/0
#
#         # kpi_results.loc[(kpi_results['Score Method'] == 'Binary') &
#         #                 (kpi_results['result'] >= kpi_results['Benchmark']), 'result'] = 1
#         # kpi_results.loc[(kpi_results['Score Method'] == 'Binary') &
#         #                 (kpi_results['result'] < kpi_results['Benchmark']), 'result'] = 0
#         # kpi_results['result_bin'] = kpi_results['result']
#         ### Changed ###
#         # kpi_results.loc[(kpi_results['Score Method'] == 'Proportional')
#         #                 & (kpi_results['result'] < kpi_results['Benchmark']), 'result_bin'] = 0
#
#         # kpi_results.loc[kpi_results['Score Method'] == 'Proportional',
#         #                 'result_bin'] = (kpi_results['result'] / kpi_results['Benchmark'])
#         #
#
#
#         # kpi_results.loc[
#         #     (kpi_results['Benchmark'] == 'Pass'), 'result_bin'] = kpi_results['result']
#
#         kpi_results['result_bin'] = kpi_results.apply(self.set_calculation_result_by_score_type, axis=1)
#         kpi_results['result_bin'] = kpi_results['result_bin'] * kpi_results['Weight']
#
#
#         kpi_results['valid_template_name'] = kpi_results['valid_template_name'].astype(float)
#         kpi_results['result_bin'] = kpi_results['result_bin'].apply(lambda x: round(x, 4))
#         kpi_results['result'] = kpi_results['result'].apply(lambda x: round(x, 4))
#
#         ## write level3 to db
#         store_fk = self.store_info['store_fk'][0]
#         # ## asking if template isnt valid to write to db
#         for i in xrange(len(kpi_results)):
#             result = kpi_results.iloc[i]
#             # kpi_fk = self.common.get_kpi_fk_by_kpi_type(result[ATOMIC])
#             if result[SET] == PAIN_LEVEL_1:
#                 kpi_super_fk = self.common.get_kpi_fk_by_kpi_type(result[KPI]+PAIN)
#                 category_fk = PAIN_FK
#                 kpi_fk = self.common.get_kpi_fk_by_kpi_type(result[ATOMIC]+PAIN)
#             else:
#                 kpi_super_fk = self.common.get_kpi_fk_by_kpi_type(result[KPI]+ORAL_CARE)
#                 category_fk = ORAL_FK
#                 kpi_fk = self.common.get_kpi_fk_by_kpi_type(result[ATOMIC]+ORAL_CARE)
#
#             #web db
#             identifier_parent_fk_web = self.common.get_dictionary(
#                 kpi_fk=self.common.get_kpi_fk_by_kpi_type(result[KPI]),
#                 kpi_level1=self.common.get_kpi_fk_by_kpi_type(result[SET]))
#
#             #supervisor
#             identifier_parent_fk_supervisor = self.common.get_dictionary(
#                 kpi_fk=kpi_super_fk,
#                 kpi_level1=self.common.get_kpi_fk_by_kpi_type(result[SET]))
#
#             # numerator /  denominator understnad
#             self.common.write_to_db_result(fk=kpi_fk, numerator_id=MANUFACTURER_FK, result=result['result'],
#                                            score=result['result_bin'],
#                                            denominator_id=category_fk,
#                                            identifier_parent=identifier_parent_fk_web,
#                                            numerator_result=result['scenes_passed'],
#                                            denominator_result=result['scenes_total'],
#                                            weight=result['Weight']*100, should_enter=True)
#
#             self.common.write_to_db_result(fk=kpi_fk, numerator_id=MANUFACTURER_FK, result=result['result'],
#                                            score=result['result_bin'],
#                                            denominator_id=category_fk,
#                                            identifier_parent=identifier_parent_fk_supervisor,
#                                            numerator_result=result['scenes_passed'],
#                                            denominator_result=result['scenes_total'],
#                                            weight=result['Weight']*100, should_enter=True)
#
#             NAME_ADD = PAIN if result[SET] == PAIN_LEVEL_1 else ORAL_CARE
#             try:
#                 old_atomic_kpi_fk = self.old_kpi_static_data.loc[(self.old_kpi_static_data['kpi_set_name'] == result[SET]) &
#                                                       (self.old_kpi_static_data['kpi_name'] == result[KPI]+NAME_ADD) &
#                                                       (self.old_kpi_static_data['atomic_kpi_name'] == result[ATOMIC]+NAME_ADD)][
#                 'atomic_kpi_fk'].iloc[0]
#                 old_kpi_fk = self.old_kpi_static_data.loc[(self.old_kpi_static_data['kpi_set_name'] == result[SET]) &
#                                                           (self.old_kpi_static_data['kpi_name'] == result[
#                                                               KPI] + NAME_ADD) ]['kpi_fk'].iloc[0]
#
#                 self.common_old_tables.write_to_db_result(fk=old_atomic_kpi_fk, atomic_kpi_fk=old_atomic_kpi_fk,
#                                                           level=self.LEVEL3,
#                                                          score=result['result_bin'],
#                                                          result=result['result'],
#                                                          session_uid=self.session_uid, store_fk=self.store_id,
#                                                          display_text=result[ATOMIC],
#                                                          visit_date=self.visit_date.isoformat(),
#                                                          calculation_time=datetime.utcnow().isoformat(),
#                                                          kps_name=result[SET],
#                                                          kpi_fk=old_kpi_fk)
#             except:
#                 print 'cannot find atomic {} in kpi {} in set {}'.format(result[ATOMIC]+NAME_ADD,result[KPI]+NAME_ADD,
#                                                                          result[SET])
#
#         kpi_results['kpi_pass'] = kpi_results.apply(self.check_if_kpi_passed, axis =1)
#
#         sum_kpis = kpi_results.loc[(kpi_results['KPI Score Method'] == 'SUM')]
#         max_kpis = kpi_results.loc[(kpi_results['KPI Score Method'] == 'MAX')]
#         prop_kpis = kpi_results.loc[(kpi_results['KPI Score Method'] == 'Proportional')]
#
#         # sum_kpis['kpi_pass']=
#         sum_kpis=sum_kpis.groupby([SET, KPI, 'KPI Weight', SCORE_METHOD, 'Conditional Weight'], as_index=False).agg({
#                                                                                                     'valid_template_name': 'max',
#                                                                                                     'kpi_pass' :'sum',
#                                                                                                     'result_bin': 'sum',
#                                                                                                     'scenes_passed': 'sum',
#                                                                                                     'scenes_total': 'sum'})
#
#         max_kpis = max_kpis.groupby([SET, KPI, 'KPI Weight', SCORE_METHOD, 'Conditional Weight'], as_index=False).agg({
#             'valid_template_name': 'max',
#             'kpi_pass': '******',
#             'result_bin': 'max',
#             'scenes_passed': 'sum',
#             'scenes_total': 'sum'})
#
#         prop_kpis = prop_kpis.groupby([SET, KPI, 'KPI Weight', SCORE_METHOD, 'Conditional Weight'], as_index=False).agg({
#             'valid_template_name': 'max',
#             'kpi_pass': '******',
#             'result_bin': 'max',
#             'scenes_passed': 'sum',
#             'scenes_total': 'sum'})
#
#         aggs_res_level_2 = sum_kpis.append(max_kpis,ignore_index=True)
#         aggs_res_level_2 = aggs_res_level_2.append(prop_kpis, ignore_index=True)
#
#         ################### PLUS WEIGHT ###################
#         # takes conditional weight for irrelevnat kpis that has it.
#         # aggs_res_level_2.loc[(aggs_res_level_2['valid_template_name'] == 0) &
#         #                      (aggs_res_level_2['Conditional Weight'] != -1), 'result_bin'] = aggs_res_level_2[
#         #     'Conditional Weight']
#         #
#         # # sets = aggs_res_level_2[SET].unique()
#         #
#         # # The kpis to 'take weight' from for the MSL.
#         # invalid_templates = aggs_res_level_2.loc[(aggs_res_level_2['valid_template_name'] == 0) &
#         #                                          (aggs_res_level_2['Conditional Weight'] == -1) &
#         #                                          (aggs_res_level_2[KPI] != 'MSL')]
#         #
#         # invalid_templates = invalid_templates.groupby([SET], as_index=False)[['KPI Weight']].sum()
#         # invalid_templates = invalid_templates.rename(columns={'KPI Weight': 'PLUS_WEIGHT'})
#         # invalid_templates[KPI] = 'MSL'
#         # aggs_res_level_2 = aggs_res_level_2.merge(invalid_templates, on=[SET, KPI], how='left')
#         #
#         # aggs_res_level_2 = aggs_res_level_2.loc[~(aggs_res_level_2['valid_template_name'] == 0) |
#         #                                         ~(aggs_res_level_2['Conditional Weight'] == -1)]
#         #
#         # aggs_res_level_2['PLUS_WEIGHT'] = aggs_res_level_2['PLUS_WEIGHT'].fillna(0)
#         # aggs_res_level_2['KPI Weight'] += aggs_res_level_2['PLUS_WEIGHT']
#         #
#         # aggs_res_level_2.loc[aggs_res_level_2['valid_template_name'] == 0, 'result_bin'] = 1
#         # aggs_res_level_2.loc[aggs_res_level_2['valid_template_name'] == 0, 'result_bin'] = 1
#         #
#         # aggs_res_level_2.loc[(aggs_res_level_2[SCORE_METHOD] == 'Binary') &
#         #                 (kpi_results['valid_template_name'] > 0), 'result_bin'] = 1
#         # aggs_res_level_2.loc[(aggs_res_level_2[SCORE_METHOD] == 'Binary') &
#         #                                  (aggs_res_level_2['valid_template_name'] <= 0), 'result_bin'] = 0
#         # aggs_res_level_2['total_result'] = aggs_res_level_2['KPI Weight'] * aggs_res_level_2['result_bin']
#
#         ## write to db level 2 kpis
#
#         for i in xrange(len(aggs_res_level_2)):
#             result = aggs_res_level_2.iloc[i]
#
#             if result[SET] == PAIN_LEVEL_1:
#                 kpi_super_fk = self.common.get_kpi_fk_by_kpi_type(result[KPI] + PAIN)
#                 category_fk = PAIN_FK
#             else:
#                 kpi_super_fk = self.common.get_kpi_fk_by_kpi_type(result[KPI] + ORAL_CARE)
#                 category_fk = ORAL_FK
#
#             kpi_fk = self.common.get_kpi_fk_by_kpi_type(result[KPI])
#             identifier_child_super_fk = self.common.get_dictionary(
#                 kpi_fk=kpi_super_fk,
#                 kpi_level1=self.common.get_kpi_fk_by_kpi_type(result[SET]))
#
#             identifier_child_fk = self.common.get_dictionary(
#                 kpi_fk=kpi_fk,
#                 kpi_level1=self.common.get_kpi_fk_by_kpi_type(result[SET]))
#
#             identifier_parent_fk_supervisor = self.common.get_dictionary(
#                 kpi_fk=self.common.get_kpi_fk_by_kpi_type(result[SET]))
#
#             identifier_parent_fk_web = self.common.get_dictionary(
#                 kpi_category=self.common.get_kpi_fk_by_kpi_type(result[SET]),kpi_fk=ORANGE_SCORE)
#
#
#             #supervisor result to db
#             self.common.write_to_db_result(fk=kpi_super_fk, numerator_id=MANUFACTURER_FK, result=result['kpi_pass'],
#                                            score=result['result_bin']*100,
#                                            denominator_id=category_fk,
#                                            numerator_result=result['scenes_passed'],
#                                            denominator_result=result['scenes_total'],
#                                            identifier_result=identifier_child_super_fk,
#                                            identifier_parent=identifier_parent_fk_supervisor,
#                                            weight=result['KPI Weight']*100, should_enter=True)
#
#             # web result to db
#             self.common.write_to_db_result(fk=kpi_fk, numerator_id=MANUFACTURER_FK, result=result['kpi_pass'],
#                                            score=result['result_bin']*100,
#                                            denominator_id=category_fk,
#                                            numerator_result=result['scenes_passed'],
#                                            denominator_result=result['scenes_total'],
#                                            identifier_result=identifier_child_fk,
#                                            identifier_parent=identifier_parent_fk_web,
#                                            weight=result['KPI Weight']*100, should_enter=True)
#
#             NAME_ADD = PAIN if result[SET] == PAIN_LEVEL_1 else ORAL_CARE
#             try:
#                 old_kpi_fk = self.old_kpi_static_data.loc[(self.old_kpi_static_data['kpi_set_name'] == result[SET]) &
#                                                       (self.old_kpi_static_data['kpi_name'] == result[KPI]+NAME_ADD)][
#                                                         'kpi_fk'].iloc[0]
#                 kwargs = {'session_uid': self.session_uid, 'store_fk': self.store_id,
#                           'visit_date': self.visit_date.isoformat(), 'kpi_fk': old_kpi_fk,
#                           'kpk_name': result[KPI] + NAME_ADD, 'score_2': result['result_bin']}
#
#                 self.common_old_tables.write_to_db_result(fk=old_kpi_fk, level=self.LEVEL2, score=result['result_bin'],
#                                                           **kwargs)
#             except:
#                 print 'kpi {} in set {}'.format(result[KPI]+NAME_ADD, result[SET])
#
#         # aggregating to level 1:
#         aggs_res_level_1 = aggs_res_level_2.groupby([SET], as_index=False)['result_bin'].sum()
#
#         # write to db
#
#         for i in xrange(len(aggs_res_level_1)):
#             result = aggs_res_level_1.iloc[i]
#             kpi_fk = self.common.get_kpi_fk_by_kpi_type(result[SET])
#             category_fk = ORAL_FK if kpi_fk == ORAL_KPI else PAIN_FK
#             identifier_child_fk_web = self.common.get_dictionary(
#                 kpi_category=kpi_fk, kpi_fk=ORANGE_SCORE)
#             identifier_child_fk_supervisor = self.common.get_dictionary(
#                  kpi_fk=kpi_fk)
#
#             # supervisor result to db
#             self.common.write_to_db_result(fk=kpi_fk, numerator_id=MANUFACTURER_FK, score=result['result_bin'],
#                                            result=result['result_bin'],denominator_id=store_fk,
#                                            identifier_result=identifier_child_fk_supervisor
#                                            ,should_enter=True)
#             # web result to db
#             self.common.write_to_db_result(fk=ORANGE_SCORE, numerator_id=MANUFACTURER_FK, score=result['result_bin'],
#                                            result=result['result_bin'],
#                                            denominator_id=category_fk,
#                                            identifier_result=identifier_child_fk_web
#                                            ,should_enter=True)
#
#             old_kpi_fk = self.old_kpi_static_data.loc[(self.old_kpi_static_data['kpi_set_name'] == result[SET])][
#                 'kpi_set_fk'].iloc[0]
#             self.common_old_tables.write_to_db_result(old_kpi_fk, self.LEVEL1,  result['result_bin'])
#
#
#     def get_relevant_calculations(self):
#         # Gets the store type name and the relevant template according to it.
#         store_type = self.store_info['store_type'].values[0]
#         # Gets the relevant kpis from template
#         template = pd.read_excel(self.excel_file_path, sheetname=KPI_SHEET)
#         template = template.loc[template[STORE_TYPE] == store_type]
#
#         return template
#
#     def calculate_atomic(self, row):
#         # gets the atomic kpi's calculation type and run the relevant calculation according to it.
#         kpi_type = row[KPI_TYPE]
#
#         # runs the relevant calculation
#         calculation = self.calculations.get(kpi_type, '')
#         if calculation or calculation == 0:
#             return calculation(row)
#         else:
#             Log.info('kpi type {} does not exist'.format(kpi_type))
#             return None
#
#     def handle_sos_calculation(self, row, ign_stack=False):
#         """
#         calculates SOS line in the relevant scif.
#         :param row: line from the SOS sheet (the template row being calculated).
#         :param ign_stack: if True, facings are counted ignoring stacking
#          (uses the 'facings_ign_stack' column instead of 'facings').
#         :return: SOS ratio (numerator facings / denominator facings),
#          or 0 if the denominator has no facings.
#         """
#
#         target = row['target'] if not pd.isnull(row['target']) else 0
#
#         # templates = [val.strip(' \n') for val in str(row['template_name']).split(',')]
#         # category_fk = PAIN_FK if row[SET] == PAIN_LEVEL_1 else ORAL_FK
#
#         # valid_scenes = self.scif.loc[self.scif['template_name'].isin(templates)]['scene_id'].unique()
#         # scene_passed_count = 0
#         # for scene_id in valid_scenes:
#         #     row['scene_id'] = scene_id
#         #     filters, general_filters = self.get_filters(row)
#         #     res = self.sos.calculate_share_of_shelf(sos_filters=filters, **general_filters)
#         #     res = res >= target if not pd.isnull(row['target']) else res
#         #     scene_passed_count = scene_passed_count+1 if res else scene_passed_count
#         # row = row.drop('scene_id')
#
#         filters, general_filters = self.get_filters(row)
#         # general_filters['category_fk'] = category_fk
#         filters.update(general_filters)
#
#         denom_scif = self.scif[self.toolbox.get_filter_condition(self.scif, **general_filters)]
#
#         num_scif = denom_scif[self.toolbox.get_filter_condition(denom_scif, **filters)]
#
#         facings = 'facings_ign_stack' if ign_stack else 'facings'
#         res = num_scif[facings].sum() / denom_scif[facings].sum() if denom_scif[facings].sum() > 0 \
#             else 0
#         # res = res >= target if not pd.isnull(row['target']) else res
#         return res
#
#     def calculate_sos(self, row):
#         # Calculates the sos kpi according to the template.
#
#
#         # target = row['target'] if not pd.isnull(row['target']) else 0
#         # #
#         # filters, general_filters = self.get_filters(row)
#         # templates = [val.strip(' \n') for val in str(row['template_name']).split(',')]
#         # valid_scenes = self.scif.loc[self.scif['template_name'].isin(templates)]['scene_id'].unique()
#         # scene_passed_count = 0
#         # for scene_id in valid_scenes:
#         #     row['scene_id'] = scene_id
#         #     filters, general_filters = self.get_filters(row)
#         #     res = self.sos.calculate_share_of_shelf(sos_filters=filters, **general_filters)
#         #     res = res >= target if not pd.isnull(row['target']) else res
#         #     scene_passed_count = scene_passed_count+1 if res else scene_passed_count
#         # row = row.drop('scene_id')
#         # filters, general_filters = self.get_filters(row)
#         # res = self.sos.calculate_share_of_shelf(sos_filters=filters, **general_filters)
#
#         return self.handle_sos_calculation(row, ign_stack=True), 1, 1
#
#     def calculate_presence(self, row):
#
#         return self.calculate_facings(row)
#
#     def calculate_facings(self, row, no_facing=False):
#         """This function calculates facings from the given filters, and returns True if at least
#          one scene had facings as needed by the target. Assuming row has a target (if not, target = 0).
#          Returns whether the kpi passed, the number of scenes passed and the total scenes checked."""
#
#         facing_scenes_counted = 0
#         no_facing_scenes_counted = 0
#         passed = False
#         target = row['target'] if not pd.isnull(row['target']) else 0
#         row_filter, general_filters = self.get_filters(row)
#         row_filter.update(general_filters)
#         # Gets relevant scenes
#         templates = [val.strip(' \n') for val in str(row['template_name']).split(',')]
#         valid_scenes = self.scif.loc[self.scif['template_name'].isin(templates)]['scene_id'].unique()
#         for scene_id in valid_scenes:
#             # Checks for each product if found in scene, if so, 'count' it.
#             row_filter['scene_id'] = scene_id
#             result = self.availability.calculate_availability(**row_filter)
#             if no_facing:
#                 if int(result < target):
#                     no_facing_scenes_counted += 1
#                     passed = True
#             else:
#                 if int(result >= target):
#                     facing_scenes_counted += 1
#                     passed = True
#
#         if no_facing:
#             return passed, no_facing_scenes_counted, len(valid_scenes)
#         return passed, facing_scenes_counted, len(valid_scenes)
#
#     def calculate_no_facings(self, row):
#         return self.calculate_facings(row, no_facing=True)
#
#     def calculate_MSL(self, row):
#         """This function gets the relevant assortment,
#          and returns the share of assortment products shown in the session.
#          For each scene, it counts the number of products available out of the MSL assortment.
#          If at least one scene has a result > target, the atomic passes.
#          returns: share of assortment found, number of scenes passed, number of scenes checked
#          """
#
#         target = row['target'] if not pd.isnull(row['target']) else 0
#         scif = self.scif
#         scene_passed = False
#         products_in_scenes = pd.DataFrame(columns=['product_ean_code', 'scene_id', 'result'])
#         # Gets relevant assortment from template according to store attributes.
#         store_data = (self.store_type, self.store_info[STORE_LVL_2].values[0], self.store_info[STORE_LVL_3].values[0])
#
#         # if store attribute are not defined in template, fail the kpi.
#         if isinstance(self.msl_list.get(store_data, None), type(None)):
#             Log.info('Store attribute {} is not in template.'.format(store_data))
#             return 0
#
#         category_fk = PAIN_FK if row[SET] == PAIN_LEVEL_1 else ORAL_FK
#
#         kpi_filters, general = self.get_filters(row)
#
#         # filter all products by assortment & template
#         scif = scif.loc[(scif['in_assort_sc'] == 1) &
#                         (scif['rlv_dist_sc'] == 1) &
#                         (scif['category_fk'] == category_fk)]
#
#         if general:
#             # the filter condition filters out products without facings, which would result in an incorrect denominator
#             scif = scif.drop(['facings'], axis=1)
#             scif = scif[self.toolbox.get_filter_condition(scif, **general)]
#
#         products = scif['product_ean_code']
#         total_products = len(products)
#         # removes all filters which are nans
#         scif = pd.merge(self.match_product_in_scene, scif, how='left',
#                         left_on=['scene_fk', 'product_fk'], right_on=['scene_id', 'item_id'])
#         scif = scif.drop_duplicates(['scene_id', 'item_id'])
#
#         scif = scif.dropna(subset=kpi_filters.keys() + general.keys())
#
#         if kpi_filters.get('shelf_number', '') or general.get('shelf_number', ''):
#             scif['shelf_number'] = (scif['shelf_number'].astype(int)).astype(str)
#
#         kpi_filters.update(general)
#
#         # get the relevant scene by the template name given
#         templates = [val.strip(' \n') for val in str(row['template_name']).split(',')]
#         valid_scenes = self.scif.loc[self.scif['template_name'].isin(templates)]['scene_id'].unique()
#
#         # save which products were in each relevant scene
#         if kpi_filters:
#             scif = scif[self.toolbox.get_filter_condition(scif, **kpi_filters)]
#         for scene_id in valid_scenes:
#             # Checks for each product if found in scene, if so, 'count' it.
#             for product in set(products):
#                 product_in_scene = scif.loc[(scif['in_assort_sc'] == 1) &
#                                             (scif['rlv_dist_sc'] == 1) &
#                                             (scif['dist_sc'] == 1) &
#                                             (scif['scene_id'] == scene_id) &
#                                             (scif['product_ean_code'] == str(product))
#                                             ][['product_ean_code', 'scene_id', 'rlv_dist_sc']]
#                 res = 1 if not product_in_scene.empty else 0
#                 products_in_scenes = products_in_scenes.append({
#                     'product_ean_code': product,
#                     'scene_id': scene_id,
#                     'result': res,
#                 }, ignore_index=True)
#
#         sum_exist = len(products_in_scenes[products_in_scenes['result'] != 0]['product_ean_code'].unique())
#         scene_passed_count = len(products_in_scenes[products_in_scenes['result'] != 0]['scene_id'].unique())
#         # for scene_id in valid_scenes:
#         #     in_scene = products_in_scenes.loc[products_in_scenes['scene_id'] == scene_id]
#         #     exist_products = in_scene['result'].sum()
#         res = float(sum_exist) / total_products if total_products else 0
#         #     if res >= target:
#         #         scene_passed = True
#         #         scene_passed_count += 1
#         # sum_exist = float(sum_exist) / total_products if total_products else 0
#
#         return res, scene_passed_count, len(valid_scenes)
#
#     def calculate_sequence(self, row):
#         sequence_filter, general_filters = self.get_filters(row)
#
#         # running sequence kpi, allowing empty spaces, not allowing Irrelevant.
#         # assuming should pass in ALL relevant scenes.
#         # If an entity in sequence is missing (less than 1 facing)- will fail.
#
#         # assuming sequence organs are defined by only one filter!
#         if len(sequence_filter) == 1:
#             key = sequence_filter.keys()[0]
#             sequence_filter = (key, sequence_filter[key])
#             result = self.sequence.calculate_product_sequence(sequence_filter, direction='left', min_required_to_pass=1,
#                                                               **general_filters)
#         else:
#             result = None
#             Log.info('More than 1 filter was applied for sequence organs- Not supported!')
#         return result, 0, 0
#
#     def calculate_survey(self, row):
#         """
#         gets the relevant survey for atomic.
#         assuming there is only one survey in atomic, if not- will calculate only the first.
#         Handles the case where there are same atomics name in different store types.
#         Contains may cause 'x' to be found in 'xz', therefore not enough as a check.
#         """
#
#         # Gets the atomic's survey
#         atomic_name = row[ATOMIC]
#         rows = self.survey_file.loc[(self.survey_file['KPI Name'] == atomic_name)
#                                     & (self.survey_file['Store Policy'].str.contains(self.store_type, case=True))]
#         rows['match_policy'] = rows.apply(self.ensure_policy, axis=1)
#         rows = rows.loc[rows['match_policy'] == 1]
#
#         if len(rows) > 1:
#             Log.info('More than one survey question for atomic- calculating only first survey')
#
#         # Get the survey relevant data
#         survey_data = rows.iloc[0]
#         question = survey_data[SURVEY_QUEST]
#         target_answer = survey_data['Compare to Target']
#
#         # return whether the given answer matches the target answer.
#         return self.survey.check_survey_answer(question, target_answer), 0, 0
#
#     def ensure_policy(self, row):
#         # This checks if the store policy matches the store policy required
#         relevant_stores = map(str.strip, map(str.upper, (str(row[STORE_TYPE]).split(','))))
#         return 1 if self.store_type.upper() in relevant_stores else 0
#
#     def get_filters(self, row):
#         filters = {}
#         general_filters = {}
#         # gets the relevant column names to consider in kpi
#         cols = map(str.strip, str(row[COLS_TO_LOOK]).split(','))
#         for col in cols:
#             # column must exist
#             if col in row.keys():
#                 # handle the values in column
#                 if col == 'exclude':
#                     excludes = self.handle_complex_data(row[col], exclude=True)
#                     filters.update(excludes)
#                     general_filters.update(excludes)
#                     continue
#                 if col in ['target', STORE_TYPE]:
#                     continue
#                 if col == 'denominator':
#                     denom = self.handle_complex_data(row[col])
#                     general_filters.update(denom)
#                     continue
#                 elif self.is_string_a_list(str(row[col])):
#                     value = map(str.strip, str(row[col]).split(','))
#                 else:
#                     value = [row[col]]
#
#                 # add the filter to relevant dictionary
#                 if col in GENERAL_COLS:
#                     general_filters[col] = value
#                 else:
#                     filters[col] = value
#             else:
#                 Log.info('attribute {} is not in template'.format(col))
#
#         return filters, general_filters
#
#     @staticmethod
#     def is_string_a_list(str_value):
#         # checks whether a string is representing a list of values
#         return len(str_value.split(',')) > 0
#
#     def handle_complex_data(self, value, exclude=False):
#         # value is string of dictionary format with multi values, for example 'product_type:Irrelevant, Empty;
#         # scene_id:34,54'
#
#         exclude_dict = {}
#         # gets the different fields
#         fields = value.split(';')
#         for field in fields:
#
#             # gets the key and value of field
#             field = field.split(':')
#             key = field[0]
#             if key == 'product_type':
#                 values = {'Irrelevant', 'Empty', 'POS', 'SKU', 'Other'} - set(map(str.strip, str(field[1]).split(',')))
#                 exclude_dict[key] = list(values)
#             else:
#                 values = map(str.strip, str(field[1]).split(','))
#                 exclude_dict[key] = (values, EXCLUDE) if exclude else values
#
#         return exclude_dict
#
#
# # ###### to be changed in sdk:
#
#     @staticmethod
#     def get_all_kpi_data():
#         return """
#             select api.name as atomic_kpi_name, api.pk as atomic_kpi_fk,
#                    kpi.display_text as kpi_name, kpi.pk as kpi_fk,
#                    kps.name as kpi_set_name, kps.pk as kpi_set_fk
#             from static.atomic_kpi api
#             left join static.kpi kpi on kpi.pk = api.kpi_fk
#             join static.kpi_set kps on kps.pk = kpi.kpi_set_fk
#         """
#
#     def get_kpi_static_data(self):
#         """
#         This function extracts the static new KPI data (new tables) and saves it into one global data frame.
#         The data is taken from static.kpi_level_2.
#         """
#         query = self.get_all_kpi_data()
#         kpi_static_data = pd.read_sql_query(query, self.rds_conn.db)
#         return kpi_static_data