class PepsicoUtil(UnifiedKPISingleton):
    """Shared state and helpers for the PEPSICO UK KPI calculations.

    Loads the session-level data (scene item facts, matches, store info,
    external targets, assortment) once in __init__ and exposes the helper
    methods the individual KPI calculations rely on.  The class-level string
    constants are the canonical KPI / template spellings so every calculation
    references a single definition.
    """

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    EXCLUSION_TEMPLATE_PATH = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), '..', 'Data',
        'Inclusion_Exclusion_Template_Rollout.xlsx')
    ADDITIONAL_DISPLAY = 'additional display'
    INCLUDE_EMPTY = True
    EXCLUDE_EMPTY = False
    OPERATION_TYPES = []

    SOS_VS_TARGET = 'SOS vs Target'
    HERO_SKU_SPACE_TO_SALES_INDEX = 'Hero SKU Space to Sales Index'
    HERO_SKU_SOS_VS_TARGET = 'Hero SKU SOS vs Target'
    LINEAR_SOS_INDEX = 'Linear SOS Index'
    PEPSICO = 'PEPSICO'
    SHELF_PLACEMENT = 'Shelf Placement'
    HERO_SKU_PLACEMENT_TOP = 'Hero SKU Placement by shelf numbers_Top'
    HERO_PLACEMENT = 'Hero Placement'
    HERO_SKU_STACKING = 'Hero SKU Stacking'
    HERO_SKU_PRICE = 'Hero SKU Price'
    HERO_SKU_PROMO_PRICE = 'Hero SKU Promo Price'
    BRAND_FULL_BAY_KPIS = ['Brand Full Bay_90', 'Brand Full Bay']
    BRAND_FULL_BAY = 'Brand Full Bay'
    HERO_PREFIX = 'Hero SKU'
    ALL = 'ALL'
    HERO_SKU_OOS_SKU = 'Hero SKU OOS - SKU'
    HERO_SKU_OOS = 'Hero SKU OOS'
    HERO_SKU_AVAILABILITY = 'Hero SKU Availability'
    BRAND_SPACE_TO_SALES_INDEX = 'Brand Space to Sales Index'
    BRAND_SPACE_SOS_VS_TARGET = 'Brand Space SOS vs Target'
    SUB_BRAND_SPACE_TO_SALES_INDEX = 'Sub Brand Space to Sales Index'
    SUB_BRAND_SPACE_SOS_VS_TARGET = 'Sub Brand Space SOS vs Target'
    PEPSICO_SEGMENT_SPACE_TO_SALES_INDEX = 'PepsiCo Segment Space to Sales Index'
    PEPSICO_SEGMENT_SOS_VS_TARGET = 'PepsiCo Segment SOS vs Target'
    PEPSICO_SUB_SEGMENT_SPACE_TO_SALES_INDEX = 'PepsiCo Sub Segment Space to Sales Index'
    PEPSICO_SUB_SEGMENT_SOS_VS_TARGET = 'PepsiCo Sub Segment SOS vs Target'

    PLACEMENT_BY_SHELF_NUMBERS_TOP = 'Placement by shelf numbers_Top'
    TOTAL_LINEAR_SPACE = 'Total Linear Space'
    NUMBER_OF_FACINGS = 'Number of Facings'
    NUMBER_OF_BAYS = 'Number of bays'
    NUMBER_OF_SHELVES = 'Number of shelves'
    PRODUCT_BLOCKING = 'Product Blocking'
    PRODUCT_BLOCKING_ADJACENCY = 'Product Blocking Adjacency'
    SHELF_PLACEMENT_VERTICAL_LEFT = 'Shelf Placement Vertical_Left'
    SHELF_PLACEMENT_VERTICAL_CENTER = 'Shelf Placement Vertical_Center'
    SHELF_PLACEMENT_VERTICAL_RIGHT = 'Shelf Placement Vertical_Right'
    NUMBER_OF_SHELVES_TEMPL_COLUMN = 'No of Shelves in Fixture (per bay) (key)'
    RELEVANT_SHELVES_TEMPL_COLUMN = 'Shelves From Bottom To Include (data)'
    SHELF_PLC_TARGETS_COLUMNS = [
        'kpi_operation_type_fk', 'operation_type', 'kpi_level_2_fk', 'type',
        NUMBER_OF_SHELVES_TEMPL_COLUMN, RELEVANT_SHELVES_TEMPL_COLUMN,
        'KPI Parent'
    ]
    # Rename map for the '_x' suffixes pandas adds on a self-merge of the
    # shelf-placement targets.
    SHELF_PLC_TARGET_COL_RENAME = {
        'kpi_operation_type_fk_x': 'kpi_operation_type_fk',
        'operation_type_x': 'operation_type',
        'kpi_level_2_fk_x': 'kpi_level_2_fk',
        'type_x': 'type',
        NUMBER_OF_SHELVES_TEMPL_COLUMN + '_x': NUMBER_OF_SHELVES_TEMPL_COLUMN,
        RELEVANT_SHELVES_TEMPL_COLUMN + '_x': RELEVANT_SHELVES_TEMPL_COLUMN,
        'KPI Parent_x': 'KPI Parent'
    }
    HERO_SKU_AVAILABILITY_SKU = 'Hero SKU Availability - SKU'
    HERO_SKU_PLACEMENT_BY_SHELF_NUMBERS = 'Hero SKU Placement by shelf numbers'

    HERO_SKU_AVAILABILITY_BY_HERO_TYPE = 'Hero SKU Availability by Hero Type'
    SHARE_OF_ASSORTMENT_BY_HERO_TYPE = 'Share of Assortment by Hero Type'
    HERO_SKU_LABEL = 'Hero SKU'
    HERO_TYPE = 'hero_type'
    HERO_SKU_SOS_OF_CAT_BY_HERO_TYPE = 'Hero SKU SOS of Category by Hero Type'
    CATEGORY_FULL_BAY = 'Category Full Bay'
    CSN = 'CSN'
    PRICE = 'Price'
    PROMO_PRICE = 'Promo Price'
    LINEAR_SPACE_PER_PRODUCT = 'Linear Space Per Product'
    FACINGS_PER_PRODUCT = 'Facings per Product'
    PRICE_SCENE = 'Price Scene'
    PROMO_PRICE_SCENE = 'Promo Price Scene'
    HERO_SKU_SOS = 'Hero SKU SOS'
    BRAND_SOS = 'Brand SOS'
    SUB_BRAND_SOS = 'Sub Brand SOS'
    PEPSICO_SEGMENT_SOS = 'PepsiCo Segment SOS'
    BRAND_SOS_OF_SEGMENT = 'Brand SOS of Segment'
    BINS_NOT_RECOGNIZED = 'Bins_not_recognized'

    def __init__(self, output, data_provider):
        """Cache all session data and derived frames used by the calculations.

        :param output: engine output object, forwarded to PsDataProvider /
            Assortment.
        :param data_provider: session data provider (scif, matches, store
            info, etc.).
        """
        super(PepsicoUtil, self).__init__(data_provider)
        self.output = output
        self.common = Common(self.data_provider)
        # self.common_v1 = CommonV1(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        # Fall back to the session row when the provider has no store fk.
        self.store_id = self.data_provider[Data.STORE_FK] if self.data_provider[Data.STORE_FK] is not None \
                                                            else self.session_info['store_fk'].values[0]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.display_scene = self.get_match_display_in_scene()
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []

        # Attach the probe group of each probe match to the raw matches.
        self.probe_groups = self.get_probe_group()
        self.match_product_in_scene = self.match_product_in_scene.merge(
            self.probe_groups, on='probe_match_fk', how='left')

        self.toolbox = GENERALToolBox(self.data_provider)
        self.commontools = PEPSICOUKCommonToolBox(self.data_provider,
                                                  self.rds_conn)

        self.all_templates = self.commontools.all_templates
        self.custom_entities = self.commontools.custom_entities
        self.on_display_products = self.commontools.on_display_products
        self.exclusion_template = self.commontools.exclusion_template
        # Copies so per-KPI filtering never mutates the commontools originals.
        self.filtered_scif = self.commontools.filtered_scif.copy()
        self.filtered_matches = self.commontools.filtered_matches.copy()
        self.filtered_matches = self.filtered_matches.merge(
            self.probe_groups, on='probe_match_fk', how='left')

        self.filtered_scif_secondary = self.commontools.filtered_scif_secondary.copy(
        )
        self.filtered_matches_secondary = self.commontools.filtered_matches_secondary.copy(
        )

        self.scene_bay_shelf_product = self.commontools.scene_bay_shelf_product
        self.ps_data = PsDataProvider(self.data_provider, self.output)
        self.full_store_info = self.commontools.full_store_info.copy()
        self.external_targets = self.commontools.external_targets
        self.assortment = Assortment(self.commontools.data_provider,
                                     self.output)
        self.lvl3_ass_result = self.get_lvl3_relevant_assortment_result()
        self.own_manuf_fk = self.all_products[
            self.all_products['manufacturer_name'] ==
            self.PEPSICO]['manufacturer_fk'].values[0]

        self.scene_kpi_results = self.get_results_of_scene_level_kpis()
        # Debug/inspection accumulator; rows added via
        # add_kpi_result_to_kpi_results_df.
        self.kpi_results_check = pd.DataFrame(columns=[
            'kpi_fk', 'numerator', 'denominator', 'result', 'score', 'context'
        ])
        self.sos_vs_target_targets = self.construct_sos_vs_target_base_df()

        self.all_targets_unpacked = self.commontools.all_targets_unpacked.copy(
        )
        self.block_results = pd.DataFrame(columns=['Group Name', 'Score'])
        self.hero_type_custom_entity_df = self.get_hero_type_custom_entity_df()

    def get_match_display_in_scene(self):
        """Fetch the display<->scene matches for this session from the DB."""
        query = PEPSICOUK_Queries.get_match_display(self.session_uid)
        match_display = pd.read_sql_query(query, self.rds_conn.db)
        return match_display

    def get_probe_group(self):
        """Fetch probe-group assignments (probe_match_fk -> group) from the DB."""
        query = PEPSICOUK_Queries.get_probe_group(self.session_uid)
        probe_group = pd.read_sql_query(query, self.rds_conn.db)
        return probe_group

    @staticmethod
    def get_full_bay_and_positional_filters(parameters):
        """Build a filters dict from up to three Parameter/Value template pairs.

        'Parameter 1' is mandatory; 2 and 3 are added only when truthy.
        """
        filters = {parameters['Parameter 1']: parameters['Value 1']}
        if parameters['Parameter 2']:
            filters.update({parameters['Parameter 2']: parameters['Value 2']})
        if parameters['Parameter 3']:
            filters.update({parameters['Parameter 3']: parameters['Value 3']})
        return filters

    # @staticmethod
    # def get_stack_data(row):
    #     is_stack = False
    #     sequences_list = row['all_sequences'][0:-1].split(',')
    #     count_sequences = collections.Counter(sequences_list)
    #     repeating_items = [c > 1 for c in count_sequences.values()]
    #     if repeating_items:
    #         if any(repeating_items):
    #             is_stack = True
    #     return is_stack

    @staticmethod
    def split_and_strip(value):
        """Split a comma-separated template cell and strip each token.

        NOTE: returns a list under Python 2; under Python 3 `map` is lazy.
        """
        return map(lambda x: x.strip(), str(value).split(','))

    def construct_sos_vs_target_base_df(self):
        """Build the base SOS-vs-Target targets frame with resolved entity ids.

        Deduplicates the external targets, resolves numerator/denominator
        names to pks and attaches the parent-KPI identifier dictionary.
        """
        sos_targets = self.get_relevant_sos_vs_target_kpi_targets()
        sos_targets = sos_targets.drop_duplicates(subset=[
            'kpi_operation_type_fk', 'kpi_level_2_fk', 'numerator_value',
            'denominator_value', 'type'
        ],
                                                  keep='first')
        sos_targets = sos_targets.drop(
            ['key_json', 'data_json', 'start_date', 'end_date'], axis=1)
        if not sos_targets.empty:
            sos_targets['numerator_id'] = sos_targets.apply(
                self.retrieve_relevant_item_pks,
                axis=1,
                args=('numerator_type', 'numerator_value'))
            sos_targets['denominator_id'] = sos_targets.apply(
                self.retrieve_relevant_item_pks,
                axis=1,
                args=('denominator_type', 'denominator_value'))
            sos_targets['identifier_parent'] = sos_targets['KPI Parent'].apply(
                lambda x: self.common.get_dictionary(kpi_fk=int(float(x))))
        return sos_targets

    def get_relevant_sos_vs_target_kpi_targets(self, brand_vs_brand=False):
        """Return the SOS-vs-Target external targets applicable to this store.

        Policies (key_json) are matched against the store attributes; a policy
        value of 'ALL' matches any store attribute value.

        :param brand_vs_brand: when True keep only targets whose parent is the
            Linear SOS Index KPI, otherwise keep everything else.
        """
        sos_vs_target_kpis = self.external_targets[
            self.external_targets['operation_type'] == self.SOS_VS_TARGET]
        sos_vs_target_kpis = sos_vs_target_kpis.drop_duplicates(subset=[
            'operation_type', 'kpi_level_2_fk', 'key_json', 'data_json'
        ])
        relevant_targets_df = pd.DataFrame(
            columns=sos_vs_target_kpis.columns.values.tolist())
        if not sos_vs_target_kpis.empty:
            policies_df = self.commontools.unpack_external_targets_json_fields_to_df(
                sos_vs_target_kpis, field_name='key_json')
            policy_columns = policies_df.columns.values.tolist()
            del policy_columns[policy_columns.index('pk')]
            store_dict = self.full_store_info.to_dict('records')[0]
            for column in policy_columns:
                store_att_value = store_dict.get(column)
                policies_df = policies_df[policies_df[column].isin(
                    [store_att_value, self.ALL])]
            kpi_targets_pks = policies_df['pk'].values.tolist()
            relevant_targets_df = sos_vs_target_kpis[
                sos_vs_target_kpis['pk'].isin(kpi_targets_pks)]
            # relevant_targets_df = relevant_targets_df.merge(policies_df, on='pk', how='left')
            data_json_df = self.commontools.unpack_external_targets_json_fields_to_df(
                relevant_targets_df, 'data_json')
            relevant_targets_df = relevant_targets_df.merge(data_json_df,
                                                            on='pk',
                                                            how='left')

            kpi_data = self.kpi_static_data[['pk', 'type']].drop_duplicates()
            kpi_data.rename(columns={'pk': 'kpi_level_2_fk'}, inplace=True)
            relevant_targets_df = relevant_targets_df.merge(
                kpi_data,
                left_on='kpi_level_2_fk',
                right_on='kpi_level_2_fk',
                how='left')
            linear_sos_fk = self.common.get_kpi_fk_by_kpi_type(
                self.LINEAR_SOS_INDEX)
            if brand_vs_brand:
                relevant_targets_df = relevant_targets_df[
                    relevant_targets_df['KPI Parent'] == linear_sos_fk]
            else:
                relevant_targets_df = relevant_targets_df[~(
                    relevant_targets_df['KPI Parent'] == linear_sos_fk)]
        return relevant_targets_df

    def retrieve_relevant_item_pks(self, row, type_field_name,
                                   value_field_name):
        """Resolve a target row's entity value to a pk.

        When the type column already ends with '_fk' the value is used as-is;
        otherwise the name is looked up in custom_entities.  Returns None (and
        logs) when no id can be resolved.
        """
        try:
            if row[type_field_name].endswith("_fk"):
                item_id = row[value_field_name]
            else:
                # print row[type_field_name], ' :', row[value_field_name]
                item_id = self.custom_entities[
                    self.custom_entities['name'] ==
                    row[value_field_name]]['pk'].values[0]
        # IndexError covers the case where the name is missing from
        # custom_entities (empty selection -> .values[0] fails).
        except (KeyError, IndexError) as e:
            Log.error('No id found for field {}. Error: {}'.format(
                row[type_field_name], e))
            item_id = None
        return item_id

    def calculate_sos(self, sos_filters, **general_filters):
        """Return (numerator, denominator) linear space for an SOS calculation."""
        numerator_linear = self.calculate_share_space(
            **dict(sos_filters, **general_filters))
        denominator_linear = self.calculate_share_space(**general_filters)
        return float(numerator_linear), float(denominator_linear)

    def calculate_share_space(self, **filters):
        """Sum updated_gross_length over the filtered scif rows."""
        filtered_scif = self.filtered_scif[self.toolbox.get_filter_condition(
            self.filtered_scif, **filters)]
        space_length = filtered_scif['updated_gross_length'].sum()
        return space_length

    def add_kpi_result_to_kpi_results_df(self, result_list):
        """Append one row to the debug results frame (kpi_results_check)."""
        self.kpi_results_check.loc[len(self.kpi_results_check)] = result_list

    def get_results_of_scene_level_kpis(self):
        """Fetch previously calculated scene-level KPI results for the visit."""
        scene_kpi_results = pd.DataFrame()
        if not self.scene_info.empty:
            scene_kpi_results = self.ps_data.get_scene_results(
                self.scene_info['scene_fk'].drop_duplicates().values)
        return scene_kpi_results

    def get_store_data_by_store_id(self):
        """Fetch the full store row from the DB for this session's store."""
        store_id = self.store_id if self.store_id else self.session_info[
            'store_fk'].values[0]
        query = PEPSICOUK_Queries.get_store_data_by_store_id(store_id)
        query_result = pd.read_sql_query(query, self.rds_conn.db)
        return query_result

    def get_facings_scene_bay_shelf_product(self):
        """Count facings per (scene, bay, shelf, product) from filtered matches."""
        self.filtered_matches['count'] = 1
        aggregate_df = self.filtered_matches.groupby(
            ['scene_fk', 'bay_number', 'shelf_number', 'product_fk'],
            as_index=False).agg({'count': np.sum})
        return aggregate_df

    def get_lvl3_relevant_assortment_result(self):
        """Return the level-3 assortment result for the session."""
        assortment_result = self.assortment.get_lvl3_relevant_ass()
        # if assortment_result.empty:
        #     return assortment_result
        # products_in_session = self.filtered_scif.loc[self.filtered_scif['facings'] > 0]['product_fk'].values
        # assortment_result.loc[assortment_result['product_fk'].isin(products_in_session), 'in_store'] = 1
        return assortment_result

    @staticmethod
    def get_block_and_adjacency_filters(target_series):
        """Build a filters dict from up to three Parameter/Value target pairs."""
        filters = {target_series['Parameter 1']: target_series['Value 1']}
        if target_series['Parameter 2']:
            filters.update(
                {target_series['Parameter 2']: target_series['Value 2']})

        if target_series['Parameter 3']:
            filters.update(
                {target_series['Parameter 3']: target_series['Value 3']})
        return filters

    @staticmethod
    def get_block_filters(target_series):
        """Build a filters dict with every value normalized to a list.

        Scalar values are wrapped in a single-element list; list values are
        kept as-is.  Parameters 2 and 3 are optional.
        """
        if isinstance(target_series['Value 1'], list):
            filters = {target_series['Parameter 1']: target_series['Value 1']}
        else:
            filters = {
                target_series['Parameter 1']: [target_series['Value 1']]
            }

        if target_series['Parameter 2']:
            if isinstance(target_series['Value 2'], list):
                filters.update(
                    {target_series['Parameter 2']: target_series['Value 2']})
            else:
                filters.update(
                    {target_series['Parameter 2']: [target_series['Value 2']]})

        if target_series['Parameter 3']:
            # BUGFIX: was checking isinstance of 'Value 2' (copy-paste error),
            # which mis-wrapped list/scalar values of 'Value 3'.
            if isinstance(target_series['Value 3'], list):
                filters.update(
                    {target_series['Parameter 3']: target_series['Value 3']})
            else:
                filters.update(
                    {target_series['Parameter 3']: [target_series['Value 3']]})
        return filters

    def reset_filtered_scif_and_matches_to_exclusion_all_state(self):
        """Restore filtered scif/matches to the commontools baseline copies."""
        self.filtered_scif = self.commontools.filtered_scif.copy()
        self.filtered_matches = self.commontools.filtered_matches.copy()

    def reset_secondary_filtered_scif_and_matches_to_exclusion_all_state(self):
        """Restore the secondary-shelf scif/matches to their baseline copies."""
        self.filtered_scif_secondary = self.commontools.filtered_scif_secondary.copy(
        )
        self.filtered_matches_secondary = self.commontools.filtered_matches_secondary.copy(
        )

    def get_available_hero_sku_list(self, dependencies_df):
        """Return hero SKUs whose availability KPI result is 1 (present)."""
        hero_list = dependencies_df[
            (dependencies_df['kpi_type'] == self.HERO_SKU_AVAILABILITY_SKU)
            & (dependencies_df['numerator_result'] == 1
               )]['numerator_id'].unique().tolist()
        return hero_list

    def get_unavailable_hero_sku_list(self, dependencies_df):
        """Return hero SKUs whose availability KPI result is 0 (missing)."""
        hero_list = dependencies_df[
            (dependencies_df['kpi_type'] == self.HERO_SKU_AVAILABILITY_SKU)
            & (dependencies_df['numerator_result'] == 0
               )]['numerator_id'].unique().tolist()
        return hero_list

    def get_hero_type_custom_entity_df(self):
        """Return hero_type custom entities with 'pk' renamed to 'entity_fk'."""
        # .copy() so the in-place rename does not act on a view of
        # custom_entities (avoids SettingWithCopyWarning / shared mutation).
        hero_type_df = self.custom_entities[self.custom_entities['entity_type']
                                            == self.HERO_TYPE].copy()
        hero_type_df.rename(columns={'pk': 'entity_fk'}, inplace=True)
        return hero_type_df
# Example #2
class BATMXToolBox:
    """Visit-level KPI aggregation over entry/exit scene results (BAT MX).

    Scene-level results are calculated elsewhere; this toolbox splits them
    into 'entry' and 'exit' scenes (by template name) and writes visit-level
    averages, deltas and result hierarchies to the DB.
    """

    def __init__(self, data_provider, output):
        """Cache session data and pre-split scene results into entry/exit sets."""
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.scenes = self.scene_info['scene_fk'].tolist()
        self.manufacturer_fk = int(
            self.data_provider[Data.OWN_MANUFACTURER]['param_value'].iloc[0])
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        scenes_with_templates = self.scif[['template_name',
                                           'scene_fk']].drop_duplicates()
        exits = scenes_with_templates[
            scenes_with_templates['template_name'].str.contains(
                Const.EXIT_TEMPLATE)]['scene_fk'].tolist()
        entries = scenes_with_templates[
            scenes_with_templates['template_name'].str.contains(
                Const.ENTRY_TEMPLATE)]['scene_fk'].tolist()
        if self.scenes:
            self.scene_results = self.ps_data_provider.get_scene_results(
                self.scenes)
            self.exit_results = self.scene_results[
                self.scene_results["scene_fk"].isin(exits)]
            self.entry_results = self.scene_results[
                self.scene_results["scene_fk"].isin(entries)]
        else:
            # BUGFIX: exit_results/entry_results were previously only assigned
            # inside the `if` branch, so a session with no scenes crashed every
            # calculation with AttributeError.  Initialize them as empty frames
            # with the columns the calculations filter/read, so the KPIs
            # degrade to a clean no-op instead.
            empty_columns = [
                'pk', 'scene_fk', 'kpi_level_2_fk', 'numerator_id',
                'numerator_result', 'denominator_result', 'result', 'score'
            ]
            self.scene_results = pd.DataFrame(columns=empty_columns)
            self.exit_results = pd.DataFrame(columns=empty_columns)
            self.entry_results = pd.DataFrame(columns=empty_columns)

    def main_calculation(self):
        """
        This function calculates the KPI results.

        Each calculation is isolated so a failure in one family does not
        prevent the other from being written.
        """
        try:
            self.calculate_oos()
        except Exception as e:
            Log.error('{}'.format(e))
        try:
            self.calculate_pogs_and_sos_kpis()
        except Exception as e:
            Log.error('{}'.format(e))

    def calculate_pogs_and_sos_kpis(self):
        """Write SOS and planogram (POG) visit-level KPIs for both fixtures."""
        self.write_score_and_delta_sos(Const.SOS_LEVELS)
        self.write_score_and_delta_pog(
            Const.POG_KPI_NAMES[Const.TOBACCO_CENTER])
        self.write_score_and_delta_pog(
            Const.POG_KPI_NAMES[Const.PROMOTIONAL_TRAY])

    def write_score_and_delta_pog(self, kpi_names):
        """Write the visit-level POG score (exit average) and exit-entry delta.

        :param kpi_names: dict with Const.VISIT_LEVEL / FIXTURE_LEVEL /
            PLANOGRAM_LEVEL KPI names for one fixture type.
        """
        visit_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.VISIT_LEVEL])
        fixture_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.FIXTURE_LEVEL])
        identifier_visit = self.common.get_dictionary(kpi_fk=visit_kpi_fk)
        exit_results = self.exit_results[self.exit_results['kpi_level_2_fk'] ==
                                         fixture_kpi_fk]
        entry_results = self.entry_results[self.entry_results['kpi_level_2_fk']
                                           == fixture_kpi_fk]
        self.write_hierarchy_pog_to_scene(exit_results, entry_results,
                                          identifier_visit, kpi_names)
        avg_exit = self.get_averages(exit_results)
        avg_entry = self.get_averages(entry_results)
        # NOTE: a falsy avg_entry (None OR 0) is treated as "no entry data".
        delta = avg_exit - avg_entry if avg_entry else avg_exit
        self.common.write_to_db_result(fk=visit_kpi_fk,
                                       numerator_id=self.manufacturer_fk,
                                       denominator_id=self.store_id,
                                       score=avg_exit,
                                       result=delta,
                                       identifier_result=identifier_visit)

    def write_hierarchy_pog_to_scene(self, exit_results, entry_results,
                                     identifier_visit, kpi_names):
        """Write one planogram-level row per planogram and link scene results
        (both entry and exit) under it in the hierarchy."""
        pog_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.PLANOGRAM_LEVEL])
        for planogram_fk in exit_results['numerator_id'].unique().tolist():
            pog_exit_results = exit_results[exit_results['numerator_id'] ==
                                            planogram_fk]
            pog_exit_score = self.get_average(pog_exit_results, 'score')
            pog_exit_identifier = self.common.get_dictionary(
                planogram_fk=planogram_fk,
                entry_exit=Const.EXIT,
                kpi_fk=pog_kpi_fk)
            self.common.write_to_db_result(
                fk=pog_kpi_fk,
                numerator_id=planogram_fk,
                denominator_id=self.store_id,
                score=pog_exit_score,
                result=pog_exit_score,
                identifier_result=pog_exit_identifier,
                identifier_parent=identifier_visit,
                should_enter=True)
            pog_exit_fks = pog_exit_results['pk'].tolist()
            pog_entry_results = entry_results[entry_results['numerator_id'] ==
                                              planogram_fk]
            pog_result_fks = pog_entry_results['pk'].tolist() + pog_exit_fks
            # Hierarchy-only rows: attach every scene result to its planogram.
            for scene_result_fk in pog_result_fks:
                self.common.write_to_db_result(
                    should_enter=True,
                    scene_result_fk=scene_result_fk,
                    numerator_id=self.manufacturer_fk,
                    denominator_id=self.store_id,
                    identifier_parent=pog_exit_identifier,
                    only_hierarchy=True)

    def write_score_and_delta_sos(self, kpi_names):
        """Write the visit-level SOS score (exit average) and link scene rows."""
        visit_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.VISIT_LEVEL])
        fixture_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.FIXTURE_LEVEL])
        identifier = self.common.get_dictionary(kpi_fk=visit_kpi_fk)
        exit_results = self.exit_results[self.exit_results['kpi_level_2_fk'] ==
                                         fixture_kpi_fk]
        entry_results = self.entry_results[self.entry_results['kpi_level_2_fk']
                                           == fixture_kpi_fk]
        avg_exit = self.get_averages(exit_results)
        avg_entry = self.get_averages(entry_results)
        delta = avg_exit - avg_entry if avg_entry else avg_exit
        # May 23, 2019: changed per Rifka's request to include 'score' as 'result' - may need to be revised
        # self.common.write_to_db_result(fk=visit_kpi_fk, numerator_id=self.manufacturer_fk, denominator_id=self.store_id,
        #                                score=avg_exit, result=delta, identifier_result=identifier)
        self.common.write_to_db_result(fk=visit_kpi_fk,
                                       numerator_id=self.manufacturer_fk,
                                       denominator_id=self.store_id,
                                       score=avg_exit,
                                       result=avg_exit,
                                       identifier_result=identifier)
        fixture_results_pk = exit_results['pk'].tolist(
        ) + entry_results['pk'].tolist()
        for scene_result_fk in fixture_results_pk:
            self.common.write_to_db_result(should_enter=True,
                                           scene_result_fk=scene_result_fk,
                                           numerator_id=self.manufacturer_fk,
                                           denominator_id=self.store_id,
                                           identifier_parent=identifier,
                                           only_hierarchy=True,
                                           only_hierarchy2=True)

    def calculate_oos(self):
        """Write the visit-level OOS KPI, averaged from entry-scene results.

        No-op when there are no entry results for the OOS fixture KPI.
        """
        visit_kpi_fk = self.common.get_kpi_fk_by_kpi_name(Const.OOS_VISIT)
        fixture_kpi_fk = self.common.get_kpi_fk_by_kpi_name(Const.OOS_FIXTURE)
        identifier = self.common.get_dictionary(kpi_fk=visit_kpi_fk)
        entry_results = self.entry_results[self.entry_results['kpi_level_2_fk']
                                           == fixture_kpi_fk]
        if entry_results.empty:
            return
        else:
            score = self.get_average(entry_results, 'score')
            self.common.write_to_db_result(
                fk=visit_kpi_fk,
                numerator_id=self.manufacturer_fk,
                denominator_id=self.store_id,
                score=score,
                result=score,
                identifier_result=identifier,
                numerator_result=self.get_average(entry_results,
                                                  'numerator_result'),
                denominator_result=self.get_average(entry_results,
                                                    'denominator_result'))
            fixture_results_pk = entry_results['pk'].tolist(
            ) + self.exit_results[self.exit_results['kpi_level_2_fk'] ==
                                  fixture_kpi_fk]['pk'].tolist()
            for scene_result_fk in fixture_results_pk:
                self.common.write_to_db_result(
                    should_enter=True,
                    scene_result_fk=scene_result_fk,
                    numerator_id=self.manufacturer_fk,
                    denominator_id=self.store_id,
                    identifier_parent=identifier,
                    only_hierarchy=True,
                    only_hierarchy2=True)

    @staticmethod
    def get_average(df, column):
        """Mean of `column` over `df`.  Assumes df is non-empty (callers guard)."""
        return df[column].sum() / len(df)

    @staticmethod
    def get_averages(fixture_results):
        """Mean of the 'score' column, or None when there are no results."""
        if fixture_results.empty:
            return None
        else:
            fixtures_amount = fixture_results['score'].count()
            scores_sum = fixture_results['score'].sum()
            return scores_sum / fixtures_amount
class INTEG16ToolBox:
    """Rolls per-scene 'scene_score' results up into one store-level score."""

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3
    TEMPLATE_PATH = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'Data',
        'KPI_Templates.xlsx')
    CCIT_MANU = 'HBC Italia'
    MULTIPLIER_SHEET = 'Multiplier'
    STORE_ATT_1 = 'Store Att1'
    SCORE_MULTIPLIER = 'Score multiplier'
    NON_KPI = 0

    def __init__(self, data_provider, output, common):
        """Cache the session data, open a DB connection, pull scene results
        and load the multiplier template sheet."""
        self.data_provider = data_provider
        self.output = output
        self.common = common
        self.project_name = data_provider.project_name
        self.session_uid = data_provider.session_uid
        self.products = data_provider[Data.PRODUCTS]
        self.all_products = data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = data_provider[Data.MATCHES]
        self.visit_date = data_provider[Data.VISIT_DATE]
        self.session_info = data_provider[Data.SESSION_INFO]
        self.scene_info = data_provider[Data.SCENES_INFO]
        self.store_id = data_provider[Data.STORE_FK]
        self.store_info = data_provider[Data.STORE_INFO]
        self.scif = data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.ps_data_provider = PsDataProvider(data_provider, output)
        self.kpi_static_data = self.common.get_kpi_static_data()
        session_scene_fks = self.scene_info['scene_fk'].drop_duplicates().values
        self.scene_results = self.ps_data_provider.get_scene_results(
            session_scene_fks)
        self.kpi_results_queries = []
        self.multiplier_template = pd.read_excel(
            self.TEMPLATE_PATH, sheetname=self.MULTIPLIER_SHEET)

    def get_manufacturer_fk(self, manu):
        """Return the manufacturer_fk for the given manufacturer name."""
        matching_rows = self.all_products[
            self.all_products['manufacturer_name'] == manu]
        return matching_rows['manufacturer_fk'].drop_duplicates().values[0]

    def main_function(self):
        """
        This function calculates the KPI results.

        Sums every scene-level 'scene_score' result into a single
        'store_score' row and links the scene results beneath it.
        """
        scene_score_kpi_fk = self.common.get_kpi_fk_by_kpi_type('scene_score')
        scene_score_rows = self.scene_results[
            self.scene_results['kpi_level_2_fk'] == scene_score_kpi_fk]
        scene_result_fks = scene_score_rows['pk'].values
        store_score = scene_score_rows['result'].sum()
        # NOTE: a store-attribute based score multiplier (MULTIPLIER_SHEET,
        # keyed by additional_attribute_1) used to be applied here but is
        # currently disabled - the raw summed score is written as-is.
        manu_fk = self.get_manufacturer_fk(self.CCIT_MANU)
        store_kpi_fk = self.common.get_kpi_fk_by_kpi_type('store_score')
        identifier_result = self.common.get_dictionary(kpi_fk=store_kpi_fk)
        identifier_result['session_fk'] = self.session_info['pk'].values[0]
        identifier_result['store_fk'] = self.store_id
        self.common.write_to_db_result(fk=store_kpi_fk,
                                       numerator_id=manu_fk,
                                       numerator_result=store_score,
                                       result=store_score,
                                       score=store_score,
                                       should_enter=False,
                                       identifier_result=identifier_result)
        # Hierarchy-only rows: hang each scene result under the store score.
        for scene_result_fk in scene_result_fks:
            self.common.write_to_db_result(fk=self.NON_KPI,
                                           should_enter=True,
                                           scene_result_fk=scene_result_fk,
                                           identifier_parent=identifier_result)
        return
class BATMXToolBox:
    """
    Session-level KPI calculator for BAT MX.

    Aggregates scene-level results into visit-level KPIs: an OOS KPI taken
    from the entry scenes, and POG/SOS KPIs expressed as the average exit
    score plus the delta between exit and entry scene averages.
    """

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.scenes = self.scene_info['scene_fk'].tolist()
        self.manufacturer_fk = self.all_products[
            self.all_products["manufacturer_name"].isin(
                Const.BAT_MANUFACTURERS)]['manufacturer_fk'].iloc[0]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        scenes_with_templates = self.scif[['template_name',
                                           'scene_fk']].drop_duplicates()
        # Scenes are split by template name into store-exit and store-entry
        # visits; the KPIs below compare the two groups.
        exits = scenes_with_templates[
            scenes_with_templates['template_name'].str.contains(
                Const.EXIT_TEMPLATE)]['scene_fk'].tolist()
        entries = scenes_with_templates[
            scenes_with_templates['template_name'].str.contains(
                Const.ENTRY_TEMPLATE)]['scene_fk'].tolist()
        if self.scenes:
            self.scene_results = self.ps_data_provider.get_scene_results(
                self.scenes)
            self.exit_results = self.scene_results[
                self.scene_results["scene_fk"].isin(exits)]
            self.entry_results = self.scene_results[
                self.scene_results["scene_fk"].isin(entries)]
        else:
            # BUGFIX: exit_results/entry_results were previously left unset on
            # the no-scenes path, so the calculators raised AttributeError.
            self.scene_results = pd.DataFrame()
            self.exit_results = pd.DataFrame()
            self.entry_results = pd.DataFrame()

    def main_calculation(self):
        """
        Runs all session-level KPI calculations; each calculation is isolated
        so a failure in one is logged without aborting the others.
        """
        try:
            self.calculate_oos()
        except Exception as e:
            Log.error('{}'.format(e))
        try:
            self.calculate_pogs_and_sos_kpis()
        except Exception as e:
            Log.error('{}'.format(e))

    def calculate_pogs_and_sos_kpis(self):
        """Writes the SOS KPI and both POG KPIs (tobacco center and tray)."""
        self.write_score_and_delta(Const.SOS_LEVELS)
        self.write_score_and_delta(Const.POG_KPI_NAMES[Const.TOBACCO_CENTER])
        self.write_score_and_delta(Const.POG_KPI_NAMES[Const.PROMOTIONAL_TRAY])

    def write_score_and_delta(self, kpi_names):
        """
        Writes a visit-level KPI whose score is the average exit-scene score
        and whose result is the exit-minus-entry delta, then links all the
        fixture-level scene results underneath it.
        :param kpi_names: dict mapping VISIT_LEVEL/FIXTURE_LEVEL to KPI names.
        """
        visit_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.VISIT_LEVEL])
        fixture_kpi_fk = self.common.get_kpi_fk_by_kpi_name(
            kpi_names[Const.FIXTURE_LEVEL])
        identifier = self.common.get_dictionary(kpi_fk=visit_kpi_fk)
        exit_results = self.exit_results[self.exit_results['kpi_level_2_fk'] ==
                                         fixture_kpi_fk]
        entry_results = self.entry_results[self.entry_results['kpi_level_2_fk']
                                           == fixture_kpi_fk]
        avg_exit = self.get_averages(exit_results)
        avg_entry = self.get_averages(entry_results)
        # NOTE(review): get_averages returns None for an empty side, which
        # makes this subtraction raise; the caller logs the error and the KPI
        # is skipped. Confirm whether a 0-default is preferred.
        delta = avg_exit - avg_entry
        self.common.write_to_db_result(fk=visit_kpi_fk,
                                       numerator_id=self.manufacturer_fk,
                                       denominator_id=self.store_id,
                                       score=avg_exit,
                                       result=delta,
                                       identifier_result=identifier)
        fixture_results_pk = exit_results['pk'].tolist(
        ) + entry_results['pk'].tolist()
        for scene_result_fk in fixture_results_pk:
            self.common.write_to_db_result(should_enter=True,
                                           scene_result_fk=scene_result_fk,
                                           numerator_id=self.manufacturer_fk,
                                           denominator_id=self.store_id,
                                           identifier_parent=identifier,
                                           only_hierarchy=True)

    def calculate_oos(self):
        """
        Writes the visit-level OOS KPI from the first entry-scene OOS fixture
        result and links all the entry OOS scene results underneath it.
        """
        visit_kpi_fk = self.common.get_kpi_fk_by_kpi_name(Const.OOS_VISIT)
        fixture_kpi_fk = self.common.get_kpi_fk_by_kpi_name(Const.OOS_FIXTURE)
        identifier = self.common.get_dictionary(kpi_fk=visit_kpi_fk)
        oos_results = self.entry_results[self.entry_results['kpi_level_2_fk']
                                         == fixture_kpi_fk]
        if oos_results.empty:
            return
        first_result = oos_results.iloc[0]
        self.common.write_to_db_result(
            fk=visit_kpi_fk,
            numerator_id=self.manufacturer_fk,
            denominator_id=self.store_id,
            score=first_result['score'],
            numerator_result=first_result['numerator_result'],
            denominator_result=first_result['denominator_result'],
            identifier_result=identifier)
        # BUGFIX: the original called .tolist() on the scalar pk of the first
        # row and added it to a list (TypeError), and that pk was already
        # contained in the re-filtered frame. Collecting the pks of the
        # filtered frame once is correct and duplicate-free.
        for scene_result_fk in oos_results['pk'].tolist():
            self.common.write_to_db_result(
                should_enter=True,
                scene_result_fk=scene_result_fk,
                numerator_id=self.manufacturer_fk,
                denominator_id=self.store_id,
                identifier_parent=identifier,
                only_hierarchy=True)

    @staticmethod
    def get_averages(fixture_results):
        """
        Returns the mean of the 'score' column, or None when there are no
        fixture results at all.
        :param fixture_results: DataFrame with a 'score' column.
        """
        if fixture_results.empty:
            return None
        else:
            fixtures_amount = fixture_results['score'].count()
            scores_sum = fixture_results['score'].sum()
            return scores_sum / fixtures_amount
class REDToolBox:
    """
    Calculates the RED score of a session.

    Two calculation types are supported: SOVI (scene-based KPIs whose scene
    results are read back from the scene-level calculation) and MANUAL
    (survey-based KPIs). Results are written both through the new common
    (common_db2) and through the legacy LEVEL1-3 result tables (common_db /
    common_db_integ).
    """

    def __init__(self, data_provider, output, calculation_type, common_db2):
        self.output = output
        self.data_provider = data_provider
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.scif = self.scif[self.scif['product_type'] != "Irrelevant"]
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.templates = {}
        self.result_values = self.ps_data_provider.get_result_values()
        self.calculation_type = calculation_type
        if self.calculation_type == Const.SOVI:
            self.TEMPLATE_PATH = Const.TEMPLATE_PATH
            self.RED_SCORE = Const.RED_SCORE
            self.RED_SCORE_INTEG = Const.RED_SCORE_INTEG
            # NOTE(review): 'sheetname' is the pre-1.0 pandas keyword; keep in
            # sync with the pandas version pinned by this project.
            for sheet in Const.SHEETS:
                self.templates[sheet] = pd.read_excel(self.TEMPLATE_PATH, sheetname=sheet).fillna('')
            self.converters = self.templates[Const.CONVERTERS]
            self.scenes_results = self.ps_data_provider.get_scene_results(
                self.scene_info['scene_fk'].drop_duplicates().values)
            self.scenes_results = self.scenes_results[[Const.DB_RESULT, Const.DB_SCENE_FK, Const.DB_SCENE_KPI_FK]]
        else:
            self.TEMPLATE_PATH = Const.SURVEY_TEMPLATE_PATH
            self.RED_SCORE = Const.MANUAL_RED_SCORE
            self.RED_SCORE_INTEG = Const.MANUAL_RED_SCORE_INTEG
            for sheet in Const.SHEETS_MANUAL:
                self.templates[sheet] = pd.read_excel(self.TEMPLATE_PATH, sheetname=sheet).fillna('')
        self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
        self.toolbox = FunctionsToolBox(self.data_provider, self.output, self.templates, self.store_attr)
        self.common_db_integ = Common(self.data_provider, self.RED_SCORE_INTEG)
        self.kpi_static_data_integ = self.common_db_integ.get_kpi_static_data()
        self.common_db = Common(self.data_provider, self.RED_SCORE)
        self.common_db2 = common_db2
        self.region = self.store_info['region_name'].iloc[0]
        self.store_type = self.store_info['store_type'].iloc[0]
        if self.store_type in Const.STORE_TYPES:
            self.store_type = Const.STORE_TYPES[self.store_type]
        self.kpi_static_data = self.common_db.get_kpi_static_data()
        # The KPI sheet is filtered down to the store's region and type once.
        main_template = self.templates[Const.KPIS]
        self.templates[Const.KPIS] = main_template[(main_template[Const.REGION] == self.region) &
                                                   (main_template[Const.STORE_TYPE] == self.store_type)]
        self.session_results = pd.DataFrame(columns=Const.COLUMNS_OF_RESULTS)
        self.all_results = pd.DataFrame(columns=Const.COLUMNS_OF_RESULTS)
        self.used_scenes = []
        self.red_score = 0
        self.set_fk = self.common_db2.get_kpi_fk_by_kpi_name(self.RED_SCORE)
        self.set_integ_fk = self.common_db2.get_kpi_fk_by_kpi_name(self.RED_SCORE_INTEG)
        self.weight_factor = self.get_weight_factor()

    # main functions:

    def main_calculation(self, *args, **kwargs):
        """
            This function gets all the scene results from the SceneKPI, after that calculates every session's KPI,
            and in the end it calls "filter results" to choose every KPI and scene and write the results in DB.
        """
        main_template = self.templates[Const.KPIS]
        if self.calculation_type == Const.SOVI:
            session_template = main_template[main_template[Const.SESSION_LEVEL] == Const.V]
            for i, main_line in session_template.iterrows():
                self.calculate_main_kpi(main_line)
        else:
            for i, main_line in main_template.iterrows():
                self.calculate_manual_kpi(main_line)
        if not main_template.empty:
            self.choose_and_write_results()
        return self.red_score

    def calculate_main_kpi(self, main_line):
        """
        This function gets a line from the main_sheet, transfers it to the match function, and checks all of the
        KPIs in the same name in the match sheet.
        :param main_line: series from the template of the main_sheet.
        """
        kpi_name = main_line[Const.KPI_NAME]
        relevant_scif = self.scif
        scene_types = self.toolbox.does_exist(main_line, Const.SCENE_TYPE)
        if scene_types:
            relevant_scif = relevant_scif[relevant_scif['template_name'].isin(scene_types)]
        scene_groups = self.toolbox.does_exist(main_line, Const.SCENE_TYPE_GROUP)
        if scene_groups:
            relevant_scif = relevant_scif[relevant_scif['template_group'].isin(scene_groups)]
        if main_line[Const.SHEET] == Const.SCENE_AVAILABILITY:
            result = False if relevant_scif.empty else True
        else:
            result = self.toolbox.calculate_kpi_by_type(main_line, relevant_scif)
        self.write_to_session_level(kpi_name=kpi_name, result=result)

    def calculate_manual_kpi(self, main_line):
        """
        This function gets a line from the main_sheet, transfers it to the match function, and checks all of the
        KPIs in the same name in the match sheet.
        :param main_line: series from the template of the main_sheet.
        """
        kpi_name = main_line[Const.KPI_NAME]
        relevant_template = self.templates[Const.SURVEY]
        relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name]
        target = len(relevant_template) if main_line[Const.GROUP_TARGET] == Const.ALL \
            else main_line[Const.GROUP_TARGET]
        passed_counter = 0
        for i, kpi_line in relevant_template.iterrows():
            answer = self.toolbox.calculate_survey_specific(kpi_line)
            if answer:
                passed_counter += 1
        result = passed_counter >= target
        self.write_to_session_level(kpi_name=kpi_name, result=result)

    # write in DF:

    def write_to_session_level(self, kpi_name, result=0):
        """
        Writes a result in the DF
        :param kpi_name: string
        :param result: boolean
        """
        # result * 1 coerces booleans to 0/1 for the DB column.
        result_dict = {Const.KPI_NAME: kpi_name, Const.DB_RESULT: result * 1}
        self.session_results = self.session_results.append(result_dict, ignore_index=True)

    def write_to_all_levels(self, kpi_name, result, display_text, weight, scene_fk=None, reuse_scene=False):
        """
        Writes the final result in the "all" DF, add the score to the red score and writes the KPI in the DB
        :param kpi_name: str
        :param result: int
        :param display_text: str
        :param weight: int/float
        :param scene_fk: for the scene's kpi
        :param reuse_scene: this kpi can use scenes that were used
        """
        score = self.get_score(weight)
        result_value = Const.PASS if result > 0 else Const.FAIL
        if result_value == Const.PASS:
            self.red_score += score
        result_dict = {Const.KPI_NAME: kpi_name, Const.DB_RESULT: result, Const.SCORE: score}
        if scene_fk:
            result_dict[Const.DB_SCENE_FK] = scene_fk
            if not reuse_scene:
                self.used_scenes.append(scene_fk)
        self.all_results = self.all_results.append(result_dict, ignore_index=True)
        self.write_to_db(kpi_name, score, display_text=display_text, result_value=result_value)

    def choose_and_write_results(self):
        """
        writes all the KPI in the DB: first the session's ones, second the scene's ones and in the end the ones
        that depends on the previous ones. After all it writes the red score
        """
        main_template = self.templates[Const.KPIS]
        self.write_session_kpis(main_template)
        if self.calculation_type == Const.SOVI:
            self.write_scene_kpis(main_template)
        self.write_condition_kpis(main_template)
        self.write_missings(main_template)
        self.write_to_db(self.RED_SCORE, self.red_score)

    def write_missings(self, main_template):
        """
        write 0 in all the KPIs that didn't get score
        :param main_template:
        """
        for i, main_line in main_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            if not self.all_results[self.all_results[Const.KPI_NAME] == kpi_name].empty:
                continue
            result = 0
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            self.write_to_all_levels(kpi_name, result, display_text, weight)

    def write_session_kpis(self, main_template):
        """
        iterates all the session's KPIs and saves them
        :param main_template: main_sheet.
        """
        session_template = main_template[main_template[Const.CONDITION] == ""]
        if self.calculation_type == Const.SOVI:
            session_template = session_template[session_template[Const.SESSION_LEVEL] == Const.V]
        for i, main_line in session_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            result = self.session_results[self.session_results[Const.KPI_NAME] == kpi_name]
            if result.empty:
                continue
            result = result.iloc[0][Const.DB_RESULT]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            self.write_to_all_levels(kpi_name, result, display_text, weight)

    def write_incremental_kpis(self, scene_template):
        """
        lets the incremental KPIs choose their scenes (if they passed).
        if KPI passed some scenes, we will choose the scene that the children passed
        :param scene_template: filtered main_sheet
        :return: the new template (without the KPI written already)
        """
        incremental_template = scene_template[scene_template[Const.INCREMENTAL] != ""]
        while not incremental_template.empty:
            for i, main_line in incremental_template.iterrows():
                kpi_name = main_line[Const.KPI_NAME]
                reuse_scene = main_line[Const.REUSE_SCENE] == Const.V
                kpi_scene_fk = self.common_db2.get_kpi_fk_by_kpi_name(kpi_name + Const.SCENE_SUFFIX)
                kpi_results = self.scenes_results[self.scenes_results[Const.DB_SCENE_KPI_FK] == kpi_scene_fk]
                if not reuse_scene:
                    kpi_results = kpi_results[~(kpi_results[Const.DB_SCENE_FK].isin(self.used_scenes))]
                true_results = kpi_results[kpi_results[Const.DB_RESULT] > 0]
                increments = main_line[Const.INCREMENTAL]
                if ', ' in increments:
                    first_kpi = increments.split(', ')[0]
                    # BUGFIX: the format string previously had no placeholder
                    # (', '.format(first_kpi) == ', '), which stripped every
                    # separator from the list instead of removing first_kpi.
                    others = increments.replace('{}, '.format(first_kpi), '')
                    scene_template.loc[scene_template[Const.KPI_NAME] == first_kpi, Const.INCREMENTAL] = others
                if true_results.empty:
                    scene_template.loc[scene_template[Const.KPI_NAME] == kpi_name, Const.INCREMENTAL] = ""
                else:
                    true_results = true_results.sort_values(by=Const.DB_RESULT, ascending=False)
                    display_text = main_line[Const.DISPLAY_TEXT]
                    weight = main_line[Const.WEIGHT]
                    scene_fk = true_results.iloc[0][Const.DB_SCENE_FK]
                    self.write_to_all_levels(kpi_name, true_results.iloc[0][Const.DB_RESULT], display_text,
                                             weight, scene_fk=scene_fk, reuse_scene=reuse_scene)
                    scene_template = scene_template[~(scene_template[Const.KPI_NAME] == kpi_name)]
            incremental_template = scene_template[scene_template[Const.INCREMENTAL] != ""]
        return scene_template

    def write_regular_scene_kpis(self, scene_template):
        """
        lets the regular KPIs choose their scenes (if they passed).
        Like in the incremental part - if KPI passed some scenes, we will choose the scene that the children passed
        :param scene_template: filtered main_sheet (only scene KPIs, and without the passed incremental)
        :return: the new template (without the KPI written already)
        """
        for i, main_line in scene_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            reuse_scene = main_line[Const.REUSE_SCENE] == Const.V
            kpi_scene_fk = self.common_db2.get_kpi_fk_by_kpi_name(kpi_name + Const.SCENE_SUFFIX)
            kpi_results = self.scenes_results[self.scenes_results[Const.DB_SCENE_KPI_FK] == kpi_scene_fk]
            if not reuse_scene:
                kpi_results = kpi_results[~(kpi_results[Const.DB_SCENE_FK].isin(self.used_scenes))]
            true_results = kpi_results[kpi_results[Const.DB_RESULT] > 0]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            if true_results.empty:
                continue
            true_results = true_results.sort_values(by=Const.DB_RESULT, ascending=False)
            scene_fk = true_results.iloc[0][Const.DB_SCENE_FK]
            self.write_to_all_levels(kpi_name, true_results.iloc[0][Const.DB_RESULT], display_text, weight,
                                     scene_fk=scene_fk, reuse_scene=reuse_scene)
            scene_template = scene_template[~(scene_template[Const.KPI_NAME] == kpi_name)]
        return scene_template

    def write_not_passed_scene_kpis(self, scene_template):
        """
        lets the KPIs not passed choose their scenes.
        :param scene_template: filtered main_sheet (only scene KPIs, and without the passed KPIs)
        """
        for i, main_line in scene_template.iterrows():
            kpi_name = main_line[Const.KPI_NAME]
            kpi_scene_fk = self.common_db2.get_kpi_fk_by_kpi_name(kpi_name + Const.SCENE_SUFFIX)
            reuse_scene = main_line[Const.REUSE_SCENE] == Const.V
            kpi_results = self.scenes_results[self.scenes_results[Const.DB_SCENE_KPI_FK] == kpi_scene_fk]
            if not reuse_scene:
                kpi_results = kpi_results[~(kpi_results[Const.DB_SCENE_FK].isin(self.used_scenes))]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            if kpi_results.empty:
                continue
            scene_fk = kpi_results.iloc[0][Const.DB_SCENE_FK]
            self.write_to_all_levels(kpi_name, 0, display_text, weight, scene_fk=scene_fk, reuse_scene=reuse_scene)

    def write_scene_kpis(self, main_template):
        """
        iterates every scene_kpi that does not depend on others, and choose the scene they will take:
        1. the incrementals take their scene (if they passed).
        2. the regular KPIs that passed choose their scenes.
        3. the ones that didn't pass choose their random scenes.
        :param main_template: main_sheet.
        """
        scene_template = main_template[(main_template[Const.SESSION_LEVEL] != Const.V) &
                                       (main_template[Const.CONDITION] == "")]
        scene_template = self.write_incremental_kpis(scene_template)
        scene_template = self.write_regular_scene_kpis(scene_template)
        self.write_not_passed_scene_kpis(scene_template)

    def write_condition_kpis(self, main_template):
        """
        writes all the KPI that depend on other KPIs by checking if the parent KPI has passed and in which scene.
        :param main_template: main_sheet
        """
        condition_template = main_template[main_template[Const.CONDITION] != '']
        for i, main_line in condition_template.iterrows():
            condition = main_line[Const.CONDITION]
            kpi_name = main_line[Const.KPI_NAME]
            if self.calculation_type == Const.MANUAL or main_line[Const.SESSION_LEVEL] == Const.V:
                kpi_results = self.session_results[self.session_results[Const.KPI_NAME] == kpi_name]
            else:
                kpi_scene_fk = self.common_db2.get_kpi_fk_by_kpi_name(kpi_name + Const.SCENE_SUFFIX)
                kpi_results = self.scenes_results[self.scenes_results[Const.DB_SCENE_KPI_FK] == kpi_scene_fk]
            condition_result = self.all_results[(self.all_results[Const.KPI_NAME] == condition) &
                                                (self.all_results[Const.DB_RESULT] > 0)]
            if condition_result.empty:
                continue
            condition_result = condition_result.iloc[0]

            if Const.DB_SCENE_FK in condition_result:
                condition_scene = condition_result[Const.DB_SCENE_FK]
            else:
                condition_scene = None

            # When the parent KPI passed in a specific scene, the dependent
            # KPI must be taken from that same scene.
            if condition_scene and Const.DB_SCENE_FK in kpi_results:
                results = kpi_results[kpi_results[Const.DB_SCENE_FK] == condition_scene]
            else:
                results = kpi_results
            if results.empty:
                continue
            result = results.iloc[0][Const.DB_RESULT]
            display_text = main_line[Const.DISPLAY_TEXT]
            weight = main_line[Const.WEIGHT]
            scene_fk = results.iloc[0][Const.DB_SCENE_FK] if Const.DB_SCENE_FK in kpi_results else None
            self.write_to_all_levels(kpi_name, result, display_text, weight, scene_fk=scene_fk)

    def get_weight_factor(self):
        """Returns the factor that normalizes the template weights to 100."""
        sum_weights = self.templates[Const.KPIS][Const.WEIGHT].sum()
        return sum_weights / 100.0

    def get_score(self, weight):
        """Returns the normalized score for a KPI weight."""
        return weight / self.weight_factor

    def get_pks_of_result(self, result):
        """
        converts string result to its pk (in static.kpi_result_value)
        :param result: str
        :return: int
        """
        pk = self.result_values[self.result_values['value'] == result]['pk'].iloc[0]
        return pk

    @staticmethod
    def get_0_1_of_result(result):
        """
        converts string result to 0 (Fail) or 1 (anything else)
        :param result: str
        :return: int
        """
        pk = 0 if result == Const.FAIL else 1
        return pk

    def write_to_db(self, kpi_name, score, display_text='', result_value=Const.FAIL):
        """
        writes result in the DB
        :param kpi_name: str
        :param score: float, the weight of the question
        :param display_text: str
        :param result_value: str, Pass/Fail
        """
        if kpi_name == self.RED_SCORE:
            self.common_db2.write_to_db_result(
                fk=self.set_fk, score=score, numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id,
                identifier_result=self.common_db2.get_dictionary(kpi_fk=self.set_fk))
            self.common_db2.write_to_db_result(
                fk=self.set_integ_fk, score=score, numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id,
                identifier_result=self.common_db2.get_dictionary(kpi_fk=self.set_integ_fk))
            self.write_to_db_result(
                self.common_db.get_kpi_fk_by_kpi_name(self.RED_SCORE, 1), score=score, level=1)
            self.write_to_db_result(
                self.common_db_integ.get_kpi_fk_by_kpi_name(self.RED_SCORE_INTEG, 1), score=score, level=1,
                set_type=Const.MANUAL)
        else:
            integ_kpi_fk = self.common_db2.get_kpi_fk_by_kpi_name(kpi_name)
            display_kpi_fk = self.common_db2.get_kpi_fk_by_kpi_name(display_text)
            if display_kpi_fk is None:
                # display texts longer than the DB column are stored truncated.
                display_kpi_fk = self.common_db2.get_kpi_fk_by_kpi_name(display_text[:100])
            result = self.get_pks_of_result(result_value)
            self.common_db2.write_to_db_result(
                fk=display_kpi_fk, score=score, identifier_parent=self.common_db2.get_dictionary(kpi_fk=self.set_fk),
                should_enter=True, result=result, numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id)
            result = self.get_0_1_of_result(result_value)
            self.common_db2.write_to_db_result(
                fk=integ_kpi_fk, score=score, should_enter=True, result=result,
                identifier_parent=self.common_db2.get_dictionary(kpi_fk=self.set_integ_fk),
                numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id)
            if result_value == Const.FAIL:
                score = 0
            self.write_to_db_result(
                self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 2), score=score, level=2)
            self.write_to_db_result(
                self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 3), score=score, level=3, display_text=display_text)
            self.write_to_db_result(self.common_db_integ.get_kpi_fk_by_kpi_name(
                kpi_name, 3), score=score, level=3, display_text=kpi_name, set_type=Const.MANUAL)

    def write_to_db_result(self, fk, level, score, set_type=Const.SOVI, **kwargs):
        """
        This function creates the result data frame of every KPI (atomic KPI/KPI/KPI set),
        and appends the insert SQL query into the queries' list, later to be written to the DB.
        """
        if kwargs:
            kwargs['score'] = score
            attributes = self.create_attributes_dict(fk=fk, level=level, set_type=set_type, **kwargs)
        else:
            attributes = self.create_attributes_dict(fk=fk, score=score, set_type=set_type, level=level)
        if level == self.common_db.LEVEL1:
            table = self.common_db.KPS_RESULT
        elif level == self.common_db.LEVEL2:
            table = self.common_db.KPK_RESULT
        elif level == self.common_db.LEVEL3:
            table = self.common_db.KPI_RESULT
        else:
            return
        query = insert(attributes, table)
        if set_type == Const.SOVI:
            self.common_db.kpi_results_queries.append(query)
        else:
            self.common_db_integ.kpi_results_queries.append(query)

    def create_attributes_dict(self, score, fk=None, level=None, display_text=None, set_type=Const.SOVI, **kwargs):
        """
        This function creates a data frame with all attributes needed for saving in KPI results tables.
        or
        you can send dict with all values in kwargs
        """
        kpi_static_data = self.kpi_static_data if set_type == Const.SOVI else self.kpi_static_data_integ
        if level == self.common_db.LEVEL1:
            if kwargs:
                kwargs['score'] = score
                values = [val for val in kwargs.values()]
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame(values, columns=col)
            else:
                kpi_set_name = kpi_static_data[kpi_static_data['kpi_set_fk'] == fk]['kpi_set_name'].values[0]
                attributes = pd.DataFrame(
                    [(kpi_set_name, self.session_uid, self.store_id, self.visit_date.isoformat(),
                      format(score, '.2f'), fk)],
                    columns=['kps_name', 'session_uid', 'store_fk', 'visit_date', 'score_1', 'kpi_set_fk'])
        elif level == self.common_db.LEVEL2:
            if kwargs:
                kwargs['score'] = score
                values = [val for val in kwargs.values()]
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame(values, columns=col)
            else:
                kpi_name = kpi_static_data[kpi_static_data['kpi_fk'] == fk]['kpi_name'].values[0].replace("'", "\\'")
                attributes = pd.DataFrame(
                    [(self.session_uid, self.store_id, self.visit_date.isoformat(), fk, kpi_name, score)],
                    columns=['session_uid', 'store_fk', 'visit_date', 'kpi_fk', 'kpk_name', 'score'])
        elif level == self.common_db.LEVEL3:
            if kwargs:
                kwargs['score'] = score
                values = tuple([val for val in kwargs.values()])
                col = [col for col in kwargs.keys()]
                attributes = pd.DataFrame([values], columns=col)
            else:
                data = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]
                kpi_fk = data['kpi_fk'].values[0]
                kpi_set_name = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]['kpi_set_name'].values[0]
                attributes = pd.DataFrame(
                    [(display_text, self.session_uid, kpi_set_name, self.store_id, self.visit_date.isoformat(),
                      datetime.utcnow().isoformat(), score, kpi_fk, fk)],
                    columns=['display_text', 'session_uid', 'kps_name', 'store_fk', 'visit_date',
                             'calculation_time', 'score', 'kpi_fk', 'atomic_kpi_fk'])
        else:
            attributes = pd.DataFrame()
        return attributes.to_dict()

    def remove_queries_of_calculation_type(self):
        """
        In case that the session has no results in the SOVI KPIs we are deleting all the queries
        and calculating the MANUAL
        :return:
        """
        self.common_db2.kpi_results = pd.DataFrame(columns=self.common_db2.COLUMNS)

    def commit_results(self):
        """
        committing the results in both sets
        """
        self.common_db.delete_results_data_by_kpi_set()
        self.common_db.commit_results_data_without_delete()
        if self.common_db_integ:
            self.common_db_integ.delete_results_data_by_kpi_set()
            self.common_db_integ.commit_results_data_without_delete()
# Exemple #6
# 0
class CCITToolBox:
    """
    Calculates the CCIT (HBC Italia) store-level KPI score.

    Aggregates the per-scene 'scene_score' results into a single
    'store_score' row written through the V2 common, links each contributing
    scene result to it, and mirrors all results into the old (V1) KPI tables.
    """

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3
    TEMPLATE_PATH = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'Data',
        'KPI_Templates.xlsx')
    CCIT_MANU = 'HBC Italia'
    MULTIPLIER_SHEET = 'Multiplier'
    STORE_ATT_1 = 'Store Att1'
    SCORE_MULTIPLIER = 'Score multiplier'
    NON_KPI = 0
    ATOMIC_KPI_NAME = 'atomic_kpi_name'
    KPI_NAME = 'kpi_name'
    KPI_SET_NAME = 'kpi_set_name'

    def __init__(self, data_provider, output, commonV2, commonV1):
        """
        :param data_provider: session data provider (products, scif, etc.).
        :param output: engine output handle.
        :param commonV2: new-tables common writer (kpi_level_2 results).
        :param commonV1: old-tables common writer (static KPI hierarchy).
        """
        self.output = output
        self.data_provider = data_provider
        self.common = commonV2
        self.commonV1 = commonV1
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.old_kpi_static_data = self.commonV1.get_kpi_static_data()
        if not self.data_provider.scene_item_facts.empty:
            self.scene_results = self.ps_data_provider.get_scene_results(
                self.scene_info['scene_fk'].drop_duplicates().values)
        else:
            # Fix: scene_results used to stay unset for sessions with no
            # scene item facts, raising AttributeError later in
            # main_function(). An empty frame yields zero scores instead.
            self.scene_results = pd.DataFrame(
                columns=['pk', 'kpi_level_2_fk', 'score', 'result'])
        self.kpi_results_queries = []
        # NOTE(review): 'sheetname' is the pre-0.21 pandas keyword (later
        # renamed to 'sheet_name'); kept as-is for compatibility with the
        # pandas version this project appears to pin — confirm before
        # upgrading pandas.
        self.multiplier_template = pd.read_excel(
            self.TEMPLATE_PATH, sheetname=self.MULTIPLIER_SHEET)

    def get_manufacturer_fk(self, manu):
        """
        Return the manufacturer_fk matching *manu* by name.

        :param manu: manufacturer_name value to look up in all_products.
        :raises IndexError: if the name is absent from all_products.
        """
        return self.all_products[
            self.all_products['manufacturer_name'] ==
            manu]['manufacturer_fk'].drop_duplicates().values[0]

    def insert_results_to_old_tables(self):
        """
        Mirror the V2 scene results into the old (V1) KPI tables.

        For each distinct KPI name at each static-data level, the score is
        the sum of V2 scene results attributed to the matching V2 KPI
        (the aggregated 'scene_score' KPI for the set level).
        """
        levels = [(self.LEVEL3, self.ATOMIC_KPI_NAME),
                  (self.LEVEL2, self.KPI_NAME),
                  (self.LEVEL1, self.KPI_SET_NAME)]
        for level_num, level_col in levels:
            # Iterate the distinct names directly. The previous
            # implementation joined them into one comma-separated string
            # and split it again, which corrupted any KPI name containing
            # a comma (and built a throwaway DataFrame to do so).
            for kpi in self.old_kpi_static_data[level_col].drop_duplicates().values:
                old_kpi_fk = self.commonV1.get_kpi_fk_by_kpi_name(
                    kpi, level_num)
                if level_num != self.LEVEL1:
                    new_kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi)
                else:
                    # The set level maps onto the aggregated scene-score KPI.
                    new_kpi_fk = self.common.get_kpi_fk_by_kpi_type(
                        'scene_score')
                score = self.scene_results[
                    self.scene_results['kpi_level_2_fk'] ==
                    new_kpi_fk]['score'].sum()
                self.commonV1.write_to_db_result(old_kpi_fk, level_num, score)

    def main_function(self):
        """
        Calculate the KPI results for the session.

        Sums the 'scene_score' results into one 'store_score' row
        (attributed to the HBC Italia manufacturer), attaches every
        contributing scene result to it, then mirrors the results into the
        old tables. A store-attribute score multiplier that was previously
        commented out here has been removed as dead code.
        """
        scene_score_fk = self.common.get_kpi_fk_by_kpi_type('scene_score')
        scene_rows = self.scene_results[
            self.scene_results['kpi_level_2_fk'] == scene_score_fk]
        scene_result_fks = scene_rows['pk'].values
        store_score = scene_rows['result'].sum()
        manu_fk = self.get_manufacturer_fk(self.CCIT_MANU)
        kpi_fk = self.common.get_kpi_fk_by_kpi_type('store_score')
        identifier_result = self.common.get_dictionary(kpi_fk=kpi_fk)
        identifier_result['session_fk'] = self.session_info['pk'].values[0]
        identifier_result['store_fk'] = self.store_id
        self.common.write_to_db_result(fk=kpi_fk,
                                       numerator_id=manu_fk,
                                       numerator_result=store_score,
                                       denominator_id=self.store_id,
                                       result=store_score,
                                       score=store_score,
                                       should_enter=False,
                                       identifier_result=identifier_result)
        # Link each contributing scene-level result to the store-level row.
        for scene_result_fk in scene_result_fks:
            self.common.write_to_db_result(fk=self.NON_KPI,
                                           should_enter=True,
                                           scene_result_fk=scene_result_fk,
                                           identifier_parent=identifier_result)
        self.insert_results_to_old_tables()