def __init__(self, data_provider, output):
    self.output = output
    self.data_provider = data_provider
    self.common = Common(self.data_provider)
    self.commonV2 = CommonV2(self.data_provider)
    self.project_name = self.data_provider.project_name
    self.session_uid = self.data_provider.session_uid
    self.k_engine = BaseCalculationsGroup(data_provider, output)
    self.products = self.data_provider[Data.PRODUCTS]
    # self.all_products = self.data_provider[Data.ALL_PRODUCTS]
    self.match_product_in_scene = self.data_provider[Data.MATCHES]
    self.visit_date = self.data_provider[Data.VISIT_DATE]
    self.session_info = self.data_provider[Data.SESSION_INFO]
    self.scene_info = self.data_provider[Data.SCENES_INFO]
    self.store_id = self.data_provider[Data.STORE_FK]
    self.store_info = self.data_provider[Data.STORE_INFO]
    self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
    self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
    self.kpi_static_data = self.commonV2.get_kpi_static_data()
    self.kpi_results_queries = []
    self.templates = {}
    self.all_products = self.commonV2.data_provider[Data.ALL_PRODUCTS]
    self.session_id = self.data_provider.session_id
    self.score_templates = {}
    self.get_templates()
    self.get_score_template()
    # manufacturer foreign key of 'Coca Cola' (take the fk column, not the whole product row)
    self.manufacturer_fk = self.all_products[
        self.all_products['manufacturer_name'] == 'Coca Cola']['manufacturer_fk'].iloc[0]
    self.sos = SOS(self.data_provider, self.output)
    self.total_score = 0
    self.session_fk = self.data_provider[Data.SESSION_INFO]['pk'].iloc[0]
    self.toolbox = GENERALToolBox(self.data_provider)
    self.scenes_info = self.data_provider[Data.SCENES_INFO]
    self.kpi_results_new_tables_queries = []
def __init__(self, data_provider, output):
    self.output = output
    self.data_provider = data_provider
    self.project_name = self.data_provider.project_name
    self.session_uid = self.data_provider.session_uid
    self.products = self.data_provider[Data.PRODUCTS]
    self.all_products = self.data_provider[Data.ALL_PRODUCTS]
    self.match_product_in_scene = self.data_provider[Data.MATCHES]
    self.visit_date = self.data_provider[Data.VISIT_DATE]
    self.session_info = self.data_provider[Data.SESSION_INFO]
    self.scene_info = self.data_provider[Data.SCENES_INFO]
    self.store_id = self.data_provider[Data.STORE_FK]
    self.store_info = self.data_provider[Data.STORE_INFO]
    self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
    # we don't need to check scenes without United products
    self.united_scenes = self.get_united_scenes()
    self.survey = Survey(self.data_provider, self.output)
    self.sos = SOS(self.data_provider, self.output)
    self.templates = {}
    self.common_db = Common(self.data_provider, CMA_COMPLIANCE)
    self.region = self.store_info['region_name'].iloc[0]
    self.store_type = self.store_info['store_type'].iloc[0]
    self.program = self.store_info['additional_attribute_14'].iloc[0]
    self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
    if self.store_type in STORE_TYPES:
        self.store_type = STORE_TYPES[self.store_type]
    self.store_attr = self.store_info['additional_attribute_15'].iloc[0]
    self.kpi_static_data = self.common_db.get_kpi_static_data()
    self.total_score = 0
    for sheet in Const.SHEETS_CMA:
        self.templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheetname=sheet).fillna('')
def __init__(self, data_provider, output):
    self.output = output
    self.data_provider = data_provider
    self.common_v2 = CommonV2(self.data_provider)
    self.common_v1 = CommonV1(self.data_provider)
    self.data_provider.common_v2 = self.common_v2
    self.data_provider.common_v1 = self.common_v1
    self.project_name = self.data_provider.project_name
    self.session_uid = self.data_provider.session_uid
    self.products = self.data_provider[Data.PRODUCTS]
    self.all_products = self.data_provider[Data.ALL_PRODUCTS]
    self.match_product_in_scene = self.data_provider[Data.MATCHES]
    self.visit_date = self.data_provider[Data.VISIT_DATE]
    self.session_info = self.data_provider[Data.SESSION_INFO]
    self.scene_info = self.data_provider[Data.SCENES_INFO]
    self.store_id = self.data_provider[Data.STORE_FK]
    self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
    self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
    self.channel = self.get_store_channel(self.store_id)
    self.kpi_static_data = self.common_v2.get_kpi_static_data()
    self.data_provider.kpi_sheets = {}
    self.kpi_sheets = self.data_provider.kpi_sheets
    self.old_kpi_static_data = self.common_v1.get_kpi_static_data()
    for name in SHEETS_NAME:
        parsed_template = ParseTemplates.parse_template(TEMPLATE_PATH, sheet_name=name)
        self.kpi_sheets[name] = parsed_template[parsed_template['Channel'] == self.channel]
    self.data_provider.sos = SOS(self.data_provider, output=None)
    self.data_provider.assortment = Assortment(self.data_provider, output=None)
def __init__(self, data_provider, output, common_db2):
    self.output = output
    self.data_provider = data_provider
    self.common_db = Common(self.data_provider, SUB_PROJECT)
    self.common_db2 = common_db2
    self.project_name = self.data_provider.project_name
    self.session_uid = self.data_provider.session_uid
    self.products = self.data_provider[Data.PRODUCTS]
    self.all_products = self.data_provider[Data.ALL_PRODUCTS]
    self.match_product_in_scene = self.data_provider[Data.MATCHES]
    self.visit_date = self.data_provider[Data.VISIT_DATE]
    self.session_info = self.data_provider[Data.SESSION_INFO]
    self.scene_info = self.data_provider[Data.SCENES_INFO]
    self.store_id = self.data_provider[Data.STORE_FK]
    self.store_info = self.data_provider[Data.STORE_INFO]
    self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
    self.scif = self.scif[~(self.scif['product_type'] == 'Irrelevant')]
    # we don't need to check scenes without United products
    self.sw_scenes = self.get_relevant_scenes()
    self.survey = Survey(self.data_provider, self.output)
    self.sos = SOS(self.data_provider, self.output)
    self.results = self.data_provider[Data.SCENE_KPI_RESULTS]
    self.region = self.store_info['region_name'].iloc[0]
    self.store_type = self.store_info['store_type'].iloc[0]
    self.program = self.store_info['additional_attribute_3'].iloc[0]
    self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
    if self.store_type in STORE_TYPES:
        self.store_type = STORE_TYPES[self.store_type]
    self.store_attr = self.store_info['additional_attribute_3'].iloc[0]
    # self.kpi_static_data = self.common_db.get_kpi_static_data()
    self.ignore_stacking = False
    self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
    self.sub_scores = defaultdict(int)
    self.sub_totals = defaultdict(int)
    self.templates = self.get_template()
    self.hierarchy = self.templates[Const.KPIS].set_index(Const.KPI_NAME)[Const.PARENT].to_dict()
    self.templates = self.get_relevant_template(self.templates)
    self.children = self.templates[Const.KPIS][Const.KPI_NAME]
    self.tools = Shared(self.data_provider, self.output)
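# The constructors above all share the same template-loading idiom: every sheet in the
# project's Excel template is read once into a dict keyed by sheet name, with blank
# cells normalised to ''. A minimal standalone sketch of that pattern follows; the path
# and sheet names here are illustrative placeholders, not the project's real constants,
# and the original code uses the legacy pandas keyword `sheetname=` where newer pandas
# expects `sheet_name=`.
import pandas as pd

TEMPLATE_PATH = '/path/to/Template.xlsx'   # placeholder
SHEETS = ['KPIs', 'SOS', 'Availability']   # placeholder sheet names

def load_templates(path=TEMPLATE_PATH, sheets=SHEETS):
    # one DataFrame per sheet, matching self.templates[sheet] in the constructors above
    return {sheet: pd.read_excel(path, sheet_name=sheet).fillna('') for sheet in sheets}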
class JEFFToolBox:
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    def __init__(self, data_provider, output, commonv2):
        self.k_engine = BaseCalculationsScript(data_provider, output)
        self.output = output
        self.data_provider = data_provider
        self.common = commonv2
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.templates = self.data_provider[Data.TEMPLATES]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        # self.kpi_static_data = self.get_kpi_static_data()
        self.kpi_results_queries = []
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.store_type = self.store_info['store_type'].iloc[0]
        # self.rules = pd.read_excel(TEMPLATE_PATH).set_index('store_type').to_dict('index')
        self.ps_data_provider = PsDataProvider(self.data_provider, self.output)
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.ignore_stacking = False
        self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
        self.manufacturer_fk = self.all_products['manufacturer_fk'][
            self.all_products['manufacturer_name'] == 'CCNA'].iloc[0]
        # self.scene_data = self.load_scene_data()
        # self.kpi_set_fk = kpi_set_fk
        self.templates = {}
        self.parse_template()
        self.toolbox = GENERALToolBox(self.data_provider)
        self.SOS = SOS_calc(self.data_provider)
        self.survey = Survey_calc(self.data_provider)
        self._merge_matches_and_all_product()

    def _merge_matches_and_all_product(self):
        """
        This method merges the all product data with the match product in scene DataFrame
        """
        self.match_product_in_scene = self.match_product_in_scene.merge(
            self.all_products, on='product_fk', how='left')

    def parse_template(self):
        self.templates['SOS'] = pd.read_excel(TEMPLATE_PATH)

    def main_calculation(self):
        """
        This function calculates the KPI results.
        """
        self.calculate_sos()

    def calculate_sos(self):
        for i, row in self.templates[SOS].iterrows():
            for scene in self.scif['scene_fk'].unique().tolist():
                parent_kpi_fk = 0
                kpi_name = row['KPI Name'].strip()
                kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name)
                # numerator filters
                num_param1 = row['numerator_param1']
                num_values1 = self.sanitize_values(row['numerator_value1'])
                num_param2 = row['numerator_param2']
                num_values2 = self.sanitize_values(row['numerator_value2'])
                # denominator filters
                den_param1 = row['denominator_param1']
                den_values1 = self.sanitize_values(row['denominator_value1'])
                den_param2 = row['denominator_param2']
                den_values2 = self.sanitize_values(row['denominator_value2'])
                # exclusions
                num_exclude_param1 = row['numerator_exclude_param1']
                num_exclude_value1 = self.sanitize_values(row['numerator_exclude_value1'])
                num_exclude_param2 = row['numerator_exclude_param2']
                num_exclude_value2 = self.sanitize_values(row['numerator_exclude_value2'])
                den_exclude_param = row['denominator_exclude_param']
                den_exclude_value = self.sanitize_values(row['denominator_exclude_value'])
                parent_kpi_name = row['Parent']
                if not pd.isna(parent_kpi_name):
                    parent_kpi_fk = self.common.get_kpi_fk_by_kpi_name(parent_kpi_name.strip())
                filters = {num_param1: num_values1, num_param2: num_values2,
                           'product_type': ['POS', 'SKU', 'OTHER'], 'scene_fk': scene}
                filters = self.delete_filter_nan(filters)
                general_filters = {den_param1: den_values1, den_param2: den_values2,
                                   'product_type': ['SKU', 'OTHER'], 'scene_fk': scene}
                general_filters = self.delete_filter_nan(general_filters)
                # an 'ALL' value means everything except the explicit exclusion values
                if not pd.isna(num_exclude_param1) and 'ALL' in num_values1:
                    all_unique_values = self.all_products[num_exclude_param1].unique().tolist()
                    filters[num_exclude_param1] = list(set(all_unique_values) - set(num_exclude_value1))
                if not pd.isna(num_exclude_param2) and 'ALL' in num_values2:
                    all_unique_values = self.all_products[num_exclude_param2].unique().tolist()
                    filters[num_exclude_param2] = list(set(all_unique_values) - set(num_exclude_value2))
                if not pd.isna(den_exclude_param) and 'ALL' in den_values2:
                    all_unique_values = self.all_products[den_exclude_param].unique().tolist()
                    general_filters[den_exclude_param] = list(set(all_unique_values) - set(den_exclude_value))
                ratio = self.SOS.calculate_share_of_shelf(filters, **general_filters)
                shelf_count_list = self.match_product_in_scene['shelf_number'][
                    self.match_product_in_scene['scene_fk'] == scene].unique().tolist()
                shelf_count = max(shelf_count_list) if shelf_count_list else 0
                result = ratio
                score = ratio * shelf_count
                if parent_kpi_fk == 0:
                    self.common.write_to_db_result(fk=kpi_fk, numerator_id=self.manufacturer_fk,
                                                   numerator_result=0, denominator_id=scene,
                                                   denominator_result=0, result=result, score=score)
                else:
                    self.common.write_to_db_result(fk=kpi_fk, numerator_id=self.manufacturer_fk,
                                                   numerator_result=0, denominator_id=scene,
                                                   denominator_result=0, result=result, score=score,
                                                   identifier_parent=parent_kpi_fk)

    @staticmethod
    def sanitize_values(item):
        if pd.isna(item):
            return item
        else:
            items = [x.strip() for x in item.split(',')]
            return items

    @staticmethod
    def delete_filter_nan(filters):
        for key in filters.keys():
            if type(filters[key]) is not list:
                if pd.isna(filters[key]):
                    del filters[key]
        return filters

    def calculate_availability_df(self, **filters):
        """
        :param filters: These are the parameters which the data frame is filtered by.
        :return: Total number of SKUs facings appeared in the filtered Scene Item Facts data frame.
        """
        if set(filters.keys()).difference(self.scif.keys()):
            scif_mpis_diff = self.match_product_in_scene[
                ['scene_fk', 'product_fk'] +
                list(self.match_product_in_scene.keys().difference(self.scif.keys()))]
            # a patch for the item_id field which became item_id_x since it was added to product table as attribute.
            item_id = 'item_id' if 'item_id' in self.scif.columns else 'item_id_x'
            merged_df = pd.merge(self.scif[self.scif.facings != 0], scif_mpis_diff, how='outer',
                                 left_on=['scene_fk', item_id], right_on=['scene_fk', 'product_fk'])
            filtered_df = merged_df[self.toolbox.get_filter_condition(merged_df, **filters)]
            # filtered_df = \
            #     self.match_product_in_scene[self.toolbox.get_filter_condition(self.match_product_in_scene, **filters)]
        else:
            filtered_df = self.scif[self.toolbox.get_filter_condition(self.scif, **filters)]
        availability_df = filtered_df
        return availability_df
class CMASOUTHWESTToolBox:
    EXCLUDE_FILTER = 0
    INCLUDE_FILTER = 1
    CONTAIN_FILTER = 2

    def __init__(self, data_provider, output, common_db2):
        self.output = output
        self.data_provider = data_provider
        # self.common_db = Common(self.data_provider, SUB_PROJECT)
        # self.common_db2 = common_db2
        self.common_db2 = CommonV3(self.data_provider)
        self.common_scene = CommonV2(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.manufacturer_fk = 1
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.scif = self.scif[~(self.scif['product_type'] == 'Irrelevant')]
        # we don't need to check scenes without United products
        self.sw_scenes = self.get_sw_scenes()
        self.survey = Survey(self.data_provider, self.output)
        self.sos = SOS(self.data_provider, self.output)
        self.results = self.data_provider[Data.SCENE_KPI_RESULTS]
        self.templates = {}
        self.region = self.store_info['region_name'].iloc[0]
        self.store_type = self.store_info['store_type'].iloc[0]
        self.program = self.store_info['additional_attribute_3'].iloc[0]
        self.sales_center = self.store_info['additional_attribute_5'].iloc[0]
        if self.store_type in STORE_TYPES:
            self.store_type = STORE_TYPES[self.store_type]
        self.store_attr = self.store_info['additional_attribute_3'].iloc[0]
        # self.kpi_static_data = self.common_db.get_kpi_static_data()
        self.total_score = 0
        self.sub_scores = defaultdict(int)
        self.sub_totals = defaultdict(int)
        self.ignore_stacking = False
        self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack'
        for sheet in Const.SHEETS_CMA:
            self.templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheetname=sheet).fillna('')
        self.tools = Shared(self.data_provider, self.output)

    # main functions:
    def main_calculation(self, *args, **kwargs):
        """
        This function gets all the scene results from the SceneKPI, after that calculates every session's KPI,
        and in the end it calls "filter results" to choose every KPI and scene and write the results in DB.
        """
        main_template = self.templates[Const.KPIS]
        main_template = main_template[main_template[Const.SESSION_LEVEL] == 'Y']
        if self.region in Const.REGIONS:
            for i, main_line in main_template.iterrows():
                store_type = self.does_exist(main_line, Const.STORE_TYPE)
                if store_type is None or self.store_type in store_type:
                    self.calculate_main_kpi(main_line)
            self.write_scene_parent()
            self.write_sub_parents()
            self.write_parent()
            # self.write_to_db_result(
            #     self.common_db.get_kpi_fk_by_kpi_name(SUB_PROJECT, 1), score=self.total_score, level=1)

    def calculate_main_kpi(self, main_line):
        """
        This function gets a line from the main_sheet, transfers it to the match function, and checks all of the
        KPIs in the same name in the match sheet.
        :param main_line: series from the template of the main_sheet.
""" kpi_name = main_line[Const.KPI_NAME] kpi_type = main_line[Const.TYPE] if kpi_name not in Const.ALL_SCENE_KPIS: # placeholder- need to check for unintended consequences relevant_scif = self.scif[self.scif['scene_id'].isin(self.sw_scenes)] else: relevant_scif = self.scif.copy() scene_types = self.does_exist(main_line, Const.SCENE_TYPE) scene_level = self.does_exist(main_line, Const.SCENE_LEVEL) store_attrs = main_line[Const.PROGRAM].split(',') result = score = target = None general_filters = {} if scene_types: relevant_scif = relevant_scif[relevant_scif['template_name'].isin(scene_types)] general_filters['template_name'] = scene_types scene_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP) if scene_groups: relevant_scif = relevant_scif[relevant_scif['template_group'].isin(scene_groups)] general_filters['template_group'] = scene_groups if kpi_type == 'shelves bonus': relevant_template = self.templates['shelves'] else: relevant_template = self.templates[kpi_type] relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name] function = self.get_kpi_function(kpi_type) for i, kpi_line in relevant_template.iterrows(): if not self.store_attr or (store_attrs[0] != '' and self.store_attr not in store_attrs)\ or relevant_scif.empty: continue result, score, target = function(kpi_line, relevant_scif, general_filters) if result is None and score is None and target is None: continue self.update_parents(kpi_name, result, score) if isinstance(result, tuple): self.write_to_all_levels(kpi_name=kpi_name, result=result[0], score=score, target=target, num=result[1], den=result[2]) else: self.write_to_all_levels(kpi_name=kpi_name, result=result, score=score, target=target) else: pass def write_to_session_level(self, kpi_name, result=0): """ Writes a result in the DF :param kpi_name: string :param result: boolean """ result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result * 1} self.session_results = self.session_results.append(result_dict, ignore_index=True) def write_to_all_levels(self, kpi_name, result, score, target=None, scene_fk=None, reuse_scene=False, num=None, den=None): """ Writes the final result in the "all" DF, add the score to the red score and writes the KPI in the DB :param kpi_name: str :param result: int :param display_text: str :param weight: int/float :param scene_fk: for the scene's kpi :param reuse_scene: this kpi can use scenes that were used """ result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result, Const.SCORE: score, Const.THRESHOLD: target} # self.all_results = self.all_results.append(result_dict, ignore_index=True) self.write_to_db(kpi_name, score, result=result, threshold=target, num=num, den=den) # availability: def calculate_availability(self, kpi_line, relevant_scif): """ checks if all the lines in the availability sheet passes the KPI (there is at least one product in this relevant scif that has the attributes). 
:param relevant_scif: filtered scif :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate DP lines :param kpi_line: line from the availability sheet :return: boolean """ filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif) target = kpi_line[Const.TARGET] return filtered_scif[filtered_scif['facings'] > 0]['facings'].count() >= target def filter_scif_specific(self, relevant_scif, kpi_line, name_in_template, name_in_scif): """ takes scif and filters it from the template :param relevant_scif: the current filtered scif :param kpi_line: line from one sheet (availability for example) :param name_in_template: the column name in the template :param name_in_scif: the column name in SCIF :return: """ values = self.does_exist(kpi_line, name_in_template) if values: if name_in_scif in Const.NUMERIC_VALUES_TYPES: values = [float(x) for x in values] return relevant_scif[relevant_scif[name_in_scif].isin(values)] return relevant_scif def filter_scif_availability(self, kpi_line, relevant_scif): """ calls filter_scif_specific for every column in the template of availability :param kpi_line: :param relevant_scif: :return: """ names_of_columns = { Const.MANUFACTURER: "manufacturer_name", Const.BRAND: "brand_name", Const.TRADEMARK: "att2", Const.SIZE: "size", Const.NUM_SUB_PACKAGES: "number_of_sub_packages", # CCBOTTLERSUSConst.PREMIUM_SSD: "Premium SSD", # CCBOTTLERSUSConst.INNOVATION_BRAND: "Innovation Brand", } for name in names_of_columns: relevant_scif = self.filter_scif_specific(relevant_scif, kpi_line, name, names_of_columns[name]) return relevant_scif # SOS: def calculate_sos(self, kpi_line, relevant_scif, general_filters): """ calculates SOS line in the relevant scif. :param kpi_line: line from SOS sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. 
:return: boolean """ kpi_name = kpi_line[Const.KPI_NAME] # relevant_scif = self.filter_by_type_value(relevant_scif, den_type, den_value) general_filters['product_type'] = (['Empty', 'Irrelevant'], 0) relevant_scif = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)] if kpi_line['range'] == 'Y': upper_limit, lower_limit = self.get_sos_targets(kpi_name, sos_range=True) target = None else: upper_limit, lower_limit = None, None target = self.get_sos_targets(kpi_name) sos_filters = self.get_kpi_line_filters(kpi_line, name='numerator') general_filters = self.get_kpi_line_filters(kpi_line, name='denominator') num_scif = relevant_scif[self.get_filter_condition(relevant_scif, **sos_filters)] den_scif = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)] sos_value, num, den = self.tools.sos_with_num_and_dem(kpi_line, num_scif, den_scif, self.facings_field) if sos_value is None: return None, None, None if target: target *= 100 score = 1 if sos_value >= target else 0 target = '{}%'.format(int(target)) elif not target and upper_limit and lower_limit: score = 1 if (lower_limit <= sos_value <= upper_limit) else 0 target = '{}% - {}%'.format(lower_limit, upper_limit) else: score = 0 target = None return (sos_value, num, den), score, target # Targets: def get_sos_targets(self, kpi_name, sos_range=False): targets_template = self.templates[Const.TARGETS] store_targets = targets_template.loc[(targets_template[Const.PROGRAM] == self.program) & (targets_template['region'] == self.region)] filtered_targets_to_kpi = store_targets.loc[targets_template['KPI name'] == kpi_name] if sos_range: if not filtered_targets_to_kpi.empty: range = filtered_targets_to_kpi['target'].values[0].split(' - ') upper_limit = int(range[1].replace('%', '').strip()) lower_limit = int(range[0].replace('%', '').strip()) else: upper_limit, lower_limit = None, None return upper_limit, lower_limit else: if not filtered_targets_to_kpi.empty: target = float(filtered_targets_to_kpi[Const.TARGET].values[0]) else: target = None return target def get_targets(self, kpi_name): targets_template = self.templates[Const.TARGETS] store_targets = targets_template.loc[(targets_template[Const.PROGRAM] == self.program) & (targets_template['region'] == self.region)] filtered_targets_to_kpi = store_targets.loc[targets_template['KPI name'] == kpi_name] if not filtered_targets_to_kpi.empty: target = filtered_targets_to_kpi[Const.TARGET].values[0] else: target = None return target @staticmethod def get_kpi_line_filters(kpi_line, name=''): if name: name = name.lower() + ' ' filters = defaultdict(list) attribs = [x.lower() for x in kpi_line.index] kpi_line.index = attribs c = 1 while 1: if '{}param {}'.format(name, c) in attribs and kpi_line['{}param {}'.format(name, c)]: filters[kpi_line['{}param {}'.format(name, c)]] += (kpi_line['{}value {}'.format(name, c)].split(',')) else: if c > 3: # just in case someone inexplicably chose a nonlinear numbering format. 
break c += 1 return filters @staticmethod def get_kpi_line_targets(kpi_line): mask = kpi_line.index.str.contains('Target') if mask.any(): targets = kpi_line.loc[mask].replace('', np.nan).dropna() targets.index = [int(x.split(Const.SEPERATOR)[1].split(' ')[0]) for x in targets.index] targets = targets.to_dict() else: targets = {} return targets @staticmethod def extrapolate_target(targets, c): while 1: if targets[c]: target = targets[c] break else: c -= 1 if c < 0: target = 0 break return target def calculate_facings_ntba(self, kpi_line, relevant_scif, general_filters): # if not self.store_attr in kpi_line[Const.PROGRAM].split(','): # return 0, 0, 0 scenes = relevant_scif['scene_fk'].unique().tolist() targets = self.get_kpi_line_targets(kpi_line) facings_filters = self.get_kpi_line_filters(kpi_line) score = 0 passed = 0 sum_facings = 0 sum_target = 0 for scene in scenes: scene_scif = relevant_scif[relevant_scif['scene_fk'] == scene] facings = scene_scif[self.get_filter_condition(scene_scif, **facings_filters)][self.facings_field].sum() num_bays = self.match_product_in_scene[self.match_product_in_scene['scene_fk'] == scene]['bay_number'].max() max_given = max(list(targets.keys())) print('Num bays is', num_bays) if num_bays in targets: target = targets[num_bays] else: target = None if target is None: # if num bays exceeds doors given in targets, use largest option as target target = self.extrapolate_target(targets, max_given) if facings >= target: # Please note, 0 > None evaluates true, so 0 facings is a pass when no target is set score += 1 sum_facings += facings sum_target += target if score == len(scenes): passed = 1 # return score, passed, len(scenes) return score, passed, len(scenes) def calculate_ratio(self, kpi_line, relevant_scif, general_filters): sos_filters = self.get_kpi_line_filters(kpi_line) general_filters['product_type'] = (['Empty', 'Irrelevant'], 0) scenes = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)]['scene_fk'].unique().tolist() us = 0 them = 0 if not scenes: return None, None, None for scene in scenes: sos_filters['scene_fk'] = scene sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters) if sos_value >= .8: us += 1 else: them += 1 passed = 0 if us - them >= 0: passed = 1 if them != 0: score = round((us/float(them))*100, 2) elif us > 0: score = us else: score = 0 target = us + them if target != 1: target = round(((us + them) / 2) * 100, 2) return (score, us, them), passed, 100 def calculate_number_of_shelves(self, kpi_line, relevant_scif, general_filters): """ calculates SOS line in the relevant scif. :param kpi_line: line from SOS sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. 
:return: boolean """ kpi_name = kpi_line[Const.KPI_NAME] relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"] relevant_scenes = relevant_scif['scene_fk'].unique().tolist() numerator_filters = self.get_kpi_line_filters(kpi_line) general_filters['product_type'] = (['Empty', 'Irrelevant'], 0) scene_filters = {'scene_fk': relevant_scenes} target = self.get_targets(kpi_name) if isinstance(target, unicode): target = str(target) if isinstance(target, str): target = float(target.split(' ')[0].strip()) numerator_facings = relevant_scif[self.get_filter_condition(relevant_scif, **numerator_filters)][ self.facings_field].sum() denominator_facings = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)][ self.facings_field].sum() # general_filters['Southwest Deliver'] = 'Y' # number_of_shelves_value = self.match_product_in_scene[self.get_filter_condition( # self.match_product_in_scene, **general_filters)][['scene_fk', 'bay_number', 'shelf_number']].\ # unique().count() number_of_shelves_value = self.match_product_in_scene[self.get_filter_condition( self.match_product_in_scene, **scene_filters)]\ [['scene_fk', 'bay_number', 'shelf_number']]\ .drop_duplicates().shape[0] number_of_shelves_score = numerator_facings / float(denominator_facings / float(number_of_shelves_value)) if target: score = 1 if number_of_shelves_score >= target else 0 else: score = 1 target = 0 if 'bonus' not in kpi_name.lower(): return number_of_shelves_score, score, target elif not kpi_line[Const.TARGET]: return score, None, None else: return number_of_shelves_score, None, None def write_scene_parent(self): self.results['parent_kpi'] = [int(Const.SCENE_SESSION_KPI[kpi]) if kpi in Const.SCENE_SESSION_KPI else None for kpi in self.results['kpi_level_2_fk']] self.results = self.results[~self.results['parent_kpi'].isnull()] for i, parent_kpi in enumerate(set(self.results['parent_kpi'])): kpi_res = self.results[self.results['parent_kpi'] == parent_kpi] num, den, score = self.aggregate(kpi_res, parent_kpi) parent_name = self.common_db2.kpi_static_data.set_index('pk').loc[parent_kpi, 'type'] self.sub_totals[parent_name] = den self.sub_scores[parent_name] = num self.write_hierarchy(kpi_res, i, parent_name) def write_hierarchy(self, kpi_res, i, parent_name): for j, kpi_line in kpi_res.iterrows(): kpi_fk = kpi_line['scene_kpi_fk'] self.common_db2.write_to_db_result(0, parent_fk=i, scene_result_fk=kpi_fk, should_enter=True, identifier_parent=self.common_db2.get_dictionary( parent_name=parent_name), hierarchy_only=1, numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id) def aggregate(self, kpi_res, parent_kpi): if Const.BEHAVIOR[parent_kpi] == 'PASS': num = kpi_res['score'].sum() den = kpi_res['parent_kpi'].count() else: num = kpi_res['numerator_result'].sum() den = kpi_res['denominator_result'].sum() score = kpi_res['score'].sum() return num, den, score # Number of shelves def old_calculate_number_of_shelves(self, kpi_line, relevant_scif, general_filters): """ calculates SOS line in the relevant scif. :param kpi_line: line from SOS sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. 
:return: boolean """ kpi_name = kpi_line[Const.KPI_NAME] relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"] den_type = kpi_line[Const.DEN_TYPES_1] den_value = kpi_line[Const.DEN_VALUES_1].split(',') # relevant_scif = self.filter_by_type_value(relevant_scif, den_type, den_value) num_type = kpi_line[Const.NUM_TYPES_1] num_value = kpi_line[Const.NUM_VALUES_1].split(',') # num_scif = self.filter_by_type_value(relevant_scif, num_type, num_value) target = self.get_sos_targets(kpi_name) general_filters[den_type] = den_value if kpi_line[Const.DEN_TYPES_2]: den_type_2 = kpi_line[Const.DEN_TYPES_2] den_value_2 = kpi_line[Const.DEN_VALUES_2].split(',') general_filters[den_type_2] = den_value_2 numerator_filters = {num_type: num_value} if kpi_line[Const.NUM_TYPES_2]: num_type_2 = kpi_line[Const.NUM_TYPES_2] num_value_2 = kpi_line[Const.NUM_VALUES_2].split(',') numerator_filters[num_type_2] = num_value_2 numerator_facings = self.scif[self.get_filter_condition(self.scif, **numerator_filters)][ self.facings_field].sum() denominator_facings = self.scif[self.get_filter_condition(self.scif, **general_filters)][ self.facings_field].sum() general_filters['Southwest Deliver'] = 'Y' number_of_shelves_value = self.match_product_in_scene[self.get_filter_condition( self.match_product_in_scene, **general_filters)]['shelf_number'].unique().count() number_of_shelves_score = numerator_facings / float(denominator_facings / float(number_of_shelves_value)) if target: score = 1 if number_of_shelves_score >= target else 0 else: score = 1 target = 0 return number_of_shelves_score, score, target # helpers: def get_column_name(self, field_name, df): """ checks what the real field name in DttFrame is (if it exists in the DF or exists in the "converter" sheet). :param field_name: str :param df: scif/products :return: real column name (if exists) """ if field_name in df.columns: return field_name if field_name.upper() in self.converters[Const.NAME_IN_TEMP].str.upper().tolist(): field_name = self.converters[self.converters[Const.NAME_IN_TEMP].str.upper() == field_name.upper()][ Const.NAME_IN_DB].iloc[0] return field_name return None def filter_by_type_value(self, relevant_scif, type_name, value): """ filters scif with the type and value :param relevant_scif: current filtered scif :param type_name: str (from the template) :param value: str :return: new scif """ if type_name == "": return relevant_scif values = value.split(', ') new_type_name = self.get_column_name(type_name, relevant_scif) if not new_type_name: print "There is no field '{}'".format(type_name) return relevant_scif if new_type_name in Const.NUMERIC_VALUES_TYPES: values = [float(x) for x in values] return relevant_scif[relevant_scif[new_type_name].isin(values)] @staticmethod def exclude_scif(exclude_line, relevant_scif): """ filters products out of the scif :param exclude_line: line from the exclusion sheet :param relevant_scif: current filtered scif :return: new scif """ exclude_products = exclude_line[Const.PRODUCT_EAN].split(', ') return relevant_scif[~(relevant_scif['product_ean_code'].isin(exclude_products))] @staticmethod def does_exist(kpi_line, column_name): """ checks if kpi_line has values in this column, and if it does - returns a list of these values :param kpi_line: line from template :param column_name: str :return: list of values if there are, otherwise None """ if column_name in kpi_line.keys() and kpi_line[column_name] != "": cell = kpi_line[column_name] if type(cell) in [int, float]: return [cell] elif type(cell) in [unicode, 
str]: if ", " in cell: return cell.split(", ") else: return cell.split(',') return None def get_kpi_function(self, kpi_type): """ transfers every kpi to its own function :param kpi_type: value from "sheet" column in the main sheet :return: function """ if kpi_type == Const.AVAILABILITY: return self.calculate_availability elif kpi_type == Const.SOS: return self.calculate_sos elif kpi_type == Const.SHELVES or kpi_type == Const.SHELVES_BONUS: return self.calculate_number_of_shelves elif kpi_type == Const.SHELVES_BONUS: return self.calculate_number_of_shelves_bonus elif kpi_type == Const.FACINGS: return self.calculate_facings_ntba elif kpi_type == Const.RATIO: return self.calculate_ratio elif kpi_type == Const.PURITY: return self.sos_with_num_and_dem else: Log.warning("The value '{}' in column sheet in the template is not recognized".format(kpi_type)) return None def get_filter_condition(self, df, **filters): """ :param df: The data frame to be filters. :param filters: These are the parameters which the data frame is filtered by. Every parameter would be a tuple of the value and an include/exclude flag. INPUT EXAMPLE (1): manufacturer_name = ('Diageo', DIAGEOAUPNGAMERICAGENERALToolBox.INCLUDE_FILTER) INPUT EXAMPLE (2): manufacturer_name = 'Diageo' :return: a filtered Scene Item Facts data frame. """ if not filters: return df['pk'].apply(bool) if self.facings_field in df.keys(): filter_condition = (df[self.facings_field] > 0) else: filter_condition = None for field in filters.keys(): if field in df.keys(): if isinstance(filters[field], tuple): value, exclude_or_include = filters[field] else: value, exclude_or_include = filters[field], self.INCLUDE_FILTER if not value: continue if not isinstance(value, list): value = [value] if exclude_or_include == self.INCLUDE_FILTER: condition = (df[field].isin(value)) elif exclude_or_include == self.EXCLUDE_FILTER: condition = (~df[field].isin(value)) elif exclude_or_include == self.CONTAIN_FILTER: condition = (df[field].str.contains(value[0], regex=False)) for v in value[1:]: condition |= df[field].str.contains(v, regex=False) else: continue if filter_condition is None: filter_condition = condition else: filter_condition &= condition else: Log.warning('field {} is not in the Data Frame'.format(field)) return filter_condition def choose_and_write_results(self): """ writes all the KPI in the DB: first the session's ones, second the scene's ones and in the end the ones that depends on the previous ones. 
After all it writes the red score """ # self.scenes_results.to_csv('results/{}/scene {}.csv'.format(self.calculation_type, self.session_uid))#### # self.session_results.to_csv('results/{}/session {}.csv'.format(self.calculation_type, self.session_uid))#### main_template = self.templates[Const.KPIS] self.write_session_kpis(main_template) # self.write_condition_kpis(main_template) # self.write_missings(main_template) self.write_to_db(SUB_PROJECT, 0) # result_dict = {Const.KPI_NAME: 'RED SCORE', Const.SCORE: self.red_score}#### # self.all_results = self.all_results.append(result_dict, ignore_index=True)#### # self.all_results.to_csv('results/{}/{}.csv'.format(self.calculation_type, self.session_uid))#### def write_missings(self, main_template): """ write 0 in all the KPIs that didn't get score :param main_template: """ for i, main_line in main_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] if not self.all_results[self.all_results[Const.KPI_NAME] == kpi_name].empty: continue result = 0 display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] self.write_to_all_levels(kpi_name, result, display_text, weight) def write_session_kpis(self, main_template): """ iterates all the session's KPIs and saves them :param main_template: main_sheet. """ # session_template = main_template[main_template[Const.CONDITION] == ""] # if self.calculation_type == Const.SOVI: # session_template = session_template[session_template[Const.SESSION_LEVEL] == Const.V] for i, main_line in main_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] result = self.session_results[self.session_results[Const.KPI_NAME] == kpi_name] if result.empty: continue result = result.iloc[0][Const.RESULT] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] self.write_to_all_levels(kpi_name, result, display_text, weight) def write_regular_scene_kpis(self, scene_template): """ lets the regular KPIs choose their scenes (if they passed). Like in the incremental part - if KPI passed some scenes, we will choose the scene that the children passed :param scene_template: filtered main_sheet (only scene KPIs, and without the passed incremental) :return: the new template (without the KPI written already) """ for i, main_line in scene_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] reuse_scene = main_line[Const.REUSE_SCENE] == Const.V kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] if not reuse_scene: kpi_results = kpi_results[~(kpi_results[Const.SCENE_FK].isin(self.used_scenes))] true_results = kpi_results[kpi_results[Const.RESULT] > 0] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] if true_results.empty: continue true_results = true_results.sort_values(by=Const.RESULT, ascending=False) scene_fk = true_results.iloc[0][Const.SCENE_FK] self.write_to_all_levels(kpi_name, true_results.iloc[0][Const.RESULT], display_text, weight, scene_fk=scene_fk, reuse_scene=reuse_scene) scene_template = scene_template[~(scene_template[Const.KPI_NAME] == kpi_name)] return scene_template def write_not_passed_scene_kpis(self, scene_template): """ lets the KPIs not passed choose their scenes. 
:param scene_template: filtered main_sheet (only scene KPIs, and without the passed KPIs) """ for i, main_line in scene_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] reuse_scene = main_line[Const.REUSE_SCENE] == Const.V kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] if not reuse_scene: kpi_results = kpi_results[~(kpi_results[Const.SCENE_FK].isin(self.used_scenes))] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] if kpi_results.empty: continue scene_fk = kpi_results.iloc[0][Const.SCENE_FK] self.write_to_all_levels(kpi_name, 0, display_text, weight, scene_fk=scene_fk, reuse_scene=reuse_scene) def write_scene_kpis(self, main_template): """ iterates every scene_kpi that does not depend on others, and choose the scene they will take: 1. the incrementals take their scene (if they passed). 2. the regular KPIs that passed choose their scenes. 3. the ones that didn't pass choose their random scenes. :param main_template: main_sheet. """ scene_template = main_template[(main_template[Const.SESSION_LEVEL] != Const.V) & (main_template[Const.CONDITION] == "")] scene_template = self.write_incremental_kpis(scene_template) scene_template = self.write_regular_scene_kpis(scene_template) self.write_not_passed_scene_kpis(scene_template) def write_condition_kpis(self, main_template): """ writes all the KPI that depend on other KPIs by checking if the parent KPI has passed and in which scene. :param main_template: main_sheet """ condition_template = main_template[main_template[Const.CONDITION] != ''] for i, main_line in condition_template.iterrows(): condition = main_line[Const.CONDITION] kpi_name = main_line[Const.KPI_NAME] if self.calculation_type == Const.MANUAL or main_line[Const.SESSION_LEVEL] == Const.V: kpi_results = self.session_results[self.session_results[Const.KPI_NAME] == kpi_name] else: kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] condition_result = self.all_results[(self.all_results[Const.KPI_NAME] == condition) & (self.all_results[Const.RESULT] > 0)] if condition_result.empty: continue condition_result = condition_result.iloc[0] condition_scene = condition_result[Const.SCENE_FK] if condition_scene and Const.SCENE_FK in kpi_results: results = kpi_results[kpi_results[Const.SCENE_FK] == condition_scene] else: results = kpi_results if results.empty: continue result = results.iloc[0][Const.RESULT] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] scene_fk = results.iloc[0][Const.SCENE_FK] if Const.SCENE_FK in kpi_results else None self.write_to_all_levels(kpi_name, result, display_text, weight, scene_fk=scene_fk) def get_sw_scenes(self): return self.scif[self.scif['Southwest Deliver'] == 'Y']['scene_id'].unique().tolist() def get_weight_factor(self): sum_weights = self.templates[Const.KPIS][Const.WEIGHT].sum() return sum_weights / 100.0 def get_score(self, weight): return weight / self.weight_factor def update_parents(self, kpi_name, result, score): parent = self.get_kpi_parent(kpi_name) if parent != SUB_PROJECT: if 'Bonus' in parent: self.update_sub_score(kpi_name, passed=result) else: self.update_sub_score(kpi_name, passed=score) def get_kpi_parent(self, kpi_name): type_name = '{} {}'.format(SUB_PROJECT, kpi_name) kpi_family_fk = int(self.common_db2.kpi_static_data.set_index('type')\ .loc[type_name, 'kpi_family_fk']) if kpi_family_fk in Const.KPI_FAMILY_KEY: return Const.KPI_FAMILY_KEY[kpi_family_fk] else: return SUB_PROJECT def update_sub_score(self, 
kpi_name, passed=0, parent=None): if not parent: parent = self.get_kpi_parent(kpi_name) if parent == SUB_PROJECT: parent = '{} {}'.format(SUB_PROJECT, kpi_name) if 'Bonus' not in kpi_name: self.sub_totals[parent] += 1 if passed: self.sub_scores[parent] += passed else: self.sub_totals[parent] += 0 self.sub_scores[parent] += 0 def write_to_db(self, kpi_name, score, result=None, threshold=None, num=None, den=None): """ writes result in the DB :param kpi_name: str :param score: float :param display_text: str :param result: str :param threshold: int """ kpi_fk = self.common_db2.get_kpi_fk_by_kpi_type('{} {}'.format(SUB_PROJECT, kpi_name)) parent = self.get_kpi_parent(kpi_name) delta = 0 if isinstance(threshold, str) and '%' in threshold: if score == 0: targ = float(threshold.split('-')[0].replace('%', ''))/100 delta = round((targ * den) - num) threshold = self.tools.result_values[threshold.replace(' ', '')] if parent != SUB_PROJECT: if score == 1: score = Const.PASS elif score == 0: score = Const.FAIL else: score = 'bonus' score = self.tools.result_values[score] self.common_db2.write_to_db_result(fk=kpi_fk, score=score, result=result, should_enter=True, target=threshold, numerator_result=num, denominator_result=den, weight=delta, identifier_parent=self.common_db2.get_dictionary(parent_name=parent), numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id) # self.write_to_db_result( # self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 2), score=score, level=2) # self.write_to_db_result( # self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 3), score=score, level=3, # threshold=threshold, result=result) def write_to_db_result(self, fk, level, score, set_type=Const.SOVI, **kwargs): """ This function creates the result data frame of every KPI (atomic KPI/KPI/KPI set), and appends the insert SQL query into the queries' list, later to be written to the DB. """ if kwargs: kwargs['score'] = score attributes = self.create_attributes_dict(fk=fk, level=level, **kwargs) else: attributes = self.create_attributes_dict(fk=fk, score=score, level=level) if level == self.common_db.LEVEL1: table = self.common_db.KPS_RESULT elif level == self.common_db.LEVEL2: table = self.common_db.KPK_RESULT elif level == self.common_db.LEVEL3: table = self.common_db.KPI_RESULT else: return query = insert(attributes, table) self.common_db.kpi_results_queries.append(query) def create_attributes_dict(self, score, fk=None, level=None, display_text=None, set_type=Const.SOVI, **kwargs): """ This function creates a data frame with all attributes needed for saving in KPI results tables. 
or you can send dict with all values in kwargs """ kpi_static_data = self.kpi_static_data if set_type == Const.SOVI else self.kpi_static_data_integ if level == self.common_db.LEVEL1: if kwargs: kwargs['score'] = score values = [val for val in kwargs.values()] col = [col for col in kwargs.keys()] attributes = pd.DataFrame(values, columns=col) else: kpi_set_name = kpi_static_data[kpi_static_data['kpi_set_fk'] == fk]['kpi_set_name'].values[0] attributes = pd.DataFrame( [(kpi_set_name, self.session_uid, self.store_id, self.visit_date.isoformat(), format(score, '.2f'), fk)], columns=['kps_name', 'session_uid', 'store_fk', 'visit_date', 'score_1', 'kpi_set_fk']) elif level == self.common_db.LEVEL2: if kwargs: kwargs['score'] = score values = [val for val in kwargs.values()] col = [col for col in kwargs.keys()] attributes = pd.DataFrame(values, columns=col) else: kpi_name = kpi_static_data[kpi_static_data['kpi_fk'] == fk]['kpi_name'].values[0].replace("'", "\\'") attributes = pd.DataFrame( [(self.session_uid, self.store_id, self.visit_date.isoformat(), fk, kpi_name, score)], columns=['session_uid', 'store_fk', 'visit_date', 'kpi_fk', 'kpk_name', 'score']) elif level == self.common_db.LEVEL3: data = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk] kpi_fk = data['kpi_fk'].values[0] kpi_set_name = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]['kpi_set_name'].values[0] display_text = data['kpi_name'].values[0] if kwargs: kwargs = self.add_additional_data_to_attributes(kwargs, score, kpi_set_name, kpi_fk, fk, datetime.utcnow().isoformat(), display_text) values = tuple([val for val in kwargs.values()]) col = [col for col in kwargs.keys()] attributes = pd.DataFrame([values], columns=col) else: attributes = pd.DataFrame( [(display_text, self.session_uid, kpi_set_name, self.store_id, self.visit_date.isoformat(), datetime.utcnow().isoformat(), score, kpi_fk, fk)], columns=['display_text', 'session_uid', 'kps_name', 'store_fk', 'visit_date', 'calculation_time', 'score', 'kpi_fk', 'atomic_kpi_fk']) else: attributes = pd.DataFrame() return attributes.to_dict() def add_additional_data_to_attributes(self, kwargs_dict, score, kpi_set_name, kpi_fk, fk, calc_time, display_text): kwargs_dict['score'] = score kwargs_dict['kps_name'] = kpi_set_name kwargs_dict['kpi_fk'] = kpi_fk kwargs_dict['atomic_kpi_fk'] = fk kwargs_dict['calculation_time'] = calc_time kwargs_dict['session_uid'] = self.session_uid kwargs_dict['store_fk'] = self.store_id kwargs_dict['visit_date'] = self.visit_date.isoformat() kwargs_dict['display_text'] = display_text return kwargs_dict def kpi_parent_result(self, parent, num, den): if parent in Const.PARENT_NOT_RATIO: result = num else: if den: result = round((float(num) / den)*100, 2) else: result = 0 return result def write_sub_parents(self): for sub_parent in self.sub_totals.keys(): # for sub_parent in set(Const.KPI_FAMILY_KEY.values()): kpi_fk = self.common_db2.get_kpi_fk_by_kpi_type(sub_parent) num = self.sub_scores[sub_parent] den = self.sub_totals[sub_parent] result = self.kpi_parent_result(sub_parent, num, den) if 'Bonus' in sub_parent: den = 0 self.common_db2.write_to_db_result(fk=kpi_fk, numerator_result=num, numerator_id=self.manufacturer_fk, denominator_id=self.store_id, denominator_result=den, result=result, score=result, target=100, identifier_result=self.common_db2.get_dictionary( parent_name=sub_parent), identifier_parent=self.common_db2.get_dictionary( parent_name=Const.PARENT_HIERARCHY[sub_parent]), should_enter=True) def write_parent(self): kpi_fk = 
self.common_db2.get_kpi_fk_by_kpi_name(SUB_PROJECT) num = sum([self.sub_scores[key] for key, value in Const.PARENT_HIERARCHY.items() if value == Const.CMA]) den = sum([self.sub_totals[key] for key, value in Const.PARENT_HIERARCHY.items() if value == Const.CMA]) if den: result = num * 100.0 / den self.common_db2.write_to_db_result(fk=kpi_fk, numerator_result=num, numerator_id=self.manufacturer_fk, denominator_id=self.store_id, denominator_result=den, result=result, score=result, target=100, identifier_result=self.common_db2.get_dictionary( parent_name=SUB_PROJECT)) def commit_results(self): """ committing the results in both sets """ pass # self.common_db.delete_results_data_by_kpi_set() # self.common_db.commit_results_data_without_delete() self.common_db2.commit_results_data()
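# The write_sub_parents/write_parent pair above rolls every KPI family up to a single
# percentage: each family accumulates passed KPIs in sub_scores and attempted KPIs in
# sub_totals, and the project result is 100 * sum(passed) / sum(attempted). A minimal
# standalone sketch of that arithmetic, with hypothetical family names and counts:
from collections import defaultdict

def rollup(sub_scores, sub_totals, families):
    num = sum(sub_scores[family] for family in families)
    den = sum(sub_totals[family] for family in families)
    return round(num * 100.0 / den, 2) if den else 0

sub_scores = defaultdict(int, {'SOS': 3, 'Availability': 1})   # passed per family (hypothetical)
sub_totals = defaultdict(int, {'SOS': 4, 'Availability': 2})   # attempted per family (hypothetical)
print(rollup(sub_scores, sub_totals, ['SOS', 'Availability']))  # 66.67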
class CCBOTTLERSUSCMASOUTHWESTToolBox: EXCLUDE_FILTER = 0 INCLUDE_FILTER = 1 CONTAIN_FILTER = 2 def __init__(self, data_provider, output, common_v2): self.output = output self.data_provider = data_provider self.project_name = self.data_provider.project_name self.session_uid = self.data_provider.session_uid self.products = self.data_provider[Data.PRODUCTS] self.all_products = self.data_provider[Data.ALL_PRODUCTS] self.match_product_in_scene = self.data_provider[Data.MATCHES] self.visit_date = self.data_provider[Data.VISIT_DATE] self.session_info = self.data_provider[Data.SESSION_INFO] self.scene_info = self.data_provider[Data.SCENES_INFO] self.store_id = self.data_provider[Data.STORE_FK] self.store_info = self.data_provider[Data.STORE_INFO] self.scif = self.data_provider[Data.SCENE_ITEM_FACTS] self.scif = self.scif[~(self.scif['product_type'] == 'Irrelevant')] self.sw_scenes = self.get_sw_scenes( ) # we don't need to check scenes without United products self.survey = Survey(self.data_provider, self.output) self.sos = SOS(self.data_provider, self.output) self.templates = {} self.common_db = Common(self.data_provider, CMA_COMPLIANCE) self.common_db2 = common_v2 self.common_scene = CommonV2(self.data_provider) self.region = self.store_info['region_name'].iloc[0] self.store_type = self.store_info['store_type'].iloc[0] self.program = self.store_info['additional_attribute_3'].iloc[0] self.sales_center = self.store_info['additional_attribute_5'].iloc[0] if self.store_type in STORE_TYPES: ##### self.store_type = STORE_TYPES[self.store_type] #### self.store_attr = self.store_info['additional_attribute_3'].iloc[0] self.kpi_static_data = self.common_db.get_kpi_static_data() self.total_score = 0 self.sub_scores = defaultdict(int) self.sub_totals = defaultdict(int) self.ignore_stacking = False self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack' for sheet in Const.SHEETS_CMA: self.templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheetname=sheet).fillna('') # main functions: def main_calculation(self, *args, **kwargs): """ This function gets all the scene results from the SceneKPI, after that calculates every session's KPI, and in the end it calls "filter results" to choose every KPI and scene and write the results in DB. """ main_template = self.templates[Const.KPIS] if self.region in Const.REGIONS: for i, main_line in main_template.iterrows(): store_type = self.does_exist(main_line, Const.STORE_TYPE) if store_type is None or self.store_type in store_type: self.calculate_main_kpi(main_line) self.write_sub_parents() self.write_parent() self.write_to_db_result(self.common_db.get_kpi_fk_by_kpi_name( CMA_COMPLIANCE, 1), score=self.total_score, level=1) def calculate_main_kpi(self, main_line): """ This function gets a line from the main_sheet, transfers it to the match function, and checks all of the KPIs in the same name in the match sheet. :param main_line: series from the template of the main_sheet. 
""" kpi_name = main_line[Const.KPI_NAME] kpi_type = main_line[Const.TYPE] relevant_scif = self.scif[self.scif['scene_id'].isin(self.sw_scenes)] scene_types = self.does_exist(main_line, Const.SCENE_TYPE) scene_level = self.does_exist(main_line, Const.SCENE_LEVEL) store_attrs = main_line[Const.PROGRAM].split(',') result = score = target = None general_filters = {} if scene_types: relevant_scif = relevant_scif[relevant_scif['template_name'].isin( scene_types)] general_filters['template_name'] = scene_types scene_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP) if scene_groups: relevant_scif = relevant_scif[relevant_scif['template_group'].isin( scene_groups)] general_filters['template_group'] = scene_groups if kpi_type == 'shelves bonus': relevant_template = self.templates['shelves'] else: relevant_template = self.templates[kpi_type] relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name] function = self.get_kpi_function(kpi_type) for i, kpi_line in relevant_template.iterrows(): if not self.store_attr or (store_attrs[0] != '' and self.store_attr not in store_attrs)\ or relevant_scif.empty: continue if scene_level: self.scene_level_kpis(kpi_line, relevant_scif, general_filters, function) else: result, score, target = function(kpi_line, relevant_scif, general_filters) # write in DF: if result is None and score is None and target is None: continue if 'Bonus' in self.get_kpi_parent(kpi_name): self.update_sub_score(kpi_name, passed=result) else: self.update_sub_score(kpi_name, passed=score) if target is None: target = 0 self.write_to_all_levels(kpi_name=kpi_name, result=result, score=score, target=target) else: pass def write_to_session_level(self, kpi_name, result=0): """ Writes a result in the DF :param kpi_name: string :param result: boolean """ result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result * 1} self.session_results = self.session_results.append(result_dict, ignore_index=True) def write_to_all_levels(self, kpi_name, result, score, target=None, scene_fk=None, reuse_scene=False): """ Writes the final result in the "all" DF, add the score to the red score and writes the KPI in the DB :param kpi_name: str :param result: int :param display_text: str :param weight: int/float :param scene_fk: for the scene's kpi :param reuse_scene: this kpi can use scenes that were used """ result_dict = { Const.KPI_NAME: kpi_name, Const.RESULT: result, Const.SCORE: score, Const.THRESHOLD: target } # self.all_results = self.all_results.append(result_dict, ignore_index=True) self.write_to_db(kpi_name, score, result=result, threshold=target) # availability: def calculate_availability(self, kpi_line, relevant_scif): """ checks if all the lines in the availability sheet passes the KPI (there is at least one product in this relevant scif that has the attributes). 
:param relevant_scif: filtered scif :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate DP lines :param kpi_line: line from the availability sheet :return: boolean """ filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif) target = kpi_line[Const.TARGET] return filtered_scif[ filtered_scif['facings'] > 0]['facings'].count() >= target def filter_scif_specific(self, relevant_scif, kpi_line, name_in_template, name_in_scif): """ takes scif and filters it from the template :param relevant_scif: the current filtered scif :param kpi_line: line from one sheet (availability for example) :param name_in_template: the column name in the template :param name_in_scif: the column name in SCIF :return: """ values = self.does_exist(kpi_line, name_in_template) if values: if name_in_scif in Const.NUMERIC_VALUES_TYPES: values = [float(x) for x in values] return relevant_scif[relevant_scif[name_in_scif].isin(values)] return relevant_scif def filter_scif_availability(self, kpi_line, relevant_scif): """ calls filter_scif_specific for every column in the template of availability :param kpi_line: :param relevant_scif: :return: """ names_of_columns = { Const.MANUFACTURER: "manufacturer_name", Const.BRAND: "brand_name", Const.TRADEMARK: "att2", Const.SIZE: "size", Const.NUM_SUB_PACKAGES: "number_of_sub_packages", # CCBOTTLERSUSConst.PREMIUM_SSD: "Premium SSD", # CCBOTTLERSUSConst.INNOVATION_BRAND: "Innovation Brand", } for name in names_of_columns: relevant_scif = self.filter_scif_specific(relevant_scif, kpi_line, name, names_of_columns[name]) return relevant_scif # SOS: def calculate_sos(self, kpi_line, relevant_scif, general_filters): """ calculates SOS line in the relevant scif. :param kpi_line: line from SOS sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. 
:return: boolean """ kpi_name = kpi_line[Const.KPI_NAME] relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"] den_type = kpi_line[Const.DEN_TYPES_1] den_value = kpi_line[Const.DEN_VALUES_1].split(',') # relevant_scif = self.filter_by_type_value(relevant_scif, den_type, den_value) num_type = kpi_line[Const.NUM_TYPES_1] num_value = kpi_line[Const.NUM_VALUES_1].split(',') # num_scif = self.filter_by_type_value(relevant_scif, num_type, num_value) general_filters['product_type'] = (['Empty', 'Irrelevant'], 0) if kpi_line['range'] == 'Y': upper_limit, lower_limit = self.get_sos_targets(kpi_name, sos_range=True) target = None else: upper_limit, lower_limit = None, None target = self.get_sos_targets(kpi_name) general_filters[den_type] = den_value if kpi_line[Const.DEN_TYPES_2]: den_type_2 = kpi_line[Const.DEN_TYPES_2] den_value_2 = kpi_line[Const.DEN_VALUES_2].split(',') general_filters[den_type_2] = den_value_2 sos_filters = {num_type: num_value} if kpi_line[Const.NUM_TYPES_2]: num_type_2 = kpi_line[Const.NUM_TYPES_2] num_value_2 = kpi_line[Const.NUM_VALUES_2].split(',') sos_filters[num_type_2] = num_value_2 sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters) sos_value *= 100 sos_value = round(sos_value, 2) if target: target = target * 100 score = 1 if sos_value >= target else 0 elif not target and upper_limit and lower_limit: score = 1 if ( lower_limit * 100 <= sos_value <= upper_limit * 100) else 0 target = '{}% - {}%'.format(lower_limit, upper_limit) else: score = 1 target = 0 return sos_value, score, target # Targets: def get_sos_targets(self, kpi_name, sos_range=False): targets_template = self.templates[Const.TARGETS] store_targets = targets_template.loc[ (targets_template[Const.PROGRAM] == self.program) & (targets_template['region'] == self.region)] filtered_targets_to_kpi = store_targets.loc[ targets_template['KPI name'] == kpi_name] if sos_range: if not filtered_targets_to_kpi.empty: range = filtered_targets_to_kpi['target'].values[0].split( ' - ') upper_limit = int(range[1].replace('%', '').strip()) lower_limit = int(range[0].replace('%', '').strip()) else: upper_limit, lower_limit = None, None return upper_limit, lower_limit else: if not filtered_targets_to_kpi.empty: target = float(filtered_targets_to_kpi[Const.TARGET].values[0]) else: target = None return target def get_targets(self, kpi_name): targets_template = self.templates[Const.TARGETS] store_targets = targets_template.loc[ (targets_template[Const.PROGRAM] == self.program) & (targets_template['region'] == self.region)] filtered_targets_to_kpi = store_targets.loc[ targets_template['KPI name'] == kpi_name] if not filtered_targets_to_kpi.empty: target = filtered_targets_to_kpi[Const.TARGET].values[0] else: target = None return target @staticmethod def get_kpi_line_filters(kpi_line): filters = {} attribs = list(kpi_line.index) c = 1 while 1: if 'Param {}'.format(c) in attribs and kpi_line['Param {}'.format( c)]: filters[kpi_line['Param {}'.format(c)]] = kpi_line[ 'Value {}'.format(c)].split(',') else: if c > 3: # just in case someone inexplicably chose a nonlinear numbering format. 
break c += 1 return filters @staticmethod def get_kpi_line_targets(kpi_line): mask = kpi_line.index.str.contains('Target') if mask.any(): targets = kpi_line.loc[mask].replace('', np.nan).dropna() targets.index = [ int(x.split(Const.SEPERATOR)[1].split(' ')[0]) for x in targets.index ] targets = targets.to_dict() else: targets = {} return targets @staticmethod def extrapolate_target(targets, c): while 1: if targets[c]: target = targets[c] break else: c -= 1 if c < 0: target = 0 break return target def scene_level_kpis(self, kpi_line, scif, general_filters, func): num_filters = self.get_kpi_line_filters(kpi_line) general_filters['product_type'] = (['Empty', 'Irrelevant'], 0) scenes = scif['scene_fk'].unique().tolist() if not isinstance(scenes, list): scenes = [scenes] total_num = 0 total_den = 0 for scene in scenes: # self.data_provider.load_scene_data(self.session_uid, scene) self.common_scene.scene_id = scene scene_scif = scif[scif['scene_fk'] == scene] if scif.empty: pass Log.warning('Match product in scene is empty for this scene') else: num, ratio, den = func(kpi_line, scene_scif, num_filters, general_filters) total_num += num total_den += den self.common_scene.commit_results_data(result_entity='scene') self.common_scene.kpi_results = pd.DataFrame( columns=self.common_db2.COLUMNS) # self.common_db2.write_to_db_result(fk=2161, numerator_result=total_num, # denominator_result=total_den, result=ratio, # identifier_result=self.common_db2.get_dictionary( # parent_name='Total Coke Cooler Purity'), # should_enter=True) def sos_with_num_and_dem(self, kpi_line, relevant_scif, num_filters, general_filters): kpi_fk = self.common_db2.get_kpi_fk_by_kpi_name(kpi_line['KPI name']) num_scif = relevant_scif[self.get_filter_condition( relevant_scif, **num_filters)] den_scif = relevant_scif[self.get_filter_condition( relevant_scif, **general_filters)] try: Validation.is_empty_df(den_scif) Validation.is_empty_df(num_scif) Validation.df_columns_equality(den_scif, num_scif) Validation.is_subset(den_scif, num_scif) except Exception, e: msg = "Data verification failed: {}.".format(e) raise Exception(msg) num = num_scif[self.facings_field].sum() den = den_scif[self.facings_field].sum() ratio = num / float(den) # numerator_id=product_fk, self.common_scene.write_to_db_result(fk=kpi_fk, numerator_result=num, denominator_result=den, result=ratio, by_scene=True) # self.common_scene.write_to_db_result(fk=kpi_fk, numerator_result=num, # denominator_result=den, result=ratio, by_scene=True # identifier_parent=self.common_db2.get_dictionary( # parent_name='Total Coke Cooler Purity'), # should_enter=True) return num, ratio, den
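# The calculate_sos / get_sos_targets pair above scores a share-of-shelf value either against a
# single target or against a "low% - high%" range read from the Targets sheet. The sketch below
# shows that range handling with both sides kept in the same percent units; it assumes the
# "30% - 50%"-style cell format used above, and the helper names are mine, not the project's.
def parse_sos_range(cell):
    """Split a 'low% - high%' cell into numeric (lower, upper) bounds."""
    low, high = [part.replace('%', '').strip() for part in cell.split(' - ')]
    return int(low), int(high)


def score_sos(sos_value, target=None, sos_range=None):
    """Return 1/0 for a SOS value (already in percent) against a fractional target or a percent range."""
    if sos_range is not None:
        lower, upper = sos_range
        return 1 if lower <= sos_value <= upper else 0
    if target is not None:
        return 1 if sos_value >= target * 100 else 0
    return 1  # no target defined -> the KPI is informational only


assert parse_sos_range('30% - 50%') == (30, 50)
assert score_sos(42.0, sos_range=(30, 50)) == 1
assert score_sos(42.0, target=0.5) == 0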
class FSOPToolBox: LEVEL1 = 1 LEVEL2 = 2 LEVEL3 = 3 def __init__(self, data_provider, output, commonv2): self.k_engine = BaseCalculationsScript(data_provider, output) self.output = output self.data_provider = data_provider self.common = commonv2 self.project_name = self.data_provider.project_name self.session_uid = self.data_provider.session_uid self.visit_date = self.data_provider[Data.VISIT_DATE] self.session_info = self.data_provider[Data.SESSION_INFO] self.scene_info = self.data_provider[Data.SCENES_INFO] self.all_products = self.data_provider[Data.ALL_PRODUCTS] self.templates = self.data_provider[Data.TEMPLATES] self.store_id = self.data_provider[Data.STORE_FK] self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng) self.kpi_results_queries = [] self.store_info = self.data_provider[Data.STORE_INFO] self.store_type = self.store_info['store_type'].iloc[0] # self.rules = pd.read_excel(TEMPLATE_PATH).set_index('store_type').to_dict('index') self.ps_data_provider = PsDataProvider(self.data_provider, self.output) self.scif = self.data_provider[Data.SCENE_ITEM_FACTS] self.match_product_in_scene = self.data_provider[Data.MATCHES] self.ignore_stacking = False self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack' self.manufacturer_fk = \ self.all_products['manufacturer_fk'][self.all_products['manufacturer_name'] == 'CCNA'].iloc[0] # self.scene_data = self.load_scene_data() # self.kpi_set_fk = kpi_set_fk self.templates = {} self.parse_template() self.toolbox = GENERALToolBox(self.data_provider) self.SOS = SOS_calc(self.data_provider) self.survey = Survey_calc(self.data_provider) self._merge_matches_and_all_product() def _merge_matches_and_all_product(self): """ This method merges the all product data with the match product in scene DataFrame """ self.match_product_in_scene = self.match_product_in_scene.merge(self.all_products, on='product_fk', how='left') def parse_template(self): for sheet in Sheets: self.templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheet_name=sheet) def main_calculation(self): """ This function calculates the KPI results. 
""" self.calculate_availability() self.calculate_sos() def calculate_availability(self): for i, row in self.templates[Availability].iterrows(): kpi_name = row['KPI Name'] kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name) manufacturers = self.sanitize_values(row['manufacturer']) brands = self.sanitize_values(row['Brand']) container = self.sanitize_values(row['CONTAINER']) attributte_4 = self.sanitize_values(row['att4']) scene_types = self.sanitize_values(row['scene Type']) required_brands = row['number_required_brands'] required_sparkling = row['number_required_Sparkling'] required_still = row['number_required_Still'] required_sku = row['number_required_SKU'] excluded_brands = self.sanitize_values(row['exclude brand']) category = self.sanitize_values(row['category']) # Bandaid Fix - Hunter Approved if isinstance(brands, float): brands_value = (excluded_brands, 0) else: brands_value = brands filters = {'manufacturer_name': manufacturers, 'brand_name': brands_value, 'CONTAINER': container, 'att4': attributte_4, 'template_name': scene_types, 'category': category} filters = self.delete_filter_nan(filters) available_df = self.calculate_availability_df(**filters) score = 0 if pd.notna(required_brands): brands_available = len(available_df['brand_name'].unique()) if brands_available >= int(required_brands): score = 1 else: score = 0 # self.common.write_to_db_result(fk=kpi_fk, numerator_id=0, numerator_result=0, denominator_id=0, # denominator_result=0, score=score ) if pd.notna(required_sparkling and required_still): if required_sparkling <= len(available_df[available_df['att4'] == 'SSD']): if (required_still <= len(available_df[available_df['att4'] == 'Still'])) or ( required_still <= len(available_df[available_df['att4'] == 'STILL'])): score = 1 else: score = 0 elif pd.notna(required_sparkling): if required_sparkling <= len(available_df[available_df['att4'] == 'SSD']): score = 1 else: score = 0 elif pd.notna(required_still): if (required_still <= len(available_df[available_df['att4'] == 'Still'])) or ( required_still <= len(available_df[available_df['att4'] == 'STILL'])): score = 1 else: score = 0 if pd.notna(required_sku): if required_sku <= len(available_df['product_fk'].unique()): score = 1 else: score = 0 self.common.write_to_db_result(fk=kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0, denominator_id=self.store_id, denominator_result=0, score=score) def calculate_sos(self): for i, row in self.templates[SOS].iterrows(): general_filters = {} kpi_name = row['KPI Name'] kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name) manufacturers = self.sanitize_values(row['manufacturer']) scene_types = self.sanitize_values(row['scene Type']) num_param1 = row['numerator param1'] # attributte_4 num_value1 = self.sanitize_values(row['numerator value1']) # attributte_4 den_param1 = row['denominator param1'] den_value1 = self.sanitize_values(row['denominator value1']) den_param2 = row['denominator param2'] den_value2 = self.sanitize_values(row['denominator value2']) target = row['Target'] product_type= self.sanitize_values(row['product_type']) excluded_brands = self.sanitize_values(row['exclude brand']) filters = {'manufacturer_name': manufacturers, num_param1: num_value1, 'template_name': scene_types, 'brand_name': (excluded_brands, 0), 'product_type': product_type} filters = self.delete_filter_nan(filters) general_filters = {den_param1: den_value1, den_param2: den_value2, 'product_type': product_type, 'template_name': scene_types} general_filters = self.delete_filter_nan(general_filters) 
if 'manufacturer' in general_filters.keys(): general_filters['manufacturer_name'] = general_filters.pop('manufacturer') ratio = self.SOS.calculate_share_of_shelf(filters, **general_filters) if pd.isna(target): score = ratio else: target = int(target) if (100 * ratio) >= target: score = 1 else: score = 0 self.common.write_to_db_result(fk=kpi_fk, numerator_id=self.manufacturer_fk, numerator_result=0, denominator_id=self.store_id, denominator_result=0, result=ratio, score=score) def delete_filter_nan(self, filters): for key in filters.keys(): if type(filters[key]) is not list: if pd.isna(filters[key]): del filters[key] return filters def calculate_availability_df(self, **filters): """ :param filters: These are the parameters which the data frame is filtered by. :return: the filtered Scene Item Facts data frame (merged with match_product_in_scene when the requested filters only exist at the match level). """ if set(filters.keys()).difference(self.scif.keys()): scif_mpis_diff = self.match_product_in_scene[['scene_fk', 'product_fk'] + list(self.match_product_in_scene.keys().difference( self.scif.keys()))] # a patch for the item_id field which became item_id_x since it was added to product table as attribute. item_id = 'item_id' if 'item_id' in self.scif.columns else 'item_id_x' merged_df = pd.merge(self.scif[self.scif.facings != 0], scif_mpis_diff, how='outer', left_on=['scene_fk', item_id], right_on=['scene_fk', 'product_fk']) filtered_df = \ merged_df[self.toolbox.get_filter_condition(merged_df, **filters)] # filtered_df = \ # self.match_product_in_scene[self.toolbox.get_filter_condition(self.match_product_in_scene, **filters)] else: filtered_df = self.scif[self.toolbox.get_filter_condition(self.scif, **filters)] availability_df = filtered_df return availability_df @staticmethod def sanitize_values(item): if pd.isna(item): return item else: items = [x.strip() for x in item.split(',')] return items
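# calculate_availability_df above falls back to merging scene item facts with
# match_product_in_scene whenever a requested filter column only exists at the product-match
# level, patching the item_id/item_id_x rename along the way. A simplified sketch of that merge
# follows; the column names mirror the snippet above, but the data frames are made up.
import pandas as pd


def merge_scif_with_matches(scif, matches):
    """Outer-join scif rows (with facings) onto the extra columns that only matches carries."""
    extra_cols = ['scene_fk', 'product_fk'] + [c for c in matches.columns
                                               if c not in scif.columns and c not in ('scene_fk', 'product_fk')]
    item_id = 'item_id' if 'item_id' in scif.columns else 'item_id_x'
    return pd.merge(scif[scif.facings != 0], matches[extra_cols], how='outer',
                    left_on=['scene_fk', item_id], right_on=['scene_fk', 'product_fk'])


scif = pd.DataFrame({'scene_fk': [1, 1], 'item_id': [10, 11], 'facings': [3, 0]})
matches = pd.DataFrame({'scene_fk': [1, 1], 'product_fk': [10, 11], 'shelf_number': [2, 4]})
merged = merge_scif_with_matches(scif, matches)
# Product 10 keeps its facings and gains shelf_number; product 11 was dropped from scif
# (facings == 0) and comes back from the matches side with NaN facings.
print(merged[['product_fk', 'shelf_number', 'facings']])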
class CCBOTTLERSUSCMAToolBox: def __init__(self, data_provider, output): self.output = output self.data_provider = data_provider self.project_name = self.data_provider.project_name self.session_uid = self.data_provider.session_uid self.products = self.data_provider[Data.PRODUCTS] self.all_products = self.data_provider[Data.ALL_PRODUCTS] self.match_product_in_scene = self.data_provider[Data.MATCHES] self.visit_date = self.data_provider[Data.VISIT_DATE] self.session_info = self.data_provider[Data.SESSION_INFO] self.scene_info = self.data_provider[Data.SCENES_INFO] self.store_id = self.data_provider[Data.STORE_FK] self.store_info = self.data_provider[Data.STORE_INFO] self.scif = self.data_provider[Data.SCENE_ITEM_FACTS] self.united_scenes = self.get_united_scenes() # we don't need to check scenes without United products self.survey = Survey(self.data_provider, self.output) self.sos = SOS(self.data_provider, self.output) self.templates = {} self.common_db = Common(self.data_provider, CMA_COMPLIANCE) self.region = self.store_info['region_name'].iloc[0] self.store_type = self.store_info['store_type'].iloc[0] self.program = self.store_info['additional_attribute_14'].iloc[0] self.sales_center = self.store_info['additional_attribute_5'].iloc[0] if self.store_type in STORE_TYPES: ##### self.store_type = STORE_TYPES[self.store_type] #### self.store_attr = self.store_info['additional_attribute_15'].iloc[0] self.kpi_static_data = self.common_db.get_kpi_static_data() self.total_score = 0 for sheet in Const.SHEETS_CMA: self.templates[sheet] = pd.read_excel(TEMPLATE_PATH, sheetname=sheet).fillna('') # main functions: def main_calculation(self, *args, **kwargs): """ This function gets all the scene results from the SceneKPI, after that calculates every session's KPI, and in the end it calls "filter results" to choose every KPI and scene and write the results in DB. """ main_template = self.templates[Const.KPIS] if self.region in Const.REGIONS: for i, main_line in main_template.iterrows(): store_type = self.does_exist(main_line, Const.STORE_TYPE) if store_type is None or self.store_type in self.does_exist(main_line, Const.STORE_TYPE): self.calculate_main_kpi(main_line) self.write_to_db_result( self.common_db.get_kpi_fk_by_kpi_name(CMA_COMPLIANCE, 1), score=self.total_score, level=1) def calculate_main_kpi(self, main_line): """ This function gets a line from the main_sheet, transfers it to the match function, and checks all of the KPIs in the same name in the match sheet. :param main_line: series from the template of the main_sheet. 
""" kpi_name = main_line[Const.KPI_NAME] kpi_type = main_line[Const.TYPE] relevant_scif = self.scif[self.scif['scene_id'].isin(self.united_scenes)] scene_types = self.does_exist(main_line, Const.SCENE_TYPE) result = score = target = None general_filters = {} if scene_types: relevant_scif = relevant_scif[relevant_scif['template_name'].isin(scene_types)] general_filters['template_name'] = scene_types scene_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP) if scene_groups: relevant_scif = relevant_scif[relevant_scif['template_group'].isin(scene_groups)] general_filters['template_group'] = scene_groups if kpi_type == Const.SOS: isnt_dp = True if self.store_attr != Const.DP and main_line[Const.STORE_ATTRIBUTE] == Const.DP else False relevant_template = self.templates[kpi_type] relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name] kpi_function = self.get_kpi_function(kpi_type) for i, kpi_line in relevant_template.iterrows(): result, score, target = kpi_function(kpi_line, relevant_scif, isnt_dp, general_filters) else: pass if score > 0: self.total_score += 1 self.write_to_all_levels(kpi_name=kpi_name, result=result, score=score, target=target) def calculate_manual_kpi(self, main_line): """ This function gets a line from the main_sheet, transfers it to the match function, and checks all of the KPIs in the same name in the match sheet. :param main_line: series from the template of the main_sheet. """ kpi_name = main_line[Const.KPI_NAME] relevant_template = self.templates[Const.SURVEY] relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name] target = len(relevant_template) if main_line[Const.GROUP_TARGET] == Const.ALL \ else main_line[Const.GROUP_TARGET] passed_counter = 0 for i, kpi_line in relevant_template.iterrows(): answer = self.calculate_survey_specific(kpi_line) if answer: passed_counter += 1 result = passed_counter >= target self.write_to_session_level(kpi_name=kpi_name, result=result) # write in DF: def write_to_session_level(self, kpi_name, result=0): """ Writes a result in the DF :param kpi_name: string :param result: boolean """ result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result * 1} self.session_results = self.session_results.append(result_dict, ignore_index=True) def write_to_all_levels(self, kpi_name, result, score, target=None, scene_fk=None, reuse_scene=False): """ Writes the final result in the "all" DF, add the score to the red score and writes the KPI in the DB :param kpi_name: str :param result: int :param display_text: str :param weight: int/float :param scene_fk: for the scene's kpi :param reuse_scene: this kpi can use scenes that were used """ result_dict = {Const.KPI_NAME: kpi_name, Const.RESULT: result, Const.SCORE: score, Const.THRESHOLD: target} # self.all_results = self.all_results.append(result_dict, ignore_index=True) self.write_to_db(kpi_name, score, result=result, threshold=target) # survey: def calculate_survey_specific(self, kpi_line, relevant_scif=None, isnt_dp=None): """ returns a survey line if True or False :param kpi_line: line from the survey sheet :param relevant_scif: :param isnt_dp: :return: True or False - if the question gets the needed answer """ question = kpi_line[Const.Q_TEXT] if not question: question_id = kpi_line[Const.Q_ID] if question_id == "": Log.warning("The template has a survey question without ID or text") return False question = ('question_fk', int(question_id)) answers = kpi_line[Const.ACCEPTED_ANSWER].split(',') min_answer = None if 
kpi_line[Const.REQUIRED_ANSWER] == '' else True for answer in answers: if self.survey.check_survey_answer( survey_text=question, target_answer=answer, min_required_answer=min_answer): return True return False # availability: def calculate_availability_with_same_pack(self, relevant_template, relevant_scif, isnt_dp): """ checks if all the lines in the availability sheet passes the KPI, AND if all of these filtered scif has at least one common product that has the same size and number of sub_packages. :param relevant_template: all the match lines from the availability sheet. :param relevant_scif: filtered scif :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate DP lines :return: boolean """ packages = None for i, kpi_line in relevant_template.iterrows(): if isnt_dp and kpi_line[Const.MANUFACTURER] in Const.DP_MANU: continue filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif) filtered_scif = filtered_scif.fillna("NAN") target = kpi_line[Const.TARGET] sizes = filtered_scif['size'].tolist() sub_packages_nums = filtered_scif['number_of_sub_packages'].tolist() cur_packages = set(zip(sizes, sub_packages_nums)) if packages is None: packages = cur_packages else: packages = cur_packages & packages if len(packages) == 0: return False if filtered_scif[filtered_scif['facings'] > 0]['facings'].count() < target: return False if len(packages) > 1: return False return True def calculate_availability(self, kpi_line, relevant_scif, isnt_dp): """ checks if all the lines in the availability sheet passes the KPI (there is at least one product in this relevant scif that has the attributes). :param relevant_scif: filtered scif :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we shouldn't calculate DP lines :param kpi_line: line from the availability sheet :return: boolean """ if isnt_dp and kpi_line[Const.MANUFACTURER] in Const.DP_MANU: return True filtered_scif = self.filter_scif_availability(kpi_line, relevant_scif) target = kpi_line[Const.TARGET] return filtered_scif[filtered_scif['facings'] > 0]['facings'].count() >= target def filter_scif_specific(self, relevant_scif, kpi_line, name_in_template, name_in_scif): """ takes scif and filters it from the template :param relevant_scif: the current filtered scif :param kpi_line: line from one sheet (availability for example) :param name_in_template: the column name in the template :param name_in_scif: the column name in SCIF :return: """ values = self.does_exist(kpi_line, name_in_template) if values: if name_in_scif in Const.NUMERIC_VALUES_TYPES: values = [float(x) for x in values] return relevant_scif[relevant_scif[name_in_scif].isin(values)] return relevant_scif def filter_scif_availability(self, kpi_line, relevant_scif): """ calls filter_scif_specific for every column in the template of availability :param kpi_line: :param relevant_scif: :return: """ names_of_columns = { Const.MANUFACTURER: "manufacturer_name", Const.BRAND: "brand_name", Const.TRADEMARK: "att2", Const.SIZE: "size", Const.NUM_SUB_PACKAGES: "number_of_sub_packages", # CCBOTTLERSUSConst.PREMIUM_SSD: "Premium SSD", # CCBOTTLERSUSConst.INNOVATION_BRAND: "Innovation Brand", } for name in names_of_columns: relevant_scif = self.filter_scif_specific(relevant_scif, kpi_line, name, names_of_columns[name]) return relevant_scif # SOS: def calculate_sos(self, kpi_line, relevant_scif, isnt_dp, general_filters): """ calculates SOS line in the relevant scif. :param kpi_line: line from SOS sheet. 
:param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. :return: boolean """ kpi_name = kpi_line[Const.KPI_NAME] relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"] den_type = kpi_line[Const.DEN_TYPES_1] den_value = kpi_line[Const.DEN_VALUES_1].split(',') # relevant_scif = self.filter_by_type_value(relevant_scif, den_type, den_value) num_type = kpi_line[Const.NUM_TYPES_1] num_value = kpi_line[Const.NUM_VALUES_1].split(',') # num_scif = self.filter_by_type_value(relevant_scif, num_type, num_value) target = self.get_sos_targets(kpi_name) general_filters[den_type] = den_value if kpi_line[Const.DEN_TYPES_2]: den_type_2 = kpi_line[Const.DEN_TYPES_2] den_value_2 = kpi_line[Const.DEN_VALUES_2].split(',') general_filters[den_type_2] = den_value_2 sos_filters = {num_type: num_value} if isnt_dp: sos_filters['manufacturer_name'] = (Const.DP_MANU, 0) if kpi_line[Const.NUM_TYPES_2]: num_type_2 = kpi_line[Const.NUM_TYPES_2] num_value_2 = kpi_line[Const.NUM_VALUES_2].split(',') sos_filters[num_type_2] = num_value_2 sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters) sos_value *= 100 sos_value = round(sos_value, 2) if target: score = 1 if sos_value >= target*100 else 0 else: score = 1 target = 0 return sos_value, score, target # SOS majority: def get_sos_targets(self, kpi_name): targets_template = self.templates[Const.TARGETS] store_targets = targets_template.loc[(targets_template['program'] == self.program) & # (targets_template['sales center'] == self.sales_center) & (targets_template['channel'] == self.store_type)] filtered_targets_to_kpi = store_targets.loc[targets_template['KPI name'] == kpi_name] if not filtered_targets_to_kpi.empty: target = filtered_targets_to_kpi[Const.TARGET].values[0] else: target = None return target # return False def calculate_sos_maj(self, kpi_line, relevant_scif, isnt_dp): """ calculates SOS majority line in the relevant scif. Filters the denominator and sends the line to the match function (majority or dominant) :param kpi_line: line from SOS majority sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator (and the denominator of the dominant part). 
:return: boolean """ kpi_name = kpi_line[Const.KPI_NAME] if kpi_line[Const.EXCLUSION_SHEET] == Const.V: exclusion_sheet = self.templates[Const.SKU_EXCLUSION] relevant_exclusions = exclusion_sheet[exclusion_sheet[Const.KPI_NAME] == kpi_name] for i, exc_line in relevant_exclusions.iterrows(): relevant_scif = self.exclude_scif(exc_line, relevant_scif) relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"] den_type = kpi_line[Const.DEN_TYPES_1] den_value = kpi_line[Const.DEN_VALUES_1] relevant_scif = self.filter_by_type_value(relevant_scif, den_type, den_value) den_type = kpi_line[Const.DEN_TYPES_2] den_value = kpi_line[Const.DEN_VALUES_2] relevant_scif = self.filter_by_type_value(relevant_scif, den_type, den_value) if kpi_line[Const.MAJ_DOM] == Const.MAJOR: answer = self.calculate_majority_part(kpi_line, relevant_scif, isnt_dp) elif kpi_line[Const.MAJ_DOM] == Const.DOMINANT: answer = self.calculate_dominant_part(kpi_line, relevant_scif, isnt_dp) else: Log.warning("SOS majority does not know '{}' part".format(kpi_line[Const.MAJ_DOM])) answer = False return answer def calculate_majority_part(self, kpi_line, relevant_scif, isnt_dp): """ filters the numerator and checks if the SOS is bigger than 50%. :param kpi_line: line from SOS majority sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. :return: boolean """ num_type = kpi_line[Const.NUM_TYPES_1] num_value = kpi_line[Const.NUM_VALUES_1] num_scif = self.filter_by_type_value(relevant_scif, num_type, num_value) num_type = kpi_line[Const.NUM_TYPES_2] num_value = kpi_line[Const.NUM_VALUES_2] num_scif = self.filter_by_type_value(num_scif, num_type, num_value) if num_scif.empty: return None if isnt_dp: num_scif = num_scif[~(num_scif['manufacturer_name'].isin(Const.DP_MANU))] target = Const.MAJORITY_TARGET return num_scif['facings'].sum() / relevant_scif['facings'].sum() >= target def calculate_dominant_part(self, kpi_line, relevant_scif, isnt_dp): """ filters the numerator and checks if the given value in the given type is the one with the most facings. :param kpi_line: line from SOS majority sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out. :return: boolean """ if isnt_dp: relevant_scif = relevant_scif[~(relevant_scif['manufacturer_name'].isin(Const.DP_MANU))] type_name = self.get_column_name(kpi_line[Const.NUM_TYPES_1], relevant_scif) values = str(kpi_line[Const.NUM_VALUES_1]).split(', ') if type_name in Const.NUMERIC_VALUES_TYPES: values = [float(x) for x in values] max_facings, needed_one = 0, 0 values_type = relevant_scif[type_name].unique().tolist() if None in values_type: values_type.remove(None) current_sum = relevant_scif[relevant_scif[type_name].isnull()]['facings'].sum() if current_sum > max_facings: max_facings = current_sum for value in values_type: current_sum = relevant_scif[relevant_scif[type_name] == value]['facings'].sum() if current_sum > max_facings: max_facings = current_sum if value in values: needed_one += current_sum return needed_one >= max_facings # helpers: def get_column_name(self, field_name, df): """ checks what the real field name in DttFrame is (if it exists in the DF or exists in the "converter" sheet). 
:param field_name: str :param df: scif/products :return: real column name (if exists) """ if field_name in df.columns: return field_name if field_name.upper() in self.converters[Const.NAME_IN_TEMP].str.upper().tolist(): field_name = self.converters[self.converters[Const.NAME_IN_TEMP].str.upper() == field_name.upper()][ Const.NAME_IN_DB].iloc[0] return field_name return None def filter_by_type_value(self, relevant_scif, type_name, value): """ filters scif with the type and value :param relevant_scif: current filtered scif :param type_name: str (from the template) :param value: str :return: new scif """ if type_name == "": return relevant_scif values = value.split(', ') new_type_name = self.get_column_name(type_name, relevant_scif) if not new_type_name: print "There is no field '{}'".format(type_name) return relevant_scif if new_type_name in Const.NUMERIC_VALUES_TYPES: values = [float(x) for x in values] return relevant_scif[relevant_scif[new_type_name].isin(values)] @staticmethod def exclude_scif(exclude_line, relevant_scif): """ filters products out of the scif :param exclude_line: line from the exclusion sheet :param relevant_scif: current filtered scif :return: new scif """ exclude_products = exclude_line[Const.PRODUCT_EAN].split(', ') return relevant_scif[~(relevant_scif['product_ean_code'].isin(exclude_products))] @staticmethod def does_exist(kpi_line, column_name): """ checks if kpi_line has values in this column, and if it does - returns a list of these values :param kpi_line: line from template :param column_name: str :return: list of values if there are, otherwise None """ if column_name in kpi_line.keys() and kpi_line[column_name] != "": cell = kpi_line[column_name] if type(cell) in [int, float]: return [cell] elif type(cell) in [unicode, str]: return cell.split(", ") return None def get_kpi_function(self, kpi_type): """ transfers every kpi to its own function :param kpi_type: value from "sheet" column in the main sheet :return: function """ if kpi_type == Const.SURVEY: return self.calculate_survey_specific elif kpi_type == Const.AVAILABILITY: return self.calculate_availability elif kpi_type == Const.SOS: return self.calculate_sos elif kpi_type == Const.SOS_MAJOR: return self.calculate_sos_maj else: Log.warning("The value '{}' in column sheet in the template is not recognized".format(kpi_type)) return None def choose_and_write_results(self): """ writes all the KPI in the DB: first the session's ones, second the scene's ones and in the end the ones that depends on the previous ones. 
After all it writes the red score """ # self.scenes_results.to_csv('results/{}/scene {}.csv'.format(self.calculation_type, self.session_uid))#### # self.session_results.to_csv('results/{}/session {}.csv'.format(self.calculation_type, self.session_uid))#### main_template = self.templates[Const.KPIS] self.write_session_kpis(main_template) # self.write_condition_kpis(main_template) # self.write_missings(main_template) self.write_to_db(CMA_COMPLIANCE, 0) # result_dict = {Const.KPI_NAME: 'RED SCORE', Const.SCORE: self.red_score}#### # self.all_results = self.all_results.append(result_dict, ignore_index=True)#### # self.all_results.to_csv('results/{}/{}.csv'.format(self.calculation_type, self.session_uid))#### def write_missings(self, main_template): """ write 0 in all the KPIs that didn't get score :param main_template: """ for i, main_line in main_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] if not self.all_results[self.all_results[Const.KPI_NAME] == kpi_name].empty: continue result = 0 display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] self.write_to_all_levels(kpi_name, result, display_text, weight) def write_session_kpis(self, main_template): """ iterates all the session's KPIs and saves them :param main_template: main_sheet. """ # session_template = main_template[main_template[Const.CONDITION] == ""] # if self.calculation_type == Const.SOVI: # session_template = session_template[session_template[Const.SESSION_LEVEL] == Const.V] for i, main_line in main_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] result = self.session_results[self.session_results[Const.KPI_NAME] == kpi_name] if result.empty: continue result = result.iloc[0][Const.RESULT] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] self.write_to_all_levels(kpi_name, result, display_text, weight) def write_incremental_kpis(self, scene_template): """ lets the incremental KPIs choose their scenes (if they passed). 
if KPI passed some scenes, we will choose the scene that the children passed :param scene_template: filtered main_sheet :return: the new template (without the KPI written already) """ incremental_template = scene_template[scene_template[Const.INCREMENTAL] != ""] while not incremental_template.empty: for i, main_line in incremental_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] reuse_scene = main_line[Const.REUSE_SCENE] == Const.V kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] if not reuse_scene: kpi_results = kpi_results[~(kpi_results[Const.SCENE_FK].isin(self.used_scenes))] true_results = kpi_results[kpi_results[Const.RESULT] > 0] increments = main_line[Const.INCREMENTAL] if ', ' in increments: first_kpi = increments.split(', ')[0] others = increments.replace(', '.format(first_kpi), '') scene_template.loc[scene_template[Const.KPI_NAME] == first_kpi, Const.INCREMENTAL] = others if true_results.empty: scene_template.loc[scene_template[Const.KPI_NAME] == kpi_name, Const.INCREMENTAL] = "" else: true_results = true_results.sort_values(by=Const.RESULT, ascending=False) display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] scene_fk = true_results.iloc[0][Const.SCENE_FK] self.write_to_all_levels(kpi_name, true_results.iloc[0][Const.RESULT], display_text, weight, scene_fk=scene_fk, reuse_scene=reuse_scene) scene_template = scene_template[~(scene_template[Const.KPI_NAME] == kpi_name)] incremental_template = scene_template[scene_template[Const.INCREMENTAL] != ""] return scene_template def write_regular_scene_kpis(self, scene_template): """ lets the regular KPIs choose their scenes (if they passed). Like in the incremental part - if KPI passed some scenes, we will choose the scene that the children passed :param scene_template: filtered main_sheet (only scene KPIs, and without the passed incremental) :return: the new template (without the KPI written already) """ for i, main_line in scene_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] reuse_scene = main_line[Const.REUSE_SCENE] == Const.V kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] if not reuse_scene: kpi_results = kpi_results[~(kpi_results[Const.SCENE_FK].isin(self.used_scenes))] true_results = kpi_results[kpi_results[Const.RESULT] > 0] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] if true_results.empty: continue true_results = true_results.sort_values(by=Const.RESULT, ascending=False) scene_fk = true_results.iloc[0][Const.SCENE_FK] self.write_to_all_levels(kpi_name, true_results.iloc[0][Const.RESULT], display_text, weight, scene_fk=scene_fk, reuse_scene=reuse_scene) scene_template = scene_template[~(scene_template[Const.KPI_NAME] == kpi_name)] return scene_template def write_not_passed_scene_kpis(self, scene_template): """ lets the KPIs not passed choose their scenes. 
:param scene_template: filtered main_sheet (only scene KPIs, and without the passed KPIs) """ for i, main_line in scene_template.iterrows(): kpi_name = main_line[Const.KPI_NAME] reuse_scene = main_line[Const.REUSE_SCENE] == Const.V kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] if not reuse_scene: kpi_results = kpi_results[~(kpi_results[Const.SCENE_FK].isin(self.used_scenes))] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] if kpi_results.empty: continue scene_fk = kpi_results.iloc[0][Const.SCENE_FK] self.write_to_all_levels(kpi_name, 0, display_text, weight, scene_fk=scene_fk, reuse_scene=reuse_scene) def write_scene_kpis(self, main_template): """ iterates every scene_kpi that does not depend on others, and choose the scene they will take: 1. the incrementals take their scene (if they passed). 2. the regular KPIs that passed choose their scenes. 3. the ones that didn't pass choose their random scenes. :param main_template: main_sheet. """ scene_template = main_template[(main_template[Const.SESSION_LEVEL] != Const.V) & (main_template[Const.CONDITION] == "")] scene_template = self.write_incremental_kpis(scene_template) scene_template = self.write_regular_scene_kpis(scene_template) self.write_not_passed_scene_kpis(scene_template) def write_condition_kpis(self, main_template): """ writes all the KPI that depend on other KPIs by checking if the parent KPI has passed and in which scene. :param main_template: main_sheet """ condition_template = main_template[main_template[Const.CONDITION] != ''] for i, main_line in condition_template.iterrows(): condition = main_line[Const.CONDITION] kpi_name = main_line[Const.KPI_NAME] if self.calculation_type == Const.MANUAL or main_line[Const.SESSION_LEVEL] == Const.V: kpi_results = self.session_results[self.session_results[Const.KPI_NAME] == kpi_name] else: kpi_results = self.scenes_results[self.scenes_results[Const.KPI_NAME] == kpi_name] condition_result = self.all_results[(self.all_results[Const.KPI_NAME] == condition) & (self.all_results[Const.RESULT] > 0)] if condition_result.empty: continue condition_result = condition_result.iloc[0] condition_scene = condition_result[Const.SCENE_FK] if condition_scene and Const.SCENE_FK in kpi_results: results = kpi_results[kpi_results[Const.SCENE_FK] == condition_scene] else: results = kpi_results if results.empty: continue result = results.iloc[0][Const.RESULT] display_text = main_line[Const.DISPLAY_TEXT] weight = main_line[Const.WEIGHT] scene_fk = results.iloc[0][Const.SCENE_FK] if Const.SCENE_FK in kpi_results else None self.write_to_all_levels(kpi_name, result, display_text, weight, scene_fk=scene_fk) def get_united_scenes(self): return self.scif[self.scif['United Deliver'] == 'Y']['scene_id'].unique().tolist() def get_weight_factor(self): sum_weights = self.templates[Const.KPIS][Const.WEIGHT].sum() return sum_weights / 100.0 def get_score(self, weight): return weight / self.weight_factor def write_to_db(self, kpi_name, score, result=None, threshold=None): """ writes result in the DB :param kpi_name: str :param score: float :param display_text: str :param result: str :param threshold: int """ self.write_to_db_result( self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 2), score=score, level=2) self.write_to_db_result( self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 3), score=score, level=3, threshold=threshold, result=result) def write_to_db_result(self, fk, level, score, set_type=Const.SOVI, **kwargs): """ This function creates the result data frame of 
every KPI (atomic KPI/KPI/KPI set), and appends the insert SQL query into the queries' list, later to be written to the DB. """ if kwargs: kwargs['score'] = score attributes = self.create_attributes_dict(fk=fk, level=level, **kwargs) else: attributes = self.create_attributes_dict(fk=fk, score=score, level=level) if level == self.common_db.LEVEL1: table = self.common_db.KPS_RESULT elif level == self.common_db.LEVEL2: table = self.common_db.KPK_RESULT elif level == self.common_db.LEVEL3: table = self.common_db.KPI_RESULT else: return query = insert(attributes, table) self.common_db.kpi_results_queries.append(query) def create_attributes_dict(self, score, fk=None, level=None, display_text=None, set_type=Const.SOVI, **kwargs): """ This function creates a data frame with all attributes needed for saving in KPI results tables. or you can send dict with all values in kwargs """ kpi_static_data = self.kpi_static_data if set_type == Const.SOVI else self.kpi_static_data_integ if level == self.common_db.LEVEL1: if kwargs: kwargs['score'] = score values = [val for val in kwargs.values()] col = [col for col in kwargs.keys()] attributes = pd.DataFrame(values, columns=col) else: kpi_set_name = kpi_static_data[kpi_static_data['kpi_set_fk'] == fk]['kpi_set_name'].values[0] attributes = pd.DataFrame( [(kpi_set_name, self.session_uid, self.store_id, self.visit_date.isoformat(), format(score, '.2f'), fk)], columns=['kps_name', 'session_uid', 'store_fk', 'visit_date', 'score_1', 'kpi_set_fk']) elif level == self.common_db.LEVEL2: if kwargs: kwargs['score'] = score values = [val for val in kwargs.values()] col = [col for col in kwargs.keys()] attributes = pd.DataFrame(values, columns=col) else: kpi_name = kpi_static_data[kpi_static_data['kpi_fk'] == fk]['kpi_name'].values[0].replace("'", "\\'") attributes = pd.DataFrame( [(self.session_uid, self.store_id, self.visit_date.isoformat(), fk, kpi_name, score)], columns=['session_uid', 'store_fk', 'visit_date', 'kpi_fk', 'kpk_name', 'score']) elif level == self.common_db.LEVEL3: data = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk] kpi_fk = data['kpi_fk'].values[0] kpi_set_name = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]['kpi_set_name'].values[0] display_text = data['kpi_name'].values[0] if kwargs: kwargs = self.add_additional_data_to_attributes(kwargs, score, kpi_set_name, kpi_fk, fk, datetime.utcnow().isoformat(), display_text) values = tuple([val for val in kwargs.values()]) col = [col for col in kwargs.keys()] attributes = pd.DataFrame([values], columns=col) else: attributes = pd.DataFrame( [(display_text, self.session_uid, kpi_set_name, self.store_id, self.visit_date.isoformat(), datetime.utcnow().isoformat(), score, kpi_fk, fk)], columns=['display_text', 'session_uid', 'kps_name', 'store_fk', 'visit_date', 'calculation_time', 'score', 'kpi_fk', 'atomic_kpi_fk']) else: attributes = pd.DataFrame() return attributes.to_dict() def add_additional_data_to_attributes(self, kwargs_dict, score, kpi_set_name, kpi_fk, fk, calc_time, display_text): kwargs_dict['score'] = score kwargs_dict['kps_name'] = kpi_set_name kwargs_dict['kpi_fk'] = kpi_fk kwargs_dict['atomic_kpi_fk'] = fk kwargs_dict['calculation_time'] = calc_time kwargs_dict['session_uid'] = self.session_uid kwargs_dict['store_fk'] = self.store_id kwargs_dict['visit_date'] = self.visit_date.isoformat() kwargs_dict['display_text'] = display_text return kwargs_dict def commit_results(self): """ committing the results in both sets """ self.common_db.delete_results_data_by_kpi_set() 
self.common_db.commit_results_data_without_delete()
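# The SOS-majority KPIs in the class above reduce to two checks on the filtered scif: the
# "majority" part asks whether the numerator holds at least half of the denominator's facings,
# and the "dominant" part asks whether the wanted value owns the single largest facings total.
# A compact sketch of both checks on a plain DataFrame; the data and the 0.5 threshold are
# illustrative (the real threshold comes from Const.MAJORITY_TARGET), and the null-type facings
# handling of the original is omitted.
import pandas as pd


def passes_majority(scif, num_mask, threshold=0.5):
    """True when the numerator rows hold at least `threshold` of all facings."""
    total = scif['facings'].sum()
    return bool(total) and scif.loc[num_mask, 'facings'].sum() / float(total) >= threshold


def is_dominant(scif, column, wanted_values):
    """True when the wanted values' facings are not beaten by any other single value."""
    per_value = scif.groupby(column)['facings'].sum()
    return per_value[per_value.index.isin(wanted_values)].sum() >= per_value.max()


scif = pd.DataFrame({'brand_name': ['Coke', 'Coke', 'Pepsi'], 'facings': [4, 2, 3]})
print(passes_majority(scif, scif['brand_name'] == 'Coke'))   # 6/9 >= 0.5 -> True
print(is_dominant(scif, 'brand_name', ['Coke']))             # 6 >= max(6, 3) -> True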
class SOLARBRToolBox: LEVEL1 = 1 LEVEL2 = 2 LEVEL3 = 3 EXCLUDE_EMPTY = False EXCLUDE_FILTER = 0 INCLUDE_FILTER = 1 CONTAIN_FILTER = 2 EMPTY = 'Empty' def __init__(self, data_provider, output): self.output = output self.data_provider = data_provider self.common = Common(self.data_provider) self.commonV2 = CommonV2(self.data_provider) self.project_name = self.data_provider.project_name self.session_uid = self.data_provider.session_uid self.k_engine = BaseCalculationsGroup(data_provider, output) self.products = self.data_provider[Data.PRODUCTS] # self.all_products = self.data_provider[Data.ALL_PRODUCTS] self.match_product_in_scene = self.data_provider[Data.MATCHES] self.visit_date = self.data_provider[Data.VISIT_DATE] self.session_info = self.data_provider[Data.SESSION_INFO] self.scene_info = self.data_provider[Data.SCENES_INFO] self.store_id = self.data_provider[Data.STORE_FK] self.store_info = self.data_provider[Data.STORE_INFO] self.scif = self.data_provider[Data.SCENE_ITEM_FACTS] self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng) self.kpi_static_data = self.commonV2.get_kpi_static_data() self.kpi_results_queries = [] self.templates = {} self.all_products = self.commonV2.data_provider[Data.ALL_PRODUCTS] self.session_id = self.data_provider.session_id self.score_templates = {} self.get_templates() self.get_score_template() self.manufacturer_fk = self.all_products[ self.all_products['manufacturer_name'] == 'Coca Cola'].iloc[0] self.sos = SOS(self.data_provider, self.output) self.total_score = 0 self.session_fk = self.data_provider[Data.SESSION_INFO]['pk'].iloc[0] self.toolbox = GENERALToolBox(self.data_provider) self.scenes_info = self.data_provider[Data.SCENES_INFO] self.kpi_results_new_tables_queries = [] def get_templates(self): for sheet in Const.SHEETS_MAIN: self.templates[sheet] = pd.read_excel(MAIN_TEMPLATE_PATH, sheetname=sheet.decode('utf8'), keep_default_na=False) def get_score_template(self): for sheet in Const.SHEETS_SCORE: self.score_templates[sheet] = pd.read_excel(SCORE_TEMPLATE_PATH, sheetname=sheet.decode('utf8'), keep_default_na=False, encoding='utf8') def main_calculation(self, *args, **kwargs): main_template = self.templates[Const.KPIS] for i, main_line in main_template.iterrows(): self.calculate_main_kpi(main_line) self.commonV2.commit_results_data() def calculate_main_kpi(self, main_line): kpi_name = main_line[Const.KPI_NAME] kpi_type = main_line[Const.Type] template_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP) template_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP) exclude_template_groups = main_line[Const.EXCLUDED_TEMPLATE_GROUPS] if exclude_template_groups != None: self.scif = self.scif[~self.scif['template_group'].isin([exclude_template_groups])] general_filters = {} scif_template_groups = self.scif['template_group'].unique().tolist() # encoding_fixed_list = [template_group.replace("\u2013","-") for template_group in scif_template_groups] # scif_template_groups = encoding_fixed_list store_type = self.store_info["store_type"].iloc[0] store_types = self.does_exist(main_line, Const.STORE_TYPES) if store_type in store_types: if template_groups: if ('All' in template_groups) or bool(set(scif_template_groups) & set(template_groups)): if not ('All' in template_groups): general_filters['template_group'] = template_groups if kpi_type == Const.SOVI: relevant_template = self.templates[kpi_type] relevant_template = relevant_template[ relevant_template[Const.KPI_NAME].str.encode('utf-8') == kpi_name.encode('utf-8')] if 
relevant_template["numerator param 1"].all() and relevant_template[ "denominator param 1"].all(): function = self.get_kpi_function(kpi_type) for i, kpi_line in relevant_template.iterrows(): result, score = function(kpi_line, general_filters) else: pass else: pass @staticmethod def does_exist(kpi_line, column_name): """ checks if kpi_line has values in this column, and if it does - returns a list of these values :param kpi_line: line from template :param column_name: str :return: list of values if there are, otherwise None """ if column_name in kpi_line.keys() and kpi_line[column_name] != "": cell = kpi_line[column_name] if type(cell) in [int, float]: return [cell] elif type(cell) in [unicode, str]: return [x.strip() for x in cell.split(",")] return None def calculate_sos(self, kpi_line, general_filters): kpi_name = kpi_line[Const.KPI_NAME] # get denominator filters for den_column in [col for col in kpi_line.keys() if Const.DEN_TYPE in col]: # get relevant den columns if kpi_line[den_column]: # check to make sure this kpi has this denominator param general_filters[kpi_line[den_column]] = \ kpi_line[den_column.replace(Const.DEN_TYPE, Const.DEN_VALUE)].split( ',') # get associated values general_filters = self.convert_operators_to_values(general_filters) sos_filters = {} # get numerator filters for num_column in [col for col in kpi_line.keys() if Const.NUM_TYPE in col]: # get numerator columns if kpi_line[num_column]: # check to make sure this kpi has this numerator param sos_filters[kpi_line[num_column]] = \ kpi_line[num_column.replace(Const.NUM_TYPE, Const.NUM_VALUE)].split( ',') # get associated values sos_filters = self.convert_operators_to_values(sos_filters) sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters) # sos_value *= 100 sos_value = round(sos_value, 2) score = self.get_score_from_range(kpi_name, sos_value) manufacturer_products = self.all_products[ self.all_products['manufacturer_name'] == sos_filters['manufacturer_name'][0]].iloc[0] manufacturer_fk = manufacturer_products["manufacturer_fk"] filtered_kpi_list = self.kpi_static_data[ self.kpi_static_data['type'].str.encode('utf8') == kpi_name.encode('utf8')] kpi_fk = filtered_kpi_list['pk'].iloc[0] numerator_res, denominator_res = self.get_numerator_and_denominator( sos_filters, **general_filters) if numerator_res is None: numerator_res = 0 denominator_fk = None if general_filters.keys()[0] == 'category': category_fk = self.all_products["category_fk"][ self.all_products['category'] == general_filters['category'][0]].iloc[0] denominator_fk = category_fk elif general_filters.keys()[0] == 'sub_category': try: sub_category_fk = self.all_products["sub_category_fk"][ self.all_products['sub_category'] == general_filters['sub_category'][0]].iloc[0] denominator_fk = sub_category_fk except: sub_brand_fk = 999 denominator_fk = sub_brand_fk elif general_filters.keys()[0] == 'sub_brand': # sub brand table is empty, update when table is updated try: sub_brand_fk = self.all_products["sub_category_fk"][ self.all_products['sub_brand'] == general_filters['sub_brand'][0]].iloc[0] except: sub_brand_fk = 999 denominator_fk = sub_brand_fk self.commonV2.write_to_db_result(fk=kpi_fk, numerator_id=manufacturer_fk, numerator_result=numerator_res, denominator_id=denominator_fk, denominator_result=denominator_res, result=sos_value, score=score, score_after_actions=score, context_id=kpi_fk) return sos_value, score def get_score_from_range(self, kpi_name, sos_value): store_type = 
str(self.store_info["store_type"].iloc[0].encode('utf8')) self.score_templates[store_type] = self.score_templates[store_type].replace(kpi_name, kpi_name.encode('utf8').rstrip()) score_range = self.score_templates[store_type].query('Kpi == "' + str(kpi_name.encode('utf8')) + '" & Low <= ' + str(sos_value) + ' & High >= ' + str(sos_value) + '') try: score = score_range['Score'].iloc[0] except IndexError: try: Log.error('No score data found for KPI name {} in store type {}'.format( kpi_name.encode('utf8'), store_type)) return 0 except UnicodeDecodeError: Log.error('Unable to generate error for KPI name or store type with weird characters') return 0 return score def convert_operators_to_values(self, filters): if 'number_of_sub_packages' in filters.keys(): value = filters['number_of_sub_packages'] operator, number = [x.strip() for x in re.split('(\d+)', value[0]) if x != ''] if operator == '>=': subpackages_num = self.scif[self.scif['number_of_sub_packages'] >= int( number)]['number_of_sub_packages'].unique().tolist() filters['number_of_sub_packages'] = subpackages_num elif operator == '<=': subpackages_num = self.scif[self.scif['number_of_sub_packages'] <= int( number)]['number_of_sub_packages'].unique().tolist() filters['number_of_sub_packages'] = subpackages_num elif operator == '>': subpackages_num = self.scif[self.scif['number_of_sub_packages'] > int( number)]['number_of_sub_packages'].unique().tolist() filters['number_of_sub_packages'] = subpackages_num elif operator == '<': subpackages_num = self.scif[self.scif['number_of_sub_packages'] < int( number)]['number_of_sub_packages'].unique().tolist() filters['number_of_sub_packages'] = subpackages_num return filters def get_kpi_function(self, kpi_type): """ transfers every kpi to its own function .encode('utf-8') :param kpi_type: value from "sheet" column in the main sheet :return: function """ if kpi_type == Const.SOVI: return self.calculate_sos else: Log.warning( "The value '{}' in column sheet in the template is not recognized".format(kpi_type)) return None @staticmethod def round_result(result): return round(result, 3) def get_numerator_and_denominator(self, sos_filters=None, include_empty=False, **general_filters): if include_empty == self.EXCLUDE_EMPTY and 'product_type' not in sos_filters.keys() + general_filters.keys(): general_filters['product_type'] = (self.EMPTY, self.EXCLUDE_FILTER) pop_filter = self.toolbox.get_filter_condition(self.scif, **general_filters) subset_filter = self.toolbox.get_filter_condition(self.scif, **sos_filters) try: pop = self.scif filtered_population = pop[pop_filter] if filtered_population.empty: return 0, 0 else: subset_population = filtered_population[subset_filter] df = filtered_population subset_df = subset_population sum_field = Fd.FACINGS try: Validation.is_empty_df(df) Validation.is_empty_df(subset_df) Validation.is_subset(df, subset_df) Validation.df_columns_equality(df, subset_df) Validation.validate_columns_exists(df, [sum_field]) Validation.validate_columns_exists(subset_df, [sum_field]) Validation.is_none(sum_field) except Exception, e: msg = "Data verification failed: {}.".format(e) default_value = 0 numerator = TBox.calculate_frame_column_sum(subset_df, sum_field, default_value) denominator = TBox.calculate_frame_column_sum(df, sum_field, default_value) return numerator, denominator except Exception as e: Log.error(e.message) return True
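# convert_operators_to_values above turns a cell such as '>=12' in the number_of_sub_packages
# filter into the explicit list of values present in scif that satisfy the comparison. Below is
# a trimmed sketch of the same idea with the operator table factored out; the helper name and
# the sample frame are illustrative only.
import re
import operator
import pandas as pd

_OPS = {'>=': operator.ge, '<=': operator.le, '>': operator.gt, '<': operator.lt}


def expand_operator_filter(scif, column, cell):
    """Return the unique values of `column` in scif that satisfy a '<op><number>' cell."""
    op_token, number = [x.strip() for x in re.split(r'(\d+)', cell) if x.strip()]
    mask = _OPS[op_token](scif[column], int(number))
    return scif.loc[mask, column].unique().tolist()


scif = pd.DataFrame({'number_of_sub_packages': [6, 12, 24]})
print(expand_operator_filter(scif, 'number_of_sub_packages', '>=12'))  # [12, 24]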
class SOLARBRToolBox: LEVEL1 = 1 LEVEL2 = 2 LEVEL3 = 3 EXCLUDE_EMPTY = False EXCLUDE_FILTER = 0 EMPTY = 'Empty' def __init__(self, data_provider, output): self.output = output self.data_provider = data_provider self.common = Common(self.data_provider) self.project_name = self.data_provider.project_name self.session_uid = self.data_provider.session_uid self.k_engine = BaseCalculationsGroup(data_provider, output) self.products = self.data_provider[Data.PRODUCTS] self.all_products = self.data_provider[Data.ALL_PRODUCTS] self.match_product_in_scene = self.data_provider[Data.MATCHES] self.visit_date = self.data_provider[Data.VISIT_DATE] self.session_info = self.data_provider[Data.SESSION_INFO] self.scene_info = self.data_provider[Data.SCENES_INFO] self.store_id = self.data_provider[Data.STORE_FK] self.store_info = self.data_provider[Data.STORE_INFO] self.scif = self.data_provider[Data.SCENE_ITEM_FACTS] self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng) self.kpi_static_data = self.common.get_kpi_static_data() self.kpi_results_queries = [] self.templates = {} self.session_id = self.data_provider.session_id self.score_templates = {} self.get_templates() self.get_score_template() self.manufacturer_fk = self.all_products[ self.all_products['manufacturer_name'] == 'Coca Cola'].iloc[0] self.sos = SOS(self.data_provider, self.output) self.total_score = 0 self.session_fk = self.data_provider[Data.SESSION_INFO]['pk'].iloc[0] self.toolbox = GENERALToolBox(self.data_provider) self.scenes_info = self.data_provider[Data.SCENES_INFO] self.kpi_results_new_tables_queries = [] # self.store_type = self.data_provider.store_type def get_templates(self): for sheet in Const.SHEETS_MAIN: self.templates[sheet] = pd.read_excel(MAIN_TEMPLATE_PATH, sheetname=sheet.decode("utf-8"), keep_default_na=False) def get_score_template(self): for sheet in Const.SHEETS_SCORE: self.score_templates[sheet] = pd.read_excel(SCORE_TEMPLATE_PATH, sheetname=sheet.decode("utf-8"), keep_default_na=False, encoding = "utf-8") def main_calculation(self, *args, **kwargs): main_template = self.templates[Const.KPIS] for i, main_line in main_template.iterrows(): self.calculate_main_kpi(main_line) self.commit_results() def calculate_main_kpi(self, main_line): kpi_name = main_line[Const.KPI_NAME] kpi_type = main_line[Const.Type] scene_types = self.does_exist(main_line, Const.SCENE_TYPES) result = score = 0 general_filters = {} scif_scene_types = self.scif['template_name'].unique().tolist() store_type = str(self.store_info["store_type"].iloc[0]) store_types = self.does_exist_store(main_line, Const.STORE_TYPES) if store_type in store_types: if scene_types: if (('All' in scene_types) or bool(set(scif_scene_types) & set(scene_types))) : if not ('All' in scene_types): general_filters['template_name'] = scene_types if kpi_type == Const.SOVI: relevant_template = self.templates[kpi_type] relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name] if relevant_template["numerator param 1"].all() and relevant_template["denominator param"].all(): function = self.get_kpi_function(kpi_type) for i, kpi_line in relevant_template.iterrows(): result, score = function(kpi_line, general_filters) else: pass else: pass @staticmethod def does_exist(kpi_line, column_name): """ checks if kpi_line has values in this column, and if it does - returns a list of these values :param kpi_line: line from template :param column_name: str :return: list of values if there are, otherwise None """ if column_name in kpi_line.keys() and 
kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return cell.split(", ")
        return None

    @staticmethod
    def does_exist_store(kpi_line, column_name):
        """
        Checks if kpi_line has values in this column, and if it does - returns a list of these values.
        :param kpi_line: line from template
        :param column_name: str
        :return: list of values if there are, otherwise None
        """
        if column_name in kpi_line.keys() and kpi_line[column_name] != "":
            cell = kpi_line[column_name]
            if type(cell) in [int, float]:
                return [cell]
            elif type(cell) in [unicode, str]:
                return cell.split(",")
        return None

    def calculate_sos(self, kpi_line, general_filters):
        kpi_name = kpi_line[Const.KPI_NAME]
        den_type = kpi_line[Const.DEN_TYPES_1]
        den_value = kpi_line[Const.DEN_VALUES_1].split(',')
        num_type = kpi_line[Const.NUM_TYPES_1]
        num_value = kpi_line[Const.NUM_VALUES_1].split(',')
        general_filters[den_type] = den_value
        sos_filters = {num_type: num_value}
        # an optional second numerator dimension narrows the numerator further
        if kpi_line[Const.NUM_TYPES_2]:
            num_type_2 = kpi_line[Const.NUM_TYPES_2]
            num_value_2 = kpi_line[Const.NUM_VALUES_2].split(',')
            sos_filters[num_type_2] = num_value_2
        sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters)
        # sos_value *= 100
        sos_value = round(sos_value, 2)
        score = self.get_score_from_range(kpi_name, sos_value)
        manufacturer_products = self.all_products[
            self.all_products['manufacturer_name'] == num_value[0]].iloc[0]
        manufacturer_fk = manufacturer_products["manufacturer_fk"]
        all_products = self.all_products[
            self.all_products['category'] == den_value[0]].iloc[0]
        category_fk = all_products["category_fk"]
        numerator_res, denominator_res = self.get_numerator_and_denominator(sos_filters, **general_filters)
        self.common.write_to_db_result_new_tables(fk=1, numerator_id=manufacturer_fk,
                                                  numerator_result=numerator_res,
                                                  denominator_id=category_fk,
                                                  denominator_result=denominator_res,
                                                  result=sos_value, score=score,
                                                  score_after_actions=score)
        return sos_value, score

    def get_score_from_range(self, kpi_name, sos_value):
        store_type = str(self.store_info["store_type"].iloc[0])
        self.score_templates[store_type] = self.score_templates[store_type].replace(
            kpi_name, kpi_name.encode("utf-8"))
        score_range = self.score_templates[store_type].query(
            'Kpi == "' + str(kpi_name.encode("utf-8")) + '" & Low <= ' + str(sos_value) +
            ' & High >= ' + str(sos_value))
        score = score_range['Score'].iloc[0]
        return score

    def get_kpi_function(self, kpi_type):
        """
        transfers every kpi to its own function
        :param kpi_type: value from "sheet" column in the main sheet
        :return: function
        """
        if kpi_type == Const.SOVI:
            return self.calculate_sos
        else:
            Log.warning("The value '{}' in column sheet in the template is not recognized".format(kpi_type))
            return None

    @staticmethod
    def round_result(result):
        return round(result, 3)

    def get_numerator_and_denominator(self, sos_filters=None, include_empty=False, **general_filters):
        if include_empty == self.EXCLUDE_EMPTY and \
                'product_type' not in sos_filters.keys() + general_filters.keys():
            general_filters['product_type'] = (self.EMPTY, self.EXCLUDE_FILTER)
        pop_filter = self.toolbox.get_filter_condition(self.scif, **general_filters)
        subset_filter = self.toolbox.get_filter_condition(self.scif, **sos_filters)
        try:
            filtered_population = self.scif[pop_filter]
            if filtered_population.empty:
                return 0, 0
            subset_population = filtered_population[subset_filter]
            # ratio = TBox.calculate_ratio_sum_field_in_rows(filtered_population, subset_population, Fd.FACINGS)
            df = filtered_population
            subset_df = subset_population
            sum_field = Fd.FACINGS
            try:
                Validation.is_empty_df(df)
                Validation.is_empty_df(subset_df)
                Validation.is_subset(df, subset_df)
                Validation.df_columns_equality(df, subset_df)
                Validation.validate_columns_exists(df, [sum_field])
                Validation.validate_columns_exists(subset_df, [sum_field])
                Validation.is_none(sum_field)
            except Exception as e:
                msg = "Data verification failed: {}.".format(e)
                Log.warning(msg)
                # raise Exception(msg)
            default_value = 0
            numerator = TBox.calculate_frame_column_sum(subset_df, sum_field, default_value)
            denominator = TBox.calculate_frame_column_sum(df, sum_field, default_value)
            return numerator, denominator
        except Exception as e:
            Log.error(e.message)
        return 0, 0  # keep the (numerator, denominator) contract even when the calculation fails

    def commit_results(self):
        insert_queries = self.merge_insert_queries(self.kpi_results_new_tables_queries)
        self.rds_conn.disconnect_rds()
        self.rds_conn.connect_rds()
        cur = self.rds_conn.db.cursor()
        delete_query = SOLARBRQueries.get_delete_session_results_query(self.session_uid, self.session_id)
        cur.execute(delete_query)
        for query in insert_queries:
            cur.execute(query)
        self.rds_conn.db.commit()
        self.rds_conn.disconnect_rds()

    @staticmethod
    def merge_insert_queries(insert_queries):
        # group queries by their shared "INSERT ... VALUES" prefix so that each table
        # gets a single multi-row INSERT instead of one statement per result
        query_groups = {}
        for query in insert_queries:
            static_data, inserted_data = query.split('VALUES ')
            if static_data not in query_groups:
                query_groups[static_data] = []
            query_groups[static_data].append(inserted_data)
        merged_queries = []
        for group in query_groups:
            merged_queries.append('{0} VALUES {1}'.format(group, ',\n'.join(query_groups[group])))
        return merged_queries
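# ---------------------------------------------------------------------------
# Illustrative sketch (standalone, with made-up query strings): the grouping
# that SOLARBRToolBox.merge_insert_queries() performs - result queries that
# share the same "INSERT ... VALUES" prefix are collapsed into one multi-row
# INSERT, so commit_results() issues one statement per table rather than one
# per KPI result. The table and column names here are placeholders.
def _merge_insert_queries_example():
    queries = [
        "INSERT INTO some_results_table (kpi_fk, score) VALUES (1, 100)",
        "INSERT INTO some_results_table (kpi_fk, score) VALUES (2, 80)",
    ]
    query_groups = {}
    for query in queries:
        static_data, inserted_data = query.split('VALUES ')
        query_groups.setdefault(static_data, []).append(inserted_data)
    # -> ["INSERT INTO some_results_table (kpi_fk, score) VALUES (1, 100),\n(2, 80)"]
    return ['{0}VALUES {1}'.format(prefix, ',\n'.join(rows))
            for prefix, rows in query_groups.items()]
# ---------------------------------------------------------------------------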
class ARAToolBox: EXCLUDE_FILTER = 0 INCLUDE_FILTER = 1 CONTAIN_FILTER = 2 def __init__(self, data_provider, output, common_db2): self.output = output self.data_provider = data_provider self.common_db = Common(self.data_provider, SUB_PROJECT) self.common_db2 = common_db2 self.project_name = self.data_provider.project_name self.session_uid = self.data_provider.session_uid self.products = self.data_provider[Data.PRODUCTS] self.all_products = self.data_provider[Data.ALL_PRODUCTS] self.match_product_in_scene = self.data_provider[Data.MATCHES] self.visit_date = self.data_provider[Data.VISIT_DATE] self.session_info = self.data_provider[Data.SESSION_INFO] self.scene_info = self.data_provider[Data.SCENES_INFO] self.store_id = self.data_provider[Data.STORE_FK] self.store_info = self.data_provider[Data.STORE_INFO] self.scif = self.data_provider[Data.SCENE_ITEM_FACTS] self.scif = self.scif[~(self.scif['product_type'] == 'Irrelevant')] self.sw_scenes = self.get_relevant_scenes( ) # we don't need to check scenes without United products self.survey = Survey(self.data_provider, self.output) self.sos = SOS(self.data_provider, self.output) self.results = self.data_provider[Data.SCENE_KPI_RESULTS] self.region = self.store_info['region_name'].iloc[0] self.store_type = self.store_info['store_type'].iloc[0] self.program = self.store_info['additional_attribute_3'].iloc[0] self.sales_center = self.store_info['additional_attribute_5'].iloc[0] if self.store_type in STORE_TYPES: ##### self.store_type = STORE_TYPES[self.store_type] #### self.store_attr = self.store_info['additional_attribute_3'].iloc[0] # self.kpi_static_data = self.common_db.get_kpi_static_data() self.ignore_stacking = False self.facings_field = 'facings' if not self.ignore_stacking else 'facings_ign_stack' self.sub_scores = defaultdict(int) self.sub_totals = defaultdict(int) self.templates = self.get_template() self.hierarchy = self.templates[Const.KPIS].set_index( Const.KPI_NAME)[Const.PARENT].to_dict() self.templates = self.get_relevant_template(self.templates) self.children = self.templates[Const.KPIS][Const.KPI_NAME] self.tools = Shared(self.data_provider, self.output) # main functions: def main_calculation(self, *args, **kwargs): """ This function gets all the scene results from the SceneKPI, after that calculates every session's KPI, and in the end it calls "filter results" to choose every KPI and scene and write the results in DB. """ main_template = self.templates[Const.KPIS] for i, main_line in main_template.iterrows(): self.calculate_main_kpi(main_line) self.write_family_tree() # self.write_to_db_result( def calculate_main_kpi(self, main_line): """ This function gets a line from the main_sheet, transfers it to the match function, and checks all of the KPIs in the same name in the match sheet. :param main_line: series from the template of the main_sheet. 
""" kpi_name = main_line[Const.KPI_NAME] kpi_type = main_line[Const.TYPE] # if kpi_name not in Const.ALL_SCENE_KPIS: # placeholder- need to check for unintended consequences # relevant_scif = self.scif[self.scif['scene_id'].isin(self.sw_scenes)] # else: # relevant_scif = self.scif.copy() relevant_scif = self.scif.copy() result = score = target = None general_filters = {} scene_types = self.does_exist(main_line, Const.SCENE_TYPE) if scene_types: relevant_scif = relevant_scif[relevant_scif['template_name'].isin( scene_types)] general_filters['template_name'] = scene_types scene_groups = self.does_exist(main_line, Const.TEMPLATE_GROUP) if scene_groups: relevant_scif = relevant_scif[relevant_scif['template_group'].isin( scene_groups)] general_filters['template_group'] = scene_groups relevant_scif = relevant_scif[relevant_scif['product_type'] != "Empty"] relevant_template = self.templates[kpi_type] relevant_template = relevant_template[relevant_template[Const.KPI_NAME] == kpi_name] function = self.get_kpi_function(kpi_type) if not relevant_scif.empty: for i, kpi_line in relevant_template.iterrows(): result, num, den, score, target = function( kpi_line, relevant_scif, general_filters) if (result is None and score is None and target is None) or not den: continue self.update_parents(kpi_name, score) self.write_to_db(kpi_name, kpi_type, score, result=result, threshold=target, num=num, den=den) def get_template(self): template = {} for sheet in Const.SHEETS: template[sheet] = pd.read_excel(TEMPLATE_PATH, sheetname=sheet).fillna('') return template def get_relevant_template(self, template): kpis = template[Const.KPIS] template[Const.KPIS] = kpis[ (self.is_or_none(kpis, Const.REGION, self.region)) & (self.is_or_none(kpis, Const.STORE_TYPE, self.store_type)) & (self.is_or_none(kpis, Const.PROGRAM, self.store_attr)) & (kpis[Const.SESSION_LEVEL] == 'Y') & (kpis[Const.TYPE] != Const.PARENT)] return template def is_or_none(self, template, col, val): if not isinstance(val, list): val = [val] return ((template[col].isin(val)) | (template[col] is None) | (template[col] == '')) # SOS: def calculate_sos(self, kpi_line, relevant_scif, general_filters): """ calculates SOS line in the relevant scif. :param kpi_line: line from SOS sheet. :param relevant_scif: filtered scif. :param isnt_dp: if "store attribute" in the main sheet has DP, and the store is not DP, we should filter all the DP products out of the numerator. 
:return: (sos result, numerator, denominator, score, target)
        """
        kpi_name = kpi_line[Const.KPI_NAME]
        general_filters['product_type'] = (['Empty', 'Irrelevant'], 0)
        relevant_scif = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)]
        target = self.get_targets(kpi_name)
        sos_filters = self.get_kpi_line_filters(kpi_line, name='numerator')
        general_filters = self.get_kpi_line_filters(kpi_line, name='denominator')
        exclude_filters = {key: (val, self.EXCLUDE_FILTER)
                           for key, val in self.get_kpi_line_filters(kpi_line, name='exclude').items()}
        num_scif = relevant_scif[self.get_filter_condition(relevant_scif, **sos_filters)]
        den_scif = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)]
        if exclude_filters:
            num_scif = num_scif[self.get_filter_condition(num_scif, **exclude_filters)]
        sos_value, num, den = self.tools.sos_with_num_and_dem(kpi_line, num_scif, den_scif, self.facings_field)
        target *= 100
        score = 1 if sos_value >= target else 0
        target = '{}%'.format(int(target))
        return sos_value, num, den, score, target

    def calculate_min_facings(self, kpi_line, relevant_scif, general_filters):
        num_scif, den_scif, target = self.calculation_base(kpi_line, relevant_scif, general_filters)
        num = num_scif[self.facings_field].sum()
        score = 1 if num >= target else 0
        return None, num, None, score, target

    def calculate_min_skus(self, kpi_line, relevant_scif, general_filters):
        num_scif, den_scif, target = self.calculation_base(kpi_line, relevant_scif, general_filters)
        location = self.does_exist(kpi_line, Const.LOCATION)
        num = num_scif.shape[0]
        score = 1 if num >= target else 0
        return None, num, None, score, target

    def calculate_ratio(self, kpi_line, relevant_scif, general_filters):
        # counts, scene by scene, how many scenes meet the minimum facings share
        min_facings_percent = kpi_line[Const.MIN_FACINGS]
        sos_filters = self.get_kpi_line_filters(kpi_line)
        general_filters['product_type'] = (['Empty', 'Irrelevant'], 0)
        scenes = relevant_scif[self.get_filter_condition(
            relevant_scif, **general_filters)]['scene_fk'].unique().tolist()
        us = 0
        them = 0
        if not scenes:
            # keep the five-value contract expected by calculate_main_kpi
            return None, None, None, None, None
        for scene in scenes:
            sos_filters['scene_fk'] = scene
            sos_value = self.sos.calculate_share_of_shelf(sos_filters, **general_filters)
            if sos_value >= min_facings_percent:
                us += 1
            else:
                them += 1
        target = self.get_targets(kpi_line[Const.KPI_NAME]) * 100
        ratio, score = self.ratio_score(us, them, target)
        target = '{}%'.format(int(target))
        return ratio, us, them, score, target

    def calculate_location(self, kpi_line, relevant_scif, general_filters):
        location = self.does_exist(kpi_line, Const.SHELVES)
        mpis = self.match_product_in_scene.merge(self.all_products, on='product_fk')
        mpis = mpis.merge(self.scene_info, on='scene_fk')
        mpis = mpis.merge(self.data_provider[Data.TEMPLATES], on='template_fk')
        num_mpis, den_mpis, target = self.calculation_base(kpi_line, mpis, general_filters)
        den_mpis = num_mpis.copy()
        num_mpis = num_mpis[num_mpis['shelf_number'].isin(location)]
        num = num_mpis.shape[0]
        den = den_mpis.shape[0]
        target *= 100
        ratio, score = self.ratio_score(num, den, target)
        target = '{}%'.format(int(target))
        return ratio, num, den, score, target

    def calculate_min_shelves(self, kpi_line, relevant_scif, general_filters):
        """
        calculates the minimum-shelves KPI in the relevant scif.
        :param kpi_line: line from the template sheet.
        :param relevant_scif: filtered scif.
        :return: (ratio, numerator, denominator, score, target)
        """
        num_scif, den_scif, target = self.calculation_base(kpi_line, relevant_scif, general_filters)
        num = num_scif[self.facings_field].sum()
        den = den_scif[self.facings_field].sum()
        relevant_scenes = relevant_scif['scene_fk'].unique().tolist()
        scene_filters = {'scene_fk': relevant_scenes}
        num_shelves = self.match_product_in_scene[
            self.get_filter_condition(self.match_product_in_scene, **scene_filters)][
            ['scene_fk', 'bay_number', 'shelf_number']].drop_duplicates().shape[0]
        ratio, score = self.ratio_score(num, float(den) / num_shelves, target)
        return ratio, num, None, score, target

    def calculation_base(self, kpi_line, relevant_scif, general_filters):
        kpi_name = kpi_line[Const.KPI_NAME]
        numerator_filters = self.get_kpi_line_filters(kpi_line)
        target = self.get_targets(kpi_name)
        # numerator: the general filters narrowed further by the KPI line's own filters
        num_scif = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)]
        num_scif = num_scif[self.get_filter_condition(num_scif, **numerator_filters)]
        den_scif = relevant_scif[self.get_filter_condition(relevant_scif, **general_filters)]
        return num_scif, den_scif, target

    # helpers:
    def get_targets(self, kpi_name):
        targets_template = self.templates[Const.TARGETS]
        store_targets = targets_template.loc[(self.is_or_none(targets_template, Const.PROGRAM, self.program))]
        filtered_targets_to_kpi = store_targets.loc[targets_template[Const.KPI_NAME] == kpi_name]
        if not filtered_targets_to_kpi.empty:
            target = filtered_targets_to_kpi[Const.TARGET].values[0]
        else:
            target = None
        return target

    @staticmethod
    def ratio_score(num, den, target):
        ratio = 0
        if den:
            ratio = round(num * 100.0 / den, 2)
        score = 1 if ratio >= target else 0
        return ratio, score

    def get_kpi_line_filters(self, kpi_orig, name=''):
        kpi_line = kpi_orig.copy()
        if name:
            name = name.lower() + ' '
        filters = defaultdict(list)
        attribs = [x.lower() for x in kpi_line.index]
        kpi_line.index = attribs
        c = 1
        while 1:
            if '{}param {}'.format(name, c) in attribs and kpi_line['{}param {}'.format(name, c)]:
                filters[kpi_line['{}param {}'.format(name, c)]] += \
                    self.splitter(kpi_line['{}value {}'.format(name, c)])
            else:
                if c > 3:  # just in case someone inexplicably chose a nonlinear numbering format.
break c += 1 return filters @staticmethod def get_kpi_line_targets(kpi_line): mask = kpi_line.index.str.contains('Target') if mask.any(): targets = kpi_line.loc[mask].replace('', np.nan).dropna() targets.index = [ int(x.split(Const.SEPERATOR)[1].split(' ')[0]) for x in targets.index ] targets = targets.to_dict() else: targets = {} return targets @staticmethod def splitter(text_str, delimiter=','): ret = [text_str] if hasattr(ret, 'split'): ret = ret.split(delimiter) return ret @staticmethod def does_exist(kpi_line, column_name): """ checks if kpi_line has values in this column, and if it does - returns a list of these values :param kpi_line: line from template :param column_name: str :return: list of values if there are, otherwise None """ if column_name in kpi_line.keys() and kpi_line[column_name] != "": cell = kpi_line[column_name] if type(cell) in [int, float]: return [cell] elif type(cell) in [unicode, str]: if ", " in cell: return cell.split(", ") else: return cell.split(',') return None def get_kpi_function(self, kpi_type): """ transfers every kpi to its own function :param kpi_type: value from "sheet" column in the main sheet :return: function """ if kpi_type == Const.SOS: return self.calculate_sos elif kpi_type == Const.MIN_SHELVES: return self.calculate_min_shelves elif kpi_type == Const.MIN_FACINGS: return self.calculate_min_facings elif kpi_type == Const.LOCATION: return self.calculate_location elif kpi_type == Const.MIN_SKUS: return self.calculate_min_skus elif kpi_type == Const.RATIO: return self.calculate_ratio else: Log.warning( "The value '{}' in column sheet in the template is not recognized" .format(kpi_type)) return None def get_filter_condition(self, df, **filters): """ :param df: The data frame to be filters. :param filters: These are the parameters which the data frame is filtered by. Every parameter would be a tuple of the value and an include/exclude flag. INPUT EXAMPLE (1): manufacturer_name = ('Diageo', DIAGEOAUPNGAMERICAGENERALToolBox.INCLUDE_FILTER) INPUT EXAMPLE (2): manufacturer_name = 'Diageo' :return: a filtered Scene Item Facts data frame. 
""" if not filters: return df['pk'].apply(bool) if self.facings_field in df.keys(): filter_condition = (df[self.facings_field] > 0) else: filter_condition = None for field in filters.keys(): if field in df.keys(): if isinstance(filters[field], tuple): value, exclude_or_include = filters[field] else: value, exclude_or_include = filters[ field], self.INCLUDE_FILTER if not value: continue if not isinstance(value, list): value = [value] if exclude_or_include == self.INCLUDE_FILTER: condition = (df[field].isin(value)) elif exclude_or_include == self.EXCLUDE_FILTER: condition = (~df[field].isin(value)) elif exclude_or_include == self.CONTAIN_FILTER: condition = (df[field].str.contains(value[0], regex=False)) for v in value[1:]: condition |= df[field].str.contains(v, regex=False) else: continue if filter_condition is None: filter_condition = condition else: filter_condition &= condition else: Log.warning('field {} is not in the Data Frame'.format(field)) return filter_condition def get_relevant_scenes(self): return self.scif[self.scif[Const.DELIVER] == 'Y']['scene_id'].unique().tolist() def get_kpi_name(self, kpi_name, kpi_type): return '{} {} {}'.format(SUB_PROJECT, kpi_name, kpi_type) def get_parent(self, kpi_name): try: parent = self.hierarchy[kpi_name] except Exception as e: parent = None Log.warning( "Warning, Parent KPI not found in column '{}' on template page '{}'" .format(Const.KPI_NAME, Const.KPIS)) return parent def update_parents(self, kpi, score): parent = self.get_parent(kpi) while parent: self.update_sub_score(parent, score=score) parent = self.get_parent(parent) def update_sub_score(self, parent, score=0): self.sub_totals[parent] += 1 self.sub_scores[parent] += score def write_to_db(self, kpi_name, kpi_type, score, result=None, threshold=None, num=None, den=None): """ writes result in the DB :param kpi_name: str :param score: float :param display_text: str :param result: str :param threshold: int """ kpi_fk = self.common_db2.get_kpi_fk_by_kpi_type( self.get_kpi_name(kpi_name, kpi_type)) parent = self.get_parent(kpi_name) delta = 0 if isinstance(threshold, str) and '%' in threshold: threshold = float(threshold.split('-')[0].replace('%', '')) if score == 0: targ = threshold / 100 delta = round((targ * den) - num) else: delta = threshold - num if kpi_name in self.children: if score == 1: score = Const.PASS elif score == 0: score = Const.FAIL score = self.tools.result_values[score] self.common_db2.write_to_db_result( fk=kpi_fk, score=score, result=result, should_enter=True, target=threshold, numerator_result=num, denominator_result=den, weight=delta, identifier_parent=self.common_db2.get_dictionary( parent_name=parent), numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id) # self.write_to_db_result( # self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 2), score=score, level=2) # self.write_to_db_result( # self.common_db.get_kpi_fk_by_kpi_name(kpi_name, 3), score=score, level=3, # threshold=threshold, result=result) def write_to_db_result(self, fk, level, score, set_type=Const.SOVI, **kwargs): """ This function creates the result data frame of every KPI (atomic KPI/KPI/KPI set), and appends the insert SQL query into the queries' list, later to be written to the DB. 
""" if kwargs: kwargs['score'] = score attributes = self.create_attributes_dict(fk=fk, level=level, **kwargs) else: attributes = self.create_attributes_dict(fk=fk, score=score, level=level) if level == self.common_db.LEVEL1: table = self.common_db.KPS_RESULT elif level == self.common_db.LEVEL2: table = self.common_db.KPK_RESULT elif level == self.common_db.LEVEL3: table = self.common_db.KPI_RESULT else: return query = insert(attributes, table) self.common_db.kpi_results_queries.append(query) def create_attributes_dict(self, score, fk=None, level=None, display_text=None, set_type=Const.SOVI, **kwargs): """ This function creates a data frame with all attributes needed for saving in KPI results tables. or you can send dict with all values in kwargs """ kpi_static_data = self.kpi_static_data if set_type == Const.SOVI else self.kpi_static_data_integ if level == self.common_db.LEVEL1: if kwargs: kwargs['score'] = score values = [val for val in kwargs.values()] col = [col for col in kwargs.keys()] attributes = pd.DataFrame(values, columns=col) else: kpi_set_name = kpi_static_data[kpi_static_data['kpi_set_fk'] == fk]['kpi_set_name'].values[0] attributes = pd.DataFrame( [(kpi_set_name, self.session_uid, self.store_id, self.visit_date.isoformat(), format(score, '.2f'), fk)], columns=[ 'kps_name', 'session_uid', 'store_fk', 'visit_date', 'score_1', 'kpi_set_fk' ]) elif level == self.common_db.LEVEL2: if kwargs: kwargs['score'] = score values = [val for val in kwargs.values()] col = [col for col in kwargs.keys()] attributes = pd.DataFrame(values, columns=col) else: kpi_name = kpi_static_data[kpi_static_data['kpi_fk'] == fk]['kpi_name'].values[0].replace( "'", "\\'") attributes = pd.DataFrame( [(self.session_uid, self.store_id, self.visit_date.isoformat(), fk, kpi_name, score)], columns=[ 'session_uid', 'store_fk', 'visit_date', 'kpi_fk', 'kpk_name', 'score' ]) elif level == self.common_db.LEVEL3: data = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk] kpi_fk = data['kpi_fk'].values[0] kpi_set_name = kpi_static_data[kpi_static_data['atomic_kpi_fk'] == fk]['kpi_set_name'].values[0] display_text = data['kpi_name'].values[0] if kwargs: kwargs = self.add_additional_data_to_attributes( kwargs, score, kpi_set_name, kpi_fk, fk, datetime.utcnow().isoformat(), display_text) values = tuple([val for val in kwargs.values()]) col = [col for col in kwargs.keys()] attributes = pd.DataFrame([values], columns=col) else: attributes = pd.DataFrame( [(display_text, self.session_uid, kpi_set_name, self.store_id, self.visit_date.isoformat(), datetime.utcnow().isoformat(), score, kpi_fk, fk)], columns=[ 'display_text', 'session_uid', 'kps_name', 'store_fk', 'visit_date', 'calculation_time', 'score', 'kpi_fk', 'atomic_kpi_fk' ]) else: attributes = pd.DataFrame() return attributes.to_dict() def add_additional_data_to_attributes(self, kwargs_dict, score, kpi_set_name, kpi_fk, fk, calc_time, display_text): kwargs_dict['score'] = score kwargs_dict['kps_name'] = kpi_set_name kwargs_dict['kpi_fk'] = kpi_fk kwargs_dict['atomic_kpi_fk'] = fk kwargs_dict['calculation_time'] = calc_time kwargs_dict['session_uid'] = self.session_uid kwargs_dict['store_fk'] = self.store_id kwargs_dict['visit_date'] = self.visit_date.isoformat() kwargs_dict['display_text'] = display_text return kwargs_dict def kpi_parent_result(self, parent, num, den): if parent in Const.PARENT_RATIO: if den: result = round((float(num) / den) * 100, 2) else: result = 0 else: result = num return result def write_family_tree(self): for sub_parent in 
self.sub_totals.keys(): # for sub_parent in set(Const.KPI_FAMILY_KEY.values()): kpi_type = sub_parent if sub_parent != SUB_PROJECT: kpi_type = '{} {}'.format(SUB_PROJECT, sub_parent) kpi_fk = self.common_db2.get_kpi_fk_by_kpi_type(kpi_type) num = self.sub_scores[sub_parent] den = self.sub_totals[sub_parent] result, score = self.ratio_score(num, den, 1) self.common_db2.write_to_db_result( fk=kpi_fk, numerator_result=num, numerator_id=Const.MANUFACTURER_FK, denominator_id=self.store_id, denominator_result=den, result=result, score=num, target=den, identifier_result=self.common_db2.get_dictionary( parent_name=sub_parent), identifier_parent=self.common_db2.get_dictionary( parent_name=self.get_parent(sub_parent)), should_enter=True) def commit_results(self): """ committing the results in both sets """ pass
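# ---------------------------------------------------------------------------
# Hedged usage sketch: the calculation generator that owns these toolboxes is
# not shown in this module, but based on the constructors above it would be
# driven roughly like this (object names are assumptions, not a verified
# entry point):
#
#     tool_box = ARAToolBox(data_provider, output, common_db2)
#     tool_box.main_calculation()   # computes every KPI and queues the results
#     # persisting the queued results is delegated to the shared common_db2
#     # object, which is why ARAToolBox.commit_results() above is a no-op
# ---------------------------------------------------------------------------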