class Generator:
    """Entry point that wires the DIAGEODISPUS toolbox to a session and runs it."""

    def __init__(self, data_provider, output):
        self.data_provider = data_provider
        self.output = output
        # Session identifiers exposed for convenience.
        self.project_name = data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        # Shared results writer plus the project-specific KPI toolbox.
        self.common = Common(data_provider)
        self.tool_box = DIAGEODISPUSToolBox(self.data_provider, self.output, self.common)

    @log_runtime('Total Calculations', log_start=True)
    def main_function(self):
        """Calculate the score for every KPI set and save it to the DB.

        Skips the whole session (with a warning) when no manual collection
        numbers were captured for it.
        """
        collected = self.tool_box.manual_collection_number
        if collected.empty:
            Log.warning('Manual collection numbers is empty for this session')
        else:
            self.tool_box.main_calculation()
            self.common.commit_results_data_to_new_tables()
class CCAAUToolBox:
    """CCAAU session-level KPI calculations: SOS per scene type/manufacturer and bay counts.

    Include/exclude filtering rules are read from ``Data/template.xlsx``
    located next to this module.
    """

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3
    # Markers passed to the general toolbox filters: drop vs. keep matching rows.
    EXCLUDE_FILTER = 0
    INCLUDE_FILTER = 1

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.templates = self.data_provider[Data.TEMPLATES]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        # NOTE(review): this value is immediately overwritten by the
        # new-tables static data below; the call is kept only to preserve
        # existing behavior (it may have side effects in Common).
        self.kpi_static_data = self.common.get_kpi_static_data()
        self.kpi_results_queries = []
        self.template = self.data_provider.all_templates  # templates
        self.kpi_static_data = self.common.get_new_kpi_static_data()
        self.toolbox = GENERALToolBox(data_provider)
        kpi_path = os.path.dirname(os.path.realpath(__file__))
        base_file = os.path.basename(kpi_path)
        # All three sheets come from the same workbook next to this module.
        template_path = os.path.join(kpi_path[:-len(base_file)], 'Data', 'template.xlsx')
        self.exclude_filters = pd.read_excel(template_path, sheetname="Exclude")
        self.Include_filters = pd.read_excel(template_path, sheetname="Include")
        self.bay_count_kpi = pd.read_excel(template_path, sheetname="BayCountKPI")

    def main_calculation(self):
        """Calculate all KPI results for the session and commit them to the DB."""
        self.calculate_sos()
        self.calculate_bay_kpi()
        self.common.commit_results_data_to_new_tables()

    def calculate_sos(self):
        """Calculate facings SOS and linear SOS per (scene type, manufacturer).

        The scene item facts are filtered by the exclusion rules defined in
        the template (separately for numerator and denominator), then one
        result row per KPI is written for every template/manufacturer pair
        seen in the session.
        """
        facing_kpi_fk = self.kpi_static_data[
            self.kpi_static_data['client_name'] ==
            'FACINGS_SOS_SCENE_TYPE_BY_MANUFACTURER']['pk'].iloc[0]
        linear_kpi_fk = self.kpi_static_data[
            self.kpi_static_data['client_name'] ==
            'LINEAR_SOS_SCENE_TYPE_BY_MANUFACTURER']['pk'].iloc[0]
        # Template rows, split per KPI and per side of the ratio.
        den_facing_exclude_template = self.exclude_filters[
            (self.exclude_filters['KPI'] == 'Share of Shelf by Facing') &
            (self.exclude_filters['apply on'] == 'Denominator')]
        den_linear_exclude_template = self.exclude_filters[
            (self.exclude_filters['KPI'] == 'Share of Shelf by Linear') &
            (self.exclude_filters['apply on'] == 'Denominator')]
        num_facing_exclude_template = self.exclude_filters[
            (self.exclude_filters['KPI'] == 'Share of Shelf by Facing') &
            (self.exclude_filters['apply on'] == 'Numerator')]
        num_linear_exclude_template = self.exclude_filters[
            (self.exclude_filters['KPI'] == 'Share of Shelf by Linear') &
            (self.exclude_filters['apply on'] == 'Numerator')]
        scene_templates = self.scif['template_fk'].unique().tolist()
        scene_manufactures = self.scif['manufacturer_fk'].unique().tolist()
        # Single-condition exclude filters — denominator side.
        den_general_facing_filters = self.create_dict_filters(
            den_facing_exclude_template, self.EXCLUDE_FILTER)
        den_general_linear_filters = self.create_dict_filters(
            den_linear_exclude_template, self.EXCLUDE_FILTER)
        # Single-condition exclude filters — numerator side.
        num_general_facing_filters = self.create_dict_filters(
            num_facing_exclude_template, self.EXCLUDE_FILTER)
        num_general_linear_filters = self.create_dict_filters(
            num_linear_exclude_template, self.EXCLUDE_FILTER)
        # Two-condition exclude rows are applied directly to the data frames.
        df_num_fac = self.filter_2_cond(self.scif, num_facing_exclude_template)
        df_num_lin = self.filter_2_cond(self.scif, num_linear_exclude_template)
        # BUG FIX: the denominator templates were swapped — the linear frame
        # was filtered with the facing rules and vice versa.
        df_den_lin = self.filter_2_cond(self.scif, den_linear_exclude_template)
        df_den_fac = self.filter_2_cond(self.scif, den_facing_exclude_template)
        for template in scene_templates:
            for manufacture in scene_manufactures:
                sos_filters = {
                    "template_fk": (template, self.INCLUDE_FILTER),
                    "manufacturer_fk": (manufacture, self.INCLUDE_FILTER)
                }
                tem_filters = {"template_fk": (template, self.INCLUDE_FILTER)}
                dict_num_facing = dict(
                    (k, v) for d in [sos_filters, num_general_facing_filters]
                    for k, v in d.items())
                numerator_facings = self.calculate_share_space(
                    df_num_fac, dict_num_facing)[0]
                dict_num_linear = dict(
                    (k, v) for d in [sos_filters, num_general_linear_filters]
                    for k, v in d.items())
                numerator_linear = self.calculate_share_space(
                    df_num_lin, dict_num_linear)[1]
                dict_den_facing = dict(
                    (k, v) for d in [tem_filters, den_general_facing_filters]
                    for k, v in d.items())
                denominator_facings = self.calculate_share_space(
                    df_den_fac, dict_den_facing)[0]
                dict_den_linear = dict(
                    (k, v) for d in [tem_filters, den_general_linear_filters]
                    for k, v in d.items())
                denominator_linear = self.calculate_share_space(
                    df_den_lin, dict_den_linear)[1]
                score_facing = 0 if denominator_facings == 0 else (
                    numerator_facings / denominator_facings) * 100
                score_linear = 0 if denominator_linear == 0 else (
                    numerator_linear / denominator_linear) * 100
                self.common.write_to_db_result_new_tables(
                    facing_kpi_fk, manufacture, numerator_facings, score_facing,
                    template, denominator_facings, score_facing)
                self.common.write_to_db_result_new_tables(
                    linear_kpi_fk, manufacture, numerator_linear, score_linear,
                    template, denominator_linear, score_linear)

    def create_dict_filters(self, template, param):
        """Build a filters dict from single-condition template rows.

        :param template: rows of the Exclude sheet for one KPI/side.
        :param param: EXCLUDE_FILTER / INCLUDE_FILTER marker.
        :return: dict of {column name: (list of values, marker)} built from
            the rows that have no second condition ('Param 2' empty).
        """
        filters_dict = {}
        template_without_second = template[template['Param 2'].isnull()]
        for row in template_without_second.iterrows():
            filters_dict[row[1]['Param 1']] = (row[1]['Value 1'].split(','), param)
        return filters_dict

    def filter_2_cond(self, data_frame, template):
        """Apply the template's two-condition exclusion rows to a data frame.

        A row is removed only when BOTH conditions match (kept when either
        column value falls outside its listed values).

        :param data_frame: the scene item facts frame to filter.
        :param template: Exclude-sheet rows; only those with 'Param 2'
            filled are applied here.
        :return: the filtered data frame.
        """
        template_with_second = template[template['Param 2'].notnull()]
        # BUG FIX: the original tested `is not None`, which is always true
        # for a DataFrame; test for emptiness instead.
        if not template_with_second.empty:
            for row in template_with_second.iterrows():
                data_frame = data_frame.loc[
                    (~data_frame[row[1]['Param 1']].isin(
                        row[1]['Value 1'].split(','))) |
                    (~data_frame[row[1]['Param 2']].isin(
                        row[1]['Value 2'].split(',')))]
        return data_frame

    def calculate_share_space(self, data_frame, filters):
        """Sum facings and linear space after applying the given filters.

        :param data_frame: relevant scene item facts data frame (pre-filtered).
        :param filters: parameters the data frame is filtered by.
        :return: (total facings, total shelf width in mm) under the filters.
        """
        filtered_scif = data_frame[self.toolbox.get_filter_condition(
            data_frame, **filters)]
        sum_of_facings = filtered_scif['facings'].sum()
        space_length = filtered_scif['gross_len_split_stack'].sum()
        return sum_of_facings, space_length

    def calculate_bay_kpi(self):
        """Count bays per scene type (from the BayCountKPI sheet) and write results.

        A scene's bay count is taken as the max bay_number seen in its
        matches; counts are summed per template and written against the store.
        """
        bay_kpi_sheet = self.bay_count_kpi
        kpi = self.kpi_static_data.loc[self.kpi_static_data['type'] == BAY_COUNT_KPI]
        if kpi.empty:
            Log.info("CCAAU Calculate KPI Name:{} not found in DB".format(
                BAY_COUNT_KPI))
        else:
            Log.info("CCAAU Calculate KPI Name:{} found in DB".format(
                BAY_COUNT_KPI))
            bay_kpi_row = bay_kpi_sheet[bay_kpi_sheet['KPI Name'] == BAY_COUNT_KPI]
            if not bay_kpi_row.empty:
                scene_types_to_consider = bay_kpi_row['Scene Type'].iloc[0]
                if scene_types_to_consider == '*':
                    # Consider all scene types
                    scene_types_to_consider = 'all'
                else:
                    scene_types_to_consider = [
                        x.strip() for x in scene_types_to_consider.split(',')
                    ]
                mpis_with_scene = self.match_product_in_scene.merge(
                    self.scene_info, how='left', on='scene_fk')
                mpis_with_scene_and_template = mpis_with_scene.merge(
                    self.templates, how='left', on='template_fk')
                if scene_types_to_consider != 'all':
                    mpis_with_scene_and_template = mpis_with_scene_and_template[
                        mpis_with_scene_and_template['template_name'].isin(
                            scene_types_to_consider)]
                mpis_template_group = mpis_with_scene_and_template.groupby(
                    'template_fk')
                for template_fk, template_data in mpis_template_group:
                    Log.info("Running for template ID {templ_id}".format(
                        templ_id=template_fk,
                    ))
                    total_bays_for_scene_type = 0
                    scene_group = template_data.groupby('scene_fk')
                    for scene_fk, scene_data in scene_group:
                        Log.info(
                            "KPI Name:{kpi} bay count is {bay_c} for scene ID {scene_id}"
                            .format(
                                kpi=BAY_COUNT_KPI,
                                bay_c=int(scene_data['bay_number'].max()),
                                scene_id=scene_fk,
                            ))
                        total_bays_for_scene_type += int(
                            scene_data['bay_number'].max())
                    Log.info(
                        "KPI Name:{kpi} total bay count is {bay_c} for template ID {templ_id}"
                        .format(
                            kpi=BAY_COUNT_KPI,
                            bay_c=total_bays_for_scene_type,
                            templ_id=template_fk,
                        ))
                    self.common.write_to_db_result_new_tables(
                        fk=int(kpi['pk'].iloc[0]),
                        numerator_id=int(template_fk),
                        numerator_result=total_bays_for_scene_type,
                        denominator_id=int(self.store_id),
                        denominator_result=total_bays_for_scene_type,
                        result=total_bays_for_scene_type,
                    )
class CCPHLToolBox:
    """CCPHL custom KPI calculations (shelf purity / shelf utilization)."""

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.template_data = pd.read_excel(TEMPLATE_PATH, 'KPIs').fillna('')
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        # Only the custom-KPI family is relevant for this project.
        self.kpi_static_data = self.common.get_new_kpi_static_data()
        self.kpi_static_data = self.kpi_static_data[
            self.kpi_static_data['kpi_family_fk'] == CCPHLConsts.CUSTOM_KPIS]
        self.kpi_results_queries = []
        # Template filter-param name -> scif column name.
        self.mapping_param = {"manufacturer": "manufacturer_name"}
        # Template entity name -> scif/result entity column.
        self.mapping_entity = {"manufacturer": "manufacturer_fk",
                               "store": "store_id",
                               "scene_type": "template_fk",
                               "brand": "brand_fk",
                               "product": "product_fk"}

    def main_calculation(self):
        """Run every KPI group from the template and commit the results."""
        for group in self.template_data[CCPHLConsts.KPI_GROUP].unique():
            kpis = self.template_data[self.template_data[CCPHLConsts.KPI_GROUP] == group]
            if kpis.empty:
                print("KPI Group:{} is not valid".format(group))
                continue
            for row_num, kpi in kpis.iterrows():
                # Purity and utilization share the same calculation.
                if kpi[CCPHLConsts.KPI_GROUP] in (CCPHLConsts.SHELF_PURITY,
                                                  CCPHLConsts.SHELF_CUTIL):
                    self.calculate_self_purity_util(kpi)
        self.common.commit_results_data_to_new_tables()

    def calculate_self_purity_util(self, kpi):
        """Calculate one shelf purity/utilization KPI row from the template.

        Facings are grouped by the entity columns listed in the template
        row; each group's facings are divided by the store- or scene-type
        facings total, and the ratio is written to the custom-KPI tables.

        :param kpi: one row (Series) of the KPIs template sheet.
        """
        df_scene_data = self.scif
        if df_scene_data.empty:
            return
        scene_types = [x.strip() for x in kpi[CCPHLConsts.SCENE_TYPE].split(',')]
        filter_param_name_1 = kpi['filter_param_name_1'].strip()
        if len(filter_param_name_1) != 0:
            filter_param_value_1 = kpi['filter_param_value_1'].strip()
        else:
            filter_param_value_1 = ""
        if str(kpi[CCPHLConsts.EXCLUDE_EMPTY]).upper() == 'Y':
            df_scene_data = df_scene_data[df_scene_data[CCPHLConsts.PRODUCT_FK] != CCPHLConsts.EMPTY]
            df_scene_data = df_scene_data[df_scene_data[CCPHLConsts.PRODUCT_FK] != CCPHLConsts.GENERAL_EMPTY]
        if str(kpi[CCPHLConsts.EXCLUDE_IRRELEVANT]).upper() == 'Y':
            # NOTE(review): comparing the IRRELEVANT column against the
            # IRRELEVANT constant looks suspicious (possibly a product-type
            # column was intended). Kept as-is to preserve behavior —
            # confirm against the consts module.
            df_scene_data = df_scene_data[df_scene_data[CCPHLConsts.IRRELEVANT] != CCPHLConsts.IRRELEVANT]
        df_kpi_level_2_fk = self.kpi_static_data[self.kpi_static_data['type'] == kpi['kpi_name']]
        if df_kpi_level_2_fk.empty:
            kpi_level_2_fk = 0
        else:
            kpi_level_2_fk = df_kpi_level_2_fk.iloc[0]['pk']
        df_scene_data = df_scene_data[df_scene_data['template_name'].isin(scene_types)]
        # Grouping entities, in template order (up to 4).
        group_list = []
        for idx in range(1, 5):
            entity = kpi['entity' + str(idx)].strip()
            if entity == 'N/A' or len(entity) == 0:
                continue
            group_list.append(self.mapping_entity[entity])
        if not group_list:
            # Robustness fix: a template row with no entities cannot be
            # grouped (the original raised IndexError on group_list[0]).
            print("KPI:{} has no grouping entities".format(kpi['kpi_name']))
            return
        denominator = 0
        if group_list[0] == 'store_id':
            # Store-level denominator: all facings left after the filters.
            denominator = float(df_scene_data['facings'].sum())
        if len(filter_param_value_1) != 0:
            df_scene_data2 = df_scene_data[
                df_scene_data[self.mapping_param[filter_param_name_1]] == filter_param_value_1]
        else:
            df_scene_data2 = df_scene_data
        filter_columns = list(group_list)
        filter_columns.append('facings')
        df_scene_data2 = df_scene_data2[filter_columns]
        df_purity = pd.DataFrame(df_scene_data2.groupby(group_list).sum().reset_index())
        store_zero_results = str(kpi[CCPHLConsts.STORE_ZERO_RESULTS]).strip().upper()
        for row_num, row_data in df_purity.iterrows():
            numerator = row_data['facings']
            if group_list[0] == 'template_fk':
                # Scene-type-level denominator: facings of this scene type only.
                df_scene_count = df_scene_data[df_scene_data['template_fk'] == row_data['template_fk']]
                if df_scene_count.empty:
                    total_facings = 0
                else:
                    total_facings = df_scene_count['facings'].sum()
                denominator = float(total_facings)
            try:
                result = round(float(numerator) / float(denominator), 4)
            except ZeroDivisionError as err:
                # BUG FIX: the original formatted `ZeroDivisionError.message`
                # (a descriptor on the class), not the caught exception.
                print("Error: {}".format(err))
                continue
            if kpi_level_2_fk != 0:
                # Zero results are stored only when the template asks for
                # them (the two original branches wrote identical rows).
                if result != 0 or store_zero_results == 'Y':
                    self.common.write_to_db_result_new_tables(
                        fk=kpi_level_2_fk,
                        numerator_id=row_data[group_list[-1]],
                        denominator_id=row_data[group_list[0]],
                        numerator_result=numerator,
                        denominator_result=denominator,
                        result=result,
                        score=result)
class HEINEKENTWToolBox:
    """Heineken TW assortment KPI calculations (distribution / OOS, store and category level)."""

    # Legacy KPI hierarchy levels (kept for compatibility).
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3
    # Hard-coded kpi_level_2 pks for the assortment result types.
    DIST_STORE_LVL1 = 1014
    OOS_STORE_LVL1 = 1015
    DIST_STORE_LVL2 = 1016
    OOS_STORE_LVL2 = 1017
    DIST_CATEGORY_LVL1 = 1018
    OOS_CATEGORY_LVL1 = 1019
    DIST_CATEGORY_LVL2 = 1020
    OOS_CATEGORY_LVL2 = 1021
    # SKU-level result codes: distributed vs. out-of-stock.
    DISTRIBUTION = 4
    OOS = 5
    MANUFACTURER_FK = 175  # Heineken manufacturer pk

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name,
                                           DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_new_kpi_static_data()
        self.kpi_results_queries = []
        self.assortment = Assortment(self.data_provider, self.output)

    def main_calculation(self, *args, **kwargs):
        """Calculate the assortment KPI results (category then store level) and commit them."""
        lvl3_result = self.assortment.calculate_lvl3_assortment()
        self.category_assortment_calculation(lvl3_result)
        self.store_assortment_calculation(lvl3_result)
        self.common.commit_results_data_to_new_tables()

    def category_assortment_calculation(self, lvl3_result):
        """Write distribution/OOS results per SKU (level 3) and per category (level 2).

        Each assortment SKU gets a DISTRIBUTION or OOS code against its
        category; each category then gets a passes/total ratio (and its
        complement for OOS).

        :param lvl3_result: SKU-level assortment frame from the Assortment helper.
        """
        if not lvl3_result.empty:
            # Attach each product's category so results can be grouped by it.
            cat_df = self.all_products[['product_fk', 'category_fk']]
            lvl3_with_cat = lvl3_result.merge(cat_df, on='product_fk', how='left')
            lvl3_with_cat = lvl3_with_cat[
                lvl3_with_cat['category_fk'].notnull()]
            for result in lvl3_with_cat.itertuples():
                if result.in_store == 1:
                    score = self.DISTRIBUTION
                else:
                    score = self.OOS
                # Distribution
                self.common.write_to_db_result_new_tables(
                    fk=self.DIST_CATEGORY_LVL1,
                    numerator_id=result.product_fk,
                    numerator_result=score,
                    result=score,
                    denominator_id=result.category_fk,
                    denominator_result=1,
                    score=score,
                    score_after_actions=score)
                if score == self.OOS:
                    # OOS
                    self.common.write_to_db_result_new_tables(
                        fk=self.OOS_CATEGORY_LVL1,
                        numerator_id=result.product_fk,
                        numerator_result=score,
                        result=score,
                        denominator_id=result.category_fk,
                        denominator_result=1,
                        score=score,
                        score_after_actions=score)
            category_list = lvl3_with_cat['category_fk'].unique()
            for cat in category_list:
                # Level-2 ratio per category: passed SKUs / total SKUs.
                lvl3_result_cat = lvl3_with_cat[lvl3_with_cat["category_fk"] == cat]
                lvl2_result = self.assortment.calculate_lvl2_assortment(
                    lvl3_result_cat)
                for result in lvl2_result.itertuples():
                    denominator_res = result.total
                    res = np.divide(float(result.passes), float(denominator_res))
                    # Distribution
                    self.common.write_to_db_result_new_tables(
                        fk=self.DIST_CATEGORY_LVL2,
                        numerator_id=self.MANUFACTURER_FK,
                        numerator_result=result.passes,
                        denominator_id=cat,
                        denominator_result=denominator_res,
                        result=res,
                        score=res,
                        score_after_actions=res)
                    # OOS
                    self.common.write_to_db_result_new_tables(
                        fk=self.OOS_CATEGORY_LVL2,
                        numerator_id=self.MANUFACTURER_FK,
                        numerator_result=denominator_res - result.passes,
                        denominator_id=cat,
                        denominator_result=denominator_res,
                        result=1 - res,
                        score=(1 - res),
                        score_after_actions=1 - res)
        return

    def store_assortment_calculation(self, lvl3_result):
        """Write distribution/OOS results per SKU and per group at store level.

        The level-2 denominator switches from the group total to the group
        target once the target date has been reached.

        :param lvl3_result: SKU-level assortment frame from the Assortment helper.
        """
        for result in lvl3_result.itertuples():
            if result.in_store == 1:
                score = self.DISTRIBUTION
            else:
                score = self.OOS
            # Distribution
            self.common.write_to_db_result_new_tables(
                fk=self.DIST_STORE_LVL1,
                numerator_id=result.product_fk,
                numerator_result=score,
                result=score,
                denominator_id=self.store_id,
                denominator_result=1,
                score=score)
            if score == self.OOS:
                # OOS
                self.common.write_to_db_result_new_tables(
                    fk=self.OOS_STORE_LVL1,
                    numerator_id=result.product_fk,
                    numerator_result=score,
                    result=score,
                    denominator_id=self.store_id,
                    denominator_result=1,
                    score=score,
                    score_after_actions=score)
        if not lvl3_result.empty:
            lvl2_result = self.assortment.calculate_lvl2_assortment(
                lvl3_result)
            for result in lvl2_result.itertuples():
                denominator_res = result.total
                # Use the group target as denominator once its date is due.
                if not pd.isnull(result.target) and not pd.isnull(
                        result.group_target_date
                ) and result.group_target_date <= self.assortment.current_date:
                    denominator_res = result.target
                res = np.divide(float(result.passes), float(denominator_res))
                # Distribution
                self.common.write_to_db_result_new_tables(
                    fk=self.DIST_STORE_LVL2,
                    numerator_id=self.MANUFACTURER_FK,
                    denominator_id=self.store_id,
                    numerator_result=result.passes,
                    denominator_result=denominator_res,
                    result=res,
                    score=res,
                    score_after_actions=res)
                # OOS
                self.common.write_to_db_result_new_tables(
                    fk=self.OOS_STORE_LVL2,
                    numerator_id=self.MANUFACTURER_FK,
                    numerator_result=denominator_res - result.passes,
                    denominator_id=self.store_id,
                    denominator_result=denominator_res,
                    result=1 - res,
                    score=1 - res,
                    score_after_actions=1 - res)
        return
class PERFETTICNToolBox:
    """Perfetti CN KPI calculations: secondary-display counts and assortment levels 1-3."""

    # Legacy KPI hierarchy levels (kept for compatibility).
    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL3 = 3

    def __init__(self, data_provider, output):
        self.output = output
        self.data_provider = data_provider
        self.common = Common(self.data_provider)
        self.project_name = self.data_provider.project_name
        self.session_uid = self.data_provider.session_uid
        self.products = self.data_provider[Data.PRODUCTS]
        self.all_products = self.data_provider[Data.ALL_PRODUCTS]
        self.match_product_in_scene = self.data_provider[Data.MATCHES]
        self.visit_date = self.data_provider[Data.VISIT_DATE]
        self.session_info = self.data_provider[Data.SESSION_INFO]
        self.scene_info = self.data_provider[Data.SCENES_INFO]
        self.store_id = self.data_provider[Data.STORE_FK]
        self.scif = self.data_provider[Data.SCENE_ITEM_FACTS]
        self.rds_conn = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
        self.kpi_static_data = self.common.get_new_kpi_static_data()
        self.kpi_results_queries = []
        self.store_info = self.data_provider[Data.STORE_INFO]
        self.assortment = Assortment(self.data_provider, self.output)
        self.template = self.data_provider.all_templates

    def main_calculation(self, *args, **kwargs):
        """Calculate the display count and assortment KPIs and commit the results."""
        self.display_count()
        self.assortment_calculation()
        self.common.commit_results_data_to_new_tables()

    def display_count(self):
        """Count scif rows per 'secondary shelf' template type.

        Writes one COUNT OF DISPLAY result for every template that is
        flagged 'included_in_secondary_shelf_report' and appears in the
        session's scene item facts.
        """
        num_brands = {}
        display_info = self.scif['template_fk']
        display_fks = display_info.unique()
        template_seco = self.template[
            self.template['included_in_secondary_shelf_report'] == 'Y']['template_fk']
        # Keep only the session's templates that are secondary-shelf types.
        display_fks = list(
            filter(lambda x: x in template_seco.values, display_fks))
        count_fk = self.kpi_static_data[self.kpi_static_data['client_name'] ==
                                        'COUNT OF DISPLAY']['pk'].iloc[0]
        for value in display_fks:
            # NOTE(review): this counts scif rows carrying the template, not
            # distinct scenes — confirm that is the intended display count.
            num_brands[value] = display_info[display_info == value].count()
            score = num_brands[value]
            self.common.write_to_db_result_new_tables(count_fk, value, None, score,
                                                      score, score, score)
        return

    def assortment_calculation(self):
        """Calculate the three assortment levels.

        Level 3 scores each assortment SKU, level 2 scores each assortment
        group, and level 1 scores super-groups over all their groups.
        """
        lvl3_result = self.assortment.calculate_lvl3_assortment()
        for result in lvl3_result.itertuples():
            # SKU is either present (100) or absent (its raw in_store value).
            score = result.in_store
            if score >= 1:
                score = 100
            self.common.write_to_db_result_new_tables(
                result.kpi_fk_lvl3, result.product_fk, result.in_store, score,
                result.assortment_group_fk, 1, score)
        if not lvl3_result.empty:
            lvl2_result = self.assortment.calculate_lvl2_assortment(
                lvl3_result)
            for result in lvl2_result.itertuples():
                denominator_res = result.total
                res = np.divide(float(result.passes), float(denominator_res))
                # A group passes when at least one of its SKUs is in store.
                if result.passes >= 1:
                    score = 100
                else:
                    score = 0
                self.common.write_to_db_result_new_tables(
                    result.kpi_fk_lvl2, result.assortment_group_fk, result.passes,
                    (res * 100), result.assortment_super_group_fk, denominator_res,
                    score)
            if not lvl2_result.empty:
                lvl1_result = self.assortment.calculate_lvl1_assortment(
                    lvl2_result)
                for result in lvl1_result.itertuples():
                    denominator_res = result.total
                    res = np.divide(float(result.passes), float(denominator_res))
                    # NOTE(review): `res >= 0` is always true for this
                    # non-negative ratio, so the level-1 score is always 100.
                    # A real threshold (e.g. `res >= 1`) was presumably
                    # intended — confirm before changing stored results.
                    if res >= 0:
                        score = 100
                    else:
                        score = 0
                    self.common.write_to_db_result_new_tables(
                        fk=result.kpi_fk_lvl1,
                        numerator_id=result.assortment_super_group_fk,
                        numerator_result=result.passes,
                        denominator_result=denominator_res,
                        result=(res * 100),
                        score=score)
        return