def calculate_liberty(self):
    """Run the LIBERTY KPI calculations for this session.

    Failures are logged and swallowed so one KPI group cannot abort the
    whole session calculation.
    """
    Log.info('starting calculate_liberty')
    try:
        tool_box = LIBERTYToolBox(self.data_provider, self.output, self.common_v2)
        tool_box.main_calculation()
    except Exception as e:
        # Format the exception itself: '.message' is missing on many
        # exception classes and was removed in Python 3, so the old
        # 'e.message' could itself raise inside the handler.
        Log.error('failed to calculate LIBERTY KPIs due to: {}'.format(e))
def calculate_msc(self):
    """Run the MSC Compliance KPI calculations for this session.

    Failures are logged and swallowed so one KPI group cannot abort the
    whole session calculation.
    """
    Log.info('starting calculate_msc')
    try:
        tool_box = MSCToolBox(self.data_provider, self.output, self.common_v2)
        tool_box.main_calculation()
    except Exception as e:
        # 'e.message' is unreliable (absent on many exceptions, removed in
        # Python 3); formatting the exception itself is always safe.
        Log.error('failed to calculate MSC Compliance due to: {}'.format(e))
def main_function(self):
    """Calculate the FIFA KPI set.

    Loads the KPI template, runs the four component checks, sums their
    weighted scores and writes the set-level result to the DB.
    """
    jg = JsonGenerator('ccru')
    kpi_set_name = self.tool_box.set_name
    try:
        jg.create_json('FIFA KPIs.xlsx', kpi_set_name, sheetname=kpi_set_name)
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt.
        Log.error('Session store "{}" is not set to calculation'.format(
            self.tool_box.session_info.store_type))
    calc_start_time = datetime.datetime.utcnow()
    Log.info('Calculation Started at {}'.format(calc_start_time))
    # All four component checks operate on the first entry of 'kpi_data';
    # hoisted out of the four repeated lookups.
    kpi_data = jg.project_kpi_dict.get('kpi_data')[0]
    score = 0
    score += self.tool_box.check_weighted_average(kpi_data)
    score += self.tool_box.calculate_share_of_cch_collers(kpi_data)
    score += self.tool_box.check_survey_answer(kpi_data)
    score += self.tool_box.weighted_cooler_standard(kpi_data)
    attributes_for_table1 = pd.DataFrame(
        [(kpi_set_name, self.session_uid, self.store_id,
          self.visit_date.isoformat(), format(score, '.2f'), None)],
        columns=[
            'kps_name', 'session_uid', 'store_fk', 'visit_date',
            'score_1', 'kpi_set_fk'
        ])
    self.tool_box.write_to_db_result(attributes_for_table1, 'level1')
    self.tool_box.commit_results_data()
    calc_finish_time = datetime.datetime.utcnow()
    Log.info('Calculation time took {}'.format(calc_finish_time - calc_start_time))
def calculate_sovi(self):
    """Run the SOVI KPI calculations for this session.

    Failures are logged and swallowed so one KPI group cannot abort the
    whole session calculation.
    """
    Log.info('starting calculate_sovi')
    try:
        tool_box = SOVIToolBox(self.data_provider, self.output, self.common_v2)
        tool_box.main_calculation()
    except Exception as e:
        # Fixed 'due to :' typo; 'e.message' replaced — it is missing on many
        # exception classes and removed in Python 3.
        Log.error('failed to calculate SOVI due to: {}'.format(e))
def get_availability_and_price_calculation_parameters(self, atomic_kpi):
    """Build a {filter_type: value(s)} dict from the 'typeN'/'valueN' column
    pairs of an atomic-KPI template line.

    A 'typeN' column with no matching 'valueN' column is logged and skipped.
    A single parsed value is stored bare; multiple values as a list.
    """
    condition_filters = {}
    relevant_columns = [c for c in atomic_kpi.index.values
                        if c.startswith('type') or c.startswith('value')]
    for col in relevant_columns:
        if not atomic_kpi[col] or not col.startswith('type'):
            continue
        # Pair 'typeN' with the 'value...' column whose last character is N.
        condition_number = str(col.strip('type'))
        candidates = [c for c in relevant_columns
                      if c.startswith('value') and str(c[-1]) == condition_number]
        if not candidates:
            Log.error(
                'condition {} does not have corresponding value column'
                .format(col))
            continue
        values = [self.get_string_or_number(atomic_kpi[col], raw)
                  for raw in self.split_and_strip(atomic_kpi[candidates[0]])]
        condition_filters[atomic_kpi[col]] = values[0] if len(values) == 1 else values
    return condition_filters
def process_session(self):
    """End-to-end share-of-display calculation for one session.

    Loads display tags, clears previously stored results, runs the
    per-display-type handlers and (on ACE) commits the DB transaction.
    Logs and re-raises on failure so the caller sees the error.
    """
    try:
        Log.debug(self.log_prefix + ' Retrieving data')
        self.match_display_in_scene = self._get_match_display_in_scene_data(
        )
        # if there are no display tags there's no need to retrieve the rest of the data.
        if self.match_display_in_scene.empty:
            Log.debug(self.log_prefix + ' No display tags')
            self._delete_previous_data()
        else:
            self.displays = self._get_displays_data()
            self.match_product_in_scene = self._get_match_product_in_scene_data(
            )
            # Previous results are removed only after the new data loaded successfully.
            self._delete_previous_data()
            # One handler per display type: promotion wall, cube/4-sided,
            # table, then everything else.
            self._handle_promotion_wall_display()
            self._handle_cube_or_4_sided_display()
            self._handle_table_display()
            self._handle_rest_display()
        if self.on_ace:
            Log.debug(self.log_prefix + ' Committing share of display calculations')
            self.project_connector.db.commit()
        Log.info(self.log_prefix + ' Finished calculation')
    except Exception as e:
        Log.error(
            'Share of display calculation for session: \'{0}\' error: {1}'.
            format(self.session_uid, str(e)))
        raise e
def get_compliance(self, manual_planogram_data=None, manual_scene_data=None):
    """
    This function filters the irrelevant products out, creates a matrix that matches
    the bays of the POG and the scene and scores them, finds the best way to match
    the bays and returns the match tags.
    :param manual_planogram_data: optional pre-fetched planogram matches DF.
    :param manual_scene_data: optional pre-fetched scene matches DF.
    :return: DF of match_product_in_scene_fk with the tags
    """
    tag_compliance = pd.DataFrame(
        columns=[Keys.MATCH_FK, Keys.COMPLIANCE_STATUS_FK])
    try:
        self.planogram_matches = self._get_planogram_data() if manual_planogram_data is \
            None else manual_planogram_data
        self.scene_matches = self._get_matches(
        ) if manual_scene_data is None else manual_scene_data
        # Compliance is evaluated on the bottom stacking layer only.
        self.planogram_matches = self.planogram_matches[
            self.planogram_matches[Keys.STACKING_LAYER] == 1]
        self.planogram_matches = self.planogram_matches[[
            Keys.BAY_NUMBER, Keys.SHELF_NUMBER_FROM_BOTTOM,
            Keys.FACING_SEQUENCE_NUMBER, Keys.BRAND_FK, Keys.PRODUCT_FK
        ]]
        # Rename POG columns so the left-merge below does not collide with
        # the scene columns of the same name.
        self.planogram_matches.rename(
            columns={Keys.PRODUCT_FK: PLANOGRAM_PRODUCT_FK}, inplace=True)
        self.planogram_matches.rename(
            columns={Keys.BRAND_FK: PLANOGRAM_BRAND_FK}, inplace=True)
        self.scene_matches = self.scene_matches.merge(
            self.planogram_matches, how='left')
        planogram_products = set(
            self.planogram_matches[PLANOGRAM_PRODUCT_FK].tolist())
        tag_compliance = self.local_get_tag_planogram_compliance(
            self.scene_matches, planogram_products)
    except Exception as e:
        # str(e) instead of 'e.message': '.message' is missing on many
        # exception classes (and removed in Python 3), so the old handler
        # could raise AttributeError while reporting the real error.
        Log.error("Calculated compliance has failed: " + str(e))
    return tag_compliance[[Keys.MATCH_FK, Keys.COMPLIANCE_STATUS_FK]]
def check_kpi_config_db_temp_recon(self, df_kpi, sheet_name):
    """Verify every KPI row of `df_kpi` exists in the DB category-KPI table.

    Rows match on category name, space-insensitive KPI name and KPI type.
    Missing rows are collected into an excel log.
    :param df_kpi: DF of template KPI rows to reconcile.
    :param sheet_name: template sheet name, recorded in the exception log.
    :return: True when every row was found in the DB, else False.
    """
    result = True
    lst_exceptions = []
    # Build the DB lookup keys once: the original nested iterrows() re-scanned
    # the whole DB frame for every template row (O(rows * db_rows)).
    db_keys = set()
    for _, c_row_data in self.db_category_kpi.iterrows():
        db_keys.add((c_row_data['category_name'],
                     c_row_data['kpi_name'].replace(" ", ""),
                     c_row_data['kpi_type']))
    for row_num, row_data in df_kpi.iterrows():
        key = (row_data['Category Name'],
               row_data['KPI name'].replace(" ", ""),
               row_data['KPI Type'])
        if key in db_keys:
            continue
        result = False
        dict_exception = {'Sheet Name': sheet_name,
                          'Category Name': row_data['Category Name'],
                          'KPI Type': row_data['KPI Type'],
                          'KPI name': row_data['KPI name']}
        lst_exceptions.append(dict_exception)
        if is_debug:
            Log.error(dict_exception)
    self.create_excel_log(lst_exceptions, 'kpi_config_db_temp_recon')
    return result
def dependency_reorder(self):
    """Reorder the KPIS template rows so every KPI appears after the KPIs it
    depends on; logs (and stops on) circular dependencies."""
    kpis = self.template[Const.KPIS].copy()
    # KPI name -> original row index, for resolving dependency names.
    name_to_index = kpis.reset_index().set_index(Const.KPI_NAME)['index'].to_dict()
    dependent_index = list(kpis[kpis[Const.DEPENDENT].notnull()].index)
    # Rows with no dependencies are satisfied from the start.
    kpis_index = list(set(kpis.index) - set(dependent_index))
    set_index = set(kpis_index)
    c = 0  # consecutive deferrals; used below to detect lack of progress
    while dependent_index:
        i = dependent_index.pop(0)
        kpi = kpis.loc[i, Const.KPI_NAME]
        dependencies = self.read_cell_from_line(kpis.loc[i, :], Const.DEPENDENT)
        met = True
        for dependency in dependencies:
            if name_to_index[dependency] not in set_index:
                met = False
        if met:
            # All dependencies already placed: append and reset the counter.
            kpis_index.append(i)
            set_index.add(i)
            c = 0
        else:
            # Defer to the back of the queue and count the deferral.
            dependent_index.append(i)
            c += 1
        # More consecutive deferrals than rows (with 10% slack) means no
        # remaining KPI can ever be placed -> cycle.
        if c > kpis.shape[0] * 1.1:
            Log.error('Circular Dependency Found: KPIs Affected {}'.format(
                [kpis.loc[i, Const.KPI_NAME] for i in dependent_index]))
            break
    self.template[Const.KPIS] = kpis.reindex(index=pd.Index(kpis_index)).reset_index(drop=True)
def reconcile_hierarchy_entries(self):
    """Check that every KPI referenced by the Hierarchy sheet also exists in
    its own KPI sheet (matched on KPI name + Category Name).

    Mismatches are written to an excel log.
    :return: True when everything reconciles, else False.
    """
    Log.info(
        "Reconcile Hierarchy Sheet KPIs with Other KPI Sheets - Started")
    result = True
    column_names = ['KPI name', 'Category Name']
    lst_exceptions = []
    for sheet_name, q_kpi_template in self.q_kpi_templates.items():
        kpi_template = q_kpi_template[column_names].copy()
        # One-off lookup set replaces the original nested iterrows() scan
        # (O(hierarchy_rows * kpi_rows) -> O(hierarchy_rows + kpi_rows)).
        kpi_keys = set()
        for _, kpi_row in kpi_template.iterrows():
            kpi_keys.add((kpi_row['KPI name'], kpi_row['Category Name']))
        hierarchy_filter = self.hierarchy_template[
            self.hierarchy_template['KPI Type'] ==
            sheet_name][column_names].copy()
        for row_num, row_data in hierarchy_filter.iterrows():
            if (row_data['KPI name'], row_data['Category Name']) in kpi_keys:
                continue
            result = False
            dict_exception = {'Sheet name': sheet_name,
                              'Category Name': row_data['Category Name'],
                              'KPI name': row_data['KPI name']}
            lst_exceptions.append(dict_exception)
            if is_debug:
                Log.error(dict_exception)
    self.create_excel_log(lst_exceptions, 'reconcile_hierarchy_entries')
    Log.info(
        "Reconcile Hierarchy Sheet KPIs with Other KPI Sheets - Completed")
    return result
def check_perfect_execution(self):
    """Verify every 'Perfect Execution' KPI test references an existing
    Hierarchy-sheet KPI (matched on KPI name + Category Name).

    Mismatches are written to an excel log.
    :return: True when every test was found, else False.
    """
    Log.info("Perfect Execution Check - Started")
    result = True
    sheet_name = 'Perfect Execution'
    k_column_names = ['KPI test name', 'Category Name']
    h_column_names = ['KPI name', 'Category Name']
    kpi_template = self.q_kpi_templates[sheet_name][k_column_names].copy()
    hierarchy_filter = self.hierarchy_template[h_column_names].copy()
    # One-off lookup set replaces the original nested iterrows() scan
    # (O(kpi_rows * hierarchy_rows) -> O(kpi_rows + hierarchy_rows)).
    hierarchy_keys = set()
    for _, h_row in hierarchy_filter.iterrows():
        hierarchy_keys.add((h_row['KPI name'], h_row['Category Name']))
    lst_exceptions = []
    for kpi_row_num, kpi_row_data in kpi_template.iterrows():
        if (kpi_row_data['KPI test name'],
                kpi_row_data['Category Name']) in hierarchy_keys:
            continue
        result = False
        dict_exception = {'Sheet name': sheet_name,
                          'Category Name': kpi_row_data['Category Name'],
                          'KPI test name': kpi_row_data['KPI test name']}
        lst_exceptions.append(dict_exception)
        if is_debug:
            Log.error(dict_exception)
    self.create_excel_log(lst_exceptions, 'perfect_execution_check')
    Log.info("Perfect Execution Check - Completed")
    return result
def filter_scif_by_template_columns(kpi_line, type_base, value_base, relevant_scif, exclude=False):
    """Filter `relevant_scif` by every populated 'typeN'/'valueN' pair in `kpi_line`.

    Each type column names a scif column; its value twin holds a
    comma-separated list of allowed (or, with exclude=True, forbidden) values.
    :param kpi_line: template row (mapping of column name -> cell value).
    :param type_base: prefix of the type columns (e.g. 'type').
    :param value_base: prefix of the matching value columns (e.g. 'value').
    :param relevant_scif: scene-item-facts DataFrame to filter.
    :param exclude: when True, keep rows NOT matching the values.
    :return: the filtered DataFrame.
    """
    filters = {}
    # get denominator filters
    for den_column in [col for col in kpi_line.keys() if type_base in col]:  # get relevant den columns
        if kpi_line[den_column]:  # check to make sure this kpi has this denominator param
            filters[kpi_line[den_column]] = \
                [value.strip() for value in
                 kpi_line[den_column.replace(type_base, value_base)].split(',')]  # get associated values
    # Plain iteration instead of the Python-2-only dict.iterkeys(), which
    # raises AttributeError on Python 3; behavior is otherwise identical.
    for key in filters:
        if key not in relevant_scif.columns.tolist():
            Log.error('{} is not a valid parameter type'.format(key))
            continue
        if exclude:
            relevant_scif = relevant_scif[~(
                relevant_scif[key].isin(filters[key]))]
        else:
            relevant_scif = relevant_scif[relevant_scif[key].isin(
                filters[key])]
    return relevant_scif
def calculate_sequence(self, population, location=None, additional=None):
    """
    :param location: The locations parameters which the sequences are checked for.
    E.g: {'template_group': 'Primary Shelf'}.
    :param population: These are the parameters which the sequences are checked for.
    E.g: {'product_fk': [1, 2, 3]}, {'brand_name': ['brand1', 'brand2', 'brand3']}.
    :param additional: Additional attributes for the sequence calculation:
    1. direction (str): LEFT/RIGHT/UP/DOWN - the direction of the sequence.
    2. exclude_filter (dict): In order to exclude data from the population
    3. check_all_sequences (boolean): Should we calculate all of the sequences or
    should we stop when one passed
    4. strict_mode (boolean): Should it be the exact sequence or any permutation is valid.
    5. include_stacking (boolean): Should we consider stacked products or not
    6. allowed_products_filters (dict): These are the parameters that are allowed to
    corrupt the sequence without failing it.
    E.g: {ProductsConsts.PRODUCT_TYPE: [ProductTypeConsts.POS, ProductTypeConsts.EMPTY]}
    7. minimum_tags_per_entity (int): The number of straight facings for every entity
    in the sequence.
    8. adjacency_overlap_ratio (float): Minimal threshold the overlap between the
    products must exceed to be considered as adjacent.
    E.g: If the population includes 2 products [1, 2] and minimum_tags_per_filters = 3
    with default values: 111222-pass! 1122-fail! 1112-fail!
    :return: A DataFrame with the following fields: cluster (Graph), scene_fk and direction
    """
    try:
        self._sequence_calculation(population, location, additional)
    except Exception as err:
        Log.error("Sequence calculation failed due to the following error: {}".format(err))
    # The original returned from a 'finally' block, which silently swallowed
    # any exception the 'except' above did not catch (e.g. KeyboardInterrupt).
    return self._results_df
def main_calculation_red_score(self):
    """Calculate the red-score KPI set: the percent-weighted sum of all
    level-2 KPI scores for this store type, written to both the old (LEVEL1)
    and the new (common_v2) results tables. Errors are logged, not raised.
    """
    set_score = 0
    try:
        # The set row is the last row of the KPIS sheet.
        set_name = self.kpi_sheets[Const.KPIS].iloc[len(self.kpi_sheets[Const.KPIS]) - 1][
            Const.KPI_NAME]
        kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(set_name)
        set_identifier_res = self.common_v2.get_dictionary(kpi_fk=kpi_fk)
        if self.store_type in self.kpi_sheets[Const.KPIS].keys().tolist():
            for i in xrange(len(self.kpi_sheets[Const.KPIS]) - 1):
                params = self.kpi_sheets[Const.KPIS].iloc[i]
                percent = self.get_percent(params[self.store_type])
                if percent == 0:
                    continue
                kpi_score = self.main_calculation_lvl_2(identifier_parent=set_identifier_res,
                                                        params=params)
                set_score += kpi_score * percent
        else:
            Log.warning('The store-type "{}" is not recognized in the template'.format(self.store_type))
            return
        kpi_names = {Const.column_name1: set_name}
        set_fk = self.get_kpi_fk_by_kpi_path(self.common.LEVEL1, kpi_names)
        if set_fk:
            try:
                self.common.write_to_db_result(score=set_score, level=self.common.LEVEL1, fk=set_fk)
            except Exception as exception:
                # Format the exception itself: '.message' is missing on many
                # exception classes, so the old handler could crash while logging.
                Log.error('Exception in the set {} writing to DB: {}'.format(set_name, exception))
        self.common_v2.write_to_db_result(fk=kpi_fk, numerator_id=self.own_manuf_fk,
                                          denominator_id=self.store_id, score=set_score,
                                          result=set_score, identifier_result=set_identifier_res,
                                          should_enter=True)
    except Exception as exception:
        Log.error('Exception in the kpi-set calculating: {}'.format(exception))
def _get_final_compliance_scored_couples_part(self, final_compliance_tag):
    """
    Iterates the matched couples (starting in the scene bay with the minimum matches
    to POG bays) that have scores, chooses for every scene bay its pog bay and adds
    their compliance to the list.
    :param final_compliance_tag: DF.
    :return: updated final_compliance_tag (empty DF on failure).
    """
    try:
        for scene_bay in self.all_combinations_matches.index:
            line = self.all_combinations_matches.loc[scene_bay]
            # Skip scene bays that match no POG bay at all.
            if line[SUM] == 0 or True not in line.drop(SUM).values:
                continue
            scores = self.all_combinations_scores.loc[scene_bay].sort_values(ascending=False)
            score = scores.iloc[0]
            if score == 0:
                continue
            # Greedily take the best-scoring POG bay for this scene bay, then
            # remove the pair so neither bay is matched twice.
            pog_bay = scores.index[0]
            final_compliance_tag = final_compliance_tag.append(
                self.all_combinations_compliances[pog_bay][scene_bay], ignore_index=True)
            self._delete_bay_from_dfs(scene_bay, pog_bay)
        return final_compliance_tag
    except Exception as e:
        # str(e) instead of 'e.message': '.message' is missing on many
        # exception classes (and removed in Python 3).
        Log.error("First step in the compliance calculation has failed: " + str(e))
        return pd.DataFrame(columns=[Keys.MATCH_FK, Keys.COMPLIANCE_STATUS_FK])
def create_masking_and_matches(self):
    """Load scene maskings and build this scene's matches dataframe, enriched
    with product data, probe groups and POS masking data."""
    try:
        self.masking_data = transform_maskings_flat(
            *retrieve_maskings_flat(
                self.data_provider.project_name,
                self.data_provider.scenes_info['scene_fk'].to_list()))
    except Exception as err:
        # NOTE(review): the failure path returns an empty DataFrame while the
        # success path implicitly returns None — confirm callers ignore the
        # return value before relying on it.
        Log.error('Could not retrieve masking, error: {}'.format(err))
        return pd.DataFrame()
    self.matches_df = self.matches.merge(self.data_provider.all_products, on='product_fk')
    # Keep only the tags belonging to the scene being calculated.
    self.matches_df = self.matches_df[self.matches_df['scene_fk'] == self.scene_id]
    # smart_attribute_data = \
    #     self.adp.get_match_product_in_probe_state_values(self.matches_df['probe_match_fk'].unique().tolist())
    #
    # self.matches_df = pd.merge(self.matches_df, smart_attribute_data, on='probe_match_fk', how='left')
    # self.matches_df['match_product_in_probe_state_fk'].fillna(0, inplace=True)
    self.matches_df['pk'] = self.matches_df['scene_match_fk']
    self.probe_groups = self.adp.get_probe_groups()
    self.matches_df = self.matches_df.merge(self.probe_groups, on='probe_match_fk', how='left')
    self.pos_matches_df = self.matches_df[self.matches_df['category'] == 'POS']
    # NOTE(review): the 41/100 axis thresholds are magic numbers — confirm
    # their origin before changing them.
    self.pos_masking_data = self.adp.get_masking_data(self.matches_df, y_axis_threshold=41, x_axis_threshold=100)
def run_project_calculations(self):
    """Entry point: run all Mars US KPI calculations, timing the full pass.

    Failures are logged (with the reason) and swallowed so the timer always
    stops.
    """
    self.timer.start()
    try:
        MarsUsGenerator(self.data_provider, self.output).main_function()
    except Exception as e:
        # The failure reason used to be discarded entirely; include it so
        # failed sessions can actually be diagnosed from the logs.
        Log.error('Mars US kpis not calculated due to: {}'.format(e))
    self.timer.stop('KPIGenerator.run_project_calculations')
def safety_func(self, group, func, args):
    """Invoke `func(*args)` for the KPI `group`, logging success or failure
    instead of letting an exception propagate to the caller."""
    try:
        func(*args)
        Log.info('{} KPIs Calculated'.format(group))
    except Exception as err:
        Log.error('ERROR {} KPIs Failed to Calculate'.format(group))
        Log.error(err)
def main_calculation(self, *args, **kwargs):
    """
    This function calculates the KPI results.
    """
    try:
        if self.kpi_sheet.empty:
            Log.error("'kpi_list' sheet in setup file is empty.")
            return
        for kpi_type in [x.strip() for x in self.kpi_sheet[Consts.KPI_TYPE].unique()]:
            kpis = self.kpi_sheet[self.kpi_sheet[Consts.KPI_TYPE] == kpi_type]
            if kpi_type == Consts.FSOS:
                self.main_sos_calculations(kpis)
            elif kpi_type == Consts.ADJACENCY:
                self.main_adjacency_calculations(kpis)
            else:
                # Unknown KPI types are reported and skipped.
                Log.warning(
                    "KPI_TYPE:{kt} not found in setup=>kpi_list sheet.".
                    format(kt=kpi_type))
                continue
        self.common.commit_results_data()
        return
    except Exception as err:
        Log.error(
            "LionJP KPI calculation failed due to the following error: {}".
            format(err))
def run_project_calculations(self):
    """Dispatch the session to the calculation class matching its KPI set
    name, timing the full pass; unknown store types are logged."""
    self.timer.start()  # use log.time_message
    tool_box = INTEG4KPIToolBox(self.data_provider, self.output)
    kpi_set_name = tool_box.set_name
    # Dispatch table replaces the long if/elif chain; each entry maps a KPI
    # set name to its calculation class.
    calculations_by_set = {
        CANTEEN: INTEG4CanteenCalculations,
        PETROL: INTEG4PetrolCalculations,
        HORECA: INTEG4HoReCaCalculations,
        FT: INTEG4FTCalculations,
        HYPERMARKET: INTEG4HypermarketCalculations,
        SUPERMARKET: INTEG4SupermarketCalculations,
        SUPERETTE: INTEG4SuperetteCalculations,
        FAST_FOOD: INTEG4FastFoodCalculations,
    }
    calculation_class = calculations_by_set.get(kpi_set_name)
    if calculation_class is not None:
        calculation_class(self.data_provider, self.output).main_function()
    else:
        Log.error('Session store "{}" is not set to calculation'.format(
            tool_box.session_info.store_type))  # todo add all supported store types
    self.timer.stop('INTEG4Calculations.run_project_calculations')
def convert_base_size_values(value):
    """Parse the leading number out of a base-size string such as '330 ml'.

    :param value: raw base-size cell value (string, empty string, or None).
    :return: the leading token as float, or None when absent/unparseable.
    """
    try:
        new_value = float(value.split()[0]) if value not in [None, ''] else None
    except (IndexError, ValueError):
        # IndexError: whitespace-only string. ValueError: non-numeric leading
        # token — previously uncaught, so a value like 'abc' crashed the caller.
        Log.error('Could not convert base size value for {}'.format(value))
        new_value = None
    return new_value
def _get_parent_kpi_fk_by_kpi_type(self, kpi_type):
    """Resolve the parent KPI fk for `kpi_type` via the Const.HIERARCHY map.

    :return: the parent KPI fk, or 0 when no parent mapping exists.
    """
    try:
        return self.common.get_kpi_fk_by_kpi_type(
            Const.HIERARCHY[kpi_type])
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; the expected failure here is a missing
        # HIERARCHY key.
        Log.error('No parent found for {}'.format(kpi_type))
        return 0
def calculate_sos(self, atomic_params):
    """
    :param atomic_params: dict - atomic kpi line from the template
    :return: the percent of SOS (if it's binary - 100 if more than target, otherwise 0).
    """
    in_or_not = atomic_params[Const.IN_NOT_IN]
    filter_type = Converters.convert_type(atomic_params[Const.TYPE_FILTER])
    filter_value = atomic_params[Const.VALUE_FILTER]
    den_filters = self.get_default_filters(atomic_params[Const.ENTITY_TYPE_DENOMINATOR],
                                           atomic_params[Const.DENOMINATOR])
    num_filters = self.get_default_filters(atomic_params[Const.ENTITY_TYPE_NUMERATOR],
                                           atomic_params[Const.NUMERATOR])
    if in_or_not:
        # Apply the optional include/exclude filter to both sides of the ratio.
        num_filters = self.update_filters(num_filters, in_or_not, filter_type, filter_value)
        den_filters = self.update_filters(den_filters, in_or_not, filter_type, filter_value)
    atomic_score = self.tools.calculate_share_of_shelf(
        sos_filters=num_filters, **den_filters) * 100
    score_type = atomic_params[Const.SCORE]
    if score_type == Const.BINARY:
        try:
            target = float(atomic_params[Const.targets_line][self.store_type])
        except ValueError:
            Log.warning('The target for {} is bad in store {}'.format(
                atomic_params[Const.ATOMIC_NAME], self.store_type))
            return 0.0
        return 100 * (atomic_score >= target)
    if score_type != Const.NUMERIC:
        Log.error('The score is not numeric and not binary.')
    return atomic_score
def get_kpi_score_value_pk_by_value(self, value):
    """Look up the pk of `value` in the kpi_score_values table.

    :param value: score value to look up.
    :return: the matching pk, or None when the value is absent.
    """
    pk = None
    try:
        pk = self.kpi_score_values[self.kpi_score_values['value'] == value]['pk'].values[0]
    except (IndexError, KeyError):
        # Was a bare 'except:'; only an empty match (IndexError) or a missing
        # column (KeyError) is expected from this lookup.
        Log.error('Value {} does not exist'.format(value))
    return pk
def commit_results_data(self, result_entity=SESSION, scene_session_hierarchy=False):
    # def commit_results_data(self, by_scene=False, scene_session_hierarchy=False):
    """
    We need to "save place" (transaction) for all the queries, enter the first pk to refresh_pks
    and then create queries function, and commit all those queries (in the tree, only the necessary ones)
    """
    insert_queries = self.merge_insert_queries(
        self.kpi_results[self.QUERY].tolist())
    # Nothing to write: skip the whole transaction.
    if not insert_queries:
        return
    self.refresh_parents()
    # Delete queries are keyed by purpose; the session-specific entry may stay
    # empty and is then skipped in the execution loop below.
    delete_queries = {'delete_old_session_specific_tree_query': ''}
    if result_entity == self.SCENE:
        delete_queries[
            'delete_old_session_specific_tree_query'] = self.queries.get_delete_specific_tree_queries(
                self.scene_id, self.HIERARCHY_SESSION_TABLE)
        delete_queries[
            'delete_old_tree_query'] = self.queries.get_delete_tree_scene_queries(
                self.scene_id, self.HIERARCHY_SCENE_TABLE)
        delete_queries[
            'delete_query'] = self.queries.get_delete_scene_results_query_from_new_tables(
                self.scene_id)
    elif result_entity == self.SESSION:
        # Session-level deletes are temporarily disabled (see the kept
        # commented-out queries below).
        # delete_queries['delete_old_tree_query'] = self.queries.get_delete_tree_queries(self.session_id, self.HIERARCHY_SESSION_TABLE)
        # delete_queries['delete_old_tree_query_part2'] = self.queries.get_delete_tree_queries_parent_fk(self.session_id, self.HIERARCHY_SESSION_TABLE)
        'below temp removed'
        pass
        # delete_queries['delete_query'] = self.queries.get_delete_session_results_query_from_new_tables(self.session_id)
    else:
        Log.error('Cannot Calculate results per {}'.format(result_entity))
        return
    local_con = PSProjectConnector(self.project_name, DbUsers.CalculationEng)
    cur = local_con.db.cursor()
    # Run the collected delete queries; the placeholder entry is skipped
    # when it was never populated.
    for key, value in delete_queries.iteritems():
        if key == 'delete_old_session_specific_tree_query' and not value:
            continue
        cur.execute(value)
    # if delete_old_session_specific_tree_query:
    #     cur.execute(delete_old_session_specific_tree_query)
    # cur.execute(delete_old_tree_query)
    # cur.execute(delete_query)
    Log.info('Start committing results')
    cur.execute(insert_queries[0] + ";")
    # Fetch the first auto-generated pk of the bulk insert and propagate it
    # through the in-memory results before writing the hierarchy rows.
    cur.execute(self.queries.get_last_id())
    last_id = cur.fetchmany()
    self.refresh_pks(int(last_id[0][0]))
    table_dfs = self.kpi_results.set_index(self.HIERARCHY_TABLE).groupby(
        self.HIERARCHY_TABLE)
    # One merged insert per hierarchy table, executed in the same transaction.
    for table, df in table_dfs:
        insert_tree_queries = self.make_insert_queries_hierarchy(table, df)
        if insert_tree_queries:
            insert_tree_queries = self.merge_insert_queries(
                insert_tree_queries)[0] + ";"
            cur.execute(insert_tree_queries)
    local_con.db.commit()
def run(sd=None, ed=None, to=None, pivoted=None):
    """Build and emit the MARSRU KPI statistics report.

    :return: 0 on success, 1 when report generation failed (logged).
    """
    try:
        report = MARSRU2_SANDMARSRU_KPIsStatistics(sd, ed, to, pivoted)
        report.create_report()
    except Exception as e:
        Log.error(REPORT_NAME + ' has failed with {}'.format(str(e)))
        return 1
    return 0
def get_kpi_score_value_pk_by_value(self, value):
    """Look up the pk of `value` in the kpi_score_values table.

    NOTE(review): a leftover dev comment asked whether a missing value should
    stop the run instead of logging; the log-and-return-None contract is kept
    here — confirm with the KPI owners before making it raise.

    :param value: score value to look up.
    :return: the matching pk, or None when the value is absent.
    """
    pk = None
    try:
        pk = self.kpi_score_values[self.kpi_score_values['value'] == value]['pk'].values[0]
    except (IndexError, KeyError):
        # Was a bare 'except:', which also hid unrelated errors; only an
        # empty match or a missing column is expected from this lookup.
        Log.error('Value {} does not exist'.format(value))
    return pk
def get_custom_entity_value(self, value):
    """Return the pk of the custom entity named `value`, or None (logged)
    when no such entity exists."""
    matching_rows = self.custom_entity_table[self.custom_entity_table['name'] == value]
    try:
        return matching_rows['pk'].iloc[0]
    except IndexError:
        Log.error('No custom entity found for: {}'.format(value))
        return None
def calculate_percentage_from_numerator_denominator(numerator_result, denominator_result):
    """Return numerator/denominator as a percentage rounded to 2 decimals.

    Division failures (zero or None denominator, bad types) are logged and
    yield 0.
    """
    try:
        # float() forces true division: under Python 2 the original
        # int / int floor-divided, so e.g. 1/3 silently became 0%.
        ratio = float(numerator_result) / denominator_result
    except Exception as e:
        # Log the exception itself: 'e.message' is missing on many exception
        # classes (and removed in Python 3).
        Log.error(e)
        ratio = 0
    if not isinstance(ratio, (float, int)):
        ratio = 0
    return round(ratio * 100, 2)
def get_template_data(self):
    """Parse every sheet of the template workbook into a dict keyed by sheet
    name; an unreadable template file is logged and yields an empty dict."""
    parsed_sheets = {}
    try:
        for sheet_name in pd.ExcelFile(self.template_path).sheet_names:
            parsed_sheets[sheet_name] = parse_template(self.template_path, sheet_name,
                                                       lower_headers_row_index=0)
    except IOError as e:
        Log.error('Template {} does not exist. {}'.format(KPIS_TEMPLATE_NAME, repr(e)))
    return parsed_sheets