def create_position_graphs(self, scene_id=None):
    """
    Creates a directed facings graph (igraph) for every scene of the session
    (or only for the given scene) and stores it in self.position_graphs.
    Vertices are bottom-stacking-layer facings carrying ATTRIBUTES_TO_SAVE;
    edges point from each facing to its surrounding products, tagged with a
    'direction' attribute.
    :param scene_id: optional scene fk; when None, graphs are built for all scenes.
    """
    calc_start_time = datetime.datetime.utcnow()
    if scene_id:
        scenes = [scene_id]
    else:
        scenes = self.match_product_in_scene['scene_fk'].unique()
    for scene in scenes:
        # only the front (stacking_layer == 1) facings participate in the graph.
        # BUG FIX: take a copy - the original assigned a new column into a
        # slice of match_product_in_scene (SettingWithCopyWarning; write may
        # or may not propagate depending on pandas internals).
        matches = self.match_product_in_scene[
            (self.match_product_in_scene['scene_fk'] == scene) &
            (self.match_product_in_scene['stacking_layer'] == 1)].copy()
        matches['distance_from_end_of_shelf'] = matches['n_shelf_items'] - matches['facing_sequence_number']
        scene_graph = igraph.Graph(directed=True)
        edges = []
        for f in xrange(len(matches)):
            facing = matches.iloc[f]
            facing_name = str(facing[VERTEX_FK_FIELD])
            scene_graph.add_vertex(facing_name)
            # adding attributes to vertex
            vertex = scene_graph.vs.find(facing_name)
            for attribute in self.ATTRIBUTES_TO_SAVE:
                vertex[attribute] = facing[attribute]
            surrounding_products = self.get_surrounding_products(facing, matches)
            for direction in surrounding_products.keys():
                for pk in surrounding_products[direction]:
                    edges.append(dict(source=facing_name, target=str(pk), direction=direction))
        # edges are added only after every vertex exists
        for edge in edges:
            scene_graph.add_edge(**edge)
        self.position_graphs[scene] = scene_graph
    calc_finish_time = datetime.datetime.utcnow()
    Log.info('Creation of position graphs for scenes {} took {}'.format(
        scenes, calc_finish_time - calc_start_time))
def get_numerator_and_denominator(self, sos_filters=None, include_empty=False, **general_filters):
    """
    Computes the SOS numerator (facings matching sos_filters) and denominator
    (facings matching general_filters) out of the session's SCIF.
    :param sos_filters: dict of filters defining the numerator subset.
    :param include_empty: when equal to self.EXCLUDE_EMPTY, empty products are excluded.
    :param general_filters: filters defining the population (denominator).
    :return: (numerator, denominator) facings sums; (0, 0) on empty population or failure.
    """
    if include_empty == self.EXCLUDE_EMPTY and 'product_type' not in sos_filters.keys() + general_filters.keys():
        general_filters['product_type'] = (self.EMPTY, self.EXCLUDE_FILTER)
    pop_filter = self.toolbox.get_filter_condition(self.scif, **general_filters)
    subset_filter = self.toolbox.get_filter_condition(self.scif, **sos_filters)
    try:
        filtered_population = self.scif[pop_filter]
        if filtered_population.empty:
            return 0, 0
        df = filtered_population
        subset_df = filtered_population[subset_filter]
        sum_field = Fd.FACINGS
        try:
            Validation.is_empty_df(df)
            Validation.is_empty_df(subset_df)
            Validation.is_subset(df, subset_df)
            Validation.df_columns_equality(df, subset_df)
            Validation.validate_columns_exists(df, [sum_field])
            Validation.validate_columns_exists(subset_df, [sum_field])
            Validation.is_none(sum_field)
        except Exception as e:
            # BUG FIX: the failure message was built but never emitted
            Log.warning("Data verification failed: {}.".format(e))
        default_value = 0
        numerator = TBox.calculate_frame_column_sum(subset_df, sum_field, default_value)
        denominator = TBox.calculate_frame_column_sum(df, sum_field, default_value)
        return numerator, denominator
    except Exception as e:
        Log.error(e.message)
        # BUG FIX: was `return True`, which broke callers unpacking a 2-tuple
        return 0, 0
def distribution_group_level(self, lvl_2_result):
    """
    This function create df sql results, results of distribution on group level based assortment
    :param lvl_2_result: df of assortment results in group level
    :return: df of sql results for oos assortment group level
    """
    # work on a copy so the caller's frame is not mutated
    lvl_2_result = lvl_2_result.copy()
    live_kpi_dist = self.get_kpi_fk(self.LIVE_DIST)
    lvl_2_result.loc[:, 'kpi_level_2_fk'] = live_kpi_dist
    # -1 marks "no target"; normalize it to None
    lvl_2_result.loc[lvl_2_result['target'] == -1, 'target'] = None
    # use the target as the denominator only while it is still in effect
    # (group_target_date in the future); otherwise keep the original denominator.
    # NOTE(review): `row['target'] >= 0` relies on Python-2 None-comparison
    # semantics (None >= 0 is False); under Python 3 this would raise.
    lvl_2_result.loc[:, 'denominator_result'] = \
        lvl_2_result.apply(lambda row: row['target']
                           if (row['target'] >= 0 and row['group_target_date'] > self.current_date)
                           else row['denominator_result'], axis=1)
    lvl_2_result.loc[:, 'result'] = lvl_2_result.numerator_result / lvl_2_result.denominator_result
    self.manipulate_result_row(lvl_2_result)
    self._add_visit_summary_kpi_entities(lvl_2_result)
    # keep only the columns expected by the session-results writer
    lvl_2_result = lvl_2_result[self.LVL2_SESSION_RESULTS_COL]
    Log.info('Distribution group level is done ')
    return lvl_2_result
def main_function(self):
    """
    Entry point for the KPI calculation: runs the main calculation for every
    KPI set found in the static data and commits the results to the DB.
    """
    if self.tool_box.scif.empty:
        Log.warning('Scene item facts is empty for this session')
    set_names = self.tool_box.kpi_static_data['kpi_set_name'].unique().tolist()
    for kpi_set_name in set_names:
        self.tool_box.main_calculation(set_name=kpi_set_name)
    Log.info('Downloading templates took {}'.format(self.tool_box.download_time))
    self.tool_box.commit_results_data()
def _get_final_compliance_unscored_couples_part(self, final_compliance_tag):
    """
    Iterates the matched couples (starting in the scene bay with the minimum matches to POG bays) that don't have
    scores, chooses for every scene bay its pog bay and adds them compliance to the list.
    :param final_compliance_tag: DF.
    :return: updated final_compliance_tag.
    """
    try:
        for scene_bay in self.all_combinations_matches.index:
            line = self.all_combinations_matches.loc[scene_bay]
            # skip scene bays that have no matches at all, or no True match flag
            if line[SUM] == 0 or True not in line.drop(SUM).values:
                continue
            # greedy choice: the POG bay with the highest match value for this scene bay
            pog_bay = line.drop(SUM).sort_values(ascending=False).index[0]
            final_compliance_tag = final_compliance_tag.append(self.all_combinations_compliances[pog_bay][scene_bay],
                                                               ignore_index=True)
            # remove the chosen pair so neither bay can be matched twice
            self._delete_bay_from_dfs(scene_bay, pog_bay)
            # self.chosen_permutation.append((scene_bay, pog_bay))
        return final_compliance_tag
    except Exception as e:
        # NOTE(review): e.message is Python-2-only; would need str(e) under Python 3
        Log.error("Second step in the compliance calculation has failed: " + e.message)
        # fall back to an empty compliance frame with the expected columns
        return pd.DataFrame(columns=[Keys.MATCH_FK, Keys.COMPLIANCE_STATUS_FK])
def _calculate_distribution_and_oos(self, lvl3_data, policy, is_dist):
    """
    Calculates and saves the 3 assortment levels (store / category / SKU) for a policy.
    :param lvl3_data: assortment SKU-level results + category_fk column.
    :param policy: policy name ("Dairy" or "Tirat Zvi", in Hebrew) matching both scene types and products.
    :param is_dist: True for distribution results, False for OOS results.
    """
    if lvl3_data.empty:
        Log.warning(Consts.LOG_EMPTY_ASSORTMENT_DATA_PER_POLICY.format(policy.encode(HelperConsts.UTF8)))
        return
    store_kpi_fk, category_kpi_fk, sku_kpi_fk = self._get_assortment_kpi_fks(policy, is_distribution=is_dist)
    store_results = self._calculate_store_level_assortment(lvl3_data, is_distribution=is_dist)
    category_results = self._calculate_category_level_assortment(lvl3_data, is_distribution=is_dist)
    sku_results = self._calculate_sku_level_assortment(lvl3_data, is_distribution=is_dist)
    self._save_results_for_assortment(ProductsConsts.MANUFACTURER_FK, store_results, store_kpi_fk)
    self._save_results_for_assortment(ScifConsts.CATEGORY_FK, category_results, category_kpi_fk, store_kpi_fk)
    self._save_results_for_assortment(ProductsConsts.PRODUCT_FK, sku_results, sku_kpi_fk, category_kpi_fk)
    if is_dist:
        return
    # the OOS flow also feeds the OOS-reasons and NCC report KPIs
    sku_no_policy_kpi, policy_level_kpi, sku_level_kpi = self._get_oos_reason_and_ncc_kpis(policy)
    self._save_results_for_assortment(ProductsConsts.PRODUCT_FK, sku_results, sku_no_policy_kpi, None, True)
    self._save_results_for_assortment(ProductsConsts.PRODUCT_FK, sku_results, sku_level_kpi, None, True)
    self._save_results_for_assortment(ProductsConsts.MANUFACTURER_FK, store_results, policy_level_kpi, None, True)
def get_kpi_function(self, kpi_type):
    """
    Maps a KPI type to the method that calculates it.
    :param kpi_type: value from the "sheet" column in the main template sheet.
    :return: bound calculation method, or None when the type is unknown.
    """
    dispatch = {
        Const.AVAILABILITY: self.calculate_availability,
        Const.DOUBLE_AVAILABILITY: self.calculate_double_availability,
        Const.FACINGS: self.calculate_facings,
        Const.SHARE_OF_DISPLAYS: self.calculate_facings,
        Const.DISPLAY_PRESENCE: self.calculate_facings,
    }
    if kpi_type in dispatch:
        return dispatch[kpi_type]
    Log.warning("The value '{}' in column sheet in the template is not recognized".format(kpi_type))
    return None
def calculate_count_posm_per_scene(self, kpi_fk):
    """
    Counts POSM displays per (scene, display) pair and writes one DB result per pair.
    :param kpi_fk: fk of the count-POSM KPI to write under.
    :return: False when there is no display data for the session, otherwise None.
    """
    if self.match_display_in_scene.empty:
        Log.info("No POSM detected at scene level for session: {}".format(self.session_uid))
        return False
    grouped_data = self.match_display_in_scene.groupby(['scene_fk', 'display_fk'])
    for data_tup, scene_data_df in grouped_data:
        scene_fk, display_fk = data_tup
        posm_count = len(scene_data_df)
        template_fk = self.scene_info[self.scene_info['scene_fk'] == scene_fk].get('template_fk')
        if template_fk.empty:
            # scene was not completed and never reached scene_info
            Log.info("JRIJP: Scene ID {scene} is not complete and not found in scene Info.".format(
                scene=scene_fk))
            continue
        # BUG FIX: int(Series) raises TypeError when the scene matches more than
        # one scene_info row; take the first value explicitly.
        cur_template_fk = int(template_fk.iloc[0])
        self.common.write_to_db_result(fk=kpi_fk,
                                       numerator_id=display_fk,
                                       denominator_id=self.store_id,
                                       context_id=cur_template_fk,
                                       result=posm_count,
                                       score=scene_fk)
def save_latest_templates(self):
    """
    This function reads the latest templates from the Cloud, and saves them in a local path.
    """
    if not os.path.exists(self.local_templates_path):
        os.makedirs(self.local_templates_path)
    # the newest cloud directory is identified by its date-based name
    dir_name = self.get_latest_directory_date_from_cloud(
        self.cloud_templates_path.format(''), self.amz_conn)
    files = [
        f.key for f in self.amz_conn.bucket.list(
            self.cloud_templates_path.format(dir_name))
    ]
    for file_path in files:
        file_name = file_path.split('/')[-1]
        with open(os.path.join(self.local_templates_path, file_name), 'wb') as f:
            self.amz_conn.download_file(file_path, f)
    # stamp the cache with the refresh date so later runs know how fresh it is
    with open(os.path.join(self.local_templates_path, UPDATED_DATE_FILE), 'wb') as f:
        f.write(datetime.utcnow().strftime(UPDATED_DATE_FORMAT))
    Log.info('Latest version of templates has been saved to cache')
def get_default_filters(type_name, value_name):
    """
    Builds a filters dict from comma-separated type and value strings.
    :param type_name: string that contains list of types
    :param value_name: string that contains list of values in the same length
    :return: filter as dict.
    """
    if ',' in type_name:
        types = [part.strip() for part in type_name.split(',')]
        values = [part.strip() for part in value_name.split(',')]
        filters = {}
        if len(types) != len(values):
            Log.warning('there are {} types and {} values, should be the same amount'.format(
                len(types), len(values)))
        else:
            # pair each type with its value, converting the type name first
            for type_token, value_token in zip(types, values):
                filters[Converters.convert_type(type_token)] = value_token
    else:
        # single type: all values are attached to it as a list
        filters = {Converters.convert_type(type_name): [part.strip() for part in value_name.split(',')]}
    return filters
def distribution_sku_level(self, lvl_3_result):
    """
    This function receive df = lvl_3_result assortment with data regarding the assortment products
    This function turn the sku_assortment_results to be in a shape of db result.
    return distribution_db_results df
    """
    # NOTE(review): renames in place, so the caller's DataFrame is mutated
    lvl_3_result.rename(columns={
        'product_fk': 'numerator_id',
        'assortment_group_fk': 'denominator_id',
        'in_store': 'result',
        'kpi_fk_lvl3': 'kpi_level_2_fk'
    }, inplace=True)
    # translate the raw in-store flag to its kpi result value representation
    lvl_3_result.loc[:, 'result'] = lvl_3_result.apply(
        lambda row: self.kpi_result_value(row.result), axis=1)
    # at SKU level, numerator/denominator/score all mirror the result
    lvl_3_result = lvl_3_result.assign(
        numerator_result=lvl_3_result['result'],
        denominator_result=lvl_3_result['result'],
        score=lvl_3_result['result'])
    lvl_3_result = self.filter_df_by_col(lvl_3_result, self.SKU_LEVEL)
    Log.info('Distribution sku level is done ')
    return lvl_3_result
def calculate_energy_drinks(self, shelf_occupation_dict, product_list_field):
    """
    Calculates the energy-drinks category score by scanning every
    (shelf, bay) probe in the main-placement scenes.
    :param shelf_occupation_dict: dict holding the scene DF, shelf/bay counts and main-placement scenes.
    :param product_list_field: field passed through to the per-probe category calculation.
    :return: accumulated category score.
    """
    total_score = 0
    shelves_count = shelf_occupation_dict.get(NUM_OF_SHELVES)
    bays_count = shelf_occupation_dict.get(NUM_OF_BAYS)
    for shelf_number in range(1, shelves_count + 1):
        for bay_number in range(1, bays_count + 1):
            # probe for a specific shelf and bay, restricted to main-placement scenes
            curr_probe = get_curr_probe(
                shelf_occupation_dict.get(DF), shelf_number, bay_number,
                shelf_occupation_dict.get(MAIN_PLACEMENT_SCENES))
            if curr_probe.empty:
                continue
            total_score += self.calculate_category(curr_probe, product_list_field)
    Log.info("category score " + str(total_score))
    return total_score
def main_function(self):
    """
    Entry point: refreshes the templates, then calculates every KPI set and
    commits the results to the DB.
    """
    if self.tool_box.scif.empty:
        Log.warning('Scene item facts is empty for this session')
    # refresh templates, timing the operation via the log_runtime decorator
    log_runtime('Updating templates')(self.tool_box.tools.update_templates)()
    set_names = self.tool_box.kpi_static_data['kpi_set_name'].unique().tolist()
    self.tool_box.main_calculation(set_names=set_names)
    self.tool_box.commit_results_data()
def main_calculation(self):
    """
    Runs all KPI calculation steps and commits the results, each step isolated
    in its own try/except so one failure does not abort the rest.
    """
    # (callable, args) pairs, executed in order; de-duplicates five identical
    # copy-pasted try/except blocks from the original implementation.
    steps = [
        (self.calculate_planogram_compliance, (Const.PROMOTIONAL_TRAY,)),
        (self.calculate_planogram_compliance, (Const.TOBACCO_CENTER,)),
        (self.calculate_oos, ()),
        (self.calculate_sos, ()),
        (self.common.commit_results_data, ()),
    ]
    for step, args in steps:
        try:
            step(*args)
        except Exception as e:
            Log.error('{}'.format(e))
def add_sets_to_static(self):
    # Inserts into static.kpi_set any set names found in the template data
    # that do not already exist, and records their new pks in self.sets_added.
    set_names = self.data[self.SET_NAME].unique().tolist()
    existing_set_names = self.kpi_static_data['kpi_set_name'].unique().tolist()
    set_names_to_add = set(set_names).difference(existing_set_names)
    if set_names_to_add:
        cur = self.aws_conn.db.cursor()
        for set_name in set_names_to_add:
            # NOTE(review): set_name is interpolated straight into the SQL text.
            # This is safe only while template set names are trusted input;
            # consider a parameterized query otherwise.
            level1_query = \
                """
                INSERT INTO static.kpi_set (name, missing_kpi_score, enable, normalize_weight,
                                            expose_to_api, is_in_weekly_report)
                VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}');
                """.format(set_name.encode('utf-8'), 'Bad', 'Y', 'N', 'N', 'N')
            cur.execute(level1_query)
            # lastrowid of the insert is the new set's pk
            self.sets_added[set_name.encode('utf-8')] = cur.lastrowid
            self.kpi_counter['set'] += 1
        self.aws_conn.db.commit()
    set_names_ignored = set(set_names).difference(set_names_to_add)
    if set_names_ignored:
        Log.debug("KPI Sets '{}' already exist. Ignored".format(
            set_names_ignored))
def unpack_all_external_targets(self):
    """
    Unpacks the key_json/data_json fields of the external targets into flat
    columns and joins the KPI type for each target.
    :return: DataFrame of unpacked targets (empty frame when there are none).
    """
    targets_df = self.external_targets.drop_duplicates(subset=[
        'operation_type', 'kpi_level_2_fk', 'key_json', 'data_json'
    ])
    output_targets = pd.DataFrame(columns=targets_df.columns.values.tolist())
    if not targets_df.empty:
        keys_df = self.unpack_external_targets_json_fields_to_df(
            targets_df, field_name='key_json')
        data_df = self.unpack_external_targets_json_fields_to_df(
            targets_df, field_name='data_json')
        targets_df = targets_df.merge(keys_df, on='pk', how='left')
        targets_df = targets_df.merge(data_df, on='pk', how='left')
        # BUG FIX: copy before the in-place rename - renaming a slice of
        # kpi_static_data triggered SettingWithCopyWarning and risked mutating
        # the shared static-data frame.
        kpi_data = self.kpi_static_data[['pk', 'type']].copy()
        kpi_data.rename(columns={'pk': 'kpi_level_2_fk'}, inplace=True)
        output_targets = targets_df.merge(kpi_data, on='kpi_level_2_fk', how='left')
    if output_targets.empty:
        Log.warning('KPI External Targets Results are empty')
    return output_targets
def reconcile_hierarchy_entries(self):
    """
    Verifies that every (KPI name, Category Name) referenced by the hierarchy
    sheet exists in the corresponding KPI sheet; missing entries are logged to
    an Excel exception report.
    :return: True when everything reconciles, otherwise False.
    """
    Log.info("Reconcile Hierarchy Sheet KPIs with Other KPI Sheets - Started")
    result = True
    column_names = ['KPI name', 'Category Name']
    lst_exceptions = []
    for sheet_name, q_kpi_template in self.q_kpi_templates.items():
        kpi_template = q_kpi_template[column_names].copy()
        # PERF FIX: O(1) set membership instead of re-scanning the KPI sheet
        # per hierarchy row (was O(rows * kpi_rows) nested iterrows).
        # Assumes the compared cells are plain strings - TODO confirm NaN-free.
        kpi_pairs = set(zip(kpi_template['KPI name'], kpi_template['Category Name']))
        hierarchy_filter = self.hierarchy_template[
            self.hierarchy_template['KPI Type'] == sheet_name][column_names].copy()
        for row_num, row_data in hierarchy_filter.iterrows():
            if (row_data['KPI name'], row_data['Category Name']) in kpi_pairs:
                continue
            result = False
            dict_exception = {
                'Sheet name': sheet_name,
                'Category Name': row_data['Category Name'],
                'KPI name': row_data['KPI name'],
            }
            lst_exceptions.append(dict_exception)
            if is_debug:
                Log.error(dict_exception)
    self.create_excel_log(lst_exceptions, 'reconcile_hierarchy_entries')
    Log.info("Reconcile Hierarchy Sheet KPIs with Other KPI Sheets - Completed")
    return result
def safety_func(self, group, func, args):
    """
    Executes *func* with *args*, logging success or failure instead of letting
    exceptions propagate to the caller.
    :param group: label used in the log messages.
    :param func: callable to run.
    :param args: positional arguments for the callable.
    """
    try:
        func(*args)
        Log.info('{} KPIs Calculated'.format(group))
    except Exception as exc:
        Log.error('ERROR {} KPIs Failed to Calculate'.format(group))
        Log.error(exc)
def calculate_and_save_distribution_and_oos(self, valid_scif, assortment_product_fks,
                                            distribution_kpi_fk, oos_kpi_fk):
    """Function to calculate distribution and OOS percentage.
    Saves distribution and oos percentage as values.
    """
    Log.info("Calculate distribution and OOS for {}".format(self.project_name))
    scene_products = pd.Series(valid_scif["item_id"].unique())
    total_products_in_assortment = len(assortment_product_fks)
    count_of_assortment_prod_in_scene = assortment_product_fks.isin(scene_products).sum()
    oos_count = total_products_in_assortment - count_of_assortment_prod_in_scene
    # count of lion sku / all sku assortment count
    if not total_products_in_assortment:
        # no assortment defined -> nothing to measure (also avoids division by zero)
        Log.info("No assortments applicable for session {sess}.".format(sess=self.session_uid))
        return 0
    # float() forces true division under Python 2
    distribution_perc = count_of_assortment_prod_in_scene / float(total_products_in_assortment)
    oos_perc = 1 - distribution_perc
    self.common_v2.write_to_db_result(fk=distribution_kpi_fk,
                                      numerator_id=self.own_manufacturer_fk,
                                      numerator_result=count_of_assortment_prod_in_scene,
                                      denominator_id=self.store_id,
                                      denominator_result=total_products_in_assortment,
                                      context_id=self.store_id,
                                      result=distribution_perc,
                                      score=distribution_perc,
                                      identifier_result="{}_{}".format(DST_MAN_BY_STORE_PERC,
                                                                       self.store_id),
                                      should_enter=True
                                      )
    self.common_v2.write_to_db_result(fk=oos_kpi_fk,
                                      numerator_id=self.own_manufacturer_fk,
                                      numerator_result=oos_count,
                                      denominator_id=self.store_id,
                                      denominator_result=total_products_in_assortment,
                                      context_id=self.store_id,
                                      result=oos_perc,
                                      score=oos_perc,
                                      identifier_result="{}_{}".format(OOS_MAN_BY_STORE_PERC,
                                                                       self.store_id),
                                      should_enter=True
                                      )
def main_calculation(self, *args, **kwargs):
    """
    Calculates all KPI results listed in the setup file's kpi_list sheet and
    commits them; any failure is logged rather than raised.
    """
    try:
        if self.kpi_sheet.empty:
            Log.error("'kpi_list' sheet in setup file is empty.")
            return
        kpi_types = [value.strip() for value in self.kpi_sheet[Consts.KPI_TYPE].unique()]
        for kpi_type in kpi_types:
            kpis = self.kpi_sheet[self.kpi_sheet[Consts.KPI_TYPE] == kpi_type]
            if kpi_type == Consts.FSOS:
                self.main_sos_calculations(kpis)
            elif kpi_type == Consts.ADJACENCY:
                self.main_adjacency_calculations(kpis)
            else:
                Log.warning(
                    "KPI_TYPE:{kt} not found in setup=>kpi_list sheet.".
                    format(kt=kpi_type))
                continue
        self.common.commit_results_data()
        return
    except Exception as err:
        Log.error(
            "LionJP KPI calculation failed due to the following error: {}".
            format(err))
def check_perfect_execution(self):
    """
    Verifies every KPI in the 'Perfect Execution' sheet also appears in the
    hierarchy sheet; missing entries are logged to an Excel exception report.
    :return: True when every KPI was found, otherwise False.
    """
    Log.info("Perfect Execution Check - Started")
    result = True
    sheet_name = 'Perfect Execution'
    k_column_names = ['KPI test name', 'Category Name']
    h_column_names = ['KPI name', 'Category Name']
    kpi_template = self.q_kpi_templates[sheet_name][k_column_names].copy()
    hierarchy_filter = self.hierarchy_template[h_column_names].copy()
    # PERF FIX: O(1) set membership instead of re-scanning the hierarchy per
    # KPI row (was O(rows * hierarchy_rows) nested iterrows).
    # Assumes the compared cells are plain strings - TODO confirm NaN-free.
    hierarchy_pairs = set(zip(hierarchy_filter['KPI name'], hierarchy_filter['Category Name']))
    lst_exceptions = []
    for kpi_row_num, kpi_row_data in kpi_template.iterrows():
        if (kpi_row_data['KPI test name'], kpi_row_data['Category Name']) in hierarchy_pairs:
            continue
        result = False
        dict_exception = {
            'Sheet name': sheet_name,
            'Category Name': kpi_row_data['Category Name'],
            'KPI test name': kpi_row_data['KPI test name'],
        }
        lst_exceptions.append(dict_exception)
        if is_debug:
            Log.error(dict_exception)
    self.create_excel_log(lst_exceptions, 'perfect_execution_check')
    Log.info("Perfect Execution Check - Completed")
    return result
def main_function(self):
    """
    Classifies the session as successful / unsuccessful (TM) / unsuccessful
    (other) based on feedback and scene data, and saves the matching review.
    """
    comment = None
    if self.session_feedback in SESSION_FEEDBACK:
        status = self.UNSUCCESSFUL_TM
    elif self.scene_info.empty:
        status, comment = self.UNSUCCESSFUL_OTHER, 'No Scenes'
    elif all(self.scene_info['template_name'].str.contains(EXTERIOR, flags=re.IGNORECASE)):
        status, comment = self.UNSUCCESSFUL_OTHER, 'All scenes are exterior'
    elif self.session_feedback.upper() != OK_FEEDBACK:
        status, comment = self.UNSUCCESSFUL_OTHER, "Session feedback is not '{}'".format(OK_FEEDBACK)
    else:
        status = self.SUCCESSFUL

    if status == self.SUCCESSFUL:
        Log.info('Session is successful')
    elif status == self.UNSUCCESSFUL_TM:
        Log.info('Session is unsuccessful (TM)')
        self.save_review(exclude_status=1, resolution_code=2, action_code=6)
    elif status == self.UNSUCCESSFUL_OTHER:
        Log.info('Session is unsuccessful (Other) - {}'.format(comment))
        self.save_review(exclude_status=1, resolution_code=2, action_code=5)
def main_function(self):
    """
    Runs the Fast Food KPI set calculation, writes the set-level score to the
    DB and commits the results.
    """
    jg = INTEG4JsonGenerator('integ4')
    jg.create_json('Fastfood.xlsx', FAST_FOOD)
    calc_start_time = datetime.datetime.utcnow()
    Log.info('Calculation Started at {}'.format(calc_start_time))
    if self.tool_box.scif.empty:
        Log.warning('Scene item facts is empty for this session')
    else:
        kpi_data = jg.project_kpi_dict.get('kpi_data')[0]
        score = 0
        score += self.tool_box.check_availability(kpi_data)
        score += self.tool_box.facings_sos(kpi_data)
        score += self.tool_box.check_survey_answer(kpi_data)
        score += self.tool_box.check_number_of_facings_given_answer_to_survey(kpi_data)
        attributes_for_table1 = pd.DataFrame(
            [(FAST_FOOD, self.session_uid, self.store_id,
              self.visit_date.isoformat(), format(score, '.2f'), None)],
            columns=['kps_name', 'session_uid', 'store_fk', 'visit_date',
                     'score_1', 'kpi_set_fk'])
        self.tool_box.write_to_db_result(attributes_for_table1, 'level1')
    self.tool_box.commit_results_data()
    calc_finish_time = datetime.datetime.utcnow()
    Log.info('Calculation time took {}'.format(calc_finish_time - calc_start_time))
def upload_new_templates(self, immediate_change=False):
    """
    This function uploads the new template, along with the latest version of the rest of the templates,
    to a new directory (with name as the current date's) in the Cloud.
    :param immediate_change: when True, the new directory is named for today;
                             otherwise for tomorrow, so the change takes effect the next day.
    :return: True when an upload took place, None otherwise.
    """
    if not self.templates_to_upload:
        Log.info(self.log_suffix + 'No new templates are ready for upload')
        return
    if not immediate_change:
        next_day = (datetime.utcnow() + timedelta(1)).strftime("%y%m%d")
    else:
        next_day = datetime.utcnow().strftime("%y%m%d")
    templates_path_in_cloud = self.templates_path.format(next_day)
    latest_templates = self.get_latest_templates()
    for set_name in self.templates_to_upload:
        self.amz_conn.save_file(templates_path_in_cloud, set_name, self.templates_to_upload[set_name])
        os.remove(self.templates_to_upload[set_name])
        # drop sets we just uploaded so they are not re-copied below
        if set_name in latest_templates:
            latest_templates.pop(set_name)
    Log.info(self.log_suffix + 'New templates for sets {} were uploaded'.format(self.templates_to_upload.keys()))
    # copy the remaining (unchanged) templates into the new directory as well
    for template_name in latest_templates:
        temp_file_path = '{}/{}_temp'.format(os.getcwd(), template_name)
        # BUG FIX: use a context manager so the temp file handle is closed even
        # if download_file raises (was a bare open/close with no try/finally).
        with open(temp_file_path, 'wb') as f:
            self.amz_conn.download_file(latest_templates[template_name], f)
        self.amz_conn.save_file(templates_path_in_cloud, template_name, temp_file_path)
        os.remove(temp_file_path)
    Log.info(self.log_suffix + 'Existing templates for sets {} were aligned with the new ones'.format(latest_templates.keys()))
    return True
def get_relevant_categories_for_session(self):
    """
    Returns the categories relevant to this session: the intersection of the
    categories present in SCIF with those implied by the store's visit type
    (additional_attribute_2). Visit types: Visit LRB -> Juices & Beverages,
    Visit Snack -> Snacks, plain Visit -> all three.
    :return: list of relevant category names (possibly empty).
    """
    categories_in_scif = self.scif[Const.CATEGORY].unique().tolist()
    if None in categories_in_scif:
        categories_in_scif.remove(None)
    if not categories_in_scif:
        Log.warning("No categories at scene item facts!")
        return []
    store_type = self.store_info[Const.ADDITIONAL_ATTRIBUTE_2].values[0]
    if not store_type:
        Log.warning(
            "Invalid additional_attribute_2 for store id = {}".format(
                self.store_id))
        return []
    store_type_upper = store_type.upper()
    if Const.SNACKS.upper() in store_type_upper:
        visit_categories = [Const.SNACKS]
    elif Const.LRB.upper() in store_type_upper:
        visit_categories = [Const.JUICES, Const.BEVERAGES]
    else:
        visit_categories = [Const.SNACKS, Const.JUICES, Const.BEVERAGES]
    categories_for_session = list(set(visit_categories).intersection(set(categories_in_scif)))
    if not categories_for_session:
        Log.warning(
            "There aren't matching categories in scif for this store.")
    return categories_for_session
def main_function(self):
    """
    Runs the FIFA KPIs set calculation, writes the set-level score to the DB
    and commits the results.
    """
    jg = JsonGenerator('ccru')
    kpi_set_name = self.tool_box.set_name
    try:
        jg.create_json('FIFA KPIs.xlsx', kpi_set_name, sheetname=kpi_set_name)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        Log.error('Session store "{}" is not set to calculation'.format(
            self.tool_box.session_info.store_type))
    calc_start_time = datetime.datetime.utcnow()
    Log.info('Calculation Started at {}'.format(calc_start_time))
    # the same kpi-data record feeds all four checks; fetch it once
    kpi_data = jg.project_kpi_dict.get('kpi_data')[0]
    score = 0
    score += self.tool_box.check_weighted_average(kpi_data)
    score += self.tool_box.calculate_share_of_cch_collers(kpi_data)
    score += self.tool_box.check_survey_answer(kpi_data)
    score += self.tool_box.weighted_cooler_standard(kpi_data)
    attributes_for_table1 = pd.DataFrame(
        [(kpi_set_name, self.session_uid, self.store_id,
          self.visit_date.isoformat(), format(score, '.2f'), None)],
        columns=[
            'kps_name', 'session_uid', 'store_fk', 'visit_date',
            'score_1', 'kpi_set_fk'
        ])
    self.tool_box.write_to_db_result(attributes_for_table1, 'level1')
    self.tool_box.commit_results_data()
    calc_finish_time = datetime.datetime.utcnow()
    Log.info('Calculation time took {}'.format(calc_finish_time - calc_start_time))
def record_all_products(self):
    """
    Writes one DB result per SCIF product row under the all-products KPI,
    then logs how many rows were written.
    """
    kpi = self.kpi_static_data[
        (self.kpi_static_data[KPI_FAMILY] == PS_KPI_FAMILY)
        & (self.kpi_static_data[TYPE] == ALL_PROD_KPI)
        & (self.kpi_static_data['delete_time'].isnull())]
    if kpi.empty:
        Log.info("KPI Name:{} not found in DB".format(ALL_PROD_KPI))
        return
    Log.info("KPI Name:{} found in DB".format(ALL_PROD_KPI))
    written_count = 0
    # drop rows without facings; substitution products carry the aggregated facings
    self.scif.dropna(subset=['facings'], inplace=True)
    kpi_pk = int(kpi.iloc[0].pk)
    for _, row in self.scif.iterrows():
        self.common.write_to_db_result(
            fk=kpi_pk,
            numerator_id=int(row.product_fk),
            numerator_result=int(row.facings),
            denominator_id=int(row.scene_id),
            denominator_result=0,
            score=1,
            context_id=int(row.template_fk),
        )
        written_count += 1
    Log.info(
        "{proj} - For session: {sess}, {prod_count} products were written for kpi: {kpi_name}"
        .format(
            proj=self.project_name,
            sess=self.session_uid,
            prod_count=written_count,
            kpi_name=kpi.iloc[0].type,
        ))
def main_calculation_red_score(self):
    # Calculates the weighted "red score" for the whole KPI set: sums each
    # level-2 KPI score times its store-type weight, then writes the set
    # result through BOTH the legacy (common) and the new (common_v2) APIs.
    set_score = 0
    try:
        # the set name sits on the LAST row of the KPIS sheet
        set_name = self.kpi_sheets[Const.KPIS].iloc[len(self.kpi_sheets[Const.KPIS]) - 1][
            Const.KPI_NAME]
        kpi_fk = self.common_v2.get_kpi_fk_by_kpi_type(set_name)
        set_identifier_res = self.common_v2.get_dictionary(kpi_fk=kpi_fk)
        # the store type must be a column in the KPIS sheet to carry weights
        if self.store_type in self.kpi_sheets[Const.KPIS].keys().tolist():
            # all rows except the last one are individual KPIs
            for i in xrange(len(self.kpi_sheets[Const.KPIS]) - 1):
                params = self.kpi_sheets[Const.KPIS].iloc[i]
                percent = self.get_percent(params[self.store_type])
                if percent == 0:
                    # zero weight -> KPI does not participate for this store type
                    continue
                kpi_score = self.main_calculation_lvl_2(identifier_parent=set_identifier_res, params=params)
                set_score += kpi_score * percent
        else:
            Log.warning('The store-type "{}" is not recognized in the template'.format(self.store_type))
            return
        kpi_names = {Const.column_name1: set_name}
        set_fk = self.get_kpi_fk_by_kpi_path(self.common.LEVEL1, kpi_names)
        if set_fk:
            try:
                # legacy results table (common v1)
                self.common.write_to_db_result(score=set_score, level=self.common.LEVEL1, fk=set_fk)
            except Exception as exception:
                # NOTE(review): exception.message is Python-2-only
                Log.error('Exception in the set {} writing to DB: {}'.format(set_name, exception.message))
        # new results table (common v2)
        self.common_v2.write_to_db_result(fk=kpi_fk, numerator_id=self.own_manuf_fk,
                                          denominator_id=self.store_id, score=set_score,
                                          result=set_score, identifier_result=set_identifier_res,
                                          should_enter=True)
    except Exception as exception:
        Log.error('Exception in the kpi-set calculating: {}'.format(exception.message))
        pass
def _insert_into_display_surface(self, display_surface):
    """
    Inserts into probedata.display_surface the displays identified in each scene and its size.
    For each display it updates the new record pk in order to use as a foreign key when inserting
    into report.display_visit_summary.
    :param display_surface: DataFrame with scene_fk, display_fk and surface.
    :return: the same records as a DataFrame with a display_surface_fk column added.
    """
    Log.debug(self.log_prefix + ' Inserting to probedata.display_surface')
    display_surface_dict = display_surface.to_dict('records')
    if not display_surface_dict:
        # BUG FIX: an empty input used to raise IndexError below; nothing to insert
        return pd.DataFrame(display_surface_dict)
    # BUG FIX: build the VALUES clause with one join instead of repeatedly
    # re-formatting the whole query, which broke if any line contained '{' or '}'
    values_clause = ','.join(self._get_display_surface_query_line(display)
                             for display in display_surface_dict)
    query = '''insert into probedata.display_surface ( scene_fk , display_fk , surface ) values {};'''.format(values_clause)
    self.cur.execute(query)
    self.project_connector.db.commit()
    last_insert_id = self.cur.lastrowid
    row_count = self.cur.rowcount
    if row_count != len(display_surface_dict):
        msg = self.log_prefix + ' error: not all display were inserted.'
        Log.error(msg)
        raise Exception(msg)
    # MySQL's lastrowid is the pk of the FIRST inserted row; the rest are sequential
    for j in range(0, len(display_surface_dict)):
        display_surface_dict[j]['display_surface_fk'] = last_insert_id + j
    return pd.DataFrame(display_surface_dict)
def check_allowed_values(self):
    # Validates every hierarchy-template column that declares "allowed_values"
    # in the data mapping; violations are collected into lst_exceptions.
    # Returns True when all values are allowed, otherwise False.
    Log.info("Allowed Values Check - Started")
    result = True
    allowed_values = [
        column for column in self.data_mapping
        if column.get("allowed_values", False)
    ]
    lst_exceptions = []
    for allowed_value in allowed_values:
        name = allowed_value['xl_column_name']
        value = allowed_value['allowed_values']
        # NOTE(review): the column is lower-cased before the isin check, so the
        # configured allowed_values are presumably lowercase - confirm in mapping
        check = self.hierarchy_template[~self.hierarchy_template[name].str.
                                        lower().isin(value)]
        if not check.empty:
            for idx, row_data in check.iterrows():
                dict_exception = dict()
                dict_exception[
                    'exception'] = "Invalid input, allowed_values: {}".format(
                        value)
                # +3: template data starts at Excel row 3 (header offset)
                # - TODO confirm against the template layout
                dict_exception[
                    'message'] = "row_number: {}, column_name: {}, value: {}".format(
                        idx + 3, name, row_data[name])
                if is_debug:
                    Log.info(dict_exception)
                lst_exceptions.append(dict_exception)
                result = False
    # NOTE(review): the "Completed" line is only logged when the check passed
    if result:
        Log.info("Allowed Values Check - Completed")
    return result