Example #1
    def get_all_kpi_data(self):
        results_list_new_db = []
        scores_dict = {}
        if self.templates_info.empty:
            Log.info(
                "All KPI: session: {} doesnt have relevant external targets".
                format(self.session_uid))
            return [], {}
        main_children = self.templates_info[self.templates_info[
            self.template.KPI_GROUP] == self.RED_SCORE]
        for c in xrange(0, len(main_children)):
            main_child = main_children.iloc[c]
            main_child_kpi_fk = self.get_new_kpi_fk(
                main_child)  # kpi fk from new tables
            main_kpi_identifier = self.commonV2.get_dictionary(
                kpi_fk=main_child_kpi_fk)
            if self.validate_store_type(main_child):
                children = self.templates_info[self.templates_info[
                    self.template.KPI_GROUP].str.encode(
                        HelperConsts.UTF8) == main_child[
                            self.template.KPI_NAME].encode(HelperConsts.UTF8)]
                scores = []
                for i in xrange(len(children)):
                    child = children.iloc[i]
                    numerator, denominator, result_new_db, numerator_id = 0, 0, 0, None
                    kpi_weight = self.validate_kpi_run(child)
                    if kpi_weight is not False:
                        kpi_type = child[self.template.KPI_TYPE]
                        result = threshold = None
                        if kpi_type == self.SURVEY:
                            score, result, threshold, survey_answer_fk = self.check_survey(
                                child)
                            threshold = None
                            numerator, denominator, result_new_db = 1, 1, score * 100
                            numerator_id = survey_answer_fk
                        elif kpi_type == self.SHARE_OF_SHELF:
                            score, result, threshold, result_new_db, numerator, denominator = \
                                self.calculate_share_of_shelf(child)
                        elif kpi_type == self.NUMBER_OF_SCENES:
                            scene_types = self.get_scene_types(child)
                            result = self.general_tools.calculate_number_of_scenes(
                                **{SCENE_TYPE_FIELD: scene_types})
                            numerator, denominator, result_new_db = result, 1, result
                            score = 1 if result >= 1 else 0
                        else:
                            Log.warning(
                                "KPI of type '{}' is not supported via assortments"
                                .format(kpi_type))
                            continue
                        if score is not False:
                            if score is None:
                                points = 0
                            else:
                                points = float(
                                    child[self.template.WEIGHT]
                                ) if kpi_weight is True else kpi_weight
                                scores.append((points, score))
                            atomic_fk = self.get_atomic_fk(main_child, child)
                            self.write_to_db_result(
                                atomic_fk, (score, result, threshold, points),
                                level=self.LEVEL3)
                            identifier_parent = main_kpi_identifier
                            child_name = '{}-{}'.format(child[self.template.TRANSLATION], 'Atomic') \
                                if main_child[self.template.KPI_NAME] == child[self.template.KPI_NAME] else child[
                                self.template.TRANSLATION]
                            child.set_value(self.template.TRANSLATION,
                                            child_name)
                            child_kpi_fk = self.get_new_kpi_fk(
                                child)  # kpi fk from new tables
                            results_list_new_db.append(
                                self.get_new_kpi_dict(
                                    child_kpi_fk,
                                    result_new_db,
                                    score,
                                    numerator,
                                    denominator,
                                    weight=points,
                                    target=denominator,
                                    identifier_parent=identifier_parent,
                                    numerator_id=numerator_id))
                max_points = sum([score[0] for score in scores])
                actual_points = sum([score[0] * score[1] for score in scores])
                percentage = 0 if max_points == 0 else round(
                    (actual_points / float(max_points)) * 100, 2)

                kpi_name = main_child[self.template.TRANSLATION]
                kpi_fk = self.kpi_static_data[
                    self.kpi_static_data['kpi_name'].str.encode(
                        HelperConsts.UTF8) == kpi_name.encode(
                            HelperConsts.UTF8)]['kpi_fk'].values[0]
                self.write_to_db_result(
                    kpi_fk, (actual_points, max_points, percentage),
                    level=self.LEVEL2)
                scores_dict[kpi_name] = (max_points, actual_points)
                results_list_new_db.append(
                    self.get_new_kpi_dict(
                        main_child_kpi_fk,
                        percentage,
                        percentage,
                        actual_points,
                        max_points,
                        target=max_points,
                        weight=actual_points,
                        identifier_result=main_kpi_identifier,
                        identifier_parent=self.RED_SCORE))
        return results_list_new_db, scores_dict
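
The parent score above is a weighted roll-up of the atomic results collected in scores. A minimal, self-contained sketch of that aggregation (the function name and sample numbers are illustrative, not part of the original toolbox):

# Each atomic KPI contributes a (points, score) pair; the parent percentage is
# the share of achieved points out of the maximum achievable points.
def roll_up_scores(scores):
    """scores: list of (points, score) tuples, where score is 0/1 or a fraction."""
    max_points = sum(points for points, _ in scores)
    actual_points = sum(points * score for points, score in scores)
    percentage = 0 if max_points == 0 else round(actual_points / float(max_points) * 100, 2)
    return max_points, actual_points, percentage

# Two of three weighted atomics passed.
print(roll_up_scores([(2.0, 1), (1.5, 0), (1.5, 1)]))  # (5.0, 3.5, 70.0)
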
Example #2
    def handle_survey_atomics(self, atomic_id, atomic_name, parent_name):
        # bring the kpi rows from the survey sheet
        rows = self.survey_sheet.loc[self.survey_sheet[Const.TEMPLATE_KPI_ID]
                                     == atomic_id]
        temp = rows[Const.TEMPLATE_STORE_TYPE]
        row_store_filter = rows[(
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])) |
                                (temp == "")]

        if row_store_filter.empty:
            return 0
        else:
            # find the answer to the survey in session
            question_text = row_store_filter[
                Const.TEMPLATE_SURVEY_QUESTION_TEXT].values[0]
            question_answer_template = row_store_filter[
                Const.TEMPLATE_TARGET_ANSWER].values[0]
            score = row_store_filter[Const.TEMPLATE_SCORE].values[0]

            survey_result = self.survey.get_survey_answer(
                ('question_text', question_text))
            if not survey_result:
                return 0
            if '-' in question_answer_template:
                numbers = question_answer_template.split('-')
                try:
                    numeric_survey_result = int(survey_result)
                except ValueError:
                    Log.warning("Survey question - " + str(question_text) +
                                " - doesn't have a numeric result")
                    return 0
                if numeric_survey_result < int(
                        numbers[0]) or numeric_survey_result > int(numbers[1]):
                    return 0
                condition = row_store_filter[
                    Const.TEMPLATE_CONDITION].values[0]
                if condition != "":
                    second_question_text = row_store_filter[
                        Const.TEMPLATE_SECOND_SURVEY_QUESTION_TEXT].values[0]
                    second_survey_result = self.survey.get_survey_answer(
                        ('question_text', second_question_text))
                    if not second_survey_result:
                        second_survey_result = 0
                    second_numeric_survey_result = int(second_survey_result)
                    survey_result = 1 if numeric_survey_result >= second_numeric_survey_result else -1
                else:
                    survey_result = 1
            else:
                question_answer_template = question_answer_template.split(',')
                question_answer_template = [
                    item.strip() for item in question_answer_template
                ]
                if survey_result in question_answer_template:
                    survey_result = 1
                else:
                    survey_result = -1
        final_score = score if survey_result == 1 else 0

        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return 0
        self.common_v2.write_to_db_result(fk=atomic_pk,
                                          numerator_id=self.region_fk,
                                          numerator_result=0,
                                          denominator_result=0,
                                          denominator_id=self.store_id,
                                          result=survey_result,
                                          score=final_score,
                                          identifier_result=atomic_name,
                                          identifier_parent=parent_name,
                                          should_enter=True,
                                          parent_fk=3)
        return final_score
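
A self-contained sketch of the answer check used in handle_survey_atomics: the target answer is either a numeric range such as "1-5" or a comma-separated list of accepted answers (the second-question condition branch is left out here, and all names are illustrative):

def survey_passes(answer, target):
    # Numeric-range target, e.g. "1-5": the answer must parse and fall inside it.
    if '-' in target:
        low, high = target.split('-')
        try:
            value = int(answer)
        except ValueError:
            return False  # non-numeric answer to a numeric-range question
        return int(low) <= value <= int(high)
    # Otherwise the target is a comma-separated list of accepted answers.
    accepted = [item.strip() for item in target.split(',')]
    return answer in accepted

print(survey_passes('4', '1-5'))       # True
print(survey_passes('No', 'Yes, Si'))  # False
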
Example #3
    def calculate_display_compliance(self):
        kpi_display_presence = self.kpi_static_data[
            (self.kpi_static_data[KPI_TYPE_COL] == GSK_DISPLAY_PRESENCE)
            & (self.kpi_static_data['delete_time'].isnull())]
        kpi_display_sku_compliance = self.kpi_static_data[
            (self.kpi_static_data[KPI_TYPE_COL] == GSK_DISPLAY_SKU_COMPLIANCE)
            & (self.kpi_static_data['delete_time'].isnull())]
        kpi_display_price_compliance = self.kpi_static_data[
            (self.kpi_static_data[KPI_TYPE_COL] == GSK_DISPLAY_PRICE_COMPLIANCE)
            & (self.kpi_static_data['delete_time'].isnull())]
        kpi_display_bay_purity = self.kpi_static_data[
            (self.kpi_static_data[KPI_TYPE_COL] == GSK_DISPLAY_BAY_PURITY)
            & (self.kpi_static_data['delete_time'].isnull())]
        kpi_display_presence_sku = self.kpi_static_data[
            (self.kpi_static_data[KPI_TYPE_COL] == GSK_DISPLAY_PRESENCE_SKU)
            & (self.kpi_static_data['delete_time'].isnull())]
        secondary_display_targets = self.targets[
            self.targets['kpi_fk'] == kpi_display_presence['pk'].iloc[0]]
        # if no targets return
        if secondary_display_targets.empty:
            Log.warning(
                'There is no target policy for calculating secondary display compliance.'
            )
            return False
        else:
            current_scene_fk = self.scene_info.iloc[0].scene_fk
            display_per_sku_per_scene_calculated = False
            target_matched = False
            for idx, each_target in secondary_display_targets.iterrows():
                if target_matched:
                    Log.info(
                        'The session: {sess} - scene: {scene} has matched one target '
                        'and won\'t run for another.'.format(
                            sess=self.session_uid, scene=current_scene_fk))
                    continue
                # loop through each external target to fit the current store
                has_posm_recognized = False
                multi_posm_or_bay = False
                mandatory_sku_compliance = False
                optional_sku_compliance = False
                price_compliance = False
                is_scene_relevant = False
                scene_relevant_targets = pd.DataFrame()
                # check store relevance
                store_relevant_targets = each_target[STORE_IDENTIFIERS].dropna(
                )
                _bool_store_check_df = self.store_info[list(store_relevant_targets.keys())] \
                                       == store_relevant_targets.values
                is_store_relevant = _bool_store_check_df.all(axis=None)
                if is_store_relevant:
                    # check scene relevance
                    scene_relevant_targets = each_target[
                        SCENE_IDENTIFIERS].dropna()
                    _bool_scene_check_df = self.scene_info[list(scene_relevant_targets.keys())] \
                                           == scene_relevant_targets.values
                    is_scene_relevant = _bool_scene_check_df.all(axis=None)
                if is_store_relevant and is_scene_relevant and not target_matched:
                    # calculate display compliance for the matched external targets
                    target_matched = True
                    Log.info(
                        'The session: {sess} - scene: {scene} is relevant for calculating '
                        'secondary display compliance.'.format(
                            sess=self.session_uid, scene=current_scene_fk))
                    posm_relevant_targets = each_target[
                        POSM_IDENTIFIERS].dropna()
                    mandatory_eans = _sanitize_csv(
                        posm_relevant_targets[MANDATORY_EANS_KEY])
                    optional_posm_eans = []
                    if OPTIONAL_EAN_KEY in posm_relevant_targets:
                        optional_posm_eans = _sanitize_csv(
                            posm_relevant_targets[OPTIONAL_EAN_KEY])
                    # save detailed sku presence
                    posm_to_check = each_target[POSM_PK_KEY]
                    # FIND THE SCENES WHICH HAS THE POSM to check for multiposm or multibays
                    is_posm_absent = self.match_display_in_scene[
                        self.match_display_in_scene['display_fk'] ==
                        posm_to_check].empty
                    if is_posm_absent:
                        Log.info(
                            'The scene: {scene} is relevant but POSM {pos} is not present. '
                            'Save and start new scene.'.format(
                                scene=current_scene_fk, pos=posm_to_check))
                        # calculate display per sku -- POSM is absent
                        if not display_per_sku_per_scene_calculated:
                            display_per_sku_per_scene_calculated = self.save_display_presence_per_sku(
                                kpi=kpi_display_presence_sku,
                                numerator_result=0,  # 0 posm not recognized
                            )
                        if len(self.match_product_in_scene['bay_number'].unique()) > 1 or \
                                len(self.match_display_in_scene) > 1:
                            Log.info(
                                'The scene: {scene} is relevant and multi_bay_posm is True. '
                                'Purity per bay is calculated and going to next scene.'
                                .format(scene=current_scene_fk,
                                        pos=posm_to_check))
                            multi_posm_or_bay = True
                            self.save_purity_per_bay(kpi_display_bay_purity)
                        self.save_display_compliance_data([
                            {
                                'pk': kpi_display_presence.iloc[0].pk,
                                'result': int(has_posm_recognized),
                                'score': int(multi_posm_or_bay),
                                'numerator_id': posm_to_check,
                                'numerator_result': posm_to_check
                            },
                            {
                                'pk': kpi_display_sku_compliance.iloc[0].pk,
                                'result': float(mandatory_sku_compliance),
                                'score': float(optional_sku_compliance),
                                'denominator_id': posm_to_check,
                                'denominator_result': posm_to_check
                            },
                            {
                                'pk': kpi_display_price_compliance.iloc[0].pk,
                                'result': float(price_compliance),
                                'score': float(price_compliance),
                                'denominator_id': posm_to_check,
                                'denominator_result': posm_to_check
                            },
                        ])
                        continue
                    # this scene has the posm
                    Log.info(
                        'The scene: {scene} is relevant and POSM {pos} is present.'
                        .format(scene=current_scene_fk, pos=posm_to_check))
                    has_posm_recognized = True
                    # check if this scene has multi bays or multi posm
                    if len(self.match_product_in_scene['bay_number'].unique()) > 1 or \
                            len(self.match_display_in_scene) > 1:
                        # Its multi posm or bay -- only purity calc per bay is possible
                        Log.info(
                            'The scene: {scene} is relevant and POSM {pos} is present but multi_bay_posm is True. '
                            'Purity per bay is calculated and going to next scene.'
                            .format(scene=current_scene_fk, pos=posm_to_check))
                        multi_posm_or_bay = True
                        # calculate display per sku for multi posm/multi bay
                        if not display_per_sku_per_scene_calculated:
                            display_per_sku_per_scene_calculated = self.save_display_presence_per_sku(
                                kpi=kpi_display_presence_sku,
                                numerator_result=2,  # 2 multi posm
                            )
                        self.save_display_compliance_data([
                            {
                                'pk': kpi_display_presence.iloc[0].pk,
                                'result': int(has_posm_recognized),
                                'score': int(multi_posm_or_bay),
                                'numerator_id': posm_to_check,
                                'numerator_result': posm_to_check
                            },
                            {
                                'pk': kpi_display_sku_compliance.iloc[0].pk,
                                'result': float(mandatory_sku_compliance),
                                'score': float(optional_sku_compliance),
                                'denominator_id': posm_to_check,
                                'denominator_result': posm_to_check
                            },
                            {
                                'pk': kpi_display_price_compliance.iloc[0].pk,
                                'result': float(price_compliance),
                                'score': float(price_compliance),
                                'denominator_id': posm_to_check,
                                'denominator_result': posm_to_check
                            },
                        ])
                        self.save_purity_per_bay(kpi_display_bay_purity)
                        continue

                    Log.info(
                        'The scene: {scene} is relevant and POSM {pos} is present with only one bay.'
                        .format(scene=current_scene_fk, pos=posm_to_check))
                    # save purity per bay
                    self.save_purity_per_bay(kpi_display_bay_purity)
                    # calculate display per sku for ALL SUCCESS
                    if not display_per_sku_per_scene_calculated:
                        display_per_sku_per_scene_calculated = self.save_display_presence_per_sku(
                            kpi=kpi_display_presence_sku,
                            posm_to_check=posm_to_check,
                            numerator_result=1,  # 1--one one posm
                            mandatory_eans=mandatory_eans,
                            optional_posm_eans=optional_posm_eans)
                    # calculate compliance
                    mandatory_sku_compliance = self.get_ean_presence_rate(
                        mandatory_eans)
                    optional_sku_compliance = self.get_ean_presence_rate(
                        optional_posm_eans)
                    if mandatory_sku_compliance:
                        price_compliance = self.get_price_presence_rate(
                            mandatory_eans)
                    self.save_display_compliance_data([
                        {
                            'pk': kpi_display_presence.iloc[0].pk,
                            'result': int(has_posm_recognized),
                            'score': int(multi_posm_or_bay),
                            'numerator_id': posm_to_check,
                            'numerator_result': posm_to_check
                        },
                        {
                            'pk': kpi_display_sku_compliance.iloc[0].pk,
                            'result': float(mandatory_sku_compliance),
                            'score': float(optional_sku_compliance),
                            'denominator_id': posm_to_check,
                            'denominator_result': posm_to_check
                        },
                        {
                            'pk': kpi_display_price_compliance.iloc[0].pk,
                            'result': float(price_compliance),
                            'score': float(price_compliance),
                            'denominator_id': posm_to_check,
                            'denominator_result': posm_to_check
                        },
                    ])
                    continue
                else:
                    # the session/store is not part of the KPI targets
                    Log.info(
                        'The session: {sess} - scene: {scene}, the current kpi target [pk={t_pk}] '
                        'is not valid. Keep Looking...'.format(
                            sess=self.session_uid,
                            scene=current_scene_fk,
                            t_pk=each_target.external_target_fk))
                    if scene_relevant_targets.empty:
                        # Store failed
                        Log.info(
                            "Store info is {curr_data} but target is {store_data}"
                            .format(
                                curr_data=self.store_info.iloc[0][list(
                                    store_relevant_targets.keys())].to_json(),
                                store_data=store_relevant_targets.to_json()))
                    else:
                        Log.info(
                            "Scene info is {curr_data} but target is {store_data}"
                            .format(
                                curr_data=self.scene_info.iloc[0][list(
                                    scene_relevant_targets.keys())].to_json(),
                                store_data=scene_relevant_targets.to_json()))
                    continue
            else:
                if not display_per_sku_per_scene_calculated:
                    # check if its secondary display type
                    if not self.templates.loc[
                        (self.templates['template_group'] ==
                         'Secondary display')
                            & (~self.templates['template_name'].isin(
                                ['Clipstrip', 'Hangsell']))].empty:
                        Log.info(
                            "Secondary Display => Session: {sess} - scene {scene} didn't qualify "
                            "any external targets.".format(
                                sess=self.session_uid,
                                scene=self.scene_info.iloc[0].scene_fk,
                            ))
                        display_per_sku_per_scene_calculated = self.save_display_presence_per_sku(
                            kpi=kpi_display_presence_sku,
                            numerator_result=0)  # 0--posm not recognized
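
The store and scene relevance checks above only compare the identifier columns a target row actually fills in. A minimal pandas sketch of that pattern, with illustrative column names and data:

import pandas as pd

def target_matches(info_df, target_row, identifier_cols):
    # Keep only the identifiers the target specifies; an empty set means no restriction.
    relevant = target_row[identifier_cols].dropna()
    if relevant.empty:
        return True
    # Every specified identifier must equal the corresponding info column.
    return bool((info_df[list(relevant.keys())] == relevant.values).all(axis=None))

store_info = pd.DataFrame([{'store_type': 'Hypermarket', 'region': 'North'}])
target = pd.Series({'store_type': 'Hypermarket', 'region': None, 'posm_pk': 12})
print(target_matches(store_info, target, ['store_type', 'region']))  # True
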
Example #4
    def scene_main_calculation(self):
        if self.scene_tool_box.match_product_in_scene.empty:
            Log.warning('Match product in scene is empty for this scene')

        self.scene_tool_box.main_function()
        self.scene_tool_box.commit_results()
Example #5
    def calculate_facings_sos_kpi(self, kpi_df):
        kpi_name = kpi_df[Const.KPI_NAME].values[0]
        kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name)
        if kpi_fk is None:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        kpi_name)
            return
        entity_name = kpi_df[Const.NUMERATOR_ENTITY].values[0]
        entity_name_for_fk = Const.NAME_TO_FK[entity_name]

        # iterate all categories (if kpi_df length > 1)
        for i, row in kpi_df.iterrows():
            self.kpi_excluding = row[[
                Const.EXCLUDE_EMPTY, Const.EXCLUDE_HANGER,
                Const.EXCLUDE_IRRELEVANT, Const.EXCLUDE_POSM,
                Const.EXCLUDE_OTHER, Const.STACKING, Const.EXCLUDE_SKU,
                Const.EXCLUDE_STOCK, Const.EXCLUDE_OSD
            ]]
            df = self.filter_df(row)
            df = df[df['width_mm_advance'] != -1]
            if df.empty:
                continue

            if row[Const.PER_SCENE_TYPE] == Const.EACH:
                scene_types = row[Const.SCENE_TYPE].split(",")
                scene_types = [item.strip() for item in scene_types]
                scene_types = set(scene_types).intersection(
                    set(df['template_name']))
            else:
                scene_types = [""]

            # Iterate scene types
            for sc in scene_types:
                filters = {}
                if sc != "":
                    try:
                        context_id = self.templates[
                            self.templates['template_name'] ==
                            sc]['template_fk'].iloc[0]
                    except Exception as ex:
                        Log.warning("No scene type with the following name: " +
                                    str(sc) + ", warning: " + str(ex))
                        continue
                    filters['template_name'] = sc
                else:
                    context_id = 0

                category = row[Const.CATEGORY]
                if category != "":
                    denominator_id = self.all_products[
                        self.all_products['category'] ==
                        category]['category_fk'].iloc[0]
                    filters['category'] = category
                else:
                    denominator_id = self.store_id

                all_denominators = df[entity_name].drop_duplicates().tolist()
                if row[Const.NUMERATOR] != "":
                    all_denominators = [row[Const.NUMERATOR]]
                denominator = self.tools.get_filter_condition(df,
                                                              **filters).sum()
                if denominator == 0:
                    continue

                # iterate all entities
                for entity in all_denominators:
                    filters[entity_name] = entity
                    numerator = self.tools.get_filter_condition(
                        df, **filters).sum()
                    del filters[entity_name]
                    if numerator == 0:
                        continue
                    result = float(numerator) / float(denominator)
                    numerator_id = df[df[entity_name] ==
                                      entity][entity_name_for_fk].values[0]
                    self.common.write_to_db_result(
                        fk=kpi_fk,
                        numerator_id=numerator_id,
                        denominator_id=denominator_id,
                        context_id=context_id,
                        numerator_result=numerator,
                        denominator_result=denominator,
                        result=result,
                        score=result)
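
The numerator/denominator loop above reduces to a share-of-shelf ratio per entity. A simplified, facings-based pandas sketch without the project filter helpers (the frame and its columns are illustrative):

import pandas as pd

scif = pd.DataFrame({
    'brand_name': ['BrandA', 'BrandA', 'BrandB', 'BrandC'],
    'facings': [10, 5, 20, 15],
})

# Denominator: all facings in scope; numerator: facings of each entity (brand).
denominator = scif['facings'].sum()
for brand, numerator in scif.groupby('brand_name')['facings'].sum().items():
    if numerator == 0:
        continue
    result = float(numerator) / float(denominator)
    print(brand, numerator, denominator, round(result, 4))
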
Example #6
    def run_project_calculations(self):
        self.timer.start()  # use log.time_message

        project_name = self.data_provider.project_name

        if self.data_provider.visit_date.isoformat() < '2019-01-01':
            # kpi_file_name = '2018/MARS KPIs.xlsx'
            # kpi_range_targets_sheet_names = [2217, 2220, 2390, 2391, 2317, 2254]
            # kpi_channels = None
            Log.error(
                "Error: The visit date is out of date: {}. The sessions cannot be calculated."
                "".format(self.data_provider.visit_date.isoformat()))
            return

        elif self.data_provider.visit_date.isoformat() < '2019-12-29':
            # kpi_file_name = '2019/MARS KPIs.xlsx'
            # kpi_range_targets_sheet_names = [4317, 4650, 4254]  # , 4388, 4389
            # kpi_channels = [kpi_file_name, 'channels', 'channels']
            Log.error(
                "Error: The visit is out of date threshold [2019-12-29]: {} . The session cannot be calculated."
                "".format(self.data_provider.visit_date.isoformat()))
            return

        elif self.data_provider.visit_date.isoformat() < '2020-06-14':
            kpi_file_name = '2020/MARS KPIs.xlsx'
            kpi_range_targets_sheet_names = [4317, 4650, 4254]
            kpi_channels = [kpi_file_name, 'channels', 'channels']

        else:
            kpi_file_name = '2020_06_14/MARS KPIs.xlsx'
            kpi_range_targets_sheet_names = [4317, 4650, 4254]
            kpi_channels = [kpi_file_name, 'channels', 'channels']

        # [file name, key, sheet name]
        kpi_template = \
            [kpi_file_name, 'kpi_data', 'KPI']
        kpi_golden_shelves = \
            [kpi_file_name, 'golden_shelves', 'golden_shelves']
        kpi_answers_translation = \
            [kpi_file_name, 'survey_answers_translation', 'survey_answers_translation']
        kpi_sku_lists = \
            [kpi_file_name, 'sku_lists', 'sku_lists']
        kpi_range_targets = \
            [kpi_file_name, 'range_targets', kpi_range_targets_sheet_names]

        jg = MARSRU_PRODJSONGenerator(project_name)

        kpis_sheet_name = None
        if kpi_channels:
            jg.create_template_json(kpi_channels[0], kpi_channels[1],
                                    kpi_channels[2])
            store_types = jg.project_kpi_dict.get('channels')
            for store_type in store_types:
                if store_type['Store type'] == self.data_provider.store_type:
                    kpis_sheet_name = store_type['KPIs_Channel']
                    break
        else:
            kpis_sheet_name = 'KPI'

        if not kpis_sheet_name:
            Log.warning(
                "Error: Store channel is not defined for Store ID [{}] with Store type [{}]"
                "".format(
                    self.data_provider.store_fk,
                    (self.data_provider.store_type if
                     self.data_provider.store_type else '').encode('utf-8')))
            return

        jg.create_template_json(kpi_template[0], kpi_template[1],
                                kpis_sheet_name)
        jg.create_template_json(kpi_golden_shelves[0], kpi_golden_shelves[1],
                                kpi_golden_shelves[2])
        jg.create_template_json(kpi_answers_translation[0],
                                kpi_answers_translation[1],
                                kpi_answers_translation[2])
        jg.create_template_json(kpi_sku_lists[0], kpi_sku_lists[1],
                                kpi_sku_lists[2])
        jg.create_template_json(kpi_range_targets[0], kpi_range_targets[1],
                                kpi_range_targets[2])
        kpi_templates = jg.project_kpi_dict

        tool_box = MARSRU_PRODKPIToolBox(kpi_templates, self.data_provider,
                                         self.output)

        # Todo - Uncomment the OSA before deploying!!!
        tool_box.handle_update_custom_scif()
        tool_box.calculate_osa()
        tool_box.check_availability(kpi_templates.get('kpi_data'))
        tool_box.check_survey_answer(kpi_templates.get('kpi_data'))
        tool_box.check_number_of_scenes(kpi_templates.get('kpi_data'))
        tool_box.custom_average_shelves(kpi_templates.get('kpi_data'))
        tool_box.custom_number_bays(kpi_templates.get('kpi_data'))
        # tool_box.check_price(kpi_templates.get('kpi_data'))
        # tool_box.brand_blocked_in_rectangle(kpi_templates.get('kpi_data'))
        tool_box.custom_marsru_1(kpi_templates.get('kpi_data'))
        tool_box.check_layout_size(kpi_templates.get('kpi_data'))
        tool_box.golden_shelves(kpi_templates.get('kpi_data'))
        tool_box.facings_by_brand(kpi_templates.get('kpi_data'))
        # tool_box.multiple_brands_blocked_in_rectangle(kpi_templates.get('kpi_data'))
        # tool_box.negative_neighbors(kpi_templates.get('kpi_data'))
        tool_box.get_total_linear(kpi_templates.get('kpi_data'))
        tool_box.get_placed_near(kpi_templates.get('kpi_data'))
        tool_box.check_availability_on_golden_shelves(
            kpi_templates.get('kpi_data'))
        tool_box.check_for_specific_display(kpi_templates.get('kpi_data'))

        # the order is important - source KPIs must be calculated first (above)
        tool_box.check_range_kpis(kpi_templates.get('kpi_data'))
        tool_box.check_kpi_results(kpi_templates.get('kpi_data'))
        tool_box.check_block_and_neighbors_by_shelf(
            kpi_templates.get('kpi_data'))

        kpi_sets = tool_box.results_and_scores.keys()
        kpi_sets.remove('*')

        # Saving to old tables
        for kpi_set in kpi_sets:
            tool_box.write_to_db_result_level1(kpi_set[0])
        tool_box.commit_results_data()

        # Saving to new tables
        for kpi_set in kpi_sets:
            tool_box.store_to_new_kpi_tables_level0(kpi_set[1])
        tool_box.common.commit_results_data()

        self.timer.stop(
            'MARSRU2_SANDProjectCalculations.run_project_calculations')
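
The channels sheet used above maps a store type to the KPI sheet that should be calculated for it. A small sketch of that lookup with made-up data:

def resolve_kpi_sheet(store_types, store_type):
    # Return the KPI sheet configured for this store type, or None if unmapped.
    for row in store_types:
        if row['Store type'] == store_type:
            return row['KPIs_Channel']
    return None

channels = [
    {'Store type': 'Hypermarket', 'KPIs_Channel': 'KPI_HYPER'},
    {'Store type': 'Convenience', 'KPIs_Channel': 'KPI_CONV'},
]
print(resolve_kpi_sheet(channels, 'Convenience'))  # KPI_CONV
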
Example #7
    def calculate_shelf_position(self):
        kpi_name = Const.KPI_POSITION
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        parent_fk = self.get_parent_fk(kpi_name)

        parent_kpi_name = Const.KPIS_HIERACHY[kpi_name]
        grand_parent_fk = self.get_parent_fk(parent_kpi_name)

        grand_parent_kpi_name = Const.KPIS_HIERACHY[parent_kpi_name]
        great_grand_parent_fk = self.get_parent_fk(grand_parent_kpi_name)

        numerator_facings = 0
        denominator_facings = 0
        scene_ratios = []
        final_ratio = 0  # default, so the early empty-scif branch can still return a value

        if self.relevant_scif.empty:
            score = 0
            self.write_to_db(
                fk=kpi_fk,
                numerator_id=self.manufacturer_pk,
                numerator_result=numerator_facings,
                denominator_id=self.store_id,
                denominator_result=denominator_facings,
                score=score,
            )
        else:
            scene_ids = list(self.relevant_scif.scene_id.unique())
            for scene_id in scene_ids:
                scene_name = self.relevant_scif['template_name'][
                    self.relevant_scif['scene_id'] == scene_id].iloc[0]
                scene_name_fk = self.relevant_scif['template_fk'][
                    self.relevant_scif['scene_id'] == scene_id].iloc[0]
                frentes_target_df = self.main_template[
                    self.main_template['NOMBRE DE TAREA'] == scene_name]
                ean_codes = frentes_target_df['PRODUCT EAN'].unique().tolist()
                ean_codes_count = len(ean_codes)
                passing_ean = 0
                for ean_code in ean_codes:
                    filtered_frentes_df = frentes_target_df[
                        frentes_target_df['PRODUCT EAN'] == ean_code]

                    score = 100

                    if score != 0:
                        try:

                            bay = 0
                            shelf = 0

                            product_fk = self.all_products['product_fk'][
                                self.all_products['product_ean_code'] == str(
                                    ean_code)]

                            if product_fk.empty:
                                product_fk = -1
                            else:
                                product_fk = self.all_products['product_fk'][
                                    self.all_products['product_ean_code'] ==
                                    str(ean_code)].iloc[0]

                            for index, row in filtered_frentes_df.iterrows():
                                target_bay = row['PUERTA']
                                target_shelf = row['PARRILLA']
                                if str(ean_code) in self.relevant_scif[
                                        'product_ean_code'].tolist():
                                    relevant_matches = self.matches[
                                        self.matches['scene_fk'] == scene_id]

                                    filtered_matches_df = relevant_matches[
                                        (relevant_matches['bay_number'] ==
                                         target_bay)
                                        & (relevant_matches['shelf_number'] ==
                                           target_shelf) &
                                        (relevant_matches['product_fk']
                                         == product_fk)]

                                    if filtered_matches_df.empty:
                                        filtered_product_only_df = \
                                        relevant_matches[['product_fk', 'bay_number', 'shelf_number']][
                                            relevant_matches['product_fk'] == product_fk].drop_duplicates()

                                        if filtered_product_only_df.empty:

                                            score = 0
                                            self.write_to_db(
                                                fk=kpi_fk,
                                                numerator_id=bay,
                                                numerator_result=target_bay,
                                                denominator_id=shelf,
                                                denominator_result=target_shelf,
                                                score=score,
                                                context_id=product_fk,
                                                result=ean_code,
                                                identifier_parent=parent_fk,
                                                identifier_result=kpi_fk,
                                                should_enter=True)
                                        else:
                                            for p_index, p_row in filtered_product_only_df.iterrows(
                                            ):
                                                score = 0
                                                bay = p_row['bay_number']
                                                shelf = p_row['shelf_number']

                                                self.write_to_db(
                                                    fk=kpi_fk,
                                                    numerator_id=bay,
                                                    numerator_result=target_bay,
                                                    denominator_id=shelf,
                                                    denominator_result=
                                                    target_shelf,
                                                    score=score,
                                                    context_id=product_fk,
                                                    result=ean_code,
                                                    identifier_parent=parent_fk,
                                                    identifier_result=kpi_fk,
                                                    should_enter=True)
                                    else:
                                        score = 100
                                        bay = target_bay
                                        shelf = target_shelf

                                        self.write_to_db(
                                            fk=kpi_fk,
                                            numerator_id=bay,
                                            numerator_result=target_bay,
                                            denominator_id=shelf,
                                            denominator_result=target_shelf,
                                            score=score,
                                            context_id=product_fk,
                                            result=ean_code,
                                            identifier_parent=parent_fk,
                                            identifier_result=kpi_fk,
                                            should_enter=True)

                                else:
                                    score = 0

                                    self.write_to_db(
                                        fk=kpi_fk,
                                        numerator_id=bay,
                                        numerator_result=target_bay,
                                        denominator_id=shelf,
                                        denominator_result=target_shelf,
                                        score=score,
                                        context_id=product_fk,
                                        result=ean_code,
                                        identifier_parent=parent_fk,
                                        identifier_result=kpi_fk,
                                        should_enter=True)

                        except Exception:
                            Log.warning("Failed in acomodo")

                if ean_codes_count != 0:
                    ratio = round(passing_ean / float(ean_codes_count),
                                  2) * 100
                else:
                    ratio = 0
                scene_ratios.append(ratio)
                self.write_to_db(fk=parent_fk,
                                 numerator_id=scene_name_fk,
                                 numerator_result=passing_ean,
                                 denominator_id=scene_id,
                                 denominator_result=ean_codes_count,
                                 result=ratio,
                                 score=ratio,
                                 identifier_parent=grand_parent_fk,
                                 identifier_result=parent_fk,
                                 should_enter=True)

            kpi_weight = Const.KPI_WEIGHTS[grand_parent_kpi_name]
            final_ratio = self.calculate_average_ratio(scene_ratios)
            score = round(((final_ratio * .01) * kpi_weight), 2)
            self.write_to_db(fk=grand_parent_fk,
                             numerator_id=0,
                             denominator_id=self.store_id,
                             result=final_ratio,
                             score=score,
                             identifier_parent=great_grand_parent_fk,
                             identifier_result=grand_parent_fk,
                             should_enter=True)
        return final_ratio
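
The core position check in calculate_shelf_position filters the scene's match data by product, bay and shelf. A minimal pandas sketch with illustrative values:

import pandas as pd

matches = pd.DataFrame({
    'product_fk': [101, 101, 102],
    'bay_number': [1, 2, 1],
    'shelf_number': [3, 1, 2],
})

def in_target_position(matches, product_fk, target_bay, target_shelf):
    # The product passes when at least one match sits at the target bay/shelf.
    hits = matches[(matches['product_fk'] == product_fk)
                   & (matches['bay_number'] == target_bay)
                   & (matches['shelf_number'] == target_shelf)]
    return not hits.empty

print(in_target_position(matches, 101, 2, 1))  # True
print(in_target_position(matches, 102, 2, 2))  # False
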
Example #8
    def calculate_block_adjacency(self, kpi_line, relevant_scif):
        kpi_result = 0
        for scene in relevant_scif.scene_fk.unique():
            scene_filter = {'scene_fk': scene}
            location_filter = {'scene_id': scene}
            mpis = self.filter_df(self.mpis, scene_filter)
            mpis = mpis[mpis['stacking_layer'] >= 1]
            # allowed = {'product_type': ['Other', 'Empty']}
            if kpi_line[Const.TESTED_PARAM] == kpi_line[Const.ANCHOR_PARAM]:
                filters = {
                    kpi_line[Const.ANCHOR_PARAM]: [
                        kpi_line[Const.ANCHOR_VALUE],
                        kpi_line[Const.TESTED_VALUE]
                    ]
                }
            elif kpi_line[Const.TESTED_PARAM] == '':
                filters = {
                    kpi_line[Const.ANCHOR_PARAM]: kpi_line[Const.ANCHOR_VALUE]
                }
            else:
                filters = {
                    kpi_line[Const.ANCHOR_PARAM]: kpi_line[Const.ANCHOR_VALUE],
                    kpi_line[Const.TESTED_PARAM]: kpi_line[Const.TESTED_VALUE]
                }
            items = set(self.filter_df(mpis, filters)['scene_match_fk'].values)
            additional = {'minimum_facing_for_block': 2}
            # allowed_items = set(self.filter_df(mpis, allowed)['scene_match_fk'].values)
            if not items:
                break

            block_result = self.block.network_x_block_together(
                filters, location=location_filter, additional=additional)

            passed_blocks = block_result[block_result['is_block'] ==
                                         True].cluster.tolist()

            if passed_blocks and kpi_line[Const.LIST_ATTRIBUTE]:
                match_fk_list = set(match for cluster in passed_blocks
                                    for node in cluster.nodes()
                                    for match in cluster.node[node]
                                    ['group_attributes']['match_fk_list'])

                all_graph = AdjacencyGraph(
                    mpis,
                    None,
                    self.products,
                    product_attributes=['rect_x', 'rect_y'],
                    name=None,
                    adjacency_overlap_ratio=.4)
                # associate all nodes in the master graph to their associated match_fks
                match_to_node = {
                    int(node['match_fk']): i
                    for i, node in all_graph.base_adjacency_graph.nodes(
                        data=True)
                }
                # create a dict of all match_fks to their corresponding nodes
                node_to_match = {
                    val: key
                    for key, val in match_to_node.items()
                }
                edge_matches = set(
                    sum([[
                        node_to_match[i]
                        for i in all_graph.base_adjacency_graph[
                            match_to_node[match]].keys()
                    ] for match in match_fk_list], []))
                adjacent_matches = edge_matches - match_fk_list
                adj_mpis = mpis[(mpis['scene_match_fk'].isin(adjacent_matches))
                                & (~mpis['product_type'].isin(
                                    ['Empty', 'Irrelevant', 'Other', 'POS']))]

                for value in adj_mpis[kpi_line[
                        Const.LIST_ATTRIBUTE]].unique().tolist():
                    if kpi_line[Const.LIST_ATTRIBUTE] == 'brand_name':
                        numerator_fk = adj_mpis[adj_mpis['brand_name'] ==
                                                value].brand_fk.values[0]
                    else:
                        if value is not None:
                            try:
                                numerator_fk = \
                                    self.custom_entity_data[self.custom_entity_data['name']
                                                            == value].pk.values[0]
                            except IndexError:
                                Log.warning(
                                    'Custom entity "{}" does not exist'.format(
                                        value))
                                continue
                        else:
                            continue

                    result_dict = self.build_dictionary_for_db_insert(
                        kpi_name=kpi_line[Const.KPI_NAME],
                        numerator_id=numerator_fk,
                        numerator_result=1,
                        result=1,
                        denominator_id=scene,
                        denominator_result=1)
                    self.common.write_to_db_result(**result_dict)
                return
            # return if this is a list_attribute KPI with no passing blocks
            elif kpi_line[Const.LIST_ATTRIBUTE]:
                return
            if passed_blocks:  # exit loop if this isn't a list_attribute KPI, but has passing blocks
                kpi_result = 1
                break
        # handle cases where there are no relevant products, so we miss the other check above
        if kpi_line[Const.LIST_ATTRIBUTE]:
            return
        template_fk = relevant_scif['template_fk'].values[0]
        result_dict = self.build_dictionary_for_db_insert(
            kpi_name=kpi_line[Const.KPI_NAME],
            numerator_id=999,
            numerator_result=kpi_result,
            result=kpi_result,
            denominator_id=template_fk,
            denominator_result=1)
        self.common.write_to_db_result(**result_dict)
        return
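
The adjacency step above collects the graph neighbours of every match in the passing block and keeps only those outside the block itself. A hand-built networkx sketch of that set arithmetic (the real code derives the graph from product coordinates via AdjacencyGraph):

import networkx as nx

# Nodes stand for scene_match_fks; edges mean "physically adjacent on the shelf".
graph = nx.Graph()
graph.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5)])

block_matches = {2, 3}
edge_matches = set()
for match in block_matches:
    edge_matches.update(graph[match])  # neighbours of each block member
adjacent_matches = edge_matches - block_matches
print(adjacent_matches)  # {1, 4}
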
Example #9
    def calculate_red_score(self):
        kpi_source_json = self.json.create_kpi_source('KPI_Source.xlsx', self.pos_kpi_set_name)
        self.kpi_source_json = kpi_source_json
        kpi_source = {}
        for row in kpi_source_json:
            # Log.info('SOURCE: {}'.format(row.get(SOURCE)))
            kpi_source[row.pop(SOURCE)] = row
        if kpi_source:
            pass

        # elif self.test_store == "Y":
        #     Log.warning('Warning. Session cannot be calculated: '
        #                 'Store is a test store. '
        #                 'Store ID {1}.'
        #                 .format(self.pos_kpi_set_name, self.store_id))
        #     return

        else:
            Log.warning('Warning. Session cannot be calculated. '
                        'POS KPI Set name in store attribute is invalid - {0}. '
                        'Store ID {1}.'
                        .format(self.pos_kpi_set_name, self.store_id))
            return

        mr_targets = {}
        for kpi_set, params in kpi_source.items():
            if params.get(MR_TARGET) is not None:
                mr_targets.update({kpi_set: params[MR_TARGET]})
                mr_targets.update({params[SET]: params[MR_TARGET]})
        self.tool_box.mr_targets = mr_targets

        kpi_sets_types_to_calculate = [POS, SPIRITS]
        for kpi_set_type in kpi_sets_types_to_calculate:
            if kpi_source[kpi_set_type][SET]:
                Log.debug('KPI calculation stage: {}'.format(kpi_source[kpi_set_type][SET]))
                self.tool_box.set_kpi_set(kpi_source[kpi_set_type][SET], kpi_set_type)
                self.json.project_kpi_dict['kpi_data'] = []
                self.json.create_kpi_data_json('kpi_data', kpi_source[kpi_set_type][FILE],
                                               sheet_name=kpi_source[kpi_set_type][SHEET],
                                               pos_kpi_set_name=self.pos_kpi_set_name)
                self.calculate_red_score_kpi_set(self.json.project_kpi_dict.get('kpi_data')[0],
                                                 kpi_source[kpi_set_type][SET],
                                                 mr_targets.get(kpi_set_type))

        if kpi_source[GAPS][SET]:
            Log.debug('KPI calculation stage: {}'.format(kpi_source[GAPS][SET]))
            self.tool_box.set_kpi_set(kpi_source[GAPS][SET], GAPS)
            self.json.create_kpi_data_json(
                'gaps', kpi_source[GAPS][FILE], sheet_name=kpi_source[GAPS][SHEET])
            self.tool_box.calculate_gaps_old(self.json.project_kpi_dict.get('gaps'))
            self.tool_box.calculate_gaps_new(self.json.project_kpi_dict.get('gaps'),
                                             kpi_source[GAPS][SET])

        if kpi_source[BENCHMARK][SET]:
            Log.debug('KPI calculation stage: {}'.format(kpi_source[BENCHMARK][SET]))
            self.tool_box.set_kpi_set(kpi_source[BENCHMARK][SET], BENCHMARK)
            self.json.create_kpi_data_json(
                'benchmark', kpi_source[BENCHMARK][FILE], sheet_name=kpi_source[BENCHMARK][SHEET])
            self.tool_box.calculate_benchmark(self.json.project_kpi_dict.get('benchmark'),
                                              kpi_source[BENCHMARK][SET])

        if kpi_source[CONTRACT][FILE]:
            Log.debug('Importing Contract Execution template')
            self.json.create_kpi_data_json(
                'contract', kpi_source[CONTRACT][FILE], sheet_name=kpi_source[CONTRACT][SHEET])

        if kpi_source[TOPSKU][SET]:
            Log.debug('KPI calculation stage: {}'.format(kpi_source[TOPSKU][SET]))
            include_to_contract = True if self.json.project_kpi_dict.get('contract') else False
            self.tool_box.set_kpi_set(kpi_source[TOPSKU][SET], TOPSKU)
            self.tool_box.calculate_top_sku(include_to_contract,
                                            kpi_source[TOPSKU][SET])

        if self.json.project_kpi_dict.get('contract'):
            if kpi_source[EQUIPMENT][SET]:
                equipment_target_data = self.tool_box.get_equipment_target_data()
                if equipment_target_data:
                    if kpi_source[TARGET][SET]:
                        Log.debug('KPI calculation stage: {}'.format(kpi_source[TARGET][SET]))
                        self.tool_box.set_kpi_set(kpi_source[TARGET][SET], TARGET)
                        self.json.project_kpi_dict['kpi_data'] = []
                        self.json.create_kpi_data_json('kpi_data', kpi_source[TARGET][FILE],
                                                       sheet_name=kpi_source[TARGET][SHEET])
                        self.calculate_red_score_kpi_set(self.json.project_kpi_dict.get('kpi_data')[0],
                                                         kpi_source[TARGET][SET])

                        Log.debug('KPI calculation stage: {}'.format(kpi_source[EQUIPMENT][SET]))
                        self.tool_box.set_kpi_set(kpi_source[EQUIPMENT][SET], EQUIPMENT)
                        self.tool_box.calculate_equipment_execution(self.json.project_kpi_dict.get('contract'),
                                                                    kpi_source[EQUIPMENT][SET],
                                                                    kpi_source[KPI_CONVERSION][FILE],
                                                                    equipment_target_data)

            if kpi_source[CONTRACT][SET]:
                Log.debug('KPI calculation stage: {}'.format(kpi_source[CONTRACT][SET]))
                self.tool_box.set_kpi_set(kpi_source[CONTRACT][SET], CONTRACT)
                self.tool_box.calculate_contract_execution(self.json.project_kpi_dict.get('contract'),
                                                           kpi_source[CONTRACT][SET])
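
calculate_red_score first turns the KPI_Source rows into a lookup keyed by source name, with the remaining columns kept as that source's parameters. A small sketch of that reshaping with illustrative keys and values:

SOURCE = 'Source'

kpi_source_json = [
    {'Source': 'POS', 'Set': 'Red Score POS', 'File': 'KPIs.xlsx', 'Sheet': 'KPI'},
    {'Source': 'GAPS', 'Set': 'Gaps', 'File': 'KPIs.xlsx', 'Sheet': 'gaps'},
]

# Pop the source name out of each row and use it as the dictionary key.
kpi_source = {}
for row in kpi_source_json:
    kpi_source[row.pop(SOURCE)] = row

print(kpi_source['POS']['Set'])  # Red Score POS
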
Example #10
    def scene_score(self):
        if self.scene_tool_box.match_product_in_scene.empty:
            Log.warning('Match product in scene is empty for this scene')
        else:
            self.scene_tool_box.main_function()
            self.common.commit_results_data()
Example #11
    def calculate_adjacency(self, kpi_line, relevant_scif):
        kpi_result = 0
        for scene in relevant_scif.scene_fk.unique():
            scene_filter = {'scene_fk': scene}
            mpis = self.filter_df(self.mpis, scene_filter)
            mpis = mpis[mpis['stacking_layer'] >= 1]
            # allowed = {'product_type': ['Other', 'Empty']}
            filters = {
                kpi_line[Const.ANCHOR_PARAM]: kpi_line[Const.ANCHOR_VALUE]
            }
            # determine if there are any matching products in the scene
            items = set(self.filter_df(mpis, filters)['scene_match_fk'].values)
            # allowed_items = set(self.filter_df(mpis, allowed)['scene_match_fk'].values)
            # items.update(allowed_items)
            if not items:
                break

            all_graph = AdjacencyGraph(mpis,
                                       None,
                                       self.products,
                                       product_attributes=['rect_x', 'rect_y'],
                                       name=None,
                                       adjacency_overlap_ratio=.4)

            match_to_node = {
                int(node['match_fk']): i
                for i, node in all_graph.base_adjacency_graph.nodes(data=True)
            }
            node_to_match = {val: key for key, val in match_to_node.items()}
            edge_matches = set(
                sum([[
                    node_to_match[i] for i in all_graph.base_adjacency_graph[
                        match_to_node[item]].keys()
                ] for item in items], []))
            adjacent_items = edge_matches - items
            adj_mpis = mpis[(mpis['scene_match_fk'].isin(adjacent_items))
                            & (~mpis['product_type'].isin(
                                ['Empty', 'Irrelevant', 'Other', 'POS']))]

            if kpi_line[Const.LIST_ATTRIBUTE]:
                for value in adj_mpis[kpi_line[
                        Const.LIST_ATTRIBUTE]].unique().tolist():
                    if kpi_line[Const.LIST_ATTRIBUTE] == 'brand_name':
                        numerator_fk = adj_mpis[adj_mpis['brand_name'] ==
                                                value].brand_fk.values[0]
                    else:
                        if value is not None:
                            try:
                                numerator_fk = \
                                    self.custom_entity_data[self.custom_entity_data['name']
                                                            == value].pk.values[0]
                            except IndexError:
                                Log.warning(
                                    'Custom entity "{}" does not exist'.format(
                                        value))
                                continue
                        else:
                            continue

                    result_dict = self.build_dictionary_for_db_insert(
                        kpi_name=kpi_line[Const.KPI_NAME],
                        numerator_id=numerator_fk,
                        numerator_result=1,
                        result=1,
                        denominator_id=scene,
                        denominator_result=1)
                    self.common.write_to_db_result(**result_dict)
                return
            else:
                if kpi_line[Const.TESTED_VALUE] in adj_mpis[kpi_line[
                        Const.TESTED_PARAM]].unique().tolist():
                    kpi_result = 1
                    break

        # handle cases where there are no relevant products, so we miss the other check above
        if kpi_line[Const.LIST_ATTRIBUTE]:
            return
        template_fk = relevant_scif['template_fk'].values[0]
        result_dict = self.build_dictionary_for_db_insert(
            kpi_name=kpi_line[Const.KPI_NAME],
            numerator_id=999,
            numerator_result=kpi_result,
            result=kpi_result,
            denominator_id=template_fk,
            denominator_result=1)
        self.common.write_to_db_result(**result_dict)
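# AdjacencyGraph above is a project-specific helper. As a rough illustration of the same
# neighbour lookup (collect the items adjacent to a set of anchor items), here is a small
# sketch using networkx; plain integers stand in for scene_match_fk values.
import networkx as nx


def adjacent_to_anchors(edges, anchor_items):
    """edges: iterable of (match_fk, match_fk) adjacency pairs; returns the anchors' neighbours."""
    graph = nx.Graph()
    graph.add_edges_from(edges)
    neighbours = set()
    for item in anchor_items:
        if item in graph:
            neighbours.update(graph[item])  # graph[node] is a view of that node's neighbours
    return neighbours - set(anchor_items)


# Items 1-2-3 placed in a row: adjacent_to_anchors([(1, 2), (2, 3)], {2}) == {1, 3}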
    def _get_own_manufacturer(self):
        """ Gets the own_manufacturer fk according to the assortment product list """
        if self._data_provider.own_manufacturer.empty:
            Log.warning("This project doesn't have an own manufacturer")
        return int(self._data_provider.own_manufacturer['param_value'].iloc[0])
Example #13
    def calculate_nestle_score(self, set_name):
        """
        This function calculates the KPI results.
        """
        set_scores = {}
        main_children = self.templates_data[self.templates_data[
            self.templates_class.KPI_GROUP] == set_name]
        for c in xrange(len(main_children)):
            main_child = main_children.iloc[c]
            children = self.templates_data[self.templates_data[
                self.templates_class.KPI_GROUP] == main_child[
                    self.templates_class.KPI_NAME]]
            scores = []
            for i in xrange(len(children)):
                child = children.iloc[i]
                kpi_type = child[self.templates_class.KPI_TYPE]
                if self.store_type not in child[
                        self.templates_class.STORE_TYPE]:
                    continue
                if not set(child[self.templates_class.SCENE_TYPE].split(
                        self.templates_class.SEPARATOR)) & set(
                            self.scif['template_name'].unique().tolist()):
                    continue
                if kpi_type == self.BLOCK_TOGETHER:
                    score = self.calculate_block_together_sets(child)
                elif kpi_type == self.FACING_COUNT:
                    score = self.calculate_facing_count(child)
                elif kpi_type == self.AVAILABILITY:
                    score = self.calculate_availability(child)
                elif kpi_type == self.FACING_SOS:
                    score = self.calculate_facing_sos(child)
                elif kpi_type == self.SHELF_POSITION:
                    score = self.calculate_shelf_position(child)
                else:
                    Log.warning(
                        "KPI of type '{}' is not supported".format(kpi_type))
                    continue
                if score is not None:
                    child_score_weight = child[self.templates_class.WEIGHT]
                    atomic_fk = self.get_atomic_fk(child)
                    self.write_to_db_result(atomic_fk,
                                            score,
                                            level=self.LEVEL3)
                    if isinstance(score, tuple):
                        score = score[0]
                    weighted_score = score * float(child_score_weight)
                    scores.append(weighted_score)

            if not scores:
                scores = [0]
            score_type = main_child[self.templates_class.SCORE]
            score_weight = float(main_child[self.templates_class.WEIGHT])
            if score_type == self.templates_class.SUM_OF_SCORES:
                score = sum(scores)
            else:
                score = 0
            kpi_name = main_child[self.templates_class.KPI_NAME]
            kpi_fk = self.kpi_static_data[self.kpi_static_data['kpi_name']
                                          == kpi_name]['kpi_fk'].values[0]
            # self.write_to_db_result(kpi_fk, score, level=self.LEVEL2)
            set_scores[kpi_fk] = (score_weight, score)
        # total_weight = sum([score[0] for score in set_scores.values()])
        for kpi_fk in set_scores.keys():
            self.write_to_db_result(kpi_fk,
                                    set_scores[kpi_fk][1],
                                    level=self.LEVEL2)
        # set_score = sum([score[0] * score[1] for score in set_scores.values()]) / total_weight
        set_score = round(
            sum([score[0] * score[1] for score in set_scores.values()]), 2)
        set_fk = self.kpi_static_data[self.kpi_static_data['kpi_set_name'] ==
                                      set_name]['kpi_set_fk'].values[0]
        self.write_to_db_result(set_fk, set_score, level=self.LEVEL1)
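# A minimal sketch (illustrative names, not the project's API) of the aggregation performed
# above: each KPI group contributes weight * score, and the set score is the rounded sum.
def aggregate_set_score(set_scores):
    """set_scores: {kpi_fk: (weight, score)}, as built by calculate_nestle_score."""
    return round(sum(weight * score for weight, score in set_scores.values()), 2)


# aggregate_set_score({1: (0.4, 75.0), 2: (0.6, 50.0)}) == 60.0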
    def calculate_display_facing(self, kpi_df):

        kpi_name = kpi_df[Const.KPI_NAME].values[0]
        kpi_fk = self.common.get_kpi_fk_by_kpi_name(kpi_name,
                                                    get_numerator=False)
        if kpi_fk is None:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        kpi_name)
            return
        entity_name = kpi_df[Const.NUMERATOR_ENTITY].values[0]
        entity_name_for_fk = Const.NAME_TO_FK[entity_name]
        results_dict = {}

        # Iterate all rows of the KPI; each is calculated differently and
        # added to an aggregated dictionary at the end of the loops
        for i, row in kpi_df.iterrows():
            scene_size = row[Const.SCENE_SIZE]
            self.kpi_excluding = row[[
                Const.EXCLUDE_EMPTY, Const.EXCLUDE_HANGER,
                Const.EXCLUDE_IRRELEVANT, Const.EXCLUDE_POSM,
                Const.EXCLUDE_OTHER, Const.STACKING, Const.EXCLUDE_SKU,
                Const.EXCLUDE_STOCK, Const.EXCLUDE_OSD
            ]]
            # filter df to the specific template row
            df = self.filter_df(row)
            df = df[df['width_mm_advance'] != -1]
            if df.empty:
                continue
            number_of_scenes = len(df['scene_fk'].unique())

            if row[Const.PER_SCENE_TYPE] == Const.EACH:
                scene_types = row[Const.SCENE_TYPE].split(",")
                scene_types = [item.strip() for item in scene_types]
                scene_types = set(scene_types).intersection(
                    set(df['template_name']))
            else:
                scene_types = [""]

            # Iterate scene types
            for sc in scene_types:
                filters = {}
                if sc != "":
                    try:
                        context_id = self.templates[
                            self.templates['template_name'] ==
                            sc]['template_fk'].iloc[0]
                    except Exception as ex:
                        Log.warning("No scene type with the following name: " +
                                    str(sc) + ", warning: " + str(ex))
                        continue
                    filters['template_name'] = sc
                    if scene_size != "":
                        scenes = df['scene_fk'].unique()
                    else:
                        scenes = [""]
                else:
                    context_id = 0
                    # if scene_size != "":
                    #     scene_size *= number_of_scenes
                    scenes = [""]

                # Iterate scenes, in order to get exact ratios between each scene with a fixed 4000 mm size
                for scene in scenes:
                    # If iterating scenes, replacing all current filters with {scene_fk: scene}
                    if scene != "":
                        filters = {'scene_fk': scene}
                    category = row[Const.CATEGORY]
                    if category != "":
                        if category == Const.EACH:
                            categories = set(self.df['category'])
                        else:
                            categories = [category]
                    else:
                        categories = [""]

                    # Iterate categories
                    for category in categories:
                        if category != "":
                            denominator_id = self.all_products[
                                self.all_products['category'] ==
                                category]['category_fk'].iloc[0]
                            filters['category'] = category
                            all_numerators = self.df[
                                self.df['category'] ==
                                category][entity_name].drop_duplicates(
                                ).values.tolist()
                        else:
                            denominator_id = self.store_id
                            all_numerators = df[entity_name].drop_duplicates(
                            ).values.tolist()

                        if row[Const.NUMERATOR] != "":
                            all_numerators = [row[Const.NUMERATOR]]
                        denominator = df[self.tools.get_filter_condition(
                            df, **filters)].shape[0]
                        if denominator == 0:
                            continue
                        elif scene_size != "":
                            denominator *= scene_size

                        # Iterate entities (manufacturer / product_fk...)
                        for entity in all_numerators:
                            filters[entity_name] = entity
                            numerator = df[self.tools.get_filter_condition(
                                df, **filters)].shape[0]
                            del filters[entity_name]
                            if scene_size != "":
                                numerator *= scene_size
                            try:
                                numerator_id = self.all_products[
                                    self.all_products[entity_name] ==
                                    entity][entity_name_for_fk].values[0]
                            except Exception as ex:
                                Log.warning("No entity in this name " +
                                            entity + ", warning: " + str(ex))
                                numerator_id = -1
                            if (numerator_id, denominator_id,
                                    context_id) not in results_dict.keys():
                                results_dict[numerator_id, denominator_id,
                                             context_id] = [
                                                 numerator, denominator
                                             ]
                            else:
                                results_dict[numerator_id, denominator_id, context_id] = \
                                    map(sum, zip(results_dict[numerator_id, denominator_id, context_id],
                                                 [numerator, denominator]))
        if len(results_dict) == 0:
            return

        self.save_result_to_kpi_table(kpi_fk, results_dict)
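# The results_dict bookkeeping above keeps a [numerator, denominator] pair per
# (numerator_id, denominator_id, context_id) key and sums the pairs on repeated keys.
# A small sketch of the same accumulation using collections.defaultdict:
from collections import defaultdict


def accumulate_results(observations):
    """observations: iterable of ((numerator_id, denominator_id, context_id), numerator, denominator)."""
    results = defaultdict(lambda: [0, 0])
    for key, numerator, denominator in observations:
        results[key][0] += numerator
        results[key][1] += denominator
    return dict(results)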
    def handle_sos_packs_atomics(self, atomic_id, atomic_name, parent_kpi):

        count_result = 0

        # bring the kpi rows from the sos sheet
        rows = self.sos_packs_sheet.loc[self.sos_packs_sheet[Const.KPI_ID] == atomic_id]

        # get a single row
        row = self.find_row(rows)
        if row.empty:
            return

        # enter only if there is a matching store, region and state
        target = row[Const.TARGET].values[0]
        target_secondary = row[Const.SECONDARY_TARGET].values[0]
        target_packs = row[Const.PACKS_TARGET].values[0]
        result_type = row[Const.RESULT_TYPE].values[0]
        weight = row[Const.WEIGHT].values[0]

        # get the filters
        filters = self.get_filters_from_row(row.squeeze())

        df = self.match_product_in_scene.copy()
        df = pd.merge(df, self.all_products, on="product_fk")
        product_size = row[Const.PRODUCT_SIZE].values[0]
        if product_size != "":
            df = self.filter_product_size(df, product_size)

        df_packs = self.count_of_scenes_packs(df, filters)
        df_packs = df_packs[df_packs['num_packs'] >= target_packs]
        number_of_valid_scenes = len(df_packs)

        if number_of_valid_scenes >= target_secondary:
            count_result = weight

        # count number of facings
        if 'form_factor' in filters:
            del filters['form_factor']
        df_numerator = self.count_of_scenes_facings(df, filters)
        # count_of_total_facings = self.count_of_facings(df,filters)
        df_numerator = df_numerator.rename(columns={'face_count': 'facings_nom'})
        count_of_total_facings = df_numerator['facings_nom'].sum()
        for f in ['manufacturer_name', 'brand_name']:
            if f in filters:
                del filters[f]
        df_denominator = self.count_of_scenes_facings(df, filters)
        scene_types_groupby = pd.merge(df_numerator, df_denominator, how='left', on='scene_fk')
        df_target_filtered = scene_types_groupby[(scene_types_groupby['facings_nom'] /
                                                                scene_types_groupby['face_count']) * 100 >= target]
        number_of_valid_scenes = len(df_target_filtered)
        if target_secondary == "":
            target_secondary = 1
        if number_of_valid_scenes >= target_secondary:
            count_result = weight

        number_of_valid_scenes = len(set(df_target_filtered['scene_fk']).union(set(df_packs['scene_fk'])))
        number_of_not_valid_scenes = len(df_denominator['scene_fk'].drop_duplicates())

        try:
            atomic_pk = self.common_db.get_kpi_fk_by_kpi_type(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " + atomic_name)
            return

        self.parent_kpis[parent_kpi] += count_result

        if result_type == 1:
            self.common_db.write_to_db_result(fk=atomic_pk, numerator_id=1, denominator_id=self.store_fk,
                                denominator_result_after_actions=3,
                                numerator_result=number_of_valid_scenes, identifier_parent=parent_kpi,
                                denominator_result=number_of_not_valid_scenes, result=count_result, should_enter=True)
        elif result_type == 2:
            self.common_db.write_to_db_result(fk=atomic_pk, numerator_id=1, denominator_id=self.store_fk,
                                numerator_result=count_of_total_facings, identifier_parent=parent_kpi,
                                denominator_result_after_actions=2,
                                denominator_result=0, result=count_result, should_enter=True)
        elif result_type == 3:
            self.common_db.write_to_db_result(fk=atomic_pk, numerator_id=1, denominator_id=self.store_fk,
                                numerator_result=number_of_valid_scenes, identifier_parent=parent_kpi,
                                denominator_result_after_actions=2,
                                denominator_result=0, result=count_result, should_enter=True)
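# A hypothetical sketch of the per-scene share check above: count the scenes in which the
# filtered facings reach a percentage target of the scene's total facings. The column names
# (scene_fk, facings, is_numerator) are assumptions made for the illustration.
import pandas as pd


def scenes_passing_share(df, target_pct):
    per_scene = df.groupby('scene_fk').apply(
        lambda g: 100.0 * g.loc[g['is_numerator'], 'facings'].sum() / g['facings'].sum())
    return int((per_scene >= target_pct).sum())


# df = pd.DataFrame({'scene_fk': [1, 1, 2, 2], 'facings': [6, 4, 2, 8],
#                    'is_numerator': [True, False, True, False]})
# scenes_passing_share(df, 50) == 1    # only scene 1 reaches a 50% share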
Example #16
    def calculate_block_together(self, allowed_products_filters=None, include_empty=EXCLUDE_EMPTY, **filters):
        """
        :param allowed_products_filters: These are the parameters which are allowed to corrupt the block without failing it.
        :param include_empty: This parameter dictates whether or not to discard Empty-typed products.
        :param filters: These are the parameters which the blocks are checked for.
        :return: True - if in (at least) one of the scenes all the relevant SKUs are grouped together in one block;
                 otherwise - returns False.
        """
        relevant_scenes = self.scif[self.get_filter_condition(self.scif, **filters)]['scene_id'].unique().tolist()
        for field in filters.keys():
            if field not in self.all_products.columns:
                filters.pop(field, None)
        if relevant_scenes:
            for scene in relevant_scenes:
                scene_graph = self.position_graphs.get(scene).copy()
                relevant_vertices = None
                for field in filters.keys():
                    values = filters[field] if isinstance(filters[field], (list, float)) else [filters[field]]
                    vertices_for_field = set()
                    for value in values:
                        condition = {field: value}
                        vertices = {v.index for v in scene_graph.vs.select(**condition)}
                        vertices_for_field = vertices_for_field.union(vertices)
                    if relevant_vertices is None:
                        relevant_vertices = vertices_for_field
                    else:
                        relevant_vertices = relevant_vertices.intersection(vertices_for_field)

                if allowed_products_filters:
                    allowed_vertices = None
                    for field in allowed_products_filters.keys():
                        values = allowed_products_filters[field] \
                                                        if isinstance(allowed_products_filters[field], (list, float)) \
                                                        else [allowed_products_filters[field]]
                        vertices_for_field = set()
                        for value in values:
                            condition = {field: value}
                            vertices = {v.index for v in scene_graph.vs.select(**condition)}
                            vertices_for_field = vertices_for_field.union(vertices)
                        if allowed_vertices is None:
                            allowed_vertices = vertices_for_field
                        else:
                            allowed_vertices = allowed_vertices.intersection(vertices_for_field)

                    if include_empty == self.EXCLUDE_EMPTY:
                        try:
                            empty_vertices = {v.index for v in scene_graph.vs.select(product_type='Empty')}
                            allowed_vertices = allowed_vertices.union(empty_vertices)
                        except KeyError:
                            Log.warning("Entity 'product_type' doesn't appear in the vertex attributes")

                    relevant_vertices = relevant_vertices if relevant_vertices is not None else set()
                    allowed_vertices = allowed_vertices if allowed_vertices is not None else set()
                else:
                    allowed_vertices = []

                all_vertices = {v.index for v in scene_graph.vs}
                vertices_to_remove = all_vertices.difference(relevant_vertices.union(allowed_vertices))
                scene_graph.delete_vertices(vertices_to_remove)
                # removing clusters including 'allowed' SKUs only
                clusters = [cluster for cluster in scene_graph.clusters() if set(cluster).difference(allowed_vertices)]
                if len(clusters) == 1:
                    return True
        else:
            Log.debug('None of the scenes contain relevant SKUs')
        return False
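# calculate_block_together relies on the project's igraph position graph. As a rough,
# hypothetical illustration of the same idea with networkx: keep only the relevant and
# 'allowed' vertices, then require a single connected component that still contains at
# least one relevant vertex.
import networkx as nx


def is_single_block(adjacency_edges, relevant, allowed=frozenset()):
    """adjacency_edges: (vertex, vertex) pairs; relevant/allowed: sets of vertex ids."""
    keep = set(relevant) | set(allowed)
    graph = nx.Graph()
    graph.add_nodes_from(keep)
    graph.add_edges_from((u, v) for u, v in adjacency_edges if u in keep and v in keep)
    # components made up solely of 'allowed' vertices do not break the block
    clusters = [c for c in nx.connected_components(graph) if c - set(allowed)]
    return len(clusters) == 1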
    def handle_prod_seq_2_atomics(self, atomic_id, atomic_name, parent_kpi):

        count_result = 0
        result = 0

        # bring the kpi rows in the PROD_SEQ_2 sheet
        rows = self.prod_seq_2_sheet.loc[self.prod_seq_2_sheet[Const.KPI_ID] == atomic_id]

        # get the correct rows
        temp = rows[Const.STORE_TYPE_TEMPLATE]
        matching_row = rows[(temp.apply(lambda r: self.store_type_filter in r.split(","))) | (temp == "")]
        groups_outside = matching_row[Const.BRAND_GROUP_OUTSIDE].values[0].split(',')
        groups_inside = matching_row[Const.BRAND_GROUP_INSIDE].values[0].split(',')

        del matching_row[Const.BRAND_GROUP_OUTSIDE]
        del matching_row[Const.BRAND_GROUP_INSIDE]

        filters = self.get_filters_from_row(matching_row.squeeze())

        scenes = self.get_scene_list(filters)

        matches = self.match_product_in_scene.copy()
        matches = matches[matches['scene_fk'].isin(scenes)]
        matches_merged = pd.merge(matches, self.all_products, how='left', on='product_fk').fillna(0)
        matches_merged_ns = matches_merged[matches_merged['stacking_layer'] == 1]
        filtered_shelfs = matches_merged_ns[matches_merged_ns['brand_name'].isin(groups_inside)][['scene_fk',
                                                                    'bay_number', 'shelf_number']].drop_duplicates()
        denominator_shelfs = len(filtered_shelfs)
        numerator_shelfs = 0
        for i, row in filtered_shelfs.iterrows():
            working_shelf = matches_merged_ns[(matches_merged_ns['scene_fk'] == row['scene_fk']) & (matches_merged_ns[
                      'bay_number'] == row['bay_number']) & (matches_merged_ns['shelf_number'] == row['shelf_number'])]
            if len(groups_inside) == 1:
                check = True
                for g in groups_outside:
                    if len(working_shelf[working_shelf['brand_name'] == g]) == 0:
                        check = False
                if not check:
                    continue
            else:
                if len(working_shelf[working_shelf['brand_name'].isin(groups_outside)]) == 0:
                    continue

            result = self.check_order_prod_seq_2(working_shelf, groups_outside, groups_inside)
            if result:
                numerator_shelfs += 1

        target = matching_row[Const.TARGET].values[0]
        weight = matching_row[Const.WEIGHT].values[0]
        if denominator_shelfs != 0 and (float(numerator_shelfs) / denominator_shelfs) * 100 >= target:
            count_result = weight

        if result == 0:
            return

        try:
            atomic_pk = self.common_db.get_kpi_fk_by_kpi_type(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " + atomic_name)
            return

        self.parent_kpis[parent_kpi] += count_result

        self.common_db.write_to_db_result(fk=atomic_pk, numerator_id=1, identifier_parent=parent_kpi,
                                          denominator_result_after_actions=1,
                                          numerator_result=numerator_shelfs, denominator_id=self.store_fk,
                                          should_enter=True, denominator_result=denominator_shelfs, result=count_result)
Example #18
    def calculate_oos_target(self):
        temp = self.oos_sheet[Const.TEMPLATE_STORE_TYPE]
        rows_stores_filter = self.oos_sheet[
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])]
        if rows_stores_filter.empty:
            weight = 0
        else:
            weight = rows_stores_filter[Const.TEMPLATE_SCORE].values[0]
        all_data = pd.merge(
            self.scif[["store_id", "product_fk", "facings", "template_name"]],
            self.store_info,
            left_on="store_id",
            right_on="store_fk")
        if all_data.empty:
            return 0
        json_policies = self.oos_policies.copy()
        json_policies[Const.POLICY] = self.oos_policies[Const.POLICY].apply(
            lambda line: json.loads(line))
        diff_policies = json_policies[
            Const.POLICY].drop_duplicates().reset_index()
        diff_table = json_normalize(diff_policies[Const.POLICY].tolist())

        # remove all lists from df
        diff_table = diff_table.applymap(lambda x: x[0]
                                         if isinstance(x, list) else x)
        for col in diff_table.columns:
            diff_table[col] = diff_table[col].str.encode('utf-8')
            att = all_data.iloc[0][col].encode('utf-8')
            if att is None:
                return 0
            diff_table = diff_table[diff_table[col] == att]

        if diff_table.shape[0] > 1:
            Log.warning("There is more than one possible match")
            return 0
        if diff_table.empty:
            Log.warning("There is no matching policy for {}".format(str(all_data[diff_table.columns].iloc[0, :]\
                                                                        .to_dict())))
            return 0
        selected_row = diff_policies.iloc[diff_table.index[0]][Const.POLICY]
        json_policies = json_policies[json_policies[Const.POLICY] ==
                                      selected_row]
        products_to_check = json_policies['product_fk'].tolist()
        products_df = all_data[(
            all_data['product_fk'].isin(products_to_check))][[
                'product_fk', 'facings'
            ]].fillna(0)
        products_df = products_df.groupby('product_fk').sum().reset_index()
        try:
            atomic_pk_sku = self.common_v2.get_kpi_fk_by_kpi_name(
                Const.OOS_SKU_KPI)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        Const.OOS_SKU_KPI)
            return 0
        for product in products_to_check:
            if product not in products_df['product_fk'].values:
                products_df = products_df.append(
                    {
                        'product_fk': product,
                        'facings': 0.0
                    }, ignore_index=True)
        for index, row in products_df.iterrows():
            result = 0 if row['facings'] > 0 else 1
            self.common_v2.write_to_db_result(fk=atomic_pk_sku,
                                              numerator_id=row['product_fk'],
                                              numerator_result=row['facings'],
                                              denominator_id=self.store_id,
                                              result=result,
                                              score=result,
                                              identifier_parent=Const.OOS_KPI,
                                              should_enter=True,
                                              parent_fk=3)

        not_existing_products_len = len(
            products_df[products_df['facings'] == 0])
        result = not_existing_products_len / float(len(products_to_check))
        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(Const.OOS_KPI)
            result_oos_pk = self.common_v2.get_kpi_fk_by_kpi_name(
                Const.OOS_RESULT_KPI)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        Const.OOS_KPI)
            return 0
        score = result * weight
        self.common_v2.write_to_db_result(
            fk=atomic_pk,
            numerator_id=self.region_fk,
            numerator_result=not_existing_products_len,
            denominator_id=self.store_id,
            denominator_result=len(products_to_check),
            result=result,
            score=score,
            identifier_result=Const.OOS_KPI,
            parent_fk=3)
        self.common_v2.write_to_db_result(
            fk=result_oos_pk,
            numerator_id=self.region_fk,
            numerator_result=not_existing_products_len,
            denominator_id=self.store_id,
            denominator_result=len(products_to_check),
            result=result,
            score=result,
            parent_fk=3)
        return score
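# A minimal, hypothetical sketch of the policy-matching step above: flatten the JSON
# policies into a frame and keep only the rows whose attributes all match the store record.
# pd.json_normalize stands in for the json_normalize import used by the original module.
import json

import pandas as pd


def match_policy(policy_json_strings, store_record):
    """policy_json_strings: list of JSON strings; store_record: dict of store attributes."""
    policies = pd.json_normalize([json.loads(p) for p in policy_json_strings])
    # lists inside a policy are reduced to their first element, as in the code above
    policies = policies.applymap(lambda x: x[0] if isinstance(x, list) else x)
    for col in policies.columns:
        policies = policies[policies[col] == store_record.get(col)]
    return policies


# match_policy(['{"region": ["North"], "store_type": "Hyper"}'],
#              {'region': 'North', 'store_type': 'Hyper'})    # -> one matching policy row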
Example #19
    def calculate_distribution(self):
        kpi_name = Const.KPI_DISTRIBUTION
        kpi_fk = self.get_kpi_fk_by_kpi_type(kpi_name)
        parent_fk = self.get_parent_fk(kpi_name)

        parent_kpi_name = Const.KPIS_HIERACHY[kpi_name]
        grand_parent_fk = self.get_parent_fk(parent_kpi_name)
        kpi_weight = Const.KPI_WEIGHTS[parent_kpi_name]

        # placeholder initial values for the aggregation below (TODO: revisit)
        score = 0
        numerator_facings = 0
        denominator_facings = 0
        total_ean_codes = 0

        if self.relevant_scif.empty:
            score = 0
            self.write_to_db(fk=kpi_fk,
                             numerator_id=self.manufacturer_pk,
                             numerator_result=numerator_facings,
                             denominator_id=self.store_id,
                             denominator_result=denominator_facings,
                             score=score,
                             identifier_parent=parent_fk,
                             identifier_result=kpi_fk,
                             should_enter=True)
        else:
            scene_ids = list(self.relevant_scif.scene_id.unique())

            for scene_id in scene_ids:
                scene_name = self.relevant_scif['template_name'][
                    self.relevant_scif['scene_id'] == scene_id].iloc[0]
                template_name_fk = self.relevant_scif['template_fk'][
                    self.relevant_scif['scene_id'] == scene_id].iloc[0]

                frentes_target_df = self.main_template[
                    self.main_template['NOMBRE DE TAREA'] == scene_name]
                ean_codes = frentes_target_df['PRODUCT EAN'].unique().tolist()
                ean_code_count = len(ean_codes)
                total_ean_codes += ean_code_count
                found_ean = 0
                for ean_code in ean_codes:
                    try:

                        found_sku_df = self.relevant_scif[
                            self.relevant_scif['product_ean_code'] == str(
                                ean_code)]

                        if found_sku_df.empty:
                            score = 0
                        else:
                            score = 100
                            found_ean += 1

                    except Exception as ex:
                        Log.warning("Distribution KPI failed for EAN {}: {}".format(ean_code, ex))

                    self.write_to_db(fk=kpi_fk,
                                     numerator_id=scene_id,
                                     numerator_result=ean_code,
                                     denominator_id=template_name_fk,
                                     result=score,
                                     score=score,
                                     identifier_parent=parent_fk,
                                     identifier_result=kpi_fk,
                                     should_enter=True)

        if total_ean_codes != 0:

            numerator_facings = found_ean
            denominator_facings = total_ean_codes
            ratio = round(numerator_facings / float(denominator_facings),
                          2) * 100
        else:
            ratio = 0

        score = round(((ratio * .01) * kpi_weight), 2)

        self.write_to_db(fk=parent_fk,
                         numerator_id=numerator_facings,
                         denominator_id=denominator_facings,
                         result=ratio,
                         score=score,
                         identifier_parent=grand_parent_fk,
                         identifier_result=parent_fk,
                         should_enter=True)
        return ratio
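# A small sketch of the distribution ratio computed above: the share of expected EAN codes
# that were found in the session, scaled by the KPI weight. Names are illustrative only.
def distribution_score(expected_eans, found_eans, kpi_weight):
    """Returns (ratio_pct, weighted_score); the ratio is rounded the same way as above."""
    expected = set(expected_eans)
    if not expected:
        return 0.0, 0.0
    ratio = round(len(expected & set(found_eans)) / float(len(expected)), 2) * 100
    return ratio, round(ratio * .01 * kpi_weight, 2)


# distribution_score(['111', '222', '333', '444'], ['111', '333'], kpi_weight=10) == (50.0, 5.0)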
Example #20
    def handle_sos_target_atomics(self, atomic_id, atomic_name, parent_name):

        denominator_number_of_total_facings = 0
        count_result = -1

        # bring the kpi rows from the sos sheet
        rows = self.sos_target_sheet.loc[self.sos_target_sheet[
            Const.TEMPLATE_KPI_ID] == atomic_id]

        # get a single row
        row = self.find_row(rows)
        if row.empty:
            return 0

        target = row[Const.TEMPLATE_TARGET_PRECENT].values[0]
        score = row[Const.TEMPLATE_SCORE].values[0]
        df = pd.merge(self.scif,
                      self.store_info,
                      how="left",
                      left_on="store_id",
                      right_on="store_fk")

        # get the filters
        filters = self.get_filters_from_row(row.squeeze())
        numerator_number_of_facings = self.count_of_facings(df, filters)
        if numerator_number_of_facings != 0 and count_result == -1:
            if 'manufacturer_name' in filters.keys():
                deno_manufacturer = row[
                    Const.TEMPLATE_MANUFACTURER_DENOMINATOR].values[0].strip()
                deno_manufacturer = deno_manufacturer.split(",")
                filters['manufacturer_name'] = [
                    item.strip() for item in deno_manufacturer
                ]
                denominator_number_of_total_facings = self.count_of_facings(
                    df, filters)
                percentage = 100 * (numerator_number_of_facings /
                                    float(denominator_number_of_total_facings))
                count_result = score if percentage >= target else -1

        if count_result == -1:
            return 0

        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return 0

        self.common_v2.write_to_db_result(
            fk=atomic_pk,
            numerator_id=self.region_fk,
            numerator_result=numerator_number_of_facings,
            denominator_id=self.store_id,
            denominator_result=denominator_number_of_total_facings,
            result=count_result,
            score=count_result,
            identifier_result=atomic_name,
            identifier_parent=parent_name,
            should_enter=True,
            parent_fk=3)
        return count_result
Example #21
    def calculate_self_purity(self, kpi_data):
        score = 0
        if kpi_data.empty:
            return

        group_name = kpi_data.iloc[0][CCMY_SANDConsts.KPI_GROUP]
        identifier_parent = self.common.get_dictionary(kpi_name=group_name)
        scene_types = self.get_scene_types(kpi_data.iloc[0])
        self_purity_scene_list = self.scene_info[
            self.scene_info['template_name'].isin(scene_types)][
                CCMY_SANDConsts.SCENE_FK].unique().tolist()
        template_fk = self.scene_info[self.scene_info['template_name'].isin(
            scene_types)][CCMY_SANDConsts.TEMPLATE_FK].unique().tolist()
        template_fk = template_fk[0] if template_fk != [] else \
            self.data_provider.all_templates[self.data_provider.all_templates['template_name'].isin(
                scene_types)]['template_fk'].iloc[0]

        df_all_shelfs = self.match_product_in_scene

        if self.match_product_in_scene.empty:
            return 0, 0, 0, template_fk

        df_all_shelfs_products = df_all_shelfs.merge(
            self.products, how='inner', on=CCMY_SANDConsts.PRODUCT_FK)
        list_columns = [
            CCMY_SANDConsts.SCENE_FK, CCMY_SANDConsts.BAY_NUMBER,
            CCMY_SANDConsts.SHELF_NUMBER, CCMY_SANDConsts.MANUFACTURER_FK,
            CCMY_SANDConsts.PRODUCT_FK
        ]

        df_all_shelfs_products = pd.DataFrame(
            df_all_shelfs_products.groupby(list_columns).size().reset_index(
                name='count'))

        df_all_shelfs_products = df_all_shelfs_products[df_all_shelfs_products[
            CCMY_SANDConsts.SCENE_FK].isin(self_purity_scene_list)]
        df_shelf_pure = df_all_shelfs_products[[
            CCMY_SANDConsts.SCENE_FK, CCMY_SANDConsts.BAY_NUMBER,
            CCMY_SANDConsts.SHELF_NUMBER
        ]]
        df_shelf_pure.drop_duplicates(subset=None, keep='first', inplace=True)
        df_shelf_pure[CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.PURE

        for x, params in kpi_data.iterrows():
            for row_num_x, row_data_x in df_shelf_pure.iterrows():
                for row_num_y, row_data_y in df_all_shelfs_products.iterrows():
                    if ((row_data_x[CCMY_SANDConsts.SCENE_FK]
                         == row_data_y[CCMY_SANDConsts.SCENE_FK]) &
                        (row_data_x[CCMY_SANDConsts.BAY_NUMBER]
                         == row_data_y[CCMY_SANDConsts.BAY_NUMBER]) &
                        (row_data_x[CCMY_SANDConsts.SHELF_NUMBER]
                         == row_data_y[CCMY_SANDConsts.SHELF_NUMBER]) &
                        (row_data_y[CCMY_SANDConsts.MANUFACTURER_FK]
                         == CCMY_SANDConsts.GENERAL_MANUFACTURER) &
                        (row_data_y[CCMY_SANDConsts.PRODUCT_FK]
                         == CCMY_SANDConsts.IRRELEVANT) &
                        (row_data_x[CCMY_SANDConsts.IS_PURE]
                         == CCMY_SANDConsts.PURE)):

                        row_data_x[CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.IMPURE
                        # write the flag back to the dataframe (iterrows yields copies)
                        df_shelf_pure.at[row_num_x, CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.IMPURE
                        print "Impure Shelf={}".format(
                            row_data_y[CCMY_SANDConsts.SHELF_NUMBER])
                        continue
                    elif ((row_data_x[CCMY_SANDConsts.SCENE_FK]
                           == row_data_y[CCMY_SANDConsts.SCENE_FK]) &
                          (row_data_x[CCMY_SANDConsts.BAY_NUMBER]
                           == row_data_y[CCMY_SANDConsts.BAY_NUMBER]) &
                          (row_data_x[CCMY_SANDConsts.SHELF_NUMBER]
                           == row_data_y[CCMY_SANDConsts.SHELF_NUMBER]) &
                          (row_data_y[CCMY_SANDConsts.MANUFACTURER_FK] not in [
                              CCMY_SANDConsts.CCBM,
                              CCMY_SANDConsts.GENERAL_MANUFACTURER
                          ]) & (row_data_y[CCMY_SANDConsts.PRODUCT_FK] !=
                                CCMY_SANDConsts.GENERAL_EMPTY_PRODUCT) &
                          (row_data_x[CCMY_SANDConsts.IS_PURE]
                           == CCMY_SANDConsts.PURE)):

                        row_data_x[CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.IMPURE
                        df_shelf_pure.at[row_num_x, CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.IMPURE
                        print "Impure Shelf={}".format(
                            row_data_y[CCMY_SANDConsts.SHELF_NUMBER])
                        continue
                    elif ((row_data_x[CCMY_SANDConsts.SCENE_FK]
                           == row_data_y[CCMY_SANDConsts.SCENE_FK]) &
                          (row_data_x[CCMY_SANDConsts.BAY_NUMBER]
                           == row_data_y[CCMY_SANDConsts.BAY_NUMBER]) &
                          (row_data_x[CCMY_SANDConsts.SHELF_NUMBER]
                           == row_data_y[CCMY_SANDConsts.SHELF_NUMBER]) &
                          (row_data_y[CCMY_SANDConsts.MANUFACTURER_FK]
                           == CCMY_SANDConsts.GENERAL_MANUFACTURER) &
                          (row_data_y[CCMY_SANDConsts.PRODUCT_FK] !=
                           CCMY_SANDConsts.GENERAL_EMPTY_PRODUCT) &
                          (row_data_x[CCMY_SANDConsts.IS_PURE]
                           == CCMY_SANDConsts.PURE)):
                        row_data_x[CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.IMPURE
                        df_shelf_pure.at[row_num_x, CCMY_SANDConsts.IS_PURE] = CCMY_SANDConsts.IMPURE
                        print "Impure Shelf={}".format(
                            row_data_y[CCMY_SANDConsts.SHELF_NUMBER])
                        continue

            num_of_pure_shelves = 0 if df_shelf_pure.empty else df_shelf_pure[
                CCMY_SANDConsts.IS_PURE].sum()
            total_num_of_shelves = 0 if df_shelf_pure.empty else len(
                df_shelf_pure)
            score = score if total_num_of_shelves == 0 else (
                num_of_pure_shelves / float(total_num_of_shelves))
            df_atomic_kpi = self.kpi_static_data[(self.kpi_static_data[
                CCMY_SANDConsts.ATOMIC_KPI_NAME] == params['KPI Name']) & (
                    self.kpi_static_data['kpi_name'] == group_name)]
            if df_atomic_kpi.empty:
                Log.warning("kpi {} from template, doesn't exist in DB".format(
                    params['KPI Name']))
                return 0
            atomic_kpi_fk = df_atomic_kpi.iloc[0][
                CCMY_SANDConsts.ATOMIC_KPI_FK]
            result = num_of_pure_shelves if df_atomic_kpi[CCMY_SANDConsts.ATOMIC_KPI_NAME].iloc[0] == \
                                            CCMY_SANDConsts.KPI_NUM_PURE_SHELVES else total_num_of_shelves
            self.write_to_db_result(atomic_kpi_fk, (result, result, 0),
                                    level=self.LEVEL3)

            self.insert_db_new_results(params['KPI Name'],
                                       score,
                                       score,
                                       result,
                                       result,
                                       target=0,
                                       identifier_parent=identifier_parent)

            return num_of_pure_shelves, total_num_of_shelves, score, template_fk
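# The purity loop above compares rows pair by pair; a rough pandas sketch of the same intent
# (count shelves stocked exclusively by the 'own' manufacturer) can lean on a groupby.
# Column names mirror match_product_in_scene merged with products; own_manufacturer_fk is an
# assumption made for the illustration.
import pandas as pd


def count_pure_shelves(matches, own_manufacturer_fk):
    shelf_cols = ['scene_fk', 'bay_number', 'shelf_number']
    purity = matches.groupby(shelf_cols)['manufacturer_fk'].apply(
        lambda m: bool((m == own_manufacturer_fk).all()))
    return int(purity.sum()), int(len(purity))    # (pure shelves, total shelves)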
Example #22
    def handle_survey_combo(self, atomic_id, atomic_name, parent_name):
        # bring the kpi rows from the survey sheet
        numerator = denominator = 0
        rows = self.survey_combo_sheet.loc[self.survey_combo_sheet[
            Const.TEMPLATE_KPI_ID] == atomic_id]
        temp = rows[Const.TEMPLATE_STORE_TYPE]
        row_store_filter = rows[(
            temp.apply(lambda r: self.store_type_filter in
                       [item.strip() for item in r.split(",")])) |
                                (temp == "")]
        if row_store_filter.empty:
            return 0

        condition = row_store_filter[Const.TEMPLATE_CONDITION].values[0]
        condition_type = row_store_filter[
            Const.TEMPLATE_CONDITION_TYPE].values[0]
        score = row_store_filter[Const.TEMPLATE_SCORE].values[0]

        # find the answer to the survey in session
        for i, row in row_store_filter.iterrows():
            question_text = row[Const.TEMPLATE_SURVEY_QUESTION_TEXT]
            question_answer_template = row[Const.TEMPLATE_TARGET_ANSWER]

            survey_result = self.survey.get_survey_answer(
                ('question_text', question_text))
            if not survey_result:
                continue
            if '-' in question_answer_template:
                numbers = question_answer_template.split('-')
                try:
                    numeric_survey_result = int(survey_result)
                except (TypeError, ValueError):
                    Log.warning("Survey question - " + str(question_text) +
                                " - doesn't have a numeric result")
                    continue
                if numeric_survey_result < int(
                        numbers[0]) or numeric_survey_result > int(numbers[1]):
                    continue
                numerator_or_denominator = row_store_filter[
                    Const.NUMERATOR_OR_DENOMINATOR].values[0]
                if numerator_or_denominator == Const.DENOMINATOR:
                    denominator += numeric_survey_result
                else:
                    numerator += numeric_survey_result
            else:
                continue
        if condition_type == '%':
            if denominator != 0:
                fraction = 100 * (float(numerator) / float(denominator))
            else:
                if numerator > 0:
                    fraction = 100
                else:
                    fraction = 0
            result = score if fraction >= condition else 0
        else:
            return 0

        try:
            atomic_pk = self.common_v2.get_kpi_fk_by_kpi_name(atomic_name)
        except IndexError:
            Log.warning("There is no matching Kpi fk for kpi name: " +
                        atomic_name)
            return 0
        self.common_v2.write_to_db_result(fk=atomic_pk,
                                          numerator_id=self.region_fk,
                                          numerator_result=numerator,
                                          denominator_result=denominator,
                                          denominator_id=self.store_id,
                                          result=result,
                                          score=result,
                                          identifier_result=atomic_name,
                                          identifier_parent=parent_name,
                                          should_enter=True,
                                          parent_fk=3)
        return result
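# A small sketch of the range check used above: a target written as "low-high" passes when
# the numeric survey answer falls inside the inclusive range; non-numeric answers fail.
def answer_in_range(survey_answer, target_range):
    if '-' not in target_range:
        return False
    low, high = target_range.split('-')[:2]
    try:
        value = int(survey_answer)
    except (TypeError, ValueError):
        return False
    return int(low) <= value <= int(high)


# answer_in_range('3', '1-5') == True; answer_in_range('7', '1-5') == False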
Example #23
    def calculate_sos_vs_target(self, kpi):
        """
        The function filters only the relevant scenes by Location Type and calculates the linear SOS and
        the facing SOS according to Manufacturer and Category set in the target.
        :return: score, potential_score, store_policy_passed
        """
        location_type = kpi['Location Type']
        kpi_fk = self.common.get_kpi_fk_by_kpi_type(
            SOS_MANUFACTURER_CATEGORY +
            ('_' + location_type if location_type else ''))

        sos_store_policies = self.sos_store_policies[
            self.sos_store_policies['kpi_fk'] == str(kpi_fk)]

        sos_store_policy = None
        store_policy_passed = 0
        for index, policy in sos_store_policies.iterrows():
            sos_store_policy = policy
            store_policy = json.loads(policy['store_policy'])
            store_policy_passed = 1
            for key in store_policy.keys():
                if key in self.store_info.columns.tolist():
                    if self.store_info[key][0] in store_policy[key]:
                        continue
                    else:
                        store_policy_passed = 0
                        break
                else:
                    Log.error("Store Policy attribute is not found: '{}'"
                              ).format(key)
                    store_policy_passed = 0
                    break
            if store_policy_passed:
                break

        score = potential_score = 0
        if store_policy_passed:

            general_filters = {LOCATION_TYPE: location_type}
            sos_policy = json.loads(sos_store_policy['sos_policy'])
            numerator_sos_filters = {
                MANUFACTURER_NAME: sos_policy[NUMERATOR][MANUFACTURER],
                CATEGORY: sos_policy[DENOMINATOR][CATEGORY]
            }
            denominator_sos_filters = {
                CATEGORY: sos_policy[DENOMINATOR][CATEGORY]
            }

            numerator_id = self.all_products.loc[
                self.all_products[MANUFACTURER_NAME] == sos_policy[NUMERATOR]
                [MANUFACTURER]][MANUFACTURER + '_fk'].values[0]
            denominator_id = self.all_products.loc[
                self.all_products[CATEGORY] == sos_policy[DENOMINATOR]
                [CATEGORY]][CATEGORY + '_fk'].values[0]

            ignore_stacking = kpi['Ignore Stacking'] if kpi[
                'Ignore Stacking'] else 0

            numer_facings, numer_linear = self.calculate_share_space(
                ignore_stacking=ignore_stacking,
                **dict(numerator_sos_filters, **general_filters))
            denom_facings, denom_linear = self.calculate_share_space(
                ignore_stacking=ignore_stacking,
                **dict(denominator_sos_filters, **general_filters))

            if kpi['KPI Type'].lower() == LINEAR_SOS_VS_TARGET:
                numerator_result = round(numer_linear, 0)
                denominator_result = round(denom_linear, 0)
                result = numer_linear / float(
                    denom_linear) if denom_linear else 0
            elif kpi['KPI Type'].lower() == FACINGS_SOS_VS_TARGET:
                numerator_result = numer_facings
                denominator_result = denom_facings
                result = numer_facings / float(
                    denom_facings) if denom_facings else 0
            else:
                Log.error("KPI Type is invalid: '{}'").format(kpi['KPI Type'])
                numerator_result = denominator_result = result = 0

            if sos_store_policy['target']:
                sos_target = round(float(sos_store_policy['target']) * 100, 0)
            else:
                Log.error("SOS target is not set for Store ID {}").format(
                    self.store_id)
                sos_target = 0

            result_vs_target = result / (float(sos_target) /
                                         100) * 100 if sos_target else 0
            score = self.score_function(result_vs_target, kpi)
            potential_score = round(float(kpi['Weight']) * 100, 0)

            identifier_kpi = self.common.get_dictionary(kpi_fk=kpi_fk)
            identifier_parent = self.common.get_dictionary(
                kpi_fk=self.common.get_kpi_fk_by_kpi_type(kpi['KPI Group']))
            self.common.write_to_db_result(
                fk=kpi_fk,
                numerator_id=numerator_id,
                numerator_result=numerator_result,
                denominator_id=denominator_id,
                denominator_result=denominator_result,
                result=result,
                score=score,
                weight=potential_score,
                target=sos_target,
                identifier_result=identifier_kpi,
                identifier_parent=identifier_parent,
                should_enter=True)

        else:
            Log.warning("Store Policy is not found for Store ID {}".format(
                self.store_id))

        return score, potential_score, store_policy_passed
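# A hypothetical sketch of the 'result vs target' step above: the measured SOS ratio is
# expressed as a percentage of its target before being passed to the scoring function.
def result_vs_target(result, sos_target_pct):
    """result: SOS ratio between 0 and 1; sos_target_pct: target in percent."""
    if not sos_target_pct:
        return 0.0
    return result / (float(sos_target_pct) / 100) * 100


# result_vs_target(0.25, 50.0) == 50.0    # a 25% share reaches half of a 50% target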
    def calculate_assortment_sets(self, set_name):
        """
        This function calculates every Assortment-typed KPI from the relevant sets, and returns the set final score.
        """
        scores = []
        for params in self.set_templates_data[set_name]:
            target = str(params.get(self.store_type, ''))
            if target.isdigit() or target.capitalize() in (
                    self.tools.RELEVANT_FOR_STORE,
                    self.tools.OR_OTHER_PRODUCTS):
                products = str(
                    params.get(self.tools.PRODUCT_EAN_CODE,
                               params.get(self.tools.PRODUCT_EAN_CODE2,
                                          ''))).replace(',', ' ').split()
                target = 1 if not target.isdigit() else int(target)
                kpi_name = params.get(self.tools.GROUP_NAME,
                                      params.get(self.tools.PRODUCT_NAME))
                kpi_static_data = self.kpi_static_data[
                    (self.kpi_static_data['kpi_set_name'] == set_name)
                    & (self.kpi_static_data['kpi_name'] == kpi_name)]
                if len(products) > 1:
                    result = 0
                    for product in products:
                        product_score = self.tools.calculate_assortment(
                            product_ean_code=product)
                        result += product_score
                        try:
                            product_name = \
                                self.all_products[self.all_products['product_ean_code'] == product][
                                    'product_name'].values[
                                    0]
                        except Exception as e:
                            Log.warning(
                                'Product {} is not defined in the DB'.format(
                                    product))
                            continue
                        try:
                            atomic_fk = \
                                kpi_static_data[kpi_static_data['atomic_kpi_name'] == product_name][
                                    'atomic_kpi_fk'].values[
                                    0]
                        except Exception as e:
                            Log.warning(
                                'Product {} is not defined in the DB'.format(
                                    product_name))
                            continue
                        self.write_to_db_result(atomic_fk,
                                                product_score,
                                                level=self.LEVEL3)
                    score = 1 if result >= target else 0
                else:
                    result = self.tools.calculate_assortment(
                        product_ean_code=products)
                    atomic_fk = kpi_static_data['atomic_kpi_fk'].values[0]
                    score = 1 if result >= target else 0
                    self.write_to_db_result(atomic_fk,
                                            score,
                                            level=self.LEVEL3)

                scores.append(score)
                kpi_fk = kpi_static_data['kpi_fk'].values[0]
                self.write_to_db_result(kpi_fk, score, level=self.LEVEL2)

        if not scores:
            return False
        set_score = (sum(scores) / float(len(scores))) * 100
        return set_score
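# A minimal sketch of the assortment scoring above: every product contributes 1 when it is
# found, a group passes when the number of found products reaches its target, and the set
# score is the percentage of passing groups. Names are illustrative, not the project's API.
def assortment_set_score(group_results, target=1):
    """group_results: list of per-group lists of 0/1 product results."""
    if not group_results:
        return False
    scores = [1 if sum(group) >= target else 0 for group in group_results]
    return (sum(scores) / float(len(scores))) * 100


# assortment_set_score([[1, 0, 1], [0, 0]], target=1) == 50.0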