def _handle_promotion_wall_display(self):
     """
     Handles the promotion wall display: all display tags in a scene are aggregated into one display with multiple bays.
     :return: None
     """
     Log.debug(self.log_prefix + ' Starting promotion display')
     promotion_tags = \
         self.match_display_in_scene[self.match_display_in_scene['display_name'].isin(PROMOTION_WALL_DISPLAYS)]
     if not promotion_tags.empty:
         promotion_display_name = promotion_tags['display_name'].values[0]
         display_promotion_wall = promotion_tags.groupby(
             ['display_fk', 'scene_fk'], as_index=False).display_size.sum()
         display_promotion_wall['display_name'] = promotion_display_name
         display_promotion_wall_with_id = self._insert_into_display_surface(
             display_promotion_wall)
         promotion_wall_bays = promotion_tags[['scene_fk',
                                               'bay_number']].copy()
         promotion_wall_bays.drop_duplicates(['scene_fk', 'bay_number'],
                                             inplace=True)
         # only valid tags are relevant
         promotion_wall_valid_bays = self._filter_valid_bays(
             promotion_wall_bays)
         display_promotion_wall_with_id_and_bays = \
             display_promotion_wall_with_id.merge(promotion_wall_valid_bays, on='scene_fk')
         self._calculate_share_of_display(
             display_promotion_wall_with_id_and_bays)
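
A minimal, self-contained sketch of the aggregation step above, with made-up data standing in for match_display_in_scene:

import pandas as pd

# two tags of the same display in one scene collapse into a single row
# whose display_size is the sum of the tag sizes
tags = pd.DataFrame({'display_fk': [7, 7], 'scene_fk': [3, 3],
                     'display_size': [1.5, 2.0]})
aggregated = tags.groupby(['display_fk', 'scene_fk'], as_index=False).display_size.sum()
print(aggregated)  # one row: display_fk=7, scene_fk=3, display_size=3.5
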
    def _exclude_sos(self, df):
        """
        Calculates the 'in_sos' field according to the exclusion rules.
        :param df: the matches data frame to be flagged.
        :return: the same data frame with 'in_sos' set to 1 (included) or 0 (excluded).
        """
        Log.debug(self.log_prefix + ' calculating in_sos')
        excluded_templates = self.data_provider._data[
            Fields.SOS_EXCLUDED_TEMPLATES]
        excluded_templates['excluded_templates'] = 1

        excluded_template_products = self.data_provider._data[
            Fields.SOS_EXCLUDED_TEMPLATE_PRODUCTS]
        excluded_template_products['excluded_template_products'] = 1

        excluded_products = self.data_provider._data[
            Fields.SOS_EXCLUDED_PRODUCTS]
        excluded_products['excluded_products'] = 1

        df = df.merge(excluded_templates, how='left', on='template_fk') \
               .merge(excluded_products, how='left', on='product_fk') \
               .merge(excluded_template_products, how='left', on=['product_fk', 'template_fk'])

        condition = (df['excluded_templates'] == 1) | \
                    (df['excluded_template_products'] == 1) | (df['excluded_products'] == 1)

        df = df.drop(['excluded_templates', 'excluded_template_products', 'excluded_products'], axis=1)

        df.loc[condition, 'in_sos'] = 0
        df.loc[~condition, 'in_sos'] = 1
        return df
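
The exclusion logic above is the usual left-merge flag pattern; a minimal sketch with toy frames (column names follow the method, the data is made up):

import pandas as pd

df = pd.DataFrame({'template_fk': [1, 2], 'product_fk': [10, 20]})
excluded_templates = pd.DataFrame({'template_fk': [2]})
excluded_templates['excluded_templates'] = 1

df = df.merge(excluded_templates, how='left', on='template_fk')
condition = df['excluded_templates'] == 1       # NaN rows compare as False
df = df.drop(['excluded_templates'], axis=1)
df.loc[condition, 'in_sos'] = 0
df.loc[~condition, 'in_sos'] = 1
print(df)  # product 10 -> in_sos 1.0, product 20 -> in_sos 0.0
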
 def check_survey_answer(self, survey_text, target_answer):
     """
     :param survey_text: The name of the survey in the DB, or an (entity, value/s) tuple for a custom lookup.
     :param target_answer: The required answer/s for the KPI to pass.
     :return: True if the answer matches the target, False otherwise, or None if the survey was not found.
     """
     if not isinstance(survey_text, (list, tuple)):
         entity = 'question_text'
         value = survey_text
     else:
         entity, value = survey_text
     value = [value] if not isinstance(value, list) else value
     survey_data = self.survey_response[self.survey_response[entity].isin(value)]
     if survey_data.empty:
         Log.debug('Survey with {} = {} doesn\'t exist'.format(
             entity, value))
         return None
     # a column of a non-empty frame is never .empty, so check for null values instead
     answer_field = 'selected_option_text' \
         if not survey_data['selected_option_text'].isnull().all() else 'number_value'
     target_answers = [target_answer] if not isinstance(target_answer, (list, tuple)) else target_answer
     survey_answers = survey_data[answer_field].values.tolist()
     for answer in target_answers:
         if answer in survey_answers:
             return True
     return False
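
A usage sketch with a toy survey_response frame (question and answers are made up), showing how the isin lookup and the answer check resolve:

import pandas as pd

survey_response = pd.DataFrame({
    'question_text': ['Is the cooler clean?', 'Is the cooler clean?'],
    'selected_option_text': ['Yes', 'No']})
entity, value = 'question_text', ['Is the cooler clean?']
survey_data = survey_response[survey_response[entity].isin(value)]
survey_answers = survey_data['selected_option_text'].values.tolist()
print('Yes' in survey_answers)  # True -> the KPI passes
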
    def get_entity_matrix(self, scene_id, entity):
        """
        This function creates a list of lists:
        Each list represents a shelf in the scene - with the given entity for each facing, from left to right.
        """
        if entity not in self.ATTRIBUTES_TO_SAVE:
            Log.debug("Entity '{}' is not set as an attribute in the graph".format(entity))
            return None
        graph = self.get(scene_id).copy()
        edges_to_remove = graph.es.select(direction_ne='left')
        graph.delete_edges([edge.index for edge in edges_to_remove])

        incidents_dict = {}
        matrix = []
        for vertex in graph.vs:
            vertex_id = vertex.index
            incidents = graph.incident(vertex_id)
            if incidents:
                incidents_dict[graph.es[incidents[0]].target] = vertex_id
            else:
                matrix.append([vertex_id])
        for i, row in enumerate(matrix):
            current = row[0]
            while current in incidents_dict.keys():
                current = incidents_dict[current]
                row.append(current)
            for y, index in enumerate(row):
                row[y] = graph.vs[index][entity]
            matrix[i] = row
        return matrix
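
The while loop above walks each chain of 'left' edges through incidents_dict; the same walk on plain dicts, with made-up vertex ids and entity values:

# links maps a facing to the next facing in its chain, like incidents_dict does
links = {10: 11, 11: 12}                    # 10 -> 11 -> 12 along one shelf
entity_of = {10: 'BrandA', 11: 'BrandB', 12: 'BrandC'}
row = [10]                                  # a row starts at a vertex with no incoming link
current = row[0]
while current in links:
    current = links[current]
    row.append(current)
print([entity_of[v] for v in row])          # ['BrandA', 'BrandB', 'BrandC']
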
 def calculate_sos(self,
                   sos_type,
                   kpi_fk,
                   numerator_fk,
                   denominator_fk,
                   subset_filters,
                   pop_filters,
                   context=None):
     ratio = 0
     df = self.match_product_in_scene.merge(self.scif,
                                            how='left',
                                            on=['scene_fk', 'product_fk'])
     df = df.drop(['facings'], axis=1)
     #  denominator
     pop_filter = self.common_sos.get_filter_condition(df, **pop_filters)
     #  numerator
     subset_filter = self.common_sos.get_filter_condition(
         df, **subset_filters)
     try:
         ratio = self.calculate_sos_by_policy(sos_type,
                                              kpi_fk,
                                              numerator_fk,
                                              denominator_fk,
                                              subset_filter=subset_filter,
                                              pop_filter=pop_filter,
                                              context=context)
     except Exception as e:
         Log.debug('calculate_sos could not be calculated: {}'.format(e.message))
     return ratio
 def calculate_sos(self,
                   sos_type,
                   kpi_fk,
                   numerator_fk,
                   denominator_fk,
                   subset_filters,
                   pop_filters,
                   context=None):
     ratio = 0
     #  denominator
     pop_filter = self.common_sos.get_filter_condition(
         self.scif, **pop_filters)
     #  numerator
     subset_filter = self.common_sos.get_filter_condition(
         self.scif, **subset_filters)
     try:
         ratio = self.calculate_sos_by_policy(sos_type,
                                              kpi_fk,
                                              numerator_fk,
                                              denominator_fk,
                                              subset_filter=subset_filter,
                                              pop_filter=pop_filter,
                                              context=context)
     except Exception as e:
         Log.debug('calculate_sos could not be calculated: {}'.format(e.message))
     return ratio
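
Both variants reduce to masking a frame with a numerator filter nested inside a denominator filter. A self-contained sketch of that ratio, with plain boolean masks standing in for get_filter_condition (made-up data):

import pandas as pd

scif = pd.DataFrame({'manufacturer_name': ['RedBull', 'Other', 'RedBull'],
                     'category': ['Energy', 'Energy', 'Soda']})
pop_filter = scif['category'] == 'Energy'                              # denominator
subset_filter = pop_filter & (scif['manufacturer_name'] == 'RedBull')  # numerator
ratio = float(subset_filter.sum()) / pop_filter.sum() if pop_filter.sum() else 0
print(ratio)  # 0.5
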
    def create_position_graphs(self, scene_id=None):
        """
        This function creates a facings Graph for each scene of the given session.
        """
        calc_start_time = datetime.datetime.utcnow()
        if scene_id:
            scenes = [scene_id]
        else:
            scenes = self.match_product_in_scene['scene_fk'].unique()
        for scene in scenes:
            matches = self.match_product_in_scene[(self.match_product_in_scene['scene_fk'] == scene) &
                                                  (self.match_product_in_scene['stacking_layer'] == 1)].copy()
            matches['distance_from_end_of_shelf'] = matches['n_shelf_items'] - matches['facing_sequence_number']
            scene_graph = igraph.Graph(directed=True)
            edges = []
            for f in xrange(len(matches)):
                facing = matches.iloc[f]
                facing_name = str(facing[VERTEX_FK_FIELD])
                scene_graph.add_vertex(facing_name)
                # adding attributes to vertex
                vertex = scene_graph.vs.find(facing_name)
                for attribute in self.ATTRIBUTES_TO_SAVE:
                    vertex[attribute] = facing[attribute]

                surrounding_products = self.get_surrounding_products(facing, matches)
                for direction in surrounding_products.keys():
                    for pk in surrounding_products[direction]:
                        edge = dict(source=facing_name, target=str(pk), direction=direction)
                        edges.append(edge)
            for edge in edges:
                scene_graph.add_edge(**edge)

            self.position_graphs[scene] = scene_graph
        calc_finish_time = datetime.datetime.utcnow()
        Log.debug('Creation of position graphs for scenes {} took {}'.format(scenes, calc_finish_time - calc_start_time))
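
A minimal igraph sketch of the vertex/edge pattern used above (product names and the single 'right' edge are made up):

import igraph

g = igraph.Graph(directed=True)
g.add_vertex('100')                         # vertices are named by their match pk
g.add_vertex('101')
g.vs.find('100')['product_name'] = 'Cola'   # attributes are set on the vertex
g.vs.find('101')['product_name'] = 'Sprite'
g.add_edge(source='100', target='101', direction='right')  # 'direction' becomes an edge attribute
print(g.es[0]['direction'])                 # 'right'
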
    def calculate_placement_count(self):
        """
        This function calculates the number of occurrences of each scene type and adds the corresponding score to the
        insert queries, which are later committed to the report.kpi_result DB.
        :return: None
        """
        df = self.scif[['template_name', 'scene_fk']]
        new_kpi_fk = self.common_new.get_kpi_fk_by_kpi_name(PLACEMENT_COUNT)

        # iterate scene types names
        for template_name in df.template_name.unique():
            # get number of occurrences of this scene type
            value = self.get_scene_count(df, template_name)
            # get the atomic kpi name ('no' marks scene types with no mapped atomic KPI)
            atomic_kpi_name = self.template_dict.get(template_name, 'no')
            if atomic_kpi_name != 'no':

                # get the atomic kpi fk of template name
                old_atomic_kpi_fk = self.old_common.get_kpi_fk_by_kpi_name(atomic_kpi_name, self.LEVEL3)

                Log.debug(atomic_kpi_name + " " + str(old_atomic_kpi_fk) + " " + str(value))

                # add insert query for later use
                self.old_common.write_to_db_result(old_atomic_kpi_fk, score=value, level=self.LEVEL3)
                template_fk = self.templates[self.templates['template_name'] == template_name]
                if len(template_fk) == 0:
                    Log.debug('template {} does not exist'.format(template_name))

                else:
                    template_fk = template_fk['template_fk'].values[0]
                    self.common_new.write_to_db_result(fk=new_kpi_fk, numerator_id=template_fk,
                                                       result=value, denominator_id=self.store_id)
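
A sketch of the per-scene-type counting, assuming get_scene_count returns the number of distinct scenes of a template (toy data):

import pandas as pd

df = pd.DataFrame({'template_name': ['Shelf', 'Shelf', 'Cooler'],
                   'scene_fk': [1, 2, 3]})
for template_name in df.template_name.unique():
    value = df[df['template_name'] == template_name]['scene_fk'].nunique()
    print('{}: {}'.format(template_name, value))  # Shelf: 2, Cooler: 1
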
 def _params_validation(self, scenes_to_check, sequence_params, population):
     """
     :param scenes_to_check: List of scenes that passed the location filters.
     :param sequence_params: The sequence additional attributes.
     :param population: The population filters for the sequence calculation.
     :return: 1 in case of valid params, 0 otherwise.
     """
     if not scenes_to_check:
         Log.debug("There aren't any valid scenes for sequence calculation")
         return 0
     if self._matches.empty:
         Log.debug(
             "Cannot calculate Sequence KPI since the match product in scene is empty!"
         )
         return 0
     if not self._validate_population_structure(population):
         Log.debug("Wrong population structure! Exiting..")
         return 0
     if set(sequence_params.keys()).difference(
             self._get_default_sequence_parameters().keys()):
         Log.debug("Wrong attributes in Additional Attributes! Exiting..")
         return 0
     if not sequence_params[AdditionalAttr.REPEATING_OCCURRENCES] and \
             sequence_params[AdditionalAttr.MIN_TAGS_OF_ENTITY] > 1:
         Log.debug(
             "In case repeating occurrences is False, minimum tags of facings param must be equal to 1"
         )
         return 0
     return 1
 def _delete_previous_data(self):
     """
     Deletes previous data from probedata.display_surface and report.display_item_facts,
     using a temp table for the scenes list to prevent a lock.
     :return: None
     """
     Log.debug(
         self.log_prefix +
         ' Deleting existing data in display_surface and display_visit_summary'
     )
     drop_temp_table_query = "drop temporary table if exists probedata.t_scenes_to_delete_displays;"
     queries = [
         drop_temp_table_query,
         """ create temporary table probedata.t_scenes_to_delete_displays as
             select pk as scene_fk from probedata.scene where session_uid = '{}';"""
         .format(self.session_uid),
         """ delete report.display_item_facts, probedata.display_surface
             from probedata.t_scenes_to_delete_displays
              join probedata.display_surface
                 on probedata.display_surface.scene_fk = probedata.t_scenes_to_delete_displays.scene_fk
              left join report.display_item_facts
                 on report.display_item_facts.display_surface_fk = probedata.display_surface.pk;""",
         drop_temp_table_query
     ]
     self.project_connector.disconnect_rds()
     self.project_connector.connect_rds()
     self.cur = self.project_connector.db.cursor()
     for query in queries:
         self.cur.execute(query)
 def add_kpis_from_template(self):
     self.add_sets_to_static()
     self.add_kpis_to_static()
     self.add_atomics_to_static()
     Log.debug('{} Sets, {} KPIs and {} Atomics have been added'.format(
         self.kpi_counter['set'], self.kpi_counter['kpi'],
         self.kpi_counter['atomic']))
    def calculate_existence_of_blocks(self, conditions, include_empty=EXCLUDE_EMPTY, min_number_of_blocks=1, **filters):
        """
        :param conditions: A dictionary which contains assortment/availability conditions for filtering the blocks,
                           in the form of: {entity_type: (0 for assortment or 1 for availability,
                                                          a list of values (or None),
                                                          minimum number of assortment/availability)}.
                           For example: {'product_ean_code': (1, ['44545345434'], 3)}
        :param include_empty: This parameter dictates whether or not to discard Empty-typed products.
        :param min_number_of_blocks: The number of blocks needed in order for the KPI to pass.
                                     If all appearances are required: == self.ALL.
        :param filters: These are the parameters which the blocks are checked for.
        :return: The number of blocks (from all scenes) which match the filters and conditions.
        """
        filters, relevant_scenes = self.separate_location_filters_from_product_filters(**filters)
        if len(relevant_scenes) == 0:
            Log.debug('Block Together: No relevant SKUs were found for these filters {}'.format(filters))
            return False

        number_of_blocks = 0
        for scene in relevant_scenes:
            scene_graph = self.position_graphs.get(scene).copy()
            blocks, scene_graph = self.get_scene_blocks(scene_graph, allowed_products_filters=None,
                                                        include_empty=include_empty, **filters)
            for block in blocks:
                entities_data = {entity: [] for entity in conditions.keys()}
                for vertex in block:
                    vertex_attributes = scene_graph.vs[vertex].attributes()
                    for entity in conditions.keys():
                        entities_data[entity].append(vertex_attributes[entity])

                block_successful = True
                for entity in conditions.keys():
                    assortment_or_availability, values, minimum_result = conditions[entity]
                    if assortment_or_availability == 0:
                        if values:
                            result = len(set(entities_data[entity]).intersection(values))
                        else:
                            result = len(set(entities_data[entity]))
                    elif assortment_or_availability == 1:
                        if values:
                            result = len([facing for facing in entities_data[entity] if facing in values])
                        else:
                            result = len(entities_data[entity])
                    else:
                        continue
                    if result < minimum_result:
                        block_successful = False
                        break
                if block_successful:
                    number_of_blocks += 1
                    if number_of_blocks >= min_number_of_blocks:
                        return True
                else:
                    if min_number_of_blocks == self.ALL:
                        return False

        if number_of_blocks >= min_number_of_blocks or min_number_of_blocks == self.ALL:
            return True
        return False
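
One block's condition check in isolation, with made-up entity data (mode 0 counts distinct values for assortment, mode 1 counts facings for availability):

entities_data = {'brand_name': ['BrandA', 'BrandA', 'BrandB']}
conditions = {'brand_name': (0, ['BrandA', 'BrandB'], 2)}  # need >= 2 distinct of these brands
mode, values, minimum_result = conditions['brand_name']
if mode == 0:
    result = len(set(entities_data['brand_name']).intersection(values))
else:
    result = len([facing for facing in entities_data['brand_name'] if facing in values])
print(result >= minimum_result)  # True: both BrandA and BrandB appear in the block
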
    def update_atomic_kpi_data(self):
        """
        This function updates the KPI name, description and display_text according to the template.
        It reads the relevant set template, filters the relevant rows that need to be changed, and then collects and
        executes the queries against the DB.
        ** Note 1: It doesn't update the template itself!! **
        ** Note 2: Use a regular Excel file without any filters!!
        """
        queries_to_execute = []
        update_query = "UPDATE static.atomic_kpi SET name='{}', description='{}', display_text='{}' where pk={};"
        # Get the data
        relevant_kpi_data, filtered_project_data = self.get_relevant_kpi_data_and_template_data()
        if relevant_kpi_data.empty:
            return

        # Iterate over the uploaded template and create the queries
        for i in xrange(len(self.data)):
            row = self.data.iloc[i]
            new_name = row[CCRU_SANDConsts.KPI_ENG_NAME].replace('\n', '').strip()
            display = row[CCRU_SANDConsts.KPI_RUS_NAME].replace('\n', '').strip()
            presentation_order = row[CCRU_SANDConsts.SORTING]
            old_atomic_df = relevant_kpi_data[
                relevant_kpi_data[CCRU_SANDConsts.PRESENTATION_ORDER] == presentation_order]
            if len(old_atomic_df) > 1:
                old_atomic_name = filtered_project_data[
                    filtered_project_data[CCRU_SANDConsts.SORTING] == presentation_order][
                    CCRU_SANDConsts.KPI_ENG_NAME].values[0]
                try:
                    old_atom_fk = old_atomic_df[
                        old_atomic_df['atomic_kpi_name'] == old_atomic_name]['atomic_kpi_fk'].values[0]
                    queries_to_execute.append(
                        update_query.format(new_name, new_name,
                                            display.encode('utf-8'),
                                            old_atom_fk))
                except Exception:
                    Log.warning("No KPI defined for name = {}.".format(old_atomic_name))
                    continue
            else:
                old_atom_fk = old_atomic_df['atomic_kpi_fk'].values[0]
                queries_to_execute.append(
                    update_query.format(new_name, new_name,
                                        display.encode('utf-8'), old_atom_fk))

        # Execute the queries
        if queries_to_execute:
            cur = self.aws_conn.db.cursor()
            for query in queries_to_execute:
                cur.execute(query)
                print query
        self.aws_conn.db.commit()
        Log.debug("Total number of executed queries = {}".format(
            len(queries_to_execute)))
    def calculate_promo_compliance(self):
        self.json.create_kpi_data_json('promo', 'KPI_Promo_Tracking.xlsx', sheet_name='2019')
        kpi_data = self.json.project_kpi_dict.get('promo')

        Log.debug('KPI calculation stage: {}'.format('Promo Compliance'))
        self.tool_box.calculate_promo_compliance_store(kpi_data)

        Log.debug('KPI calculation stage: {}'.format('Committing results new'))
        self.tool_box.common.commit_results_data()
    def add_atomics_to_static(self):
        atomics = self.data.drop_duplicates(
            subset=[self.SET_NAME, self.KPI_NAME, self.ATOMIC_NAME],
            keep='first')
        cur = self.aws_conn.db.cursor()
        for i in xrange(len(atomics)):
            atomic = atomics.iloc[i]
            set_name = atomic[self.SET_NAME].replace("'", "\\'").encode('utf-8')
            kpi_name = unicode(atomic[self.KPI_NAME]).replace("'", "\\'").encode('utf-8')
            atomic_name = unicode(atomic[self.ATOMIC_NAME]).replace("'", "\\'").encode('utf-8')
            if self.ATOMIC_WEIGHT in atomic.keys() and atomic[self.ATOMIC_WEIGHT]:
                atomic_weight = float(atomic[self.ATOMIC_WEIGHT])
            else:
                atomic_weight = 'NULL'
            if self.ATOMIC_DISPLAY_TEXT in atomic.keys():
                atomic_display_text = unicode(atomic[self.ATOMIC_DISPLAY_TEXT]).replace("'", "\\'").encode('utf-8')
            else:
                atomic_display_text = atomic_name

            if self.kpi_static_data[(self.kpi_static_data['kpi_set_name'] == set_name) &
                                    (self.kpi_static_data['kpi_name'] == kpi_name) &
                                    (self.kpi_static_data['atomic_kpi_name'] == atomic_name)].empty:
                if set_name in self.kpis_added.keys() and kpi_name in self.kpis_added[set_name].keys():
                    kpi_fk = self.kpis_added[set_name][kpi_name]
                else:
                    kpi_fk = self.kpi_static_data[(self.kpi_static_data['kpi_set_name'] == set_name) &
                                                  (self.kpi_static_data['kpi_name'] == kpi_name)]['kpi_fk'].values[0]

                level3_query = \
                    """
                    INSERT INTO static.atomic_kpi (kpi_fk, name, description, display_text, presentation_order, display, atomic_weight)
                    VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', {6});
                    """.format(kpi_fk, atomic_name, atomic_name, atomic_display_text, 1, 'Y', atomic_weight)
                cur.execute(level3_query)
                self.kpi_counter['atomic'] += 1
            else:
                Log.debug(
                    "Atomic '{}' already exists for KPI '{}' Set '{}'. Ignored"
                    .format(atomic_name, kpi_name, set_name))

            if i % 10 == 0:
                self.aws_conn.db.commit()
                cur = self.aws_conn.db.cursor()

        self.aws_conn.db.commit()
    def calculate_block_together(self, allowed_products_filters=None, include_empty=EXCLUDE_EMPTY,
                                 minimum_block_ratio=1, result_by_scene=False, **filters):
        """
        :param allowed_products_filters: These are the parameters which are allowed to corrupt the block without failing it.
        :param include_empty: This parameter dictates whether or not to discard Empty-typed products.
        :param minimum_block_ratio: The minimum (block number of facings / total number of relevant facings) ratio
                                    in order for KPI to pass (if ratio=1, then only one block is allowed).
        :param result_by_scene: True - The result is a tuple of (number of passed scenes, total relevant scenes);
                                False - The result is True if at least one scene has a block, False - otherwise.
        :param filters: These are the parameters which the blocks are checked for.
        :return: see 'result_by_scene' above.
        """
        filters, relevant_scenes = self.separate_location_filters_from_product_filters(**filters)
        if len(relevant_scenes) == 0:
            if result_by_scene:
                return 0, 0
            else:
                Log.debug('Block Together: No relevant SKUs were found for these filters {}'.format(filters))
                return True
        number_of_blocked_scenes = 0
        cluster_ratios = []
        for scene in relevant_scenes:
            scene_graph = self.position_graphs.get(scene).copy()
            clusters, scene_graph = self.get_scene_blocks(scene_graph, allowed_products_filters=allowed_products_filters,
                                                          include_empty=include_empty, **filters)

            new_relevant_vertices = self.filter_vertices_from_graph(scene_graph, **filters)
            for cluster in clusters:
                relevant_vertices_in_cluster = set(cluster).intersection(new_relevant_vertices)
                if len(new_relevant_vertices) > 0:
                    cluster_ratio = len(relevant_vertices_in_cluster) / float(len(new_relevant_vertices))
                else:
                    cluster_ratio = 0
                cluster_ratios.append(cluster_ratio)
                if cluster_ratio >= minimum_block_ratio:
                    if result_by_scene:
                        number_of_blocked_scenes += 1
                        break
                    else:
                        if minimum_block_ratio == 1:
                            return True
                        else:
                            all_vertices = {v.index for v in scene_graph.vs}
                            non_cluster_vertices = all_vertices.difference(cluster)
                            scene_graph.delete_vertices(non_cluster_vertices)
                            return cluster_ratio, scene_graph
        if result_by_scene:
            return number_of_blocked_scenes, len(relevant_scenes)
        else:
            if minimum_block_ratio == 1:
                return False
            elif cluster_ratios:
                return max(cluster_ratios)
            else:
                return None
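
The core ratio in isolation: a cluster passes when its share of the relevant vertices reaches minimum_block_ratio (vertex ids here are arbitrary):

cluster = {0, 1, 2}
relevant_vertices = {0, 1, 2, 3}
minimum_block_ratio = 0.75
cluster_ratio = len(cluster & relevant_vertices) / float(len(relevant_vertices))
print(cluster_ratio >= minimum_block_ratio)  # True: 3 of 4 relevant facings sit in one block
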
    def calculate_product_sequence(self, sequence_filters, direction, empties_allowed=True, irrelevant_allowed=False,
                                   min_required_to_pass=STRICT_MODE, custom_graph=None, **general_filters):
        """
        :param sequence_filters: One of the following:
                        1- a list of dictionaries, each containing the filters values of an organ in the sequence.
                        2- a tuple of (entity_type, [value1, value2, value3...]) in case every organ in the sequence
                           is defined by only one filter (and of the same entity, such as brand_name, etc).
        :param direction: left/right/top/bottom - the direction of the sequence.
        :param empties_allowed: This dictates whether or not the sequence can be interrupted by Empty facings.
        :param irrelevant_allowed: This dictates whether or not the sequence can be interrupted by facings which are
                                   not in the sequence.
        :param min_required_to_pass: The number of sequences needed to exist in order for KPI to pass.
                                     If STRICT_MODE is activated, the KPI passes only if it has NO rejects.
        :param custom_graph: A filtered Positions graph - given in case only certain vertices need to be checked.
        :param general_filters: These are the parameters which the general data frame is filtered by.
        :return: True if the KPI passes; otherwise False.
        """
        if isinstance(sequence_filters, (list, tuple)) and isinstance(sequence_filters[0], (str, unicode)):
            sequence_filters = [{sequence_filters[0]: values} for values in sequence_filters[1]]

        pass_counter = 0
        reject_counter = 0

        if not custom_graph:
            filtered_scif = self.scif[self.get_filter_condition(self.scif, **general_filters)]
            scenes = set(filtered_scif['scene_id'].unique())
            for filters in sequence_filters:
                scene_for_filters = filtered_scif[self.get_filter_condition(filtered_scif, **filters)]['scene_id'].unique()
                scenes = scenes.intersection(scene_for_filters)
                if not scenes:
                    Log.debug('None of the scenes include products from all types relevant for sequence')
                    return True

            for scene in scenes:
                scene_graph = self.position_graphs.get(scene)
                scene_passes, scene_rejects = self.calculate_sequence_for_graph(scene_graph, sequence_filters, direction,
                                                                                empties_allowed, irrelevant_allowed)
                pass_counter += scene_passes
                reject_counter += scene_rejects

                if pass_counter >= min_required_to_pass:
                    return True
                elif min_required_to_pass == self.STRICT_MODE and reject_counter > 0:
                    return False

        else:
            scene_passes, scene_rejects = self.calculate_sequence_for_graph(custom_graph, sequence_filters, direction,
                                                                            empties_allowed, irrelevant_allowed)
            pass_counter += scene_passes
            reject_counter += scene_rejects

        if pass_counter >= min_required_to_pass or reject_counter == 0:
            return True
        else:
            return False
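
A sketch of the first normalization step above - the tuple form of sequence_filters becomes one filter dict per organ (brands are made up):

sequence_filters = ('brand_name', ['BrandA', 'BrandB', 'BrandC'])
organs = [{sequence_filters[0]: value} for value in sequence_filters[1]]
print(organs)  # [{'brand_name': 'BrandA'}, {'brand_name': 'BrandB'}, {'brand_name': 'BrandC'}]
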
    def is_query_result_valid(self, result, session_uid):
        if len(result) == 0:
            Log.warning('Could not generate MyChemist report for session {}. Query returned empty result'.format(
                session_uid))
            return False

        if len(result[~result['connected_shelf_fk'].isnull()]) > 0:
            Log.debug('This session was generated by IOT device, omitting the report')
            return False

        return True
 def check_connection(self, rds_conn):
     try:
         rds_conn.db.cursor().execute(
             "select pk from probedata.session where session_uid = '{}';".
             format(self.session_uid))
      except:
          # any failure here means the connection has gone stale - reconnect
          rds_conn.disconnect_rds()
         rds_conn.connect_rds()
         Log.debug('DB is reconnected')
         return False
     return True
 def get_product_alt_code(self, product_name):
     """
     Takes a product name and returns the pk of the product whose alt_code_1 attribute matches it.
     :param product_name: str
     :return: product_fk, or None if no product carries this alt_code_1
     """
     products = self.all_products[['product_name', 'product_fk', 'alt_code_1']]
     products = products.loc[products['alt_code_1'] == product_name]
     if products.empty:
         Log.debug('Product {} has no valid alt_code_1 attribute'.format(product_name))
         return None
     else:
         return products['product_fk'].iloc[0]
 def calculate_energy_drinks(self, shelf_occupation_dict, product_list_field):
     """
     This function calculates the score for the energy drinks category.
     """
     score = 0
     for shelf_number in range(1, shelf_occupation_dict.get(NUM_OF_SHELVES) + 1):
         for bay_number in range(1, shelf_occupation_dict.get(NUM_OF_BAYS) + 1):
             # get the current probe to calculate - specific shelf, bay, and only in main_placement scene type
             curr_probe = get_curr_probe(shelf_occupation_dict.get(DF), shelf_number, bay_number,
                                         shelf_occupation_dict.get(MAIN_PLACEMENT_SCENES))
             if not curr_probe.empty:
                 score += self.calculate_category(curr_probe, product_list_field)
     Log.debug("category score " + str(score))
     return score
    def calculate_relative_position(self, tested_filters, anchor_filters, direction_data, min_required_to_pass=1,
                                    **general_filters):
        """
        :param tested_filters: The tested SKUs' filters.
        :param anchor_filters: The anchor SKUs' filters.
        :param direction_data: The allowed distance between the tested and anchor SKUs.
                               In the form: {'top': 4, 'bottom': 0, 'left': 100, 'right': 0}
                               Alternative form: {'top': (0, 1), 'bottom': (1, 1000), ...} - as ranges.
        :param min_required_to_pass: The number of appearances that must pass the relative-position check in order
                                     for the KPI to pass. If all appearances are required, pass a string or a big
                                     number.
        :param general_filters: These are the parameters which the general data frame is filtered by.
        :return: True if (at least) one pair of relevant SKUs fits the distance requirements; otherwise - returns False.
        """
        filtered_scif = self.scif[self.get_filter_condition(self.scif, **general_filters)]
        tested_scenes = filtered_scif[self.get_filter_condition(filtered_scif, **tested_filters)]['scene_id'].unique()
        anchor_scenes = filtered_scif[self.get_filter_condition(filtered_scif, **anchor_filters)]['scene_id'].unique()
        relevant_scenes = set(tested_scenes).intersection(anchor_scenes)

        if relevant_scenes:
            pass_counter = 0
            reject_counter = 0
            for scene in relevant_scenes:
                scene_graph = self.position_graphs.get(scene)
                if not len(scene_graph.vs):
                    continue
                tested_vertices = self.filter_vertices_from_graph(scene_graph, **tested_filters)
                anchor_vertices = self.filter_vertices_from_graph(scene_graph, **anchor_filters)
                for tested_vertex in tested_vertices:
                    for anchor_vertex in anchor_vertices:
                        moves = {'top': 0, 'bottom': 0, 'left': 0, 'right': 0}
                        path = scene_graph.get_shortest_paths(anchor_vertex, tested_vertex, output='epath')
                        if path:
                            path = path[0]
                            for edge in path:
                                moves[scene_graph.es[edge]['direction']] += 1
                            if self.validate_moves(moves, direction_data):
                                pass_counter += 1
                                if isinstance(min_required_to_pass, int) and pass_counter >= min_required_to_pass:
                                    return True
                            else:
                                reject_counter += 1
                        else:
                            Log.debug('Tested and Anchor have no direct path')
            if pass_counter > 0 and reject_counter == 0:
                return True
            else:
                return False
        else:
            Log.debug('None of the scenes contain both anchor and tested SKUs')
            return False
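
A sketch of the move counting plus the range-form check; validate_moves itself is not shown, so the range semantics below are an assumption based on the docstring:

moves = {'top': 0, 'bottom': 0, 'left': 0, 'right': 0}
for direction in ['right', 'right', 'top']:   # edge directions along a found path
    moves[direction] += 1
direction_data = {'top': (0, 1), 'bottom': (0, 0), 'left': (0, 0), 'right': (0, 100)}
valid = all(low <= moves[d] <= high for d, (low, high) in direction_data.items())
print(valid)  # True: two right moves and one top move fit the allowed ranges
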
    def is_new(self, data, level=3):
        if level == 3:
            existing = self.kpi_static_data[
                (self.kpi_static_data[BATRUConst.SET_FK] == self.set_fk)
                & (self.kpi_static_data[BATRUConst.KPI_FK] == data[0]) &
                (self.kpi_static_data[BATRUConst.ATOMIC_NAME] == data[1])]
        elif level == 2:
            existing = self.kpi_static_data[
                (self.kpi_static_data[BATRUConst.SET_FK] == self.set_fk)
                & (self.kpi_static_data[BATRUConst.KPI_NAME] == data[0])]
        else:
            Log.debug('not valid level for checking new KPIs')
            return False

        return existing.empty
 def calculate_redbull_manufacturer(self, shelf_occupation_dict, product_list_field):
     """
     This function calculates the score for the Red Bull manufacturer category:
     it iterates shelves and bays as if they were a matrix and aggregates the score for each "cell" ("curr_probe").
     """
     score = 0
     for shelf_number in range(1, shelf_occupation_dict.get(NUM_OF_SHELVES) + 1):
         for bay_number in range(1, shelf_occupation_dict.get(NUM_OF_BAYS) + 1):
             # get the current probe to calculate - specific shelf, bay, and only in main_placement scene type
             curr_probe = get_curr_probe(shelf_occupation_dict.get(DF), shelf_number, bay_number,
                                         shelf_occupation_dict.get(MAIN_PLACEMENT_SCENES))
             if not curr_probe.empty:
                 score += self.calculate_manufacturer(curr_probe, product_list_field)
     Log.debug("manufacturer score " + str(score))
     return score
 def _filter_scenes_by_location(self, location):
     """
     This method returns the relevant scene by location filters.
     :param location: A dictionary with location attributes like scene_id, template_fk, location_type and
     the requested values. E.g: {'template_name': ['great_template'], scene_id: [1, 2, 3]}
     """
     relevant_scenes = self.scif.scene_id.unique().tolist()
     if location is not None:
         conditions = {self.input_parser.LOCATION: location}
         try:
             relevant_scenes = self.input_parser.filter_df(conditions, self.scif).scene_id.unique().tolist()
         except (AttributeError, KeyError):
             Log.debug("location parameter is not in the required structure.")
             relevant_scenes = []
     return relevant_scenes
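
A self-contained stand-in for the filter_df call, assuming it reduces to an isin mask per location column (toy scenes):

import pandas as pd

scif = pd.DataFrame({'scene_id': [1, 2, 3],
                     'template_name': ['great_template', 'other', 'great_template']})
location = {'template_name': ['great_template']}
mask = pd.Series(True, index=scif.index)
for column, values in location.items():
    mask &= scif[column].isin(values)
print(scif[mask].scene_id.unique().tolist())  # [1, 3]
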
 def main_function(self):
     """
     This is the main KPI calculation function.
     It calculates the score for every KPI set and saves it to the DB.
     """
     if self.tool_box.scif.empty:
         Log.warning('Scene item facts is empty for session {}'.format(
             self.tool_box.session_uid))
         return
     self.tool_box.main_calculation()
     self.tool_box.commit_results_data()
     self.tool_box.common.commit_results_data()
     if self.tool_box.template_warnings:
         Log.debug('The following templates did not exist in DB - '
                   'fallback to excel. Details: {}'
                   ''.format(self.tool_box.template_warnings))
 def get_compliance(self, manual_planogram_data=None, manual_scene_data=None):
     """
     This function filters the irrelevant products out, creates a matrix that matches the bays of the POG and the
     scene and scores them, finds the best way to match the bays, and returns the match tags.
     :param manual_planogram_data: match_product_in_planogram (just for testing)
     :param manual_scene_data: match_product_in_scene (just for testing)
     :return: DF of match_product_in_scene_fk with the tags
     """
     tag_compliance = pd.DataFrame(columns=[Keys.MATCH_FK, Keys.COMPLIANCE_STATUS_FK])
     try:
         self.planogram_matches = self._data_provider.planogram_data \
             if manual_planogram_data is None else manual_planogram_data
         self.scene_matches = self._data_provider.matches if manual_scene_data is None else manual_scene_data
         self._filter_irrelevant_out()
         self.scene_bays = self.scene_matches[Keys.BAY_NUMBER].unique().tolist()
         self.pog_bays = self.planogram_matches[Keys.BAY_NUMBER].unique().tolist()
         if len(self.scene_bays) == 1 and len(self.pog_bays) == 1:
             if self.scene_bays != self.pog_bays:
                 scene_bay_data = self._get_df_of_bay(self.scene_matches, self.scene_bays[0])
                 pog_bay_data = self._get_df_of_bay(self.planogram_matches, self.pog_bays[0])
             else:
                 scene_bay_data = self.scene_matches
                 pog_bay_data = self.planogram_matches
             tag_compliance, score = self._local_get_tag_planogram_compliance(scene_bay_data, pog_bay_data)
         elif len(self.scene_bays) < 5 and len(self.pog_bays) < 5:
             tag_compliance = self._get_iterated_position_full_solution()
         else:
             tag_compliance = self._get_iterated_position_greedy()
         if tag_compliance is None:
             tag_compliance = get_tag_planogram_compliance(self.scene_matches, self.planogram_matches)
         if 1 in tag_compliance['compliance_status_fk'].tolist():
             try:
                 compliance_products = pd.merge(self.scene_matches, tag_compliance, on='match_fk', how='left')
                 pog_products = self.planogram_matches['product_fk'].unique().tolist()
                 wrong_extra_tags = compliance_products[
                     (compliance_products['compliance_status_fk'] == 1) &
                     (compliance_products['product_fk'].isin(pog_products))]['match_fk'].tolist()
                 tag_compliance.loc[tag_compliance['match_fk'].isin(wrong_extra_tags), 'compliance_status_fk'] = 2
             except Exception as er:
                 Log.debug(er.message)
     except Exception as e:
         Log.error("Calculated compliance has failed: " + e.message)
         try:
             tag_compliance = get_tag_planogram_compliance(self.scene_matches, self.planogram_matches)
         except Exception as er:
             Log.error("Calculated compliance has failed: " + er.message)
     return tag_compliance
    def add_kpis_to_static(self):
        kpis = self.data.drop_duplicates(subset=[self.SET_NAME, self.KPI_NAME],
                                         keep='first')
        cur = self.aws_conn.db.cursor()
        for i in xrange(len(kpis)):
            set_name = kpis.iloc[i][self.SET_NAME].replace("'", "\\'").encode('utf-8')
            kpi_name = unicode(kpis.iloc[i][self.KPI_NAME]).replace("'", "\\'").encode('utf-8')
            if self.KPI_WEIGHT in kpis.iloc[i].keys() and kpis.iloc[i][self.KPI_WEIGHT]:
                kpi_weight = float(kpis.iloc[i][self.KPI_WEIGHT])
            else:
                kpi_weight = 'NULL'
            if self.kpi_static_data[(self.kpi_static_data['kpi_set_name'] == set_name) &
                                    (self.kpi_static_data['kpi_name'] == kpi_name)].empty:
                if set_name in self.sets_added.keys():
                    set_fk = self.sets_added[set_name]
                else:
                    try:
                        set_fk = self.kpi_static_data[
                            self.kpi_static_data['kpi_set_name'] == set_name]['kpi_set_fk'].values[0]
                    except IndexError:
                        set_fk = self.sets_added[set_name]
                level2_query = \
                    """
                    INSERT INTO static.kpi (kpi_set_fk, display_text, weight)
                    VALUES ('{0}', '{1}', {2});
                    """.format(set_fk, kpi_name, kpi_weight)
                cur.execute(level2_query)
                if set_name in self.kpis_added.keys():
                    self.kpis_added[set_name][kpi_name] = cur.lastrowid
                else:
                    self.kpis_added[set_name] = {kpi_name: cur.lastrowid}
                self.kpi_counter['kpi'] += 1
            else:
                Log.debug(
                    "KPI '{}' already exists for KPI Set '{}'. Ignored".format(
                        kpi_name, set_name))

            if i % 10 == 0:
                self.aws_conn.db.commit()
                cur = self.aws_conn.db.cursor()

        self.aws_conn.db.commit()
 def add_sets_to_static(self):
     set_names = self.data[self.SET_NAME].unique().tolist()
     existing_set_names = self.kpi_static_data['kpi_set_name'].unique().tolist()
     set_names_to_add = set(set_names).difference(existing_set_names)
     if set_names_to_add:
         cur = self.aws_conn.db.cursor()
         for set_name in set_names_to_add:
             level1_query = \
                 """
                 INSERT INTO static.kpi_set (name, missing_kpi_score, enable, normalize_weight, expose_to_api, is_in_weekly_report)
                 VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}');
                 """.format(set_name.encode('utf-8'), 'Bad', 'Y', 'N', 'N', 'N')
             cur.execute(level1_query)
             self.sets_added[set_name.encode('utf-8')] = cur.lastrowid
             self.kpi_counter['set'] += 1
         self.aws_conn.db.commit()
     set_names_ignored = set(set_names).difference(set_names_to_add)
     if set_names_ignored:
         Log.debug("KPI Sets '{}' already exist. Ignored".format(
             set_names_ignored))
    def _insert_into_display_surface(self, display_surface):
        """
        Inserts into probedata.display_surface the displays identified in each scene and their sizes.
        For each display it records the new record's pk, to be used as a foreign key when inserting into
        report.display_visit_summary.
        :param display_surface: a data frame of displays per scene with their surface sizes.
        :return: the same data as a data frame, with a display_surface_fk column added.
        """
        # Optional performance improvement: build the insert values directly from the df
        # instead of converting it with to_dict.
        Log.debug(self.log_prefix + ' Inserting to probedata.display_surface')
        display_surface_dict = display_surface.to_dict('records')
        query = '''insert into probedata.display_surface
                        (
                            scene_fk
                            , display_fk
                            , surface
                        )
                        values
                         {};'''
        # chain the value tuples: each pass fills the trailing '{}' placeholder with
        # "<values line>, {}" so the next pass can keep appending; the last line closes it
        for display in display_surface_dict[:-1]:
            query_line = self._get_display_surface_query_line(display) + ',' + '{}'
            query = query.format(query_line)
        query = query.format(self._get_display_surface_query_line(display_surface_dict[-1]))
        self.cur.execute(query)
        self.project_connector.db.commit()
        last_insert_id = self.cur.lastrowid
        row_count = self.cur.rowcount
        if row_count == len(display_surface_dict):
            for j in range(0, len(display_surface_dict)):
                display_surface_dict[j]['display_surface_fk'] = last_insert_id
                last_insert_id += 1
        else:
            msg = self.log_prefix + ' error: not all displays were inserted.'
            Log.error(msg)
            raise Exception(msg)

        return pd.DataFrame(display_surface_dict)
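
The pk back-fill at the end relies on MySQL reporting the first auto-increment id of a multi-row insert and assigning the rest contiguously (which the code above assumes). In isolation:

# lastrowid after a single multi-row MySQL insert is the id of the FIRST inserted row;
# the remaining rows are assumed to get consecutive ids
last_insert_id = 500                        # made-up value for illustration
rows = [{'scene_fk': 1}, {'scene_fk': 2}, {'scene_fk': 3}]
for j in range(len(rows)):
    rows[j]['display_surface_fk'] = last_insert_id + j
print([r['display_surface_fk'] for r in rows])  # [500, 501, 502]
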