示例#1
0
    def retrievePreviousManifest(self, parent_trace, manifest_dict):
        '''
        Given a manifest expressed as a dict with a certain version N, will retrieve the same manifest
        but with version N-1, and return it as a dict.

        If no prior version exists, it returns None

        @param parent_trace Execution trace used to provide context if errors are raised
        @param manifest_dict A dict representation of a manifest; must contain enough metadata for
                                ManifestUtils().inferHandle to build a handle (including a version) from it
        '''
        new_handle                  = ManifestUtils().inferHandle(parent_trace, manifest_dict)
        new_version                 = new_handle.version

        if new_version < 1: # versions should be 1, 2, 3, .. etc, not 0 or below
            raise ApodeixiError(parent_trace, "Invalid manifest with a version below 1",
                                            data = {"version given": str(new_version),
                                                    "manifest handle": new_handle.display(parent_trace)})
        # Check that if we are doing an update a prior version does exist
        # NOTE(review): this aliases new_handle (no copy is made), so mutating version/name below also
        # mutates new_handle. Harmless today since new_handle is not used past this point, but fragile —
        # consider making a copy if this method grows.
        prior_handle            = new_handle
        prior_handle.version    = new_version - 1

        # Check if we are in a rollover situation. If so we also use last year's name in the handle, not
        # the one in manifest_dict.
        #
        # For example, when rolling from "FY 22" to "FY 23", we want to do version integrity by looking if a prior version
        #   exists in name 
        #                               "modernization.fy-22.astrea.official",  (which is given by label.rollFromName(-))
        #  instead of looking in name 
        #                               "modernization.fy-23.astrea.official",  (which is given by manifestNameFromLabel(-))
        # where we wouldn't find one.    
        #            
        roll_from_name          = RolloverUtils().get_rollFromName(parent_trace, manifest_dict)
        if roll_from_name != None:
            prior_handle.name   = roll_from_name

        prior_manifest, prior_manifest_path     = self.retrieveManifest(parent_trace, prior_handle)

        return prior_manifest
    def persistManifest(self, parent_trace, manifest_dict):
        '''
        Persists manifest_dict as a yaml object and returns a ManifestHandle that uniquely identifies it.

        The manifest is saved under self.output_manifests_dir, with a filename of the form
        <test case name>.<kind>[.<version>].yaml

        @param parent_trace Execution trace used to provide context if errors are raised
        @param manifest_dict A dict representation of the manifest to persist. Must contain a 'kind' field.
        '''
        kind                = manifest_dict['kind']

        version             = ManifestUtils().get_manifest_version(parent_trace, manifest_dict)
        # Only add a ".<version>" suffix when a non-empty version is present
        if version is not None and len(str(version).strip()) > 0:
            suffix          = '.' + str(version)
        else:
            suffix          = ''
        manifest_file       = self.test_case_name + "." + kind + suffix + ".yaml"
        my_trace            = parent_trace.doing("Persisting manifest",
                                      data={
                                          'manifests_dir':
                                          self.output_manifests_dir,
                                          'manifest_file': manifest_file
                                      },
                                      origination={
                                          'concrete class':
                                          str(self.__class__.__name__),
                                          'signaled_from':
                                          __file__
                                      })
        YAML_Utils().save(my_trace,
                          data_dict=manifest_dict,
                          path=self.output_manifests_dir + '/' +
                          manifest_file)
        handle              = ManifestUtils().inferHandle(my_trace, manifest_dict)
        return handle
示例#3
0
    def persistManifest(self, parent_trace, manifest_dict):
        '''
        Persists manifest_dict as a yaml object and returns a ManifestHandle that uniquely identifies it.

        Will raise an ApodeixiError if version consistency is violated, i.e., can only save
        manifest_dict with version N+1 if N is the highest version existing in the KnowledgeStore, if any. 
        If no prior version of the manifests exists then the manifest_dict must have version number equal to 1.
        '''
        my_trace                        = parent_trace.doing("Checking version consistency")

        self.checkDuplicateManifest(my_trace, manifest_dict)

        prior_manifest                  = self.retrievePreviousManifest(my_trace, manifest_dict)

        new_handle                      = ManifestUtils().inferHandle(my_trace, manifest_dict)
        new_version                     = new_handle.version

        # Check that if we are doing an update a prior version does exist
        if new_version > 1 and prior_manifest is None:
            raise ApodeixiError(my_trace, "Can't persist manifest with version " + str(new_version) 
                                            + " because no prior manifest exist with version " + str(new_version - 1),
                                        data = {"manifest handle": new_handle.display(my_trace)})

        my_trace                        = parent_trace.doing("Persisting manifest")
        # Fixed: previously passed parent_trace to super(), leaving the "Persisting manifest"
        # trace created just above dead/unused
        handle                          = super().persistManifest(my_trace, manifest_dict)
        return handle
示例#4
0
    def registerForeignKeyConstraints(self, parent_trace, all_manifests_dict):
        '''
        Registers a foreign key constraint so that milestones reference big rocks.
        '''
        super().registerForeignKeyConstraints(parent_trace, all_manifests_dict)

        my_trace            = parent_trace.doing(
            "Registering a foreign key constraint between milestones and big rocks"
        )

        big_rock_dict       = all_manifests_dict[0]
        milestones_dict     = all_manifests_dict[1]

        big_rock_handle     = ManifestUtils().inferHandle(my_trace, big_rock_dict)
        milestones_handle   = ManifestUtils().inferHandle(my_trace, milestones_dict)

        entries             = ForeignKeyConstraintEntries(my_trace, big_rock_handle, self.store)

        milestone_entities  = milestones_dict['assertion']['milestone']
        # Keys ending in "-name" are display labels rather than UIDs, so they are skipped
        for milestone_uid in milestone_entities.keys():
            if milestone_uid.endswith("-name"):
                continue
            path_to_reference   = ["assertion", "milestone", milestone_uid, "big-rock"]
            link                = ForeignKeyLink(my_trace, milestones_handle, path_to_reference, self.store)
            entries.addLink(my_trace, link)

        constraints         = self.store.getForeignKeyConstraints(my_trace)
        constraints.registerEntries(my_trace, entries)
 def __init__(self):
     # Deferred import: a module-level import of ManifestUtils would create a circular
     # dependency, so it is done here, in one of the few places that needs it
     from apodeixi.knowledge_base.manifest_utils import ManifestUtils
     self.MU                 = ManifestUtils()
     # Populated later by the build_schema* methods
     self.acronyminfo_list   = []
class PostResponse(Response):
    '''
    Data structure used as a response to a post request on the knowledge base
    '''
    def __init__(self):
        super().__init__()
        # Deferred import: importing ManifestUtils at module level would create a circular
        # dependency, so it is imported here, in one of the few places that needs it
        from apodeixi.knowledge_base.manifest_utils import ManifestUtils
        self.MU = ManifestUtils()

    def recordCreation(self, parent_trace, manifest_dict, manifest_nb):
        '''
        Enriches the content of this PostResponse by recording that a manifest was created.

        @param manifest_dict A dictionary representation of a manifest. It must have 'metadata.name',
                                'metadata.namespace' and 'kind', since those are mandatory fields
                                for all manifests.
        '''
        manifest_handle = self.MU.inferHandle(parent_trace, manifest_dict)
        self.manifest_handles_dict[Response.CREATED][manifest_nb] = manifest_handle

    def recordUpdate(self, parent_trace, manifest_dict, manifest_nb):
        '''
        Enriches the content of this PostResponse by recording that a manifest was updated.

        @param manifest_dict A dictionary representation of a manifest. It must have 'metadata.name',
                                'metadata.namespace' and 'kind', since those are mandatory fields
                                for all manifests.
        '''
        manifest_handle = self.MU.inferHandle(parent_trace, manifest_dict)
        self.manifest_handles_dict[Response.UPDATED][manifest_nb] = manifest_handle

    def recordUnchanged(self, parent_trace, manifest_dict, manifest_nb):
        '''
        Enriches the content of this PostResponse by recording that a manifest was not changed.

        @param manifest_dict A dictionary representation of a manifest. It must have 'metadata.name',
                                'metadata.namespace' and 'kind', since those are mandatory fields
                                for all manifests.
        '''
        manifest_handle = self.MU.inferHandle(parent_trace, manifest_dict)
        self.manifest_handles_dict[Response.UNCHANGED][manifest_nb] = manifest_handle

    def recordArchival(self, parent_trace, original_handle, archival_handle):
        '''
        Enriches the content of this PostResponse by recording that a posting was archived.
        '''
        archival_pair = [original_handle, archival_handle]
        self.posting_handles_dict[Response.ARCHIVED].append(archival_pair)

    def recordOptionalForm(self, parent_trace, form_request):
        '''
        Enriches the content of this PostResponse by recording a form request that the caller
        may optionally use.
        '''
        self.form_requests_dict[Response.OPTIONAL_FORMS].append(form_request)
示例#7
0
    def describe_post_response(self, parent_trace, post_response, store):
        '''
        Returns a string suitable for display in the Apodeixi CLI.

        The string is formatted as a table that provides information on what Apodeixi did in response to a user
        initiated posting.

        The table has a row per manifest that was involved, with a description of what changed, if anything.
        '''
        headers         = ["Manifest", "Event", "Entities added", "Entities removed",
                           "Entities changed", "Entities unchanged", "Namespace", "Name"]
        rows            = []
        for handle in post_response.allManifests(parent_trace):
            loop_trace  = parent_trace.doing("Creating summary for 1 manifest",
                                             data = {"manifest handle": handle.display(parent_trace)})
            desc        = ManifestUtils().describe_manifest(loop_trace,
                                                            manifest_handle = handle,
                                                            store           = store,
                                                            post_response   = post_response)
            # Important: order in the row must match the order of `headers`. Required by
            # the tabulate Python package.
            rows.append([desc.manifest_filename,
                         desc.event,
                         desc.entities_added_desc,
                         desc.entities_removed_desc,
                         desc.entities_changed_desc,
                         desc.entities_unchanged_desc,
                         desc.namespace,
                         desc.name])

        return "\nKnowledgeBase activity:\n\n" + tabulate(rows, headers=headers) + "\n"
示例#8
0
    def checkDuplicateManifest(self, parent_trace, manifest_dict):
        '''
        Given a manifest expressed as a dict with a certain version N, will confirm that the store
        does not already have a manifest with version N.

        If it does, this method raises an ApodeixiError

        @param parent_trace Execution trace used to provide context if errors are raised
        @param manifest_dict A dict representation of a manifest; must contain enough metadata for
                                ManifestUtils().inferHandle to build a handle (including a version) from it
        '''
        new_handle                  = ManifestUtils().inferHandle(parent_trace, manifest_dict)
        new_version                 = new_handle.version

        if new_version < 1: # versions should be 1, 2, 3, .. etc, not 0 or below
            raise ApodeixiError(parent_trace, "Invalid manifest with a version below 1",
                                            data = {"version given": str(new_version),
                                                    "manifest handle": new_handle.display(parent_trace)})
        # Check that no manifest exists with this version
        duplicate_manifest, duplicate_manifest_path     = self.retrieveManifest(parent_trace, new_handle)
        if duplicate_manifest is not None:
            raise ApodeixiError(parent_trace, "Invalid duplicate manifest: one already exists for the given version",
                                            data = {"version given": str(new_version),
                                                    "manifest handle": new_handle.display(parent_trace)})
    def _getMatchingManifests(self, parent_trace, folder, manifest_handle):
        '''
        Returns two lists of the same length:

        * A list of dictionaries, one per manifest that matches the given manifest handle
        * A list of filenames, which is where each of those manifests was retrieved from

        The search is done over the space of objects in the store that lie "at or below the folder", where
        the notion of "folder" depends on the concrete store class. For filesystem-based stores, "folder" would
        literally be a directory of some filesystem mount.

        @param parent_trace Execution trace used to provide context if errors are raised
        @param folder A string scoping a subset of the store
        @param manifest_handle A ManifestHandle instance that (should) uniquely identify a single manifest in the store
        '''
        matching_manifests = []  # List of dictionaries, one per manifest
        matching_filenames = [
        ]  # List of filename strings. Will be 1-1 lined up with matching_manifests

        # NOTE(review): a previous comment here mentioned searching "input area and output area", but only
        # one pass over `folder` is visible in this method — confirm whether a second search area was removed.
        # Load every candidate file and keep the ones whose inferred handle matches `manifest_handle`.
        for filename in self._getFilenames(parent_trace, folder):
            my_trace = parent_trace.doing("Loading manifest from file",
                                          data={
                                              'filename': filename,
                                              'folder': folder
                                          },
                                          origination={
                                              'concrete class':
                                              str(self.__class__.__name__),
                                              'signaled_from':
                                              __file__
                                          })
            manifest_dict = YAML_Utils().load(my_trace,
                                              path=folder + '/' + filename)
            inferred_handle = ManifestUtils().inferHandle(
                my_trace, manifest_dict)
            if inferred_handle == manifest_handle:
                matching_filenames.append(filename)
                matching_manifests.append(manifest_dict)

        return matching_manifests, matching_filenames
 def __init__(self):
     '''
     Initializes this object by delegating to the parent class and acquiring a ManifestUtils instance.
     '''
     super().__init__()
     # To avoid circular dependencies, we had to move this import to the very few functions in this module that need
     # ManifestUtils
     from apodeixi.knowledge_base.manifest_utils import ManifestUtils
     self.MU = ManifestUtils()
示例#11
0
def diff(kb_session, manifest_api, kind, namespace, name):
    '''
    Makes a diff between two versions of a manifest.
    
    For a list of valid MANIFEST_APIs and KINDs, try 'get apis'
    
    For a list of valid NAMESPACEs and NAMEs, try 'get assertions'

    MANIFEST_API must be a versionless manifest API. 
    
    Example: 'delivery-planning.journeys.a6i.io', (as opposed to 'delivery-planning.journeys.a6i.io/v1a'). 
    '''
    timer = ApodeixiTimer()
    func_trace = FunctionalTrace(parent_trace=None, path_mask=None)
    # Fixed copy-paste error: this is the diff command, so the trace should not say "post"
    root_trace = func_trace.doing("CLI call to diff",
                                  origination={'signaled_from': __file__})

    kb_operation_succeeded = False
    try:
        # Fixed copy-paste error: the service invoked below is diff_manifest, not postByFile
        my_trace = root_trace.doing(
            "Invoking ManifestUtils's diff_manifest service")

        diff_result = ManifestUtils().diff_manifest(
            parent_trace=my_trace,
            store=kb_session.store,
            manifest_api_name=manifest_api,
            namespace=namespace,
            name=name,
            kind=kind,
            version1=None,
            version2=None)
        kb_operation_succeeded = True

        diff_description = CLI_Utils().describe_diff_response(
            my_trace, kb_session, diff_result)

        # GOTCHA:
        # Make sure to remove non-ascii characters before passing the description to click.echo, since it
        # will raise errors if there are characters like \uFFFFD in the description
        #
        diff_description = StringUtils().to_ascii(diff_description)

        click.echo(diff_description)
        output = "Success"
        click.echo(output)
        click.echo(timer.elapsed_time_message())
    except ApodeixiError as ex:
        error_msg = CLI_ErrorReporting(kb_session).report_a6i_error(
            parent_trace=root_trace, a6i_error=ex)
        if kb_operation_succeeded:
            error_msg                       = "KnowledgeBase operation completed, but run into a problem when preparing "\
                                                + "a description of the response:\n"\
                                                + error_msg
        # GOTCHA
        #       Use print, not click.echo or click exception because they don't correctly display styling
        #       (colors, underlines, etc.). So use vanilla Python print and then exit
        print(error_msg)
        _sys.exit()
    except Exception as ex:
        try:
            error_msg = CLI_ErrorReporting(kb_session).report_generic_error(
                parent_trace=root_trace, generic_error=ex)
            if kb_operation_succeeded:
                error_msg                   = "KnowledgeBase operation completed, but run into a problem when preparing "\
                                                + "a description of the response:\n"\
                                                + error_msg
        except Exception as ex2:
            error_msg                       = "CLI run into trouble: found error:\n\n\t" + str(ex) + "\n\n" \
                                                + "To make things worse, when trying to produce an error log file with a "\
                                                + "stack trace, run into an additional error:\n\n\t" + str(ex2)
        # GOTCHA
        #       Use print, not click.echo or click exception because they don't correctly display styling
        #       (colors, underlines, etc.). So use vanilla Python print and then exit
        print(error_msg)
        _sys.exit()
class UID_Acronym_Schema():
    '''
    Class defining the schema of a manifest from the acronym perspective.

    This is logically represented as an ordered list of AcronymInfo objects. Such a list can be generated in two different ways:

    * During posting cycles (i.e., Excel files are being read), it can be generated from a list of intervals. The order of
      the intervals establishes the order of the AcronymInfo objects in the schema.

    * During generation cycles (i.e., an Excel file is being generated from a manifest_dict), it can be generated by an algorithm
      that traverses the manifest_dict to read the UID and entity fields in the tree represented by the manifest_dict.
    '''
    def __init__(self):
        # Deferred import: a module-level import of ManifestUtils would create a circular
        # dependency, so it is done here, in one of the few places that needs it
        from apodeixi.knowledge_base.manifest_utils import ManifestUtils
        self.MU                 = ManifestUtils()
        # Populated later by the build_schema* methods
        self.acronyminfo_list   = []

    def __str__(self):
        # Render as the string of a list of the per-AcronymInfo string representations
        return str([str(info) for info in self.acronyminfo_list])

    def acronym_infos(self):
        '''
        Returns the ordered list of AcronymInfo objects that make up this schema.
        '''
        return self.acronyminfo_list

    def schema_info_for_UID(self, parent_trace, a_uid):
        '''
        Returns a pair:

        * An AcronymInfo object representing the schema properties corresponding to the given UID `a_uid`
        * A string corresponding to the name of the UID column that should be used in a DataFrame or Excel
          representation for that `a_uid`. Typically these strings are like "UID", "UID-1", "UID-2", etc.

        @param a_uid A string representing a UID. It may be a full UID like "BR3.MR2.SR1" or just a leaf UID like "SR1".

        '''
        leaf_uid        = a_uid.split(".")[-1]
        acronym         = UID_Utils().parseToken(parent_trace, leaf_uid)[0]
        matches         = [info for info in self.acronyminfo_list if info.acronym == acronym]
        if len(matches) != 1:
            raise ApodeixiError(
                parent_trace,
                "UID Acronym schema is either not initialized or corrupted: "
                " it does not recognize a unique acronym for entity's UID",
                data={
                    "entity_UID": str(leaf_uid),
                    "inferred acronyms": str(self.acronyminfo_list)
                })
        acronyminfo     = matches[0]
        level           = self.acronyminfo_list.index(acronyminfo)
        # Level 0 uses the plain "UID" column; deeper levels are suffixed: "UID-1", "UID-2", etc.
        if level == 0:
            UID_COL     = Interval.UID
        else:
            UID_COL     = Interval.UID + '-' + str(level)

        return acronyminfo, UID_COL

    def build_schema_from_intervals(self, parent_trace, parser, interval_list):
        '''
        Builds this schema's AcronymInfo list from a list of intervals. This method is intended to be used
        when parsing Excel files as part of the process of generating manifests.

        @param parser An instance of the BreakdownTree class. This must be the instance that is parsing Excel files, and
                    whose state is relied upon by this method to correctly create the schema.
        @param interval_list A list of Interval objects
        '''
        self.acronyminfo_list = [AcronymInfo(parser.getAcronym(parent_trace, interval.entity_name),
                                             interval.entity_name)
                                 for interval in interval_list]

    def build_schema_from_manifest(self, parent_trace, manifest_dict):
        '''
        Builds this schema from a manifest's content. This method is intended to be used when
        generating Excel files.

        See documentation of self.build_schema_from_manifest_content, to which this method delegates
        '''
        entity          = self.MU.infer_entity(parent_trace=parent_trace,
                                               manifest_dict=manifest_dict,
                                               manifest_nickname="Some manifest")
        content_dict    = manifest_dict['assertion'][entity]
        self.build_schema_from_manifest_content(parent_trace, content_dict,
                                                parent_path='assertion.' + entity)

    def build_schema_from_manifest_content(self, parent_trace, content_dict,
                                           parent_path):
        '''
        This method is intended to be used when generating Excel files.

        It inspects the manifest's contents (the `content_dict`), and based on that the acronym schema is
        constructed.

        Implementation notes:

        The algorithm used requires first doing a full pass through the whole "tree" (looking at `content_dict` as a tree)
        because local inference would be buggy. It would lead to the wrong UID columns being defined in the DataFrame.
        
        Example:
        
        Consider a path in `content_dict` involving these UIDs: A1, A1.I1, A1.II.AS1.

        If we inferred level-based UID column names from these, we might think that A1 corresponds to "UID", that
        A1.I1 corresponds to "UID-1", and that A1.I1.AS1 corresponds to "UID-2".

        However, such an algorithm was found to be buggy in real life, which is why this class AcronymSchema is needed.

        This example exhibits the bug. Consider we are supposed to create a DataFrame like this, where every column
        is an entity:

               Area |  Indicator        |  Sub Indicator    | Applicable Space
            ====================================================================
             Adopt  |  %containeraized  |                   | Components
                    |  %testing         | Functional Tests  | Scenarios.functional
                    |                   | Performance Tests | 
 

        In this example there are 4 acronyms: A (for Area), I (for Indicator), SI (for Sub Indicator), and
        AS (for Applicable Area)

        The first row has no SubIndicator, so the leaf entity would get a full UID of A1.I1.AS1, whereas the other
        two paths (i.e., rows) would get full UIDs of A1.I2.SI1.AS1 and A1.I2.SI2

        If we assigned level-based UID column names, we would incorrectly use UID-2 for the interval
        [Applicable Space] in row1, and use UID-2 for a different interval [Sub Indicator] for the other two rows.

        This would be a bug, that would corrupt the DataFrame constructed by this class. When this bug was found, the
        effect was that the DataFrame sported "UID-2" appearing as two separate columns, causing errors downstream in code
        that assumed that each column name was unique.

        So to fix this problem, this method does a pass through the entire `content_dict` to get a "schema", basically
        an object that logically represents a list of acronyms (and entities, not shown here), in this example would be:

            ["A", "I", SI", "AS"]

        That way other Apodeixi processing code can use methods like self.schema_info_for_UID when finding out 
        the leveled-UID column name to use for a full UID. 

        The implementation of this method is in two passes (sort of a map-reduce)

        * First pass is recursive, going through the `content_dict` and getting a list of lists, one for each path.
          In our example that would produce (notice not all acronyms appear in all lists, and in some cases may
          not all appear in even 1 list)

            [ ["A", "I", "AS"], ["A", "I", SI", "AS"], ["A", "I", SI"]]

        * Second pass then reduces this to a single list that has the property that it includes all acronyms listed
          in any of the lists in the first pass, in the same order. In the example, that is ["A", "I", SI", "AS"]
        '''
        # "Map" phase: all_acronym_info_lists is a list of lists of _AcronymInfo objects, one inner list per path
        all_acronym_info_lists = self._map_acronyminfo_lists(parent_trace,
                                                             content_dict,
                                                             parent_path,
                                                             parent_uid=None)

        # Now the "reduce" phase: peel off the first element of every pending list, one tree level at a time,
        # appending each newly-seen AcronymInfo to `result` in order
        result = []
        working_acronyminfo_lists = all_acronym_info_lists.copy()
        MAX_LOOPS = 1000  # To avoid inadvertent infinite loops if there is a bug in the logic in the loop
        level = 1  # The level in the tree we are looking at
        while level < MAX_LOOPS and len(working_acronyminfo_lists) > 0:
            loop_trace = parent_trace.doing(
                "Determining next acronym to append to the acronyms list",
                data={
                    "result so far":
                    ListUtils().print(parent_trace, result),
                    "pending to explore":
                    ListUtils().print(parent_trace, working_acronyminfo_lists)
                })
            first_acronyminfo = self._find_first_acronyminfo(
                loop_trace, working_acronyminfo_lists, content_dict, level)
            if not first_acronyminfo in result:
                result.append(first_acronyminfo)
            next_working_lists = []
            for a_list in working_acronyminfo_lists:
                # Remove the first member of the lists, since we already processed them and we know they were all
                # consistent (same acronym and, padding aside, same entity_name) because self._find_first_acronyminfo checked
                modified_list = a_list[1:len(a_list)]

                # Lists that became empty are dropped, so the while-loop terminates once all paths are consumed
                if len(modified_list) > 0:
                    next_working_lists.append(modified_list)
            # Initialize state for next cycle in loop
            level += 1
            working_acronyminfo_lists = next_working_lists

        self.acronyminfo_list = result

    def pad_uid(self, parent_trace, a_full_uid):
        '''
        Utility method that can be used by callers that need to compare the padded and unpadded full UIDs.
        Some explanation:

        Unpadded UIDs are used in the paths through the manifest trees. For an acronym schema like
        [BR(big-rock), SR(Sub rock), and TR(Tiny rock)], unpadded UIDs are things like BR2.SR3 and BR1.TR1

        However, the UID fields themselves inside the manifests must be "padded" if the user skips an entity, so that
        that knowledge of having skipped an entity is available later when the manifest is represented as a DataFrame or in 
        Excel

        In our example, padding BR1.TR1 results in BR1.SR0.TR1, since the end-user skipped the sub-rock entity on that path.

        Padding BR2.SR3 yields no change (still BR2.SR3), since in that case the user skipped no entity.
        '''
        all_acronyms    = [info.acronym for info in self.acronym_infos()]
        padded_tokens   = []
        for token in a_full_uid.split("."):

            acronym, nb     = UID_Utils().parseToken(parent_trace, token)
            if not acronym in all_acronyms:
                raise ApodeixiError(
                    parent_trace,
                    "Can't pad UID because it uses an acronym not in the schema",
                    data={
                        "bad acronym": str(acronym),
                        "uid": str(a_full_uid),
                        "schema": str(self)
                    })
            schema_idx      = all_acronyms.index(acronym)
            if schema_idx < len(padded_tokens):
                raise ApodeixiError(
                    parent_trace,
                    "Can't pad UID because it has an acronym out of order with regards to the "
                    + "acronym schema. It must appear at index " +
                    str(schema_idx) + " but " +
                    "that is already taken by the partially constructed padded tokens so far",
                    data={
                        "bad acronym": str(acronym),
                        "uid": str(a_full_uid),
                        "schema": str(self),
                        "padded tokens so far": str(padded_tokens)
                    })
            # Insert "<acronym>0" padding for every schema level the user skipped before this token
            while len(padded_tokens) < schema_idx:
                padded_tokens.append(all_acronyms[len(padded_tokens)] + "0")

            # Now add our payload
            padded_tokens.append(token)
        return ".".join(padded_tokens)

    def find_entities(self, parent_trace, content_dict):
        '''
        Finds and returns a list of entities for `content_dict`, defined as children that are a dictionary and not
        a scalar.

        Normally there is a unique such - after all, the acronym schema prescribes that there be a unique acronym
        at each level of the schema. However, there might be several because of the boundary case when the user
        skips entities.

        For example, suppose a manifest has an acronym schema of [BR(big-rock), MR(medium-rocks), TR(tiny-rocks)]
        A user can create an Excel posting leading to UIDs BR1.MR0.TR1 and BR1.MR1.TR1

        In that case, if content_dict = manifest_dict["assertion"]["big-rock"]["BR1"], then it has two sub-entities,
        since both of these exist:

                manifest_dict["assertion"]["big-rock"]["BR1"]["medium-rocks"]["MR1"][UID] = BR1.MR1

                manifest_dict["assertion"]["big-rock"]["BR1"]["tiny-rocks"]["TR1"][UID] = BR1.MR0.TR1

        That means that content_dict has two sub-entities: "medium-rocks" and "tiny-rocks"

        For that reason this method returns a list. It may be empty if the content_dict is a leaf node of
        a manifest and has not dict-valued children.

        In the eventuality that there are multipe children of the dictionary that are also dictionaries, it
        raises an ApodeixiError.
        
        '''
        sub_entities = []
        for k in content_dict.keys():
            child = content_dict[k]
            if type(child) == dict:
                sub_entities.append(k)

        return sub_entities

    def _find_first_acronyminfo(self, parent_trace, all_acronyminfo_lists,
                                content_dict, level):
        '''
        Helper method for the "reduce" phase of the algorithm used by method _find_acronym_list.
        Refer to that method's documentation for an explanation of the context for the algorithm.

        Validates the consistency of the first members of the inner lists in `all_acronyminfo_lists`:
        they must all share the same acronym and, except where padding occurs (in which case the
        entity_name is None), the same entity_name.

        Raises an ApodeixiError if the consistency check fails.

        Otherwise it returns an AcronymInfo object assembled from the unique acronym and unique
        entity_name just found.

        @param all_acronyminfo_lists A list of lists, where each inner list contains _AcronymInfo objects
        @param content_dict A dict for the manifest sub-tree under consideration (not read directly here)
        @param level An int, starting at 1, that tells us the level in the manifest tree we are looking at,
                    assuming padding (i.e., should match the number of tokens in any full UID field
                    (hence padded, being a field) for the acronyminfo we'll return)
        '''
        # Reduce each non-empty inner list to its head element, dropping duplicates via a set
        head_infos = list(set(a_list[0] for a_list in all_acronyminfo_lists if len(a_list) > 0))

        my_trace = parent_trace.doing(
            "Checking that all paths give us the same acronyms and entities at this level",
            data={"level": str(level)})

        # Exactly one acronym must be present across all heads
        distinct_acronyms = list({info.acronym for info in head_infos})
        if len(distinct_acronyms) > 1:
            raise ApodeixiError(
                my_trace,
                "Manifest seems corrupted: found multiple acronyms at the same level, should have exactly 1",
                data={
                    "competing acronyms":
                    ListUtils().print(my_trace, distinct_acronyms)
                })
        if len(distinct_acronyms) == 0:
            raise ApodeixiError(
                my_trace,
                "Manifest seems corrupted: found no acronym for a given level, should have exactly 1")
        unique_acronym = distinct_acronyms[0]

        # A null entity_name is legal - it happens when there is padding due to the end user skipping
        # some entities - so such heads are excluded before checking entity uniqueness
        distinct_entities = list({
            info.entity_name for info in head_infos if info.entity_name != None
        })
        if len(distinct_entities) > 1:
            raise ApodeixiError(
                my_trace,
                "Manifest seems corrupted: found multiple entities at the same level and for the same acronym. Should have exactly 1",
                data={
                    "acronym": str(unique_acronym),
                    "competing entities":
                    ListUtils().print(my_trace, distinct_entities)
                })
        if len(distinct_entities) == 0:
            raise ApodeixiError(
                my_trace,
                "Manifest seems corrupted: found no entity for an acronym, should have exactly 1",
                data={"acronym": str(unique_acronym)})

        return AcronymInfo(unique_acronym, distinct_entities[0])

    def _map_acronyminfo_lists(self,
                               parent_trace,
                               content_dict,
                               parent_path,
                               parent_uid,
                               level=0):
        '''
        This is a recursive helper method to the "map-reduce" algorithm used by method _find_acronym_list. 
        Refer to the documentation of that method for an explanation of the context for the algorithm.

        This method returns a list of lists, where the inner lists consist of _AcronymInfo objects: one
        inner list per "path" down the tree represented by `content_dict`, holding the acronym infos
        encountered along that path, in order.

        Raises an ApodeixiError if `parent_path` is null or blank, if the tree under `content_dict` is not
        well formed (a UID key maps to nothing or to a non-dict), or if the same acronym appears at more
        than one level of the tree.

        @param content_dict A dict representing the manifest sub-tree to examine
        @param parent_path A string like "assertion.big-rock" when this method is first called, and like
                "assertion.big-rock.BR1.Sub rock" when this method calls itself recursively
        @param parent_uid A string like "BR1" identifying the node above `content_dict`, or None on the
                first call
        @param level An integer, to tells us where we are in the recursion. Starts at 0, so it must equal
                the number of tokens in parent_uid.
                Helpful to disambiguate the index to use
                for an AcronymInfo object in the returned value, particularly when the user skipped some 
                intermediate entities so can't rely on the length of parent_path for such a determination.
        '''
        my_trace = parent_trace.doing("Mapping acronym lists for '" +
                                      parent_path + "''",
                                      data={'signaledFrom': __file__})
        # The always-true guard is a stylistic device used in this file to visually indent validation code
        if True:
            if parent_path == None or len(parent_path.strip()) == 0:
                raise ApodeixiError(
                    my_trace,
                    "Can't process a parent_path that is null or blank")

        # parent_path is something like "assertion.big-rock" when this method is first called, and
        # like  "assertion.big-rock.BR1.Sub rock" when this method is calls recursively on itself
        path_tokens = parent_path.split('.')
        entity_name = path_tokens[
            -1]  # like "big-rock" on 1st call, and "Sub rock" on recursive call

        # Child keys that are UIDs (like "BR1", "BR2", ...); sibling keys ending in '-name' are skipped
        entity_uids = [
            key for key in content_dict.keys() if not key.endswith('-name')
        ]

        # Will be one per "path" within the "tree" represented by `content_dict`, consisting of the acronyms
        # encountered along that path, in order.
        all_acronyms_result = []

        my_trace = parent_trace.doing("Mapping acronyms under of '" +
                                      str(parent_path) + "'",
                                      data={'signaledFrom': __file__})

        # On a first call we loop through something like e_uid = "BR1", "BR2", "BR3", .... For that call
        #       parent_uid = None and parent_path = "assertion.big-rock"
        # On a recursive call with parent_uid = "BR1" we loop through e_uid = "SR1", "SR2", "SR3", .... In this case
        #       parent_path = "assertion.big-rock.BR1.Sub rock"
        for e_uid in entity_uids:
            loop_trace = parent_trace.doing("Looping on entity with UID '" +
                                            str(e_uid) + "'",
                                            data={'signaledFrom': __file__})
            if parent_uid == None:
                full_e_uid = e_uid
            else:
                full_e_uid = parent_uid + '.' + e_uid

            e_path = parent_path + '.' + e_uid

            e_dict = content_dict[e_uid]

            inner_trace = loop_trace.doing("Checking tree under '" + e_path +
                                           "' is well formed",
                                           data={'signaledFrom': __file__})
            if True:
                # Check e.g. if content_dict = manifest_dict["assertion"]["big-rock"]["BR1"]["SubRock"]
                # and e_uid = "SR2", that content_dict["SR2"] exists and is a dictionary
                if e_dict == None:
                    raise ApodeixiError(
                        inner_trace,
                        "Badly formatted tree: found nothing under '" +
                        e_path + "'")
                if type(e_dict) != dict:
                    raise ApodeixiError(
                        inner_trace,
                        "Badly formatted tree: expected dictionary at '" +
                        e_path + "' but instead found a " + str(type(e_dict)))

            inner_trace = loop_trace.doing("Getting acronym lists under '" +
                                           e_path + "'",
                                           data={'signaledFrom': __file__})
            sub_entities = self.find_entities(
                inner_trace, e_dict)  # Something like "Sub rock"
            # Now we gear up to make a recursive call. For example, if we have been processing the interval
            # ["UID", "big-rock"] and e_dict = content_df["BR1"], we are now going to take the plunge into
            # the unique sub-entity "Sub rock" and make a recursive call to process interval
            # ["UID-1", "Sub rock"] passing content_df["BR1"]["Sub rock"] as the content to process.
            #
            # For our e_path = "assertion"."big-rock"."BR1" we pass a path of "assertion"."big-rock"."BR1"."Sub rock"
            # we set "ourselves" ("BR1") as the parent_uid in the recursive call
            #
            # next_level_infos is usually a singleton, but may include padding AcronymInfos when the
            # user skipped intermediate entities (see _next_level_acronym_info)
            next_level_infos = self._next_level_acronym_info(
                parent_trace=inner_trace,
                e_uid=e_uid,
                content_dict=content_dict,
                entity_name=entity_name,
                level=level)
            if len(sub_entities) == 0:
                all_acronyms_result.append(next_level_infos)
            else:
                for sub_entity in sub_entities:
                    inner_trace = loop_trace.doing(
                        "Making a recursive call for '" + sub_entity + "'",
                        data={'signaledFrom': __file__})

                    acronyminfos_subresult = self._map_acronyminfo_lists(
                        parent_trace=inner_trace,
                        content_dict=e_dict[sub_entity],
                        parent_path=e_path + '.' + sub_entity,
                        parent_uid=full_e_uid,
                        level=level + len(next_level_infos))
                    e_acronym = UID_Utils().parseToken(loop_trace, e_uid)[0]
                    for acronyminfos_sublist in acronyminfos_subresult:
                        # Check we are not about to put duplicate acronyms - if so, that is an error with the `content_df`
                        if e_acronym in [
                                info.acronym for info in acronyminfos_sublist
                        ]:
                            raise ApodeixiError(
                                inner_trace,
                                "Looks like manifest is corrupted because the same acronym is "
                                +
                                " used at different levels. An acronym should be used in only 1 level",
                                data={
                                    "Problem at UID":
                                    str(full_e_uid),
                                    "Acronyms below UID":
                                    ListUtils().print(inner_trace,
                                                      acronyminfos_sublist)
                                })
                        # Each sublist from the recursion yields one full path: our level's infos
                        # followed by the recursion's infos
                        acronyms_list = next_level_infos.copy()
                        acronyms_list.extend(acronyminfos_sublist)
                        all_acronyms_result.append(acronyms_list)

        return all_acronyms_result

    def _next_level_acronym_info(self, parent_trace, e_uid, content_dict,
                                 entity_name, level):
        '''
        Helper method for self._map_acronyminfo_lists. The latter is a recursive method, and this method is
        used when the recursion "hits bottom", or when aggregating results from a recursive call.

        It returns a list of AcronymInfo objects, which normally would be a singleton: the AcronymInfo for the
        very next level in the tree:

                        [AcronymInfo(e_acronym, entity_name)] where e_acronym is e_uid's acronym.

        *HOWEVER*, there is a boundary case that could lead to a bug unless we return a list with more than one
        element: when the user skipped some intermediate entity.

        Consider this example: the acronym schema should be [BR(big rock), SR(sub rock), TR(tiny rock)]
        but the user skipped sub-rock sometimes.

        Anticipating this might happen, full UIDs were generated in the UID Store that put a "0" whenever
        an entity is skipped. For example, BR1.SR0.TR1 instead of BR1.TR1

        Assume further that we are a point in the recursion where 
        
            content_dict = manifest_dict["assertion"][big-rocks][BR1][Tiny rocks], and content_dict[TR1][UID] = BR1.SR0.TR1

        Now, another path of the recursion would be perhaps

            content_dict2 = manifest_dict["assertion"][big-rocks][BR2][Sub rocks], and content_dict2[SR3][UID] = BR2.SR3
        
        In this situation, it would be wrong for us to return

            [AcronymInfo(TR, Tiny rocks)]

        because the other path, which is at the same level in the manifest_dict tree, would return

            [AcronymInfo(SR, Sub rocks)]
        
        which will trigger an error in our subsequent processing, since we would think that at this level there are two valid
        acronyms for the next level down: TR and SR, and only one acronym is allowed per level.

        Therefore, the *correct* behaviour is to *pad* that list returned from this method to ensure that TR is never
        at the same level as SR.

        That means returning [AcronymInfo(SR, None), AcronymInfo(TR, "Tiny rocks")]
        
        The level of padding can be determined by looking at content_dict[e_uid][UID]

        @param e_uid A relative UID token like "TR1" that must be a key of `content_dict`
        @param content_dict A dict for a manifest sub-tree; content_dict[e_uid] must carry a UID field
        @param entity_name The entity (like "Tiny rocks") to associate to e_uid's acronym
        @param level An int: the number of UID tokens already consumed by the levels above this one
        '''
        padded_uid = content_dict[e_uid][Interval.UID]

        padded_tokens = padded_uid.split(".")

        # Check consistency of UID field with the path UID
        my_trace = parent_trace.doing(
            "Checking that the UID field is for a UID that extends the prior level",
            data={
                "path incremental UID": str(e_uid),
                "level": str(level),
                "dict['UID']": str(padded_uid)
            })
        if len(padded_tokens) < level + 1:
            # GOTCHA: len(padded_tokens) must be wrapped in str(-). Without it this line would raise a
            # TypeError (cannot concatenate str and int) and mask the real ApodeixiError being built.
            raise ApodeixiError(
                my_trace,
                "UID field lacks the required tokens: expected at least " +
                str(level + 1) + " tokens, but found only " +
                str(len(padded_tokens)) + " in the UID field")
        # Check any extra tokens is only padding as we add that padding
        my_trace = parent_trace.doing(
            "Padding a list of AcronymInfos below a node in the manifest tree "
            + " due to user skipping entities",
            data={
                "path incremental UID": str(e_uid),
                "level": str(level),
                "dict['UID']": str(padded_uid)
            })
        result = []
        # Every token between our level and the last one must be a "0"-valued padding token
        # (like "SR0"); anything else means the UID field contradicts the tree path.
        for idx in range(level, len(padded_tokens) - 1):
            some_acronym, some_val = UID_Utils().parseToken(
                my_trace, padded_tokens[idx])
            if some_val != 0:
                raise ApodeixiError(
                    my_trace,
                    "Corrupted manifest: token '" + str(padded_tokens[idx]) +
                    "' in UID field '" + str(padded_uid) +
                    "' should have only been padding, i.e. a value of 0")
            # Add the padding
            result.append(
                AcronymInfo(some_acronym, None)
            )  # We put None for the entity because we don't know it, but that's OK

        # Any required padding is in, so now we can safely add the e_uid's acronym info
        e_acronym = UID_Utils().parseToken(my_trace, e_uid)[0]
        result.append(AcronymInfo(e_acronym, entity_name))

        return result
    def check_foreign_key_constraints(self, parent_trace, manifest_dict):
        '''
        This method is intended to be used as a check before updating a manifest in the store, to ensure that the
        updated version of a manifest does not remove any UIDs that are referenced by some other manifest.

        If such an integrity violation is found, this method raises an ApodeixiError. Otherwise it silently returns.

        Integrity checks are done against all the links that have been registered with this class against the manifest
        in question.

        @param parent_trace A chain of ApodeixiError traces, for error messages
        @param manifest_dict A dict representing the manifest about to be persisted
        '''
        handle = ManifestUtils().inferHandle(parent_trace, manifest_dict)
        if handle.version == 0:
            return  # This is a create, not an update, so nothing to check

        # self.registry maps handle-like keys to constraint entries; only entries whose key is for
        # this manifest's type are relevant
        pertinent_constraints = [
            (h, fkc_entries) for (h, fkc_entries) in self.registry.items()
            if h.getManifestType() == handle.getManifestType()
        ]
        if len(pertinent_constraints) == 0:
            return  # There are no constraints registered against this manifest type

        # The full set of UIDs present in the (new) manifest; any referenced UID missing from this
        # set is a violation
        manifest_uids = ManifestUtils().get_manifest_uids(
            parent_trace, manifest_dict)

        # The always-true guard is a stylistic device used in this file to visually indent a phase of the logic
        if True:
            # Take the union of all links across all of the pertinent constraints
            all_links = list(
                _itertools.chain(*[
                    fck_entries.links
                    for (h, fck_entries) in pertinent_constraints
                ]))

            # Take the union of all referencing types across all links across all pertinent_constraints
            referencing_handle_types = [
                link.referencing_handle.getManifestType() for link in all_links
            ]

            # Remove duplicates
            referencing_handle_types = list(set(referencing_handle_types))

            filtered_links = []
            for ht in referencing_handle_types:
                # Each referencing handle type may appear in multiple pertinent constraints.
                # For example, consider the case of milestones manifests that reference big-rocks manifests.
                #
                # In that example, suppose that there is a constraint under big-rock's version 2, which contains
                # links for milestones' version 2, say. Imagine that big-rocks are posted a few times, elevating
                # the big-rock version to version 8. Meanwhile, milestones is still at version 2. If milestones is
                # then posted, the milestone's version changes to 3, and since it points to version 8 of big-rocks, that
                # leads to a new ForeignKeyConstraintEntries constraint created (for big-rock version 8) under which
                # there would be a list of links for milestone (version 3)
                #
                # Thus, if we have to check constraints for referencing handle type ht=milestones, then we would have
                # multiple big-rock entries in the constraints data structure, each of them with milestones links.
                #
                # Of these multiple links, we only care about the ones for which the milestone version is highest.
                #
                # Hence we need to search for the highest version of the referencing manifest in the links,
                # and then only enforce those links when checking constraints.
                #
                #
                matching_links = [
                    link for link in all_links
                    if link.referencing_handle.getManifestType() == ht
                ]

                latest_version = max([
                    link.referencing_handle.version for link in matching_links
                ])
                latest_links = [
                    link for link in matching_links
                    if link.referencing_handle.version == latest_version
                ]
                # GOTCHA: while there is only one ManifestType in this loop, there might be multiple links since a
                # single referencing manifest instance has a link per path. So we use extend, not a append,
                # since there are multiple links to add, not 1
                filtered_links.extend(latest_links)

        # We will aggregate all foreign key constraint violations (if any) in a dictionary where the keys
        # are ManifestHandles for the referencing manifests, and the values are lists of UIDs that were removed
        # but are relied upon by those referencing manifests
        violations = {}
        for link in filtered_links:  #constraint_entries.links:
            referenced_uids = link.referenced_uids
            link_violations = [
                uid for uid in referenced_uids if not uid in manifest_uids
            ]
            if len(link_violations) > 0:
                violations[link.referencing_handle] = link_violations

        if len(violations) > 0:
            # Flatten all offending UIDs across referencing manifests for the error message
            violation_uids = []
            for ref_handle in violations.keys():
                violation_uids.extend(violations[ref_handle])
            # For the message, remove duplicate among the referencing handles. This happens since a referencing handle gets
            # a link for every path in the referencing manifest that contains UIDs from the referenced manifest.
            # So we create a set to remove duplicates, and then re-create a list from it
            referencing_handles = list(set(violations.keys()))
            ref_handle_msg = "\n".join([
                ref_handle.display(parent_trace)
                for ref_handle in referencing_handles
            ])
            raise ApodeixiError(
                parent_trace,
                "Foreign key violation for manifest: there are " +
                str(len(violations)) +
                " other manifests that reference UIDs that were removed from manifest",
                data={
                    "Manifest": handle.display(parent_trace),
                    "Problem UIDs": str(violation_uids),
                    "Referencing manifests": ref_handle_msg
                })
示例#14
0
    def _build_manifestsXLWriteconfig(self, parent_trace, manifestInfo_dict):
        '''
        Overwrites parent's implementation

        Creates and returns an AsExcel_Config_Table containing the configuration data for how to lay out and format
        all the manifests of `manifestInfo_dict` onto an Excel spreadsheet
        '''
        xlw_config_table                    = AsExcel_Config_Table()
        #x_offset                            = 3 
        y_offset                            = 1

        # Products need to be grouped by line of business, and then displayed in that order. So we do a bit
        # of pandas manipulation for that
        products_df                         = manifestInfo_dict['product.0'].getManifestContents(parent_trace)
        if "lineOfBusiness" in products_df.columns:
            # In this case, products have already been mapped to LOBs, so we will need to display products
            # grouped by LOB. First step: sort products by LOB so that they are grouped by LOB, and make
            # this the order how we display them (see below the definition of my_prod_mapper lambda, that uses
            # this sorted DataFrame)
            sorted_products_df          = ManifestUtils().sort_manifest_df_by_foreign_uid(
                                                                        parent_trace        = parent_trace, 
                                                                        manifest_df         = products_df, 
                                                                        foreign_uid_column  = "lineOfBusiness")
            ORIGINAL_INDEX_COL          = "index"     
            # Pad a UID top-level column, so that it is never blank, since that is how we will map rows
            # Reason UID might be blank is that a product may have sub-products, so product_df['UID'] would be blank
            # for the sub-product rows, since they "inherit" the UID from a prior row.
            sorted_products_df          = ManifestUtils().pad_manifest_dataframe(   parent_trace        = parent_trace, 
                                                                                    manifest_df         = sorted_products_df, 
                                                                                    padding_column      = Interval.UID)
                     
        else:
            # In this case, we must be displaying a template, so mapping isn't established yet. So
            # display in the same order
            sorted_products_df              = products_df

        for key in manifestInfo_dict:
            loop_trace                      = parent_trace.doing("Creating layout configurations for manifest '"
                                                                + str(key) + "'")
            manifest_info                   = manifestInfo_dict[key]
            data_df                         = manifest_info.getManifestContents(parent_trace)
            editable_cols = [col for col in data_df.columns if not col.startswith('UID')]
            if key == 'product.0':
                x_offset                    = 3 # Start 3 columns over so that line of business can go to the left
                hidden_cols                 = ["lineOfBusiness"]
                right_margin                = 0
                num_formats                 = {}
                excel_formulas              = None

                def my_prod_mapper(manifest_df, manifest_df_row_number, representer):
                    if not "UID" in manifest_df.columns:
                        # Implement vanilla behavior - no UID means this must be a template
                        excel_row           = y_offset + 1 + manifest_df_row_number # An extra '1' because of the headers
                        final_excel_row     = y_offset + len(manifest_df.index) # Don't do len(index)-1 since headers add a row
                        return excel_row, final_excel_row
                    else:
                        # This is the tricky case: we must not display products in the order the are listed in the manifest,
                        # but in an order that segments them into groups based on line-of-business, i.e.,
                        # in the order in which they appear in sorted_products_df
                        # So algorithm is:
                        #   1. Get the UID of the product for this manifest_df_row_number
                        #   2. Find the idx in sorted_products_df where that product UID appears
                        #   3. Drive the excel row number from that idx
                        if len(manifest_df.index) != len(sorted_products_df.index):
                            raise ApodeixiError(loop_trace, "Misalignment problem: two supposedly equivalent DataFrames "
                                                                + "(up to sorting) have different number of rows",
                                                                data = {"Number of rows": str(manifest_df.index) +
                                                                            " vs " + str(sorted_products_df.index)})

                        # Pad a UID top-level column, so that it is never blank, since that is how we will map rows
                        # Reason UID might be blank is that a product may have sub-products, so manifest_df['UID'] would be blank
                        # for the sub-product rows, since they "inherit" the UID from a prior row.
                        '''
                        padded_manifest_df  = ManifestUtils().pad_manifest_dataframe(   
                                                                                    parent_trace        = loop_trace, 
                                                                                    manifest_df         = manifest_df, 
                                                                                    padding_column      = Interval.UID)
                        PADDED_UID_COL      = Interval.UID + "_PADDED"

                        prod_uid            = padded_manifest_df[PADDED_UID_COL].iloc[manifest_df_row_number] 
                        sorted_idx_list     = sorted_products_df.index[sorted_products_df[Interval.UID] == prod_uid].tolist()
                        '''
                        sorted_idx_list     = sorted_products_df.index[sorted_products_df[ORIGINAL_INDEX_COL] == manifest_df_row_number].tolist()

                        if len(sorted_idx_list) != 1:
                            raise ApodeixiError(loop_trace, "Misalignment problem: not a unique entry in product manifest for "
                                                                + "a given UID",
                                                                data = {"manifest": key, 
                                                                        "manifest_df_row_number": str(manifest_df_row_number),
                                                                        "number of entries": str(len(sorted_idx_list))})
                        sorted_idx          = sorted_idx_list[0]
                        excel_row           = y_offset + 1 + sorted_idx # An extra '1' because of the headers
                        final_excel_row     = y_offset + len(manifest_df.index) # Don't do len(index)-1 since headers add a row
                        return excel_row, final_excel_row

                df_xy_2_excel_xy_mapper   = my_prod_mapper
            elif key == 'line-of-business.1':
                x_offset                    = 1 # Lay LOB column to the left of product
                hidden_cols                 = []
                right_margin                = 0
                num_formats                 = {}
                excel_formulas              = None

                def my_lob_mapper(manifest_df, manifest_df_row_number, representer):
                    '''
                    Maps a row number of the LOB manifest's DataFrame to the pair
                    (excel_row, final_excel_row) where that LOB should be rendered.
                    '''
                    if "UID" in manifest_df.columns:
                        # Tricky (non-template) case: LOBs must not be laid out in the order they
                        # appear in the manifest. Products are displayed segmented into groups by
                        # line-of-business (the order of sorted_products_df), so each LOB is shown
                        # on the row of the *first* product associated to it. Algorithm:
                        #   1. Take the LOB UID for this manifest_df_row_number
                        #   2. Find the positions in sorted_products_df whose product references that LOB UID
                        #   3. Derive the Excel row number from the first such position
                        lob_uid             = manifest_df['UID'].iloc[manifest_df_row_number]
                        matching_rows       = sorted_products_df.index[sorted_products_df['lineOfBusiness'] == lob_uid].tolist()
                        if not matching_rows:
                            raise ApodeixiError(loop_trace, "Misalignment problem: an LOB is not associated to any product",
                                                                data = {"manifest": key, "LOB UID": str(lob_uid)})
                        # Display the LOB on the first product that is associated with it
                        first_match         = matching_rows[0]
                        excel_row           = y_offset + 1 + first_match # An extra '1' because of the headers
                        final_excel_row     = y_offset + len(sorted_products_df.index) # headers add a row, so no len(index)-1
                        return excel_row, final_excel_row

                    # No UID column => this must be a template, so use vanilla row-for-row layout
                    excel_row           = y_offset + 1 + manifest_df_row_number # An extra '1' because of the headers
                    final_excel_row     = y_offset + len(manifest_df.index) # headers add a row, so no len(index)-1
                    return excel_row, final_excel_row

                df_xy_2_excel_xy_mapper     = my_lob_mapper
            else:
                raise ApodeixiError(loop_trace, "Invalid manifest key: '" + str(key) + "'")
            xlw_config  = ManifestXLWriteConfig(sheet                       = SkeletonController.GENERATED_FORM_WORKSHEET,
                                                manifest_name               = key, 
                                                read_only                   = False,
                                                is_transposed               = False,   
                                                viewport_width              = 100,  
                                                viewport_height             = 40,   
                                                max_word_length             = 20, 
                                                editable_cols               = editable_cols,
                                                hidden_cols                 = hidden_cols,  
                                                num_formats                 = num_formats, 
                                                excel_formulas              = excel_formulas,
                                                df_xy_2_excel_xy_mapper   = df_xy_2_excel_xy_mapper,
                                                editable_headers            = [],   
                                                x_offset                    = x_offset,    
                                                y_offset                    = y_offset)
            
            #x_offset                        += data_df.shape[1] -len(hidden_cols) + right_margin
            xlw_config_table.addManifestXLWriteConfig(loop_trace, xlw_config)
        return xlw_config_table
示例#15
0
        def checkReferentialIntegrity(self, parent_trace):
            '''
            Used to check that the values of Posting Label fields are valid. Does not return a value, but will
            raise an exception if any field is "invalid".

            Sometimes this validation might be against data configured in the ApodeixiConfig. Example: "organization"

            In other situations the validation is against the existence of static data objects which the label
            references. Example: "product" in the case of the Journeys domain.

            NOTE: This method is intended to be called *after* label.read(-) has completed, including any label.read(-)
            implemented by derived classes. 
            That is why it can't be called within label.read(-) at the PostingLabel parent class level,
            and why the design choice was made to have the calling code invoke this check right after calling label.read()

            :raises ApodeixiError: if the version of the referenced big-rocks manifest submitted in the
                Posting Label is not exactly the latest version known to the store.
            '''
            super().checkReferentialIntegrity(parent_trace)

            # In addition to checks made by the parent class, we want to check that references to read-only manifests
            # are correct. Specifically, we want to make sure that milestones manifest references the most recent version
            # of the big-rocks manifest, before we accept the submitted Excel for the milestones manifest.
            #
            # So we check that the version of the big-rocks in the Posting Label is indeed the most recent version of the
            # big-rocks.
            my_trace = parent_trace.doing(
                "Checking milestones reference most recent big-rocks")
            ME = MilestonesController
            manifest_api_name = self.controller.getManifestAPI().apiName()
            organization = self.organization(my_trace)
            kb_area = self.knowledgeBaseArea(my_trace)
            FMT = StringUtils(
            ).format_as_yaml_fieldname  # Abbreviation for readability
            namespace = FMT(organization + '.' + kb_area)
            manifest_name = self.controller.manifestNameFromLabel(
                my_trace, label=self, kind=ME.REFERENCED_KIND)

            # Look up the latest big-rocks manifest known to the store; its version is the reference point
            # against which the submitted version is compared. manifest_path is not needed here.
            manifest_dict, manifest_path = self.controller.store.findLatestVersionManifest(
                parent_trace=my_trace,
                manifest_api_name=manifest_api_name,
                namespace=namespace,
                name=manifest_name,
                kind=ME.REFERENCED_KIND)

            BIG_ROCKS_MANIFEST_NB = 0
            referenced_manifest_nb = BIG_ROCKS_MANIFEST_NB
            last_version_nb = ManifestUtils().get_manifest_version(
                my_trace, manifest_dict)

            submitted_version_nb = self.priorVersion(my_trace,
                                                     referenced_manifest_nb)

            # Any mismatch is an error; the two directions only differ in the diagnosis wording,
            # so build the message once instead of duplicating the whole raise (previously two
            # near-identical 13-line blocks).
            if submitted_version_nb != last_version_nb:
                if submitted_version_nb < last_version_nb:
                    # Stale form: a newer big-rocks version exists in the store
                    problem = "does not reference the most recent version of"
                else:
                    # Bad reference: the submitted version was never published
                    problem = "references a non-existent version of"
                raise ApodeixiError(
                    my_trace,
                    "Excel form needs to be refreshed and re-submitted because it "
                    + problem + " the '" + ME.REFERENCED_KIND +
                    "'. Request a new form " + "for '" + ME.MY_KIND +
                    "' to reflect the correct version for '" +
                    ME.REFERENCED_KIND +
                    "' and re-apply your changes to that form, and re-submit",
                    data={
                        "version submitted": str(submitted_version_nb),
                        "latest version": str(last_version_nb)
                    })