def securityFileSourceFile(cntlr, ownerObject, filepath, binary, stripDeclaration):
    """Handle FileSource file requests which can return decrypted contents.

    Returns a 1-tuple (FileNamedBytesIO,) for binary requests, a 2-tuple
    (FileNamedStringIO, encoding) for text requests, or None when the file
    has no encrypted counterpart (caller falls back to normal handling).
    """
    if ownerObject.hasEncryption:
        # set new iv, key based on entrypointfiles matching this filepath
        for entrypointfile in ownerObject.entrypointfiles:
            if (filepath == entrypointfile.get("file") or
                any(filepath == ixfile.get("file")
                    for ixfile in entrypointfile.get("ixds", ()))
                ) and "key" in entrypointfile and "iv" in entrypointfile:
                ownerObject.cipherIv = base64.decodebytes(entrypointfile["iv"].encode())
                ownerObject.cipherKey = base64.decodebytes(entrypointfile["key"].encode())
                break
        # may be a non-entry file (xsd, linkbase, jpg) using entry's iv, key
        if (os.path.exists(filepath + ENCRYPTED_FILE_SUFFIX)
                and ownerObject.cipherKey is not None
                and ownerObject.cipherIv is not None):
            # context manager so the encrypted file handle is closed promptly
            # (was io.open(...).read(), which leaked the handle)
            with io.open(filepath + ENCRYPTED_FILE_SUFFIX, "rb") as encrFile:
                encrdata = encrFile.read()
            cipher = AES.new(ownerObject.cipherKey, AES.MODE_CBC, iv=ownerObject.cipherIv)
            bytesdata = cipher.decrypt(encrdata)
            encrdata = None  # dereference before decode operation
            if binary:
                # return bytes; trim AES CBC padding (last byte is the pad length)
                return (FileSource.FileNamedBytesIO(filepath, bytesdata[0:-bytesdata[-1]]), )
            # detect encoding if there is an XML header
            encoding = XmlUtil.encoding(
                bytesdata[0:512],
                default=cntlr.modelManager.disclosureSystem.defaultXmlEncoding if cntlr else 'utf-8')
            # return decoded string; trim AES CBC padding and decode
            text = bytesdata[0:-bytesdata[-1]].decode(encoding or 'utf-8')
            bytesdata = None  # dereference before text operation
            if stripDeclaration:
                # file source may strip XML declaration for libxml
                xmlDeclarationMatch = FileSource.XMLdeclaration.search(text)
                if xmlDeclarationMatch:  # remove it for lxml
                    start, end = xmlDeclarationMatch.span()
                    text = text[0:start] + text[end:]
            return (FileSource.FileNamedStringIO(filepath, initial_value=text), encoding)
    return None
def setupPackageEntrypoints(cntlr, options, filesource, entrypointFiles, *args, **kwargs):
    """Populate entrypointFiles from installed taxonomy packages.

    Only acts when options.checkPackageEntries is set and no entrypoint files
    were provided; logs each package and entry point, and appends each archive
    entry point URL to entrypointFiles.
    """
    # check package entries formula code
    if getattr(options, "checkPackageEntries", False) and not entrypointFiles:
        for packageInfo in sorted(
                PackageManager.packagesConfig.get("packages", []),
                key=lambda packageInfo: (packageInfo.get("name", ""), packageInfo.get("version", ""))):
            cntlr.addToLog(_("Package %(package)s Version %(version)s"),
                           messageArgs={
                               "package": packageInfo["name"],
                               "version": packageInfo["version"]},
                           messageCode="info",
                           level=logging.INFO)
            # filesource parameter is rebound here to the package archive
            filesource = FileSource.openFileSource(packageInfo["URL"], cntlr)
            if filesource.isTaxonomyPackage:
                # if archive is also a taxonomy package, activate mappings
                filesource.loadTaxonomyPackageMappings()
            for name, urls in packageInfo.get("entryPoints", {}).items():
                for url in urls:
                    if filesource and filesource.isArchive:
                        cntlr.addToLog(
                            _(" EntryPoint %(entryPoint)s: %(url)s"),  # fixed "EntryPont" typo
                            messageArgs={
                                "entryPoint": name or urls[0][2],
                                "url": url[1]},
                            messageCode="info",
                            level=logging.INFO)
                        entrypointFiles.append({"file": url[1]})
def func_json_data(xule_context, *args):
    """Read a json file/url.

    Arguments:
        file_url (string or uri)

    Returns a dictionary/list of the json data.

    Raises XuleProcessingError for a non string/uri argument or invalid JSON.
    """
    file_url = args[0]
    if file_url.type not in ('string', 'uri'):
        raise XuleProcessingError(_("The file url argument of the json-dta() function must be a string or uri, found '{}'.".format(file_url.value)), xule_context)

    from arelle import PackageManager
    # NOTE(review): mapped_file_url is computed but unused; openFileSource below
    # already handles taxonomy package mappings — confirm before removing.
    mapped_file_url = PackageManager.mappedUrl(file_url.value)

    # Using the FileSource object in arelle. This will open the file and handle taxonomy package mappings.
    from arelle import FileSource
    file_source = FileSource.openFileSource(file_url.value, xule_context.global_context.cntlr)
    file = file_source.file(file_url.value, binary=True)
    # file is tuple of one item as a BytesIO stream. Since this is in bytes, it needs
    # to be converted to text via a decoder. Assuming the file is in utf-8.
    data_source = [x.decode('utf-8') for x in file[0].readlines()]
    try:
        json_source = json.loads(''.join(data_source))
    #except JSONDecodeError:
    except ValueError:
        raise XuleProcessingError(_("The file '{}' is not a valid JSON file.".format(file_url.value)), xule_context)

    # convert once and return (previously converted twice, discarding the first result)
    return xv.system_collection_to_xule(json_source, xule_context)
def update_rule_set_map(cntlr, new_map, overwrite=False):
    """Update (or replace) the rule set map file from a JSON map file.

    :param cntlr: Arelle controller used for file access and logging
    :param new_map: file name/url of the JSON map to merge in
    :param overwrite: when True the new map replaces the stored map entirely
    :raises XuleProcessingError: when the file is not valid JSON
    """
    # Open the new map
    from arelle import FileSource
    map_file_name = new_map  # keep the file name for error reporting before rebinding
    file_source = FileSource.openFileSource(new_map, cntlr)
    file_object = file_source.file(new_map)[0]
    try:
        new_map = json.load(file_object, object_pairs_hook=collections.OrderedDict)
    except ValueError:
        raise XuleProcessingError(
            _("New rule set map file does not appear to be a valid JSON file. File: {}"
              .format(map_file_name)))
    finally:
        file_object.close()  # release the underlying file handle

    if overwrite:
        rule_set_map = new_map
    else:
        # update existing map with entries from the new map
        rule_set_map = get_rule_set_map(cntlr)
        rule_set_map.update(new_map)

    # update the rule set map file
    with get_rule_set_map_file(cntlr, 'w') as rule_set_file:
        json.dump(rule_set_map, rule_set_file)

    if overwrite:
        cntlr.addToLog(_("Rule set map file replaced"), "xule")
    else:
        cntlr.addToLog(_("Rule set map file updated"), "xule")
def load(modelManager, url, nextaction, base=None):
    """Load an XBRL entry document (url string or FileSource) and discover its DTS.

    Returns the loaded ModelXbrl; shows nextaction in the status line when done.
    """
    from arelle import (ModelDocument, FileSource)
    xbrlModel = create(modelManager)
    if isinstance(url, FileSource.FileSource):
        xbrlModel.fileSource = url
        url = xbrlModel.fileSource.url
    else:
        xbrlModel.fileSource = FileSource.FileSource(url)
    xbrlModel.modelDocument = ModelDocument.load(xbrlModel, url, base, isEntry=True)
    # At this point the DTS is fully discovered but schemaLocation'ed xsd's are
    # not yet loaded.  Each pass over a document may add new urlDocs, so keep
    # sweeping until no unvisited documents remain.
    alreadyLocated = set()
    pending = set(xbrlModel.urlDocs.values()) - alreadyLocated
    while pending:
        doc = pending.pop()
        alreadyLocated.add(doc)
        doc.loadSchemalocatedSchemas()
        pending = set(xbrlModel.urlDocs.values()) - alreadyLocated
    #from arelle import XmlValidate
    #uncomment for trial use of lxml xml schema validation of entry document
    #XmlValidate.xmlValidate(xbrlModel.modelDocument)
    modelManager.cntlr.webCache.saveUrlCheckTimes()
    modelManager.showStatus(_("xbrl loading finished, {0}...").format(nextaction))
    return xbrlModel
def create(modelManager, newDocumentType=None, url=None, schemaRefs=None, createModelDocument=True, isEntry=False):
    """Create a new ModelXbrl; optionally create its model document as well.

    When newDocumentType is falsy, only the bare ModelXbrl is returned.
    """
    from arelle import (ModelDocument, FileSource)
    newModel = ModelXbrl(modelManager)
    newModel.locale = modelManager.locale
    if not newDocumentType:
        return newModel
    newModel.fileSource = FileSource.FileSource(url)
    if createModelDocument:
        newModel.modelDocument = ModelDocument.create(
            newModel, newDocumentType, url, schemaRefs=schemaRefs, isEntry=isEntry)
    return newModel
def commandLineFilingStart(cntlr, options, filesource, entrypointFiles, *args, **kwargs):
    """Plugin hook run at filing start: expand inline document sets into surrogate paths.

    For each entrypoint with an "ixds" list whose single file is an archive or a
    directory, scans for instance/inline-XBRL documents and rewrites the entry's
    "file" to an IXDS surrogate path joining all discovered files.
    """
    global skipExpectedInstanceComparison
    skipExpectedInstanceComparison = getattr(options, "skipExpectedInstanceComparison", False)
    if isinstance(entrypointFiles, list):
        # check for any inlineDocumentSet in list
        for entrypointFile in entrypointFiles:
            _ixds = entrypointFile.get("ixds")
            if isinstance(_ixds, list):
                # build file surrogate for inline document set
                _files = [e["file"] for e in _ixds if isinstance(e, dict)]
                if len(_files) == 1:
                    urlsByType = {}
                    if os.path.isfile(_files[0]) and any(_files[0].endswith(e) for e in (".zip", ".ZIP", ".tar.gz" )): # check if an archive file
                        # note: rebinds the filesource parameter to the archive
                        filesource = FileSource.openFileSource(_files[0], cntlr)
                        if filesource.isArchive:
                            for _archiveFile in (filesource.dir or ()): # .dir might be none if IOerror
                                filesource.select(_archiveFile)
                                identifiedType = Type.identify(filesource, filesource.url)
                                if identifiedType in (Type.INSTANCE, Type.INLINEXBRL):
                                    urlsByType.setdefault(identifiedType, []).append(filesource.url)
                        filesource.close()
                    elif os.path.isdir(_files[0]):
                        # directory: identify each regular file individually
                        _fileDir = _files[0]
                        for _localName in os.listdir(_fileDir):
                            _file = os.path.join(_fileDir, _localName)
                            if os.path.isfile(_file):
                                filesource = FileSource.openFileSource(_file, cntlr)
                                identifiedType = Type.identify(filesource, filesource.url)
                                if identifiedType in (Type.INSTANCE, Type.INLINEXBRL):
                                    urlsByType.setdefault(identifiedType, []).append(filesource.url)
                                filesource.close()
                    if urlsByType:
                        _files = []
                        # use inline instances, if any, else non-inline instances
                        for identifiedType in (Type.INLINEXBRL, Type.INSTANCE):
                            for url in urlsByType.get(identifiedType, []):
                                _files.append(url)
                            if _files:
                                break # found inline (or non-inline) entrypoint files, don't look for any other type
                if len(_files) > 0:
                    # surrogate path encodes all member files after the separator
                    docsetSurrogatePath = os.path.join(os.path.dirname(_files[0]), IXDS_SURROGATE)
                    entrypointFile["file"] = docsetSurrogatePath + IXDS_DOC_SEPARATOR.join(_files)
def runFromXml(self):
    """Developer test harness: load a hard-coded testcases index and generate
    versioning (diff) reports for each from/to DTS pair in the index.
    """
    # NOTE(review): hard-coded developer path; only runnable on that machine
    testGenFileName = r"C:\Users\Herm Fischer\Documents\mvsl\projects\Arelle\roland test cases\1000-Concepts\index.xml"
    filesource = FileSource.FileSource(testGenFileName)
    startedAt = time.time()
    LogHandler(self)  # start logger
    modelTestcases = self.modelManager.load(filesource, _("views loading"))
    self.addToLog(_("[info] loaded in {0:.2} secs").format(time.time() - startedAt))
    if modelTestcases.modelDocument.type == ModelDocument.Type.TESTCASESINDEX:
        for testcasesElement in modelTestcases.modelDocument.iter(tag="testcases"):
            rootAttr = testcasesElement.get("root")
            title = testcasesElement.get("title")
            self.addToLog(_("[info] testcases {0}").format(title))
            if rootAttr is not None:
                base = os.path.join(
                    os.path.dirname(modelTestcases.modelDocument.filepath),
                    rootAttr) + os.sep
            else:
                base = self.filepath
            for testcaseElement in testcasesElement.iterchildren(tag="testcase"):
                uriFrom = testcaseElement.get("uriFrom")
                uriTo = testcaseElement.get("uriTo")
                modelDTSfrom = modelDTSto = None
                self.addToLog(_("[info] testcase uriFrom {0}").format(uriFrom))
                if uriFrom is not None and uriTo is not None:
                    modelDTSfrom = ModelXbrl.load(
                        modelTestcases.modelManager, uriFrom,
                        _("loading from DTS"), base=base)
                    modelDTSto = ModelXbrl.load(
                        modelTestcases.modelManager, uriTo,
                        _("loading to DTS"), base=base)
                    if modelDTSfrom is not None and modelDTSto is not None:
                        # generate differences report
                        reportName = os.path.basename(uriFrom).replace("from.xsd", "report.xml")
                        reportFile = os.path.dirname(uriFrom) + "\\report\\" + reportName
                        reportFullPath = self.webCache.normalizeUrl(reportFile, base)
                        # exist_ok replaces the old "except WindowsError: pass",
                        # which raised NameError on non-Windows Python 3
                        os.makedirs(os.path.dirname(reportFullPath), exist_ok=True)
                        ModelVersReport.ModelVersReport(modelTestcases).diffDTSes(
                            reportFullPath, modelDTSfrom, modelDTSto)
def run(self):
    """Load the NKE 2019 instance from SEC EDGAR and print each fact."""
    self.messages = []
    modelManager = ModelManager.initialize(self)
    # filesource = FileSource.FileSource("./aapl-20180929.xml")  # local-file alternative
    filesource = FileSource.FileSource(
        "https://www.sec.gov/Archives/edgar/data/320187/000032018719000051/nke-20190531.xml"
    )
    # pass the FileSource object (it was previously created but unused,
    # with the raw URL string passed to load instead)
    modelXbrl = modelManager.load(filesource, _("views loading"))
    for fact in modelXbrl.facts:
        print(fact)
def open_json_file(cntlr, file_name):
    """Open file_name via Arelle's FileSource (handles package mappings) and parse it as JSON.

    :param cntlr: Arelle controller
    :param file_name: file name/url of the JSON file
    :returns: parsed JSON with key order preserved (OrderedDict)
    :raises XuleProcessingError: when the content is not valid JSON
    """
    # Open the new map
    from arelle import FileSource
    file_source = FileSource.openFileSource(file_name, cntlr)
    # FileSource does not handle reading JSON files. If the file is not binary,
    # FileSource assumes it is XML. Read the file as binary and then decode (utf-8 default).
    file_object = file_source.file(file_name, binary=True)[0]
    try:
        file_content = file_object.read().decode()
    finally:
        file_object.close()  # release the underlying file handle
    try:
        return json.loads(file_content, object_pairs_hook=collections.OrderedDict)
    except ValueError:
        raise XuleProcessingError(_("New map file does not appear to be a valid JSON file. File: {}".format(file_name)))
def _get_taxonomy_model(self, taxonomy_url, namespace):
    """Get an xbrl model of the entry_point file.

    Loads taxonomy_url through a FileSource, reporting a TaxonomyLoadError on
    the model when the file could not be opened, otherwise printing load timing.
    Returns the loaded ModelXbrl either way.
    """
    load_started = datetime.datetime.today()
    taxonomy_source = FileSource.openFileSource(taxonomy_url, self.cntlr)
    manager = ModelManager.initialize(self.cntlr)
    model = manager.load(taxonomy_source)
    load_errors = {'IOerror', 'FileNotLoadable'} & set(model.errors)
    if load_errors:
        model.error(
            "TaxonomyLoadError",
            "Cannot open file {} with namespace {}.".format(taxonomy_url, namespace))
    else:
        load_finished = datetime.datetime.today()
        print("Taxonomy {namespace} loaded in {time}. {entry}".format(
            namespace=model.modelDocument.targetNamespace,
            time=load_finished - load_started,
            entry=taxonomy_url))
    return model
def create(modelManager, newDocumentType=None, url=None, schemaRefs=None, createModelDocument=True, isEntry=False):
    """Create a new ModelXbrl, optionally with its model document.

    The FileSource is marked for closing by this model; url may be an open
    file handle, so str(url) is passed to ModelDocument.create below.
    """
    from arelle import (ModelDocument, FileSource)
    newModel = ModelXbrl(modelManager)
    newModel.locale = modelManager.locale
    if newDocumentType:
        newModel.fileSource = FileSource.FileSource(url)  # url may be an open file handle
        newModel.closeFileSource = True
        if createModelDocument:
            newModel.modelDocument = ModelDocument.create(
                newModel, newDocumentType, str(url),
                schemaRefs=schemaRefs, isEntry=isEntry)
            if isEntry:
                # entry document is now resolved; drop the transient loading url
                del newModel.entryLoadingUrl
    return newModel
def func_json_data(xule_context, *args):
    """Read a json file/url.

    Arguments:
        file_url (string or uri)

    Returns a dictionary/list of the json data.

    Raises XuleProcessingError for a non string/uri argument or invalid JSON.
    """
    file_url = args[0]
    if file_url.type not in ('string', 'uri'):
        raise XuleProcessingError(
            _("The file url argument of the json-dta() function must be a string or uri, found '{}'."
              .format(file_url.value)), xule_context)

    from arelle import PackageManager
    # NOTE(review): mapped_file_url is computed but unused; openFileSource below
    # already handles taxonomy package mappings — confirm before removing.
    mapped_file_url = PackageManager.mappedUrl(file_url.value)

    # Using the FileSource object in arelle. This will open the file and handle taxonomy package mappings.
    from arelle import FileSource
    file_source = FileSource.openFileSource(file_url.value, xule_context.global_context.cntlr)
    file = file_source.file(file_url.value, binary=True)
    # file is tuple of one item as a BytesIO stream. Since this is in bytes, it needs
    # to be converted to text via a decoder. Assuming the file is in utf-8.
    data_source = [x.decode('utf-8') for x in file[0].readlines()]
    try:
        json_source = json.loads(''.join(data_source))
    #except JSONDecodeError:
    except ValueError:
        raise XuleProcessingError(
            _("The file '{}' is not a valid JSON file.".format(file_url.value)), xule_context)

    # convert once and return (previously converted twice, discarding the first result)
    return xv.system_collection_to_xule(json_source, xule_context)
def createInstance(self, url=None):
    """Create an instance entry document for this model at url.

    If the current entry document already is an instance it is returned as-is.
    Otherwise a new instance document referencing the current entry (via
    schemaRef) replaces it, the prior file source is closed, and any open
    DTS views are asked to reload.
    """
    from arelle import (ModelDocument, FileSource)
    if self.modelDocument.type == ModelDocument.Type.INSTANCE:
        # entry already is an instance; use existing instance entry point
        return self.modelDocument
    priorFileSource = self.fileSource
    self.fileSource = FileSource.FileSource(url)
    # keep absolute web URIs; was startswith("http://") only, which wrongly
    # fed https URIs through os.path.relpath producing a garbage schemaRef
    if self.uri.startswith(("http://", "https://")):
        schemaRefUri = self.uri
    else:  # relativize local paths
        schemaRefUri = os.path.relpath(self.uri, os.path.dirname(url))
    self.modelDocument = ModelDocument.create(
        self, ModelDocument.Type.INSTANCE, url,
        schemaRefs=[schemaRefUri], isEntry=True)
    if priorFileSource:
        priorFileSource.close()
    self.closeFileSource = True
    del self.entryLoadingUrl
    # reload dts views
    from arelle import ViewWinDTS
    for view in self.views:
        if isinstance(view, ViewWinDTS.ViewDTS):
            self.modelManager.cntlr.uiThreadQueue.put((view.view, []))
def validateTestcase(self, testcase):
    """Validate every variation of a testcase document.

    For each variation: loads the readMeFirst documents (building versioning
    DTSes, taxonomy package prototypes, or regular instance DTSes as the
    variation requires), validates them, generates or checks versioning
    reports, infosets and rendered tables, and finally compares any formula
    or inline-XBRL output instance against the expected result instance,
    recording the variation's status as it goes.
    """
    self.modelXbrl.info("info", "Testcase", modelDocument=testcase)
    self.modelXbrl.viewModelObject(testcase.objectId())
    if hasattr(testcase, "testcaseVariations"):
        for modelTestcaseVariation in testcase.testcaseVariations:
            # update ui thread via modelManager (running in background here)
            self.modelXbrl.modelManager.viewModelObject(self.modelXbrl, modelTestcaseVariation.objectId())
            # is this a versioning report?
            resultIsVersioningReport = modelTestcaseVariation.resultIsVersioningReport
            resultIsXbrlInstance = modelTestcaseVariation.resultIsXbrlInstance
            resultIsTaxonomyPackage = modelTestcaseVariation.resultIsTaxonomyPackage
            formulaOutputInstance = None
            inputDTSes = defaultdict(list)
            baseForElement = testcase.baseForElement(modelTestcaseVariation)
            # try to load instance document
            self.modelXbrl.info("info", _("Variation %(id)s %(name)s: %(expected)s - %(description)s"),
                                modelObject=modelTestcaseVariation,
                                id=modelTestcaseVariation.id,
                                name=modelTestcaseVariation.name,
                                expected=modelTestcaseVariation.expected,
                                description=modelTestcaseVariation.description)
            errorCaptureLevel = modelTestcaseVariation.severityLevel # default is INCONSISTENCY
            parameters = modelTestcaseVariation.parameters.copy()
            for readMeFirstUri in modelTestcaseVariation.readMeFirstUris:
                if isinstance(readMeFirstUri,tuple):
                    # dtsName is for formula instances, but is from/to dts if versioning
                    dtsName, readMeFirstUri = readMeFirstUri
                elif resultIsVersioningReport:
                    if inputDTSes:
                        dtsName = "to"
                    else:
                        dtsName = "from"
                else:
                    dtsName = None
                if resultIsVersioningReport and dtsName:
                    # build multi-schemaRef containing document
                    if dtsName in inputDTSes:
                        dtsName = inputDTSes[dtsName]
                    else:
                        modelXbrl = ModelXbrl.create(self.modelXbrl.modelManager,
                                                     Type.DTSENTRIES,
                                                     self.modelXbrl.modelManager.cntlr.webCache.normalizeUrl(readMeFirstUri[:-4] + ".dts", baseForElement),
                                                     isEntry=True,
                                                     errorCaptureLevel=errorCaptureLevel)
                    DTSdoc = modelXbrl.modelDocument
                    DTSdoc.inDTS = True
                    doc = modelDocumentLoad(modelXbrl, readMeFirstUri, base=baseForElement)
                    if doc is not None:
                        DTSdoc.referencesDocument[doc] = ModelDocumentReference("import", DTSdoc.xmlRootElement) #fake import
                        doc.inDTS = True
                elif resultIsTaxonomyPackage:
                    from arelle import PackageManager, PrototypeInstanceObject
                    dtsName = readMeFirstUri
                    modelXbrl = PrototypeInstanceObject.XbrlPrototype(self.modelXbrl.modelManager, readMeFirstUri)
                    PackageManager.packageInfo(self.modelXbrl.modelManager.cntlr, readMeFirstUri, reload=True, errors=modelXbrl.errors)
                else: # not a multi-schemaRef versioning report
                    if self.useFileSource.isArchive:
                        modelXbrl = ModelXbrl.load(self.modelXbrl.modelManager,
                                                   readMeFirstUri,
                                                   _("validating"),
                                                   base=baseForElement,
                                                   useFileSource=self.useFileSource,
                                                   errorCaptureLevel=errorCaptureLevel)
                    else: # need own file source, may need instance discovery
                        filesource = FileSource.FileSource(readMeFirstUri, self.modelXbrl.modelManager.cntlr)
                        if filesource and not filesource.selection and filesource.isArchive:
                            for _archiveFile in filesource.dir: # find instance document in archive
                                filesource.select(_archiveFile)
                                if ModelDocument.Type.identify(filesource, filesource.url) in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
                                    break # use this selection
                        modelXbrl = ModelXbrl.load(self.modelXbrl.modelManager,
                                                   filesource,
                                                   _("validating"),
                                                   base=baseForElement,
                                                   errorCaptureLevel=errorCaptureLevel)
                    modelXbrl.isTestcaseVariation = True
                if modelXbrl.modelDocument is None:
                    # document could not be loaded; record status and discard model
                    modelXbrl.error("arelle:notLoaded",
                                    _("Testcase %(id)s %(name)s document not loaded: %(file)s"),
                                    modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name, file=os.path.basename(readMeFirstUri))
                    self.determineNotLoadedTestStatus(modelTestcaseVariation, modelXbrl.errors)
                    modelXbrl.close()
                elif resultIsVersioningReport or resultIsTaxonomyPackage:
                    inputDTSes[dtsName] = modelXbrl
                elif modelXbrl.modelDocument.type == Type.VERSIONINGREPORT:
                    ValidateVersReport.ValidateVersReport(self.modelXbrl).validate(modelXbrl)
                    self.determineTestStatus(modelTestcaseVariation, modelXbrl.errors)
                    modelXbrl.close()
                elif testcase.type == Type.REGISTRYTESTCASE:
                    self.instValidator.validate(modelXbrl)  # required to set up dimensions, etc
                    self.instValidator.executeCallTest(modelXbrl, modelTestcaseVariation.id, modelTestcaseVariation.cfcnCall, modelTestcaseVariation.cfcnTest)
                    self.determineTestStatus(modelTestcaseVariation, modelXbrl.errors)
                    self.instValidator.close()
                    modelXbrl.close()
                else:
                    inputDTSes[dtsName].append(modelXbrl)
                    # validate except for formulas
                    _hasFormulae = modelXbrl.hasFormulae
                    modelXbrl.hasFormulae = False
                    try:
                        for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.Xbrl.Loaded"):
                            pluginXbrlMethod(self.modelXbrl, modelXbrl, modelTestcaseVariation)
                        self.instValidator.validate(modelXbrl, parameters)
                        for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.Xbrl.Validated"):
                            pluginXbrlMethod(self.modelXbrl, modelXbrl)
                    except Exception as err:
                        modelXbrl.error("exception:" + type(err).__name__,
                                        _("Testcase variation validation exception: %(error)s, instance: %(instance)s"),
                                        modelXbrl=modelXbrl, instance=modelXbrl.modelDocument.basename, error=err, exc_info=True)
                    modelXbrl.hasFormulae = _hasFormulae
            # all readMeFirst documents for the variation are now loaded
            if resultIsVersioningReport and modelXbrl.modelDocument:
                versReportFile = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(
                    modelTestcaseVariation.versioningReportUri, baseForElement)
                if os.path.exists(versReportFile): #validate existing
                    modelVersReport = ModelXbrl.load(self.modelXbrl.modelManager, versReportFile, _("validating existing version report"))
                    if modelVersReport and modelVersReport.modelDocument and modelVersReport.modelDocument.type == Type.VERSIONINGREPORT:
                        ValidateVersReport.ValidateVersReport(self.modelXbrl).validate(modelVersReport)
                        self.determineTestStatus(modelTestcaseVariation, modelVersReport.errors)
                        modelVersReport.close()
                elif len(inputDTSes) == 2:
                    # no existing report: generate one from the from/to DTS pair
                    ModelVersReport.ModelVersReport(self.modelXbrl).diffDTSes(
                        versReportFile, inputDTSes["from"], inputDTSes["to"])
                    modelTestcaseVariation.status = "generated"
                else:
                    modelXbrl.error("arelle:notLoaded",
                                    _("Testcase %(id)s %(name)s DTSes not loaded, unable to generate versioning report: %(file)s"),
                                    modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name, file=os.path.basename(readMeFirstUri))
                    modelTestcaseVariation.status = "failed"
                for inputDTS in inputDTSes.values():
                    inputDTS.close()
                del inputDTSes # dereference
            elif resultIsTaxonomyPackage:
                self.determineTestStatus(modelTestcaseVariation, modelXbrl.errors)
                modelXbrl.close()
            elif inputDTSes:
                # validate schema, linkbase, or instance
                modelXbrl = inputDTSes[None][0]
                for dtsName, inputDTS in inputDTSes.items(): # input instances are also parameters
                    if dtsName: # named instance
                        parameters[dtsName] = (None, inputDTS) #inputDTS is a list of modelXbrl's (instance DTSes)
                    elif len(inputDTS) > 1: # standard-input-instance with multiple instance documents
                        parameters[XbrlConst.qnStandardInputInstance] = (None, inputDTS) # allow error detection in validateFormula
                if modelXbrl.hasTableRendering or modelTestcaseVariation.resultIsTable:
                    RenderingEvaluator.init(modelXbrl)
                if modelXbrl.hasFormulae:
                    try:
                        # validate only formulae
                        self.instValidator.parameters = parameters
                        ValidateFormula.validate(self.instValidator)
                    except Exception as err:
                        modelXbrl.error("exception:" + type(err).__name__,
                                        _("Testcase formula variation validation exception: %(error)s, instance: %(instance)s"),
                                        modelXbrl=modelXbrl, instance=modelXbrl.modelDocument.basename, error=err, exc_info=True)
                if modelTestcaseVariation.resultIsInfoset and self.modelXbrl.modelManager.validateInfoset:
                    for pluginXbrlMethod in pluginClassMethods("Validate.Infoset"):
                        pluginXbrlMethod(modelXbrl, modelTestcaseVariation.resultInfosetUri)
                    infoset = ModelXbrl.load(self.modelXbrl.modelManager,
                                             modelTestcaseVariation.resultInfosetUri,
                                             _("loading result infoset"),
                                             base=baseForElement,
                                             useFileSource=self.useFileSource,
                                             errorCaptureLevel=errorCaptureLevel)
                    if infoset.modelDocument is None:
                        modelXbrl.error("arelle:notLoaded",
                                        _("Testcase %(id)s %(name)s result infoset not loaded: %(file)s"),
                                        modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name,
                                        file=os.path.basename(modelTestcaseVariation.resultXbrlInstance))
                        modelTestcaseVariation.status = "result infoset not loadable"
                    else: # check infoset
                        ValidateInfoset.validate(self.instValidator, modelXbrl, infoset)
                        infoset.close()
                if modelTestcaseVariation.resultIsTable: # and self.modelXbrl.modelManager.validateInfoset:
                    # diff (or generate) table infoset
                    resultTableUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(modelTestcaseVariation.resultTableUri, baseForElement)
                    if not any(alternativeValidation(modelXbrl, resultTableUri)
                               for alternativeValidation in pluginClassMethods("Validate.TableInfoset")):
                        ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, resultTableUri, diffToFile=True)  # false to save infoset files
                self.instValidator.close()
                extraErrors = []
                for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.Validated"):
                    pluginXbrlMethod(self.modelXbrl, modelXbrl, extraErrors)
                self.determineTestStatus(modelTestcaseVariation, [e for inputDTSlist in inputDTSes.values() for inputDTS in inputDTSlist for e in inputDTS.errors] + extraErrors) # include infoset errors in status
                if modelXbrl.formulaOutputInstance and self.noErrorCodes(modelTestcaseVariation.actual):
                    # if an output instance is created, and no string error codes, ignoring dict of assertion results, validate it
                    modelXbrl.formulaOutputInstance.hasFormulae = False # block formulae on output instance (so assertion of input is not lost)
                    self.instValidator.validate(modelXbrl.formulaOutputInstance, modelTestcaseVariation.parameters)
                    self.determineTestStatus(modelTestcaseVariation, modelXbrl.formulaOutputInstance.errors)
                    if self.noErrorCodes(modelTestcaseVariation.actual): # if still 'clean' pass it forward for comparison to expected result instance
                        formulaOutputInstance = modelXbrl.formulaOutputInstance
                        modelXbrl.formulaOutputInstance = None # prevent it from being closed now
                self.instValidator.close()
            compareIxResultInstance = getattr(modelXbrl, "extractedInlineInstance", False) and modelTestcaseVariation.resultXbrlInstanceUri
            if compareIxResultInstance:
                formulaOutputInstance = modelXbrl # compare modelXbrl to generated output instance
                errMsgPrefix = "ix"
            else: # delete input instances before formula output comparision
                for inputDTSlist in inputDTSes.values():
                    for inputDTS in inputDTSlist:
                        inputDTS.close()
                del inputDTSes # dereference
                errMsgPrefix = "formula"
            if resultIsXbrlInstance and formulaOutputInstance and formulaOutputInstance.modelDocument:
                # compare output instance to the expected result instance
                expectedInstance = ModelXbrl.load(self.modelXbrl.modelManager,
                                                  modelTestcaseVariation.resultXbrlInstanceUri,
                                                  _("loading expected result XBRL instance"),
                                                  base=baseForElement,
                                                  useFileSource=self.useFileSource,
                                                  errorCaptureLevel=errorCaptureLevel)
                if expectedInstance.modelDocument is None:
                    self.modelXbrl.error("{}:expectedResultNotLoaded".format(errMsgPrefix),
                                         _("Testcase %(id)s %(name)s expected result instance not loaded: %(file)s"),
                                         modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name,
                                         file=os.path.basename(modelTestcaseVariation.resultXbrlInstanceUri),
                                         messageCodes=("formula:expectedResultNotLoaded","ix:expectedResultNotLoaded"))
                    modelTestcaseVariation.status = "result not loadable"
                else: # compare facts
                    if len(expectedInstance.facts) != len(formulaOutputInstance.facts):
                        formulaOutputInstance.error("{}:resultFactCounts".format(errMsgPrefix),
                                                    _("Formula output %(countFacts)s facts, expected %(expectedFacts)s facts"),
                                                    modelXbrl=modelXbrl, countFacts=len(formulaOutputInstance.facts),
                                                    expectedFacts=len(expectedInstance.facts),
                                                    messageCodes=("formula:resultFactCounts","ix:resultFactCounts"))
                    else:
                        formulaOutputFootnotesRelSet = ModelRelationshipSet(formulaOutputInstance, "XBRL-footnotes")
                        expectedFootnotesRelSet = ModelRelationshipSet(expectedInstance, "XBRL-footnotes")
                        def factFootnotes(fact, footnotesRelSet):
                            # serialize a fact's footnotes to comparable strings
                            footnotes = []
                            footnoteRels = footnotesRelSet.fromModelObject(fact)
                            if footnoteRels:
                                # most process rels in same order between two instances, use labels to sort
                                for i, footnoteRel in enumerate(sorted(footnoteRels, key=lambda r: (r.fromLabel,r.toLabel))):
                                    modelObject = footnoteRel.toModelObject
                                    if isinstance(modelObject, ModelResource):
                                        xml = modelObject.viewText().strip()
                                        footnotes.append("Footnote {}: {}".format(
                                           i+1, # compare footnote with HTML serialized
                                           xml,
                                           #re.sub(r'\s+', ' ', collapseWhitespace(modelObject.stringValue))
                                           ))
                                    elif isinstance(modelObject, ModelFact):
                                        footnotes.append("Footnoted fact {}: {} context: {} value: {}".format(
                                            i+1,
                                            modelObject.qname,
                                            modelObject.contextID,
                                            collapseWhitespace(modelObject.value)))
                            return footnotes
                        for expectedInstanceFact in expectedInstance.facts:
                            unmatchedFactsStack = []
                            formulaOutputFact = formulaOutputInstance.matchFact(expectedInstanceFact, unmatchedFactsStack, deemP0inf=True)
                            if formulaOutputFact is None:
                                if unmatchedFactsStack: # get missing nested tuple fact, if possible
                                    missingFact = unmatchedFactsStack[-1]
                                else:
                                    missingFact = expectedInstanceFact
                                formulaOutputInstance.error("{}:expectedFactMissing".format(errMsgPrefix),
                                                            _("Output missing expected fact %(fact)s"),
                                                            modelXbrl=missingFact, fact=missingFact.qname,
                                                            messageCodes=("formula:expectedFactMissing","ix:expectedFactMissing"))
                            else: # compare footnotes
                                expectedInstanceFactFootnotes = factFootnotes(expectedInstanceFact, expectedFootnotesRelSet)
                                formulaOutputFactFootnotes = factFootnotes(formulaOutputFact, formulaOutputFootnotesRelSet)
                                if expectedInstanceFactFootnotes != formulaOutputFactFootnotes:
                                    formulaOutputInstance.error("{}:expectedFactFootnoteDifference".format(errMsgPrefix),
                                                                _("Output expected fact %(fact)s expected footnotes %(footnotes1)s produced footnotes %(footnotes2)s"),
                                                                modelXbrl=(formulaOutputFact,expectedInstanceFact), fact=expectedInstanceFact.qname,
                                                                footnotes1=expectedInstanceFactFootnotes, footnotes2=formulaOutputFactFootnotes,
                                                                messageCodes=("formula:expectedFactFootnoteDifference","ix:expectedFactFootnoteDifference"))
                    # for debugging uncomment next line to save generated instance document
                    # formulaOutputInstance.saveInstance(r"c:\temp\test-out-inst.xml")
                    expectedInstance.close()
                    del expectedInstance # dereference
                self.determineTestStatus(modelTestcaseVariation, formulaOutputInstance.errors)
                formulaOutputInstance.close()
                del formulaOutputInstance
            if compareIxResultInstance:
                # input instances were kept alive for the ix comparison; close them now
                for inputDTSlist in inputDTSes.values():
                    for inputDTS in inputDTSlist:
                        inputDTS.close()
                del inputDTSes # dereference
            # update ui thread via modelManager (running in background here)
            self.modelXbrl.modelManager.viewModelObject(self.modelXbrl, modelTestcaseVariation.objectId())
        self.modelXbrl.modelManager.showStatus(_("ready"), 2000)
def evaluateTableIndex(modelXbrl):
    """Build an EFM rendering-compatible table index over the presentation link roles.

    For an EFM disclosure system this classifies each presentation ELR role type into
    a report group (cover, statements, notes, policies, tables, details), flow-allocates
    facts to each role's table, links parent/child tables by name matching, and scores
    root tables against UGT topics loaded from ugt-topics.json (cached module-globally).

    Side effects: annotates each role type with ``_tableIndex``, ``_tableChildren``,
    ``_tableFacts`` and possibly ``_tableParent`` / ``_tableTopic*`` attributes; may
    populate the module-global ``UGT_TOPICS`` cache; may trigger loading of dimension
    defaults on *modelXbrl*.

    Returns the first cover-table link role URI (or first Document role URI) when an
    index was built, else None for non-EFM disclosure systems.
    """
    disclosureSystem = modelXbrl.modelManager.disclosureSystem
    if disclosureSystem.EFM:
        # leading digits make the group labels sort in report presentation order
        COVER = "1Cover"
        STMTS = "2Financial Statements"
        NOTES = "3Notes to Financial Statements"
        POLICIES = "4Accounting Policies"
        TABLES = "5Notes Tables"
        DETAILS = "6Notes Details"
        UNCATEG = "7Uncategorized"
        # EFM role definitions look like "0101 - Statement - Balance Sheet"
        roleDefinitionPattern = re.compile(r"([0-9]+) - (Statement|Disclosure|Schedule|Document) - (.+)")
        # build EFM rendering-compatible index
        definitionElrs = dict((roleType.definition, roleType)
                              for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
                              for roleType in modelXbrl.roleTypes.get(roleURI,()))
        # risk/return (RR) filings are all treated as cover-group tables
        isRR = any(ns.startswith("http://xbrl.sec.gov/rr/")
                   for ns in modelXbrl.namespaceDocs.keys())
        tableGroup = None
        firstTableLinkroleURI = None
        firstDocumentLinkroleURI = None
        # sorted by definition string, so the numeric prefix orders the roles
        sortedRoleTypes = sorted(definitionElrs.items(), key=lambda item: item[0])
        for roleDefinition, roleType in sortedRoleTypes:
            roleType._tableChildren = []
            match = roleDefinitionPattern.match(roleDefinition) if roleDefinition else None
            if not match:
                roleType._tableIndex = (UNCATEG, "", roleType.roleURI)
                continue
            seq, tblType, tblName = match.groups()
            # state machine: tableGroup advances monotonically through the report
            # sections based on the previous group and hints in the table name
            if isRR:
                tableGroup = COVER
            elif not tableGroup:
                tableGroup = ("Paren" in tblName and COVER or tblType == "Statement" and STMTS or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or COVER)
            elif tableGroup == COVER:
                tableGroup = (tblType == "Statement" and STMTS or "Paren" in tblName and COVER or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == STMTS:
                tableGroup = ((tblType == "Statement" or "Paren" in tblName) and STMTS or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == NOTES:
                tableGroup = ("(Polic" in tblName and POLICIES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or
                              tblType == "Disclosure" and NOTES or UNCATEG)
            elif tableGroup == POLICIES:
                tableGroup = ("(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or
                              ("Paren" in tblName or "(Polic" in tblName) and POLICIES or UNCATEG)
            elif tableGroup == TABLES:
                tableGroup = ("(Detail" in tblName and DETAILS or
                              ("Paren" in tblName or "(Table" in tblName) and TABLES or UNCATEG)
            elif tableGroup == DETAILS:
                tableGroup = (("Paren" in tblName or "(Detail" in tblName) and DETAILS or UNCATEG)
            else:
                tableGroup = UNCATEG
            if firstTableLinkroleURI is None and tableGroup == COVER:
                firstTableLinkroleURI = roleType.roleURI
            if tblType == "Document" and not firstDocumentLinkroleURI:
                firstDocumentLinkroleURI = roleType.roleURI
            roleType._tableIndex = (tableGroup, seq, tblName)

        # flow allocate facts to roles (SEC presentation groups)
        if not modelXbrl.qnameDimensionDefaults: # may not have run validation yet
            from arelle import ValidateXbrlDimensions
            ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
        reportedFacts = set() # facts which were shown in a higher-numbered ELR table
        factsByQname = modelXbrl.factsByQname
        reportingPeriods = set()
        nextEnd = None
        deiFact = {}
        # seed reporting periods (and the period-walk starting point) from DEI facts
        for conceptName in ("DocumentPeriodEndDate", "DocumentType", "CurrentFiscalPeriodEndDate"):
            for concept in modelXbrl.nameConcepts[conceptName]:
                for fact in factsByQname[concept.qname]:
                    deiFact[conceptName] = fact
                    if fact.context is not None:
                        reportingPeriods.add((None, fact.context.endDatetime)) # for instant
                        reportingPeriods.add((fact.context.startDatetime, fact.context.endDatetime)) # for startEnd
                        nextEnd = fact.context.startDatetime
                        duration = (fact.context.endDatetime - fact.context.startDatetime).days + 1
                        break
        if "DocumentType" in deiFact:
            fact = deiFact["DocumentType"]
            if "-Q" in fact.xValue: # need quarterly and yr to date durations
                endDatetime = fact.context.endDatetime
                # if within 2 days of end of month use last day of month
                endDatetimeMonth = endDatetime.month
                if (endDatetime + timedelta(2)).month != endDatetimeMonth: # near end of month
                    endOfMonth = True
                    while endDatetime.month == endDatetimeMonth:
                        endDatetime += timedelta(1) # go forward to next month
                else:
                    endOfMonth = False
                # back up three calendar months to synthesize the quarterly period
                startYr = endDatetime.year
                startMo = endDatetime.month - 3
                if startMo <= 0:
                    startMo += 12
                    startYr -= 1
                startDatetime = datetime(startYr, startMo, endDatetime.day,
                                         endDatetime.hour, endDatetime.minute, endDatetime.second)
                if endOfMonth:
                    startDatetime -= timedelta(1)
                    endDatetime -= timedelta(1)
                reportingPeriods.add((startDatetime, endDatetime))
                duration = 91
        # find preceding compatible default context periods
        # walk backwards period-by-period while a default (dimensionless) context
        # of roughly the same duration (within +/-10%) abuts the current start
        while (nextEnd is not None):
            thisEnd = nextEnd
            prevMaxStart = thisEnd - timedelta(duration * .9)
            prevMinStart = thisEnd - timedelta(duration * 1.1)
            nextEnd = None
            for cntx in modelXbrl.contexts.values():
                if (cntx.isStartEndPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime and
                    prevMinStart <= cntx.startDatetime <= prevMaxStart):
                    reportingPeriods.add((None, cntx.endDatetime))
                    reportingPeriods.add((cntx.startDatetime, cntx.endDatetime))
                    nextEnd = cntx.startDatetime
                    break
                elif (cntx.isInstantPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime):
                    reportingPeriods.add((None, cntx.endDatetime))
        stmtReportingPeriods = set(reportingPeriods)

        sortedRoleTypes.reverse() # now in descending order
        for i, roleTypes in enumerate(sortedRoleTypes):
            roleDefinition, roleType = roleTypes
            # find defined non-default axes in pre hierarchy for table
            tableFacts = set()
            tableGroup, tableSeq, tableName = roleType._tableIndex
            roleURIdims, priItemQNames = EFMlinkRoleURIstructure(modelXbrl, roleType.roleURI)
            for priItemQName in priItemQNames:
                for fact in factsByQname[priItemQName]:
                    cntx = fact.context
                    # non-explicit dims must be default
                    if (cntx is not None and
                        all(dimQn in modelXbrl.qnameDimensionDefaults
                            for dimQn in (roleURIdims.keys() - cntx.qnameDims.keys())) and
                        all(mdlDim.memberQname in roleURIdims[dimQn]
                            for dimQn, mdlDim in cntx.qnameDims.items()
                            if dimQn in roleURIdims)):
                        # the flow-up part, drop facts not in a statement reporting period
                        cntxStartDatetime = cntx.startDatetime
                        cntxEndDatetime = cntx.endDatetime
                        if (tableGroup != STMTS or
                            (cntxStartDatetime, cntxEndDatetime) in stmtReportingPeriods and
                            (fact not in reportedFacts or
                             all(dimQn not in cntx.qnameDims # unspecified dims are all defaulted if reported elsewhere
                                 for dimQn in (cntx.qnameDims.keys() - roleURIdims.keys())))):
                            tableFacts.add(fact)
                            reportedFacts.add(fact)
            roleType._tableFacts = tableFacts

            # find parent if any: parent is the later-sequenced role whose name
            # shares the longest match prefix with this table's name
            closestParentType = None
            closestParentMatchLength = 0
            for _parentRoleDefinition, parentRoleType in sortedRoleTypes[i+1:]:
                matchLen = parentNameMatchLen(tableName, parentRoleType)
                if matchLen > closestParentMatchLength:
                    closestParentMatchLength = matchLen
                    closestParentType = parentRoleType
            if closestParentType is not None:
                closestParentType._tableChildren.insert(0, roleType)

            # remove lesser-matched children if there was a parent match
            unmatchedChildRoles = set()
            longestChildMatchLen = 0
            numChildren = 0
            for childRoleType in roleType._tableChildren:
                matchLen = parentNameMatchLen(tableName, childRoleType)
                if matchLen < closestParentMatchLength:
                    unmatchedChildRoles.add(childRoleType)
                elif matchLen > longestChildMatchLen:
                    longestChildMatchLen = matchLen
                    numChildren += 1
            if numChildren > 1:
                # remove children that don't have the full match pattern length to parent
                for childRoleType in roleType._tableChildren:
                    if (childRoleType not in unmatchedChildRoles and
                        parentNameMatchLen(tableName, childRoleType) < longestChildMatchLen):
                        unmatchedChildRoles.add(childRoleType)
            for unmatchedChildRole in unmatchedChildRoles:
                roleType._tableChildren.remove(unmatchedChildRole)
            for childRoleType in roleType._tableChildren:
                childRoleType._tableParent = roleType
            unmatchedChildRoles = None # dereference

        # lazily load UGT topic definitions (module-global cache shared across calls)
        global UGT_TOPICS
        if UGT_TOPICS is None:
            try:
                from arelle import FileSource
                fh = FileSource.openFileStream(modelXbrl.modelManager.cntlr,
                                               os.path.join(modelXbrl.modelManager.cntlr.configDir,
                                                            "ugt-topics.zip/ugt-topics.json"),
                                               'r', 'utf-8')
                UGT_TOPICS = json.load(fh)
                fh.close()
                for topic in UGT_TOPICS:
                    topic[6] = set(topic[6]) # change concept abstracts list into concept abstracts set
                    topic[7] = set(topic[7]) # change concept text blocks list into concept text blocks set
                    topic[8] = set(topic[8]) # change concept names list into concept names set
            except Exception as ex:
                # best-effort: topic scoring is simply skipped when the file can't be loaded
                UGT_TOPICS = None

        if UGT_TOPICS is not None:
            def roleUgtConcepts(roleType):
                # collect concept names of this role's presentation tree and,
                # recursively, of its child tables
                roleConcepts = set()
                for rel in modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).modelRelationships:
                    if rel.toModelObject is not None:
                        roleConcepts.add(rel.toModelObject.name)
                    if rel.fromModelObject is not None:
                        roleConcepts.add(rel.fromModelObject.name)
                if hasattr(roleType, "_tableChildren"):
                    for _tableChild in roleType._tableChildren:
                        roleConcepts |= roleUgtConcepts(_tableChild)
                return roleConcepts
            topicMatches = {} # topicNum: (best score, roleType)
            for roleDefinition, roleType in sortedRoleTypes:
                roleTopicType = 'S' if roleDefinition.startswith('S') else 'D'
                if getattr(roleType, "_tableParent", None) is None:
                    # rooted tables in reverse order
                    concepts = roleUgtConcepts(roleType)
                    for i, ugtTopic in enumerate(UGT_TOPICS):
                        if ugtTopic[0] == roleTopicType:
                            countAbstracts = len(concepts & ugtTopic[6])
                            countTextBlocks = len(concepts & ugtTopic[7])
                            countLineItems = len(concepts & ugtTopic[8])
                            if countAbstracts or countTextBlocks or countLineItems:
                                # text blocks dominate the score, then abstracts, then
                                # the fraction of line items matched
                                _score = (10 * countAbstracts +
                                          1000 * countTextBlocks +
                                          countLineItems / len(concepts))
                                if i not in topicMatches or _score > topicMatches[i][0]:
                                    topicMatches[i] = (_score, roleType)
            for topicNum, scoredRoleType in topicMatches.items():
                _score, roleType = scoredRoleType
                if _score > getattr(roleType, "_tableTopicScore", 0):
                    ugtTopic = UGT_TOPICS[topicNum]
                    roleType._tableTopicScore = _score
                    roleType._tableTopicType = ugtTopic[0]
                    roleType._tableTopicName = ugtTopic[3]
                    roleType._tableTopicCode = ugtTopic[4]
                    # print ("Match score {:.2f} topic {} preGrp {}".format(_score, ugtTopic[3], roleType.definition))
        return firstTableLinkroleURI or firstDocumentLinkroleURI # did build _tableIndex attributes
    return None
def run(self, options, sourceZipStream=None):
    """Load, optionally validate, and render views for an XBRL entry point.

    Command-line driver: configures the disclosure system, calculation and UTR
    validation flags, and formula trace options from *options*; loads the entry
    point (optionally from *sourceZipStream*); optionally imports extra files,
    produces a versioning diff report, validates, and writes the requested view
    files; finally closes loaded models unless --keepOpen was given.

    Returns True on success, False if loading, validation, or view output failed.
    """
    self.entrypointFile = options.entrypointFile
    filesource = FileSource.openFileSource(self.entrypointFile, self, sourceZipStream)
    # select disclosure system: --efm wins over --gfm when both are given
    if options.validateEFM:
        if options.gfmName:
            self.addToLog(_("both --efm and --gfm validation are requested, proceeding with --efm only"),
                          messageCode="info", file=self.entrypointFile)
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select("efm")
    elif options.gfmName:
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select(options.gfmName)
    elif options.validateHMRC:
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select("hmrc")
    else:
        self.modelManager.disclosureSystem.select(None) # just load ordinary mappings
    # calculation linkbase checking: --calcDecimals wins over --calcPrecision
    if options.calcDecimals:
        if options.calcPrecision:
            self.addToLog(_("both --calcDecimals and --calcPrecision validation are requested, proceeding with --calcDecimals only"),
                          messageCode="info", file=self.entrypointFile)
        self.modelManager.validateInferDecimals = True
        self.modelManager.validateCalcLB = True
    elif options.calcPrecision:
        self.modelManager.validateInferDecimals = False
        self.modelManager.validateCalcLB = True
    if options.utrValidate:
        self.modelManager.validateUtr = True
    # formula options: parse name=value parameters and map each trace flag
    fo = FormulaOptions()
    if options.parameters:
        fo.parameterValues = dict(((qname(key, noPrefixIsNoNamespace=True),(None,value))
                                   for param in options.parameters.split(',')
                                   for key,sep,value in (param.partition('='),) ) )
    if options.formulaParamExprResult: fo.traceParameterExpressionResult = True
    if options.formulaParamInputValue: fo.traceParameterInputValue = True
    if options.formulaCallExprSource: fo.traceCallExpressionSource = True
    if options.formulaCallExprCode: fo.traceCallExpressionCode = True
    if options.formulaCallExprEval: fo.traceCallExpressionEvaluation = True
    if options.formulaCallExprResult: fo.traceCallExpressionResult = True
    if options.formulaVarSetExprEval: fo.traceVariableSetExpressionEvaluation = True
    if options.formulaVarSetExprResult: fo.traceVariableSetExpressionResult = True
    if options.formulaAsserResultCounts: fo.traceAssertionResultCounts = True
    if options.formulaFormulaRules: fo.traceFormulaRules = True
    if options.formulaVarsOrder: fo.traceVariablesOrder = True
    if options.formulaVarExpressionSource: fo.traceVariableExpressionSource = True
    if options.formulaVarExpressionCode: fo.traceVariableExpressionCode = True
    if options.formulaVarExpressionEvaluation: fo.traceVariableExpressionEvaluation = True
    if options.formulaVarExpressionResult: fo.traceVariableExpressionResult = True
    if options.formulaVarFilterWinnowing: fo.traceVariableFilterWinnowing = True
    if options.formulaVarFiltersResult: fo.traceVariableFiltersResult = True
    self.modelManager.formulaOptions = fo
    timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
    startedAt = time.time()
    modelDiffReport = None
    success = True
    modelXbrl = None
    # load the primary DTS; any exception is logged and turns into failure
    try:
        modelXbrl = self.modelManager.load(filesource, _("views loading"))
    except Exception as err:
        self.addToLog(_("[Exception] Failed to complete request: \n{0} \n{1}").format(
                        err,
                        traceback.format_tb(sys.exc_info()[2])))
        success = False # loading errors, don't attempt to utilize loaded DTS
    if modelXbrl and modelXbrl.modelDocument:
        self.addToLog(format_string(self.modelManager.locale,
                                    _("loaded in %.2f secs at %s"),
                                    (time.time() - startedAt, timeNow)),
                      messageCode="info", file=self.entrypointFile)
        if options.importFiles:
            # '|'-separated list of additional documents to import into the DTS
            for importFile in options.importFiles.split("|"):
                ModelDocument.load(modelXbrl, importFile.strip())
                self.addToLog(format_string(self.modelManager.locale,
                                            _("imported in %.2f secs at %s"),
                                            (time.time() - startedAt, timeNow)),
                              messageCode="info", file=importFile)
        if modelXbrl.errors:
            success = False # loading errors, don't attempt to utilize loaded DTS
    else:
        success = False
    # optional versioning report: compare loaded DTS against --diffFile
    if success and options.diffFile and options.versReportFile:
        diffFilesource = FileSource.FileSource(options.diffFile,self)
        startedAt = time.time()
        modelXbrl2 = self.modelManager.load(diffFilesource, _("views loading"))
        if modelXbrl2.errors:
            if not options.keepOpen:
                modelXbrl2.close()
            success = False
        else:
            self.addToLog(format_string(self.modelManager.locale,
                                        _("diff comparison DTS loaded in %.2f secs"),
                                        time.time() - startedAt),
                          messageCode="info", file=self.entrypointFile)
            startedAt = time.time()
            modelDiffReport = self.modelManager.compareDTSes(options.versReportFile)
            self.addToLog(format_string(self.modelManager.locale,
                                        _("compared in %.2f secs"),
                                        time.time() - startedAt),
                          messageCode="info", file=self.entrypointFile)
    if success:
        try:
            if options.validate:
                startedAt = time.time()
                self.modelManager.validate()
                self.addToLog(format_string(self.modelManager.locale,
                                            _("validated in %.2f secs"),
                                            time.time() - startedAt),
                              messageCode="info", file=self.entrypointFile)
                # testcase index/registry entry points get a test-report view
                if (options.testReport and
                    self.modelManager.modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX,
                                                                       ModelDocument.Type.TESTCASE,
                                                                       ModelDocument.Type.REGISTRY)):
                    ViewFileTests.viewTests(self.modelManager.modelXbrl, options.testReport)
            # write each requested view output file
            if options.DTSFile:
                ViewFileDTS.viewDTS(modelXbrl, options.DTSFile)
            if options.factsFile:
                ViewFileFactList.viewFacts(modelXbrl, options.factsFile,
                                           labelrole=options.labelRole, lang=options.labelLang,
                                           cols=options.factListCols)
            if options.factTableFile:
                ViewFileFactTable.viewFacts(modelXbrl, options.factTableFile,
                                            labelrole=options.labelRole, lang=options.labelLang)
            if options.conceptsFile:
                ViewFileConcepts.viewConcepts(modelXbrl, options.conceptsFile,
                                              labelrole=options.labelRole, lang=options.labelLang)
            if options.preFile:
                ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.preFile,
                                                            "Presentation Linkbase",
                                                            "http://www.xbrl.org/2003/arcrole/parent-child",
                                                            labelrole=options.labelRole, lang=options.labelLang)
            if options.calFile:
                ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.calFile,
                                                            "Calculation Linkbase",
                                                            "http://www.xbrl.org/2003/arcrole/summation-item",
                                                            labelrole=options.labelRole, lang=options.labelLang)
            if options.dimFile:
                ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.dimFile,
                                                            "Dimensions",
                                                            "XBRL-dimensions",
                                                            labelrole=options.labelRole, lang=options.labelLang)
            if options.formulaeFile:
                ViewFileFormulae.viewFormulae(modelXbrl, options.formulaeFile, "Formulae",
                                              lang=options.labelLang)
            # give plugins a chance to process the loaded model
            for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Xbrl.Run"):
                pluginXbrlMethod(self, options, modelXbrl)
        except (IOError, EnvironmentError) as err:
            self.addToLog(_("[IOError] Failed to save output:\n {0}").format(err))
            success = False
        except Exception as err:
            self.addToLog(_("[Exception] Failed to complete request: \n{0} \n{1}").format(
                            err,
                            traceback.format_tb(sys.exc_info()[2])))
            success = False
    if not options.keepOpen:
        # closing the diff report also releases both compared DTSes
        if modelDiffReport:
            modelDiffReport.close()
        elif modelXbrl:
            modelXbrl.close()
    return success
def validateTestcase(self, testcase): self.modelXbrl.info("info", "Testcase", modelDocument=testcase) self.modelXbrl.viewModelObject(testcase.objectId()) if testcase.type in (Type.TESTCASESINDEX, Type.REGISTRY): for doc in sorted(testcase.referencesDocument.keys(), key=lambda doc: doc.uri): self.validateTestcase(doc) # testcases doc's are sorted by their uri (file names), e.g., for formula elif hasattr(testcase, "testcaseVariations"): for modelTestcaseVariation in testcaseVariationsByTarget(testcase.testcaseVariations): # update ui thread via modelManager (running in background here) self.modelXbrl.modelManager.viewModelObject(self.modelXbrl, modelTestcaseVariation.objectId()) # is this a versioning report? resultIsVersioningReport = modelTestcaseVariation.resultIsVersioningReport resultIsXbrlInstance = modelTestcaseVariation.resultIsXbrlInstance resultIsTaxonomyPackage = modelTestcaseVariation.resultIsTaxonomyPackage formulaOutputInstance = None inputDTSes = defaultdict(list) baseForElement = testcase.baseForElement(modelTestcaseVariation) # try to load instance document self.modelXbrl.info("info", _("Variation %(id)s%(name)s%(target)s: %(expected)s - %(description)s"), modelObject=modelTestcaseVariation, id=modelTestcaseVariation.id, name=(" {}".format(modelTestcaseVariation.name) if modelTestcaseVariation.name else ""), target=(" target {}".format(modelTestcaseVariation.ixdsTarget) if modelTestcaseVariation.ixdsTarget else ""), expected=modelTestcaseVariation.expected, description=modelTestcaseVariation.description) if self.modelXbrl.modelManager.formulaOptions.testcaseResultsCaptureWarnings: errorCaptureLevel = logging._checkLevel("WARNING") else: errorCaptureLevel = modelTestcaseVariation.severityLevel # default is INCONSISTENCY parameters = modelTestcaseVariation.parameters.copy() for readMeFirstUri in modelTestcaseVariation.readMeFirstUris: if isinstance(readMeFirstUri,tuple): # dtsName is for formula instances, but is from/to dts if versioning dtsName, 
readMeFirstUri = readMeFirstUri elif resultIsVersioningReport: if inputDTSes: dtsName = "to" else: dtsName = "from" else: dtsName = None if resultIsVersioningReport and dtsName: # build multi-schemaRef containing document if dtsName in inputDTSes: dtsName = inputDTSes[dtsName] else: modelXbrl = ModelXbrl.create(self.modelXbrl.modelManager, Type.DTSENTRIES, self.modelXbrl.modelManager.cntlr.webCache.normalizeUrl(readMeFirstUri[:-4] + ".dts", baseForElement), isEntry=True, errorCaptureLevel=errorCaptureLevel) DTSdoc = modelXbrl.modelDocument DTSdoc.inDTS = True doc = modelDocumentLoad(modelXbrl, readMeFirstUri, base=baseForElement) if doc is not None: DTSdoc.referencesDocument[doc] = ModelDocumentReference("import", DTSdoc.xmlRootElement) #fake import doc.inDTS = True elif resultIsTaxonomyPackage: from arelle import PackageManager, PrototypeInstanceObject dtsName = readMeFirstUri modelXbrl = PrototypeInstanceObject.XbrlPrototype(self.modelXbrl.modelManager, readMeFirstUri) PackageManager.packageInfo(self.modelXbrl.modelManager.cntlr, readMeFirstUri, reload=True, errors=modelXbrl.errors) else: # not a multi-schemaRef versioning report if self.useFileSource.isArchive: modelXbrl = ModelXbrl.load(self.modelXbrl.modelManager, readMeFirstUri, _("validating"), base=baseForElement, useFileSource=self.useFileSource, errorCaptureLevel=errorCaptureLevel, ixdsTarget=modelTestcaseVariation.ixdsTarget) else: # need own file source, may need instance discovery filesource = FileSource.openFileSource(readMeFirstUri, self.modelXbrl.modelManager.cntlr, base=baseForElement) if filesource and not filesource.selection and filesource.isArchive: try: if filesource.isTaxonomyPackage: _rptPkgIxdsOptions = {} for pluginXbrlMethod in pluginClassMethods("ModelTestcaseVariation.ReportPackageIxdsOptions"): pluginXbrlMethod(self, _rptPkgIxdsOptions) filesource.loadTaxonomyPackageMappings() for pluginXbrlMethod in pluginClassMethods("ModelTestcaseVariation.ReportPackageIxds"): 
filesource.select(pluginXbrlMethod(filesource, **_rptPkgIxdsOptions)) else: from arelle.CntlrCmdLine import filesourceEntrypointFiles entrypoints = filesourceEntrypointFiles(filesource) if entrypoints: # resolve an IXDS in entrypoints for pluginXbrlMethod in pluginClassMethods("ModelTestcaseVariation.ArchiveIxds"): pluginXbrlMethod(self, filesource,entrypoints) filesource.select(entrypoints[0].get("file", None) ) except Exception as err: self.modelXbrl.error("exception:" + type(err).__name__, _("Testcase variation validation exception: %(error)s, entry URL: %(instance)s"), modelXbrl=self.modelXbrl, instance=readMeFirstUri, error=err) continue # don't try to load this entry URL modelXbrl = ModelXbrl.load(self.modelXbrl.modelManager, filesource, _("validating"), base=baseForElement, errorCaptureLevel=errorCaptureLevel, ixdsTarget=modelTestcaseVariation.ixdsTarget) modelXbrl.isTestcaseVariation = True if modelXbrl.modelDocument is None: modelXbrl.info("arelle:notLoaded", _("Variation %(id)s %(name)s readMeFirst document not loaded: %(file)s"), modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name, file=os.path.basename(readMeFirstUri)) self.determineNotLoadedTestStatus(modelTestcaseVariation, modelXbrl.errors) modelXbrl.close() elif resultIsVersioningReport or resultIsTaxonomyPackage: inputDTSes[dtsName] = modelXbrl elif modelXbrl.modelDocument.type == Type.VERSIONINGREPORT: ValidateVersReport.ValidateVersReport(self.modelXbrl).validate(modelXbrl) self.determineTestStatus(modelTestcaseVariation, modelXbrl.errors) modelXbrl.close() elif testcase.type == Type.REGISTRYTESTCASE: self.instValidator.validate(modelXbrl) # required to set up dimensions, etc self.instValidator.executeCallTest(modelXbrl, modelTestcaseVariation.id, modelTestcaseVariation.cfcnCall, modelTestcaseVariation.cfcnTest) self.determineTestStatus(modelTestcaseVariation, modelXbrl.errors) self.instValidator.close() modelXbrl.close() else: inputDTSes[dtsName].append(modelXbrl) 
# validate except for formulas _hasFormulae = modelXbrl.hasFormulae modelXbrl.hasFormulae = False try: for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.Xbrl.Loaded"): pluginXbrlMethod(self.modelXbrl, modelXbrl, modelTestcaseVariation) self.instValidator.validate(modelXbrl, parameters) for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.Xbrl.Validated"): pluginXbrlMethod(self.modelXbrl, modelXbrl) except Exception as err: modelXbrl.error("exception:" + type(err).__name__, _("Testcase variation validation exception: %(error)s, instance: %(instance)s"), modelXbrl=modelXbrl, instance=modelXbrl.modelDocument.basename, error=err, exc_info=(type(err) is not AssertionError)) modelXbrl.hasFormulae = _hasFormulae if resultIsVersioningReport and modelXbrl.modelDocument: versReportFile = modelXbrl.modelManager.cntlr.webCache.normalizeUrl( modelTestcaseVariation.versioningReportUri, baseForElement) if os.path.exists(versReportFile): #validate existing modelVersReport = ModelXbrl.load(self.modelXbrl.modelManager, versReportFile, _("validating existing version report")) if modelVersReport and modelVersReport.modelDocument and modelVersReport.modelDocument.type == Type.VERSIONINGREPORT: ValidateVersReport.ValidateVersReport(self.modelXbrl).validate(modelVersReport) self.determineTestStatus(modelTestcaseVariation, modelVersReport.errors) modelVersReport.close() elif len(inputDTSes) == 2: ModelVersReport.ModelVersReport(self.modelXbrl).diffDTSes( versReportFile, inputDTSes["from"], inputDTSes["to"]) modelTestcaseVariation.status = "generated" else: modelXbrl.error("arelle:notLoaded", _("Variation %(id)s %(name)s input DTSes not loaded, unable to generate versioning report: %(file)s"), modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name, file=os.path.basename(readMeFirstUri)) modelTestcaseVariation.status = "failed" for inputDTS in inputDTSes.values(): inputDTS.close() del inputDTSes # dereference elif resultIsTaxonomyPackage: 
self.determineTestStatus(modelTestcaseVariation, modelXbrl.errors) modelXbrl.close() elif inputDTSes: # validate schema, linkbase, or instance modelXbrl = inputDTSes[None][0] expectedDataFiles = set(modelXbrl.modelManager.cntlr.webCache.normalizeUrl(uri, baseForElement) for d in modelTestcaseVariation.dataUris.values() for uri in d if not UrlUtil.isAbsolute(uri)) foundDataFiles = set() variationBase = os.path.dirname(baseForElement) for dtsName, inputDTS in inputDTSes.items(): # input instances are also parameters if dtsName: # named instance parameters[dtsName] = (None, inputDTS) #inputDTS is a list of modelXbrl's (instance DTSes) elif len(inputDTS) > 1: # standard-input-instance with multiple instance documents parameters[XbrlConst.qnStandardInputInstance] = (None, inputDTS) # allow error detection in validateFormula for _inputDTS in inputDTS: for docUrl, doc in _inputDTS.urlDocs.items(): if docUrl.startswith(variationBase) and not doc.type == Type.INLINEXBRLDOCUMENTSET: if getattr(doc,"loadedFromXbrlFormula", False): # may have been sourced from xf file if docUrl.replace("-formula.xml", ".xf") in expectedDataFiles: docUrl = docUrl.replace("-formula.xml", ".xf") foundDataFiles.add(docUrl) if expectedDataFiles - foundDataFiles: modelXbrl.info("arelle:testcaseDataNotUsed", _("Variation %(id)s %(name)s data files not used: %(missingDataFiles)s"), modelObject=modelTestcaseVariation, name=modelTestcaseVariation.name, id=modelTestcaseVariation.id, missingDataFiles=", ".join(sorted(os.path.basename(f) for f in expectedDataFiles - foundDataFiles))) if foundDataFiles - expectedDataFiles: modelXbrl.info("arelle:testcaseDataUnexpected", _("Variation %(id)s %(name)s files not in variation data: %(unexpectedDataFiles)s"), modelObject=modelTestcaseVariation, name=modelTestcaseVariation.name, id=modelTestcaseVariation.id, unexpectedDataFiles=", ".join(sorted(os.path.basename(f) for f in foundDataFiles - expectedDataFiles))) if modelXbrl.hasTableRendering or 
modelTestcaseVariation.resultIsTable: try: RenderingEvaluator.init(modelXbrl) except Exception as err: modelXbrl.error("exception:" + type(err).__name__, _("Testcase RenderingEvaluator.init exception: %(error)s, instance: %(instance)s"), modelXbrl=modelXbrl, instance=modelXbrl.modelDocument.basename, error=err, exc_info=True) modelXbrlHasFormulae = modelXbrl.hasFormulae if modelXbrlHasFormulae and self.modelXbrl.modelManager.formulaOptions.formulaAction != "none": try: # validate only formulae self.instValidator.parameters = parameters ValidateFormula.validate(self.instValidator) except Exception as err: modelXbrl.error("exception:" + type(err).__name__, _("Testcase formula variation validation exception: %(error)s, instance: %(instance)s"), modelXbrl=modelXbrl, instance=modelXbrl.modelDocument.basename, error=err, exc_info=(type(err) is not AssertionError)) if modelTestcaseVariation.resultIsInfoset and self.modelXbrl.modelManager.validateInfoset: for pluginXbrlMethod in pluginClassMethods("Validate.Infoset"): pluginXbrlMethod(modelXbrl, modelTestcaseVariation.resultInfosetUri) infoset = ModelXbrl.load(self.modelXbrl.modelManager, modelTestcaseVariation.resultInfosetUri, _("loading result infoset"), base=baseForElement, useFileSource=self.useFileSource, errorCaptureLevel=errorCaptureLevel) if infoset.modelDocument is None: modelXbrl.error("arelle:notLoaded", _("Variation %(id)s %(name)s result infoset not loaded: %(file)s"), modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name, file=os.path.basename(modelTestcaseVariation.resultXbrlInstance)) modelTestcaseVariation.status = "result infoset not loadable" else: # check infoset ValidateInfoset.validate(self.instValidator, modelXbrl, infoset) infoset.close() if modelXbrl.hasTableRendering or modelTestcaseVariation.resultIsTable: # and self.modelXbrl.modelManager.validateInfoset: # diff (or generate) table infoset resultTableUri = 
modelXbrl.modelManager.cntlr.webCache.normalizeUrl(modelTestcaseVariation.resultTableUri, baseForElement) if not any(alternativeValidation(modelXbrl, resultTableUri) for alternativeValidation in pluginClassMethods("Validate.TableInfoset")): try: ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, resultTableUri, diffToFile=True) # false to save infoset files except Exception as err: modelXbrl.error("exception:" + type(err).__name__, _("Testcase table linkbase validation exception: %(error)s, instance: %(instance)s"), modelXbrl=modelXbrl, instance=modelXbrl.modelDocument.basename, error=err, exc_info=True) self.instValidator.close() extraErrors = [] for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.Validated"): pluginXbrlMethod(self.modelXbrl, modelXbrl, extraErrors) self.determineTestStatus(modelTestcaseVariation, [e for inputDTSlist in inputDTSes.values() for inputDTS in inputDTSlist for e in inputDTS.errors] + extraErrors) # include infoset errors in status if modelXbrl.formulaOutputInstance and self.noErrorCodes(modelTestcaseVariation.actual): # if an output instance is created, and no string error codes, ignoring dict of assertion results, validate it modelXbrl.formulaOutputInstance.hasFormulae = False # block formulae on output instance (so assertion of input is not lost) self.instValidator.validate(modelXbrl.formulaOutputInstance, modelTestcaseVariation.parameters) self.determineTestStatus(modelTestcaseVariation, modelXbrl.formulaOutputInstance.errors) if self.noErrorCodes(modelTestcaseVariation.actual): # if still 'clean' pass it forward for comparison to expected result instance formulaOutputInstance = modelXbrl.formulaOutputInstance modelXbrl.formulaOutputInstance = None # prevent it from being closed now self.instValidator.close() compareIxResultInstance = (modelXbrl.modelDocument.type in (Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and modelTestcaseVariation.resultXbrlInstanceUri is not None) if compareIxResultInstance: formulaOutputInstance = 
modelXbrl # compare modelXbrl to generated output instance errMsgPrefix = "ix" else: # delete input instances before formula output comparision for inputDTSlist in inputDTSes.values(): for inputDTS in inputDTSlist: inputDTS.close() del inputDTSes # dereference errMsgPrefix = "formula" if resultIsXbrlInstance and formulaOutputInstance and formulaOutputInstance.modelDocument: _matchExpectedResultIDs = not modelXbrlHasFormulae # formula restuls have inconsistent IDs expectedInstance = ModelXbrl.load(self.modelXbrl.modelManager, modelTestcaseVariation.resultXbrlInstanceUri, _("loading expected result XBRL instance"), base=baseForElement, useFileSource=self.useFileSource, errorCaptureLevel=errorCaptureLevel) if expectedInstance.modelDocument is None: self.modelXbrl.error("{}:expectedResultNotLoaded".format(errMsgPrefix), _("Testcase \"%(name)s\" %(id)s expected result instance not loaded: %(file)s"), modelXbrl=testcase, id=modelTestcaseVariation.id, name=modelTestcaseVariation.name, file=os.path.basename(modelTestcaseVariation.resultXbrlInstanceUri), messageCodes=("formula:expectedResultNotLoaded","ix:expectedResultNotLoaded")) modelTestcaseVariation.status = "result not loadable" else: # compare facts for pluginXbrlMethod in pluginClassMethods("TestcaseVariation.ExpectedInstance.Loaded"): pluginXbrlMethod(expectedInstance, formulaOutputInstance) if len(expectedInstance.facts) != len(formulaOutputInstance.facts): formulaOutputInstance.error("{}:resultFactCounts".format(errMsgPrefix), _("Formula output %(countFacts)s facts, expected %(expectedFacts)s facts"), modelXbrl=modelXbrl, countFacts=len(formulaOutputInstance.facts), expectedFacts=len(expectedInstance.facts), messageCodes=("formula:resultFactCounts","ix:resultFactCounts")) else: formulaOutputFootnotesRelSet = ModelRelationshipSet(formulaOutputInstance, "XBRL-footnotes") expectedFootnotesRelSet = ModelRelationshipSet(expectedInstance, "XBRL-footnotes") def factFootnotes(fact, footnotesRelSet): footnotes = {} 
footnoteRels = footnotesRelSet.fromModelObject(fact) if footnoteRels: # most process rels in same order between two instances, use labels to sort for i, footnoteRel in enumerate(sorted(footnoteRels, key=lambda r: (r.fromLabel,r.toLabel))): modelObject = footnoteRel.toModelObject if isinstance(modelObject, ModelResource): xml = collapseWhitespace(modelObject.viewText().strip()) footnotes["Footnote {}".format(i+1)] = xml #re.sub(r'\s+', ' ', collapseWhitespace(modelObject.stringValue)) elif isinstance(modelObject, ModelFact): footnotes["Footnoted fact {}".format(i+1)] = \ "{} context: {} value: {}".format( modelObject.qname, modelObject.contextID, collapseWhitespace(modelObject.value)) return footnotes for expectedInstanceFact in expectedInstance.facts: unmatchedFactsStack = [] formulaOutputFact = formulaOutputInstance.matchFact(expectedInstanceFact, unmatchedFactsStack, deemP0inf=True, matchId=_matchExpectedResultIDs, matchLang=False) #formulaOutputFact = formulaOutputInstance.matchFact(expectedInstanceFact, unmatchedFactsStack, deemP0inf=True, matchId=True, matchLang=True) if formulaOutputFact is None: if unmatchedFactsStack: # get missing nested tuple fact, if possible missingFact = unmatchedFactsStack[-1] else: missingFact = expectedInstanceFact # is it possible to show value mismatches? 
expectedFacts = formulaOutputInstance.factsByQname.get(missingFact.qname) if len(expectedFacts) == 1: formulaOutputInstance.error("{}:expectedFactMissing".format(errMsgPrefix), _("Output missing expected fact %(fact)s, extracted value \"%(value1)s\", expected value \"%(value2)s\""), modelXbrl=missingFact, fact=missingFact.qname, value1=missingFact.xValue, value2=next(iter(expectedFacts)).xValue, messageCodes=("formula:expectedFactMissing","ix:expectedFactMissing")) else: formulaOutputInstance.error("{}:expectedFactMissing".format(errMsgPrefix), _("Output missing expected fact %(fact)s"), modelXbrl=missingFact, fact=missingFact.qname, messageCodes=("formula:expectedFactMissing","ix:expectedFactMissing")) else: # compare footnotes expectedInstanceFactFootnotes = factFootnotes(expectedInstanceFact, expectedFootnotesRelSet) formulaOutputFactFootnotes = factFootnotes(formulaOutputFact, formulaOutputFootnotesRelSet) if (len(expectedInstanceFactFootnotes) != len(formulaOutputFactFootnotes) or set(expectedInstanceFactFootnotes.values()) != set(formulaOutputFactFootnotes.values())): formulaOutputInstance.error("{}:expectedFactFootnoteDifference".format(errMsgPrefix), _("Output expected fact %(fact)s expected footnotes %(footnotes1)s produced footnotes %(footnotes2)s"), modelXbrl=(formulaOutputFact,expectedInstanceFact), fact=expectedInstanceFact.qname, footnotes1=sorted(expectedInstanceFactFootnotes.items()), footnotes2=sorted(formulaOutputFactFootnotes.items()), messageCodes=("formula:expectedFactFootnoteDifference","ix:expectedFactFootnoteDifference")) # for debugging uncomment next line to save generated instance document # formulaOutputInstance.saveInstance(r"c:\temp\test-out-inst.xml") expectedInstance.close() del expectedInstance # dereference self.determineTestStatus(modelTestcaseVariation, formulaOutputInstance.errors) formulaOutputInstance.close() del formulaOutputInstance if compareIxResultInstance: for inputDTSlist in inputDTSes.values(): for inputDTS in 
inputDTSlist: inputDTS.close() del inputDTSes # dereference # update ui thread via modelManager (running in background here) self.modelXbrl.modelManager.viewModelObject(self.modelXbrl, modelTestcaseVariation.objectId()) _statusCounts = OrderedDict((("pass",0),("fail",0))) for tv in getattr(testcase, "testcaseVariations", ()): _statusCounts[tv.status] = _statusCounts.get(tv.status, 0) + 1 self.modelXbrl.info("arelle:testCaseResults", ", ".join("{}={}".format(k,c) for k, c in _statusCounts.items() if k)) self.modelXbrl.modelManager.showStatus(_("ready"), 2000)
def xuleCmdUtilityRun(cntlr, options, **kwargs):
    """Process Xule command-line options.

    Validates option combinations (exiting via ``parser.error`` on invalid
    ones), performs the requested one-shot ruleset operations (compile,
    package management, rule-set-map maintenance, ruleset validation), and
    then either initializes the Xule server, processes a filing list, or
    runs the Xule processor without an instance document.

    :param cntlr: Arelle controller; also stashed in module global ``_cntlr``
    :param options: parsed command-line options; also stashed in ``_options``
    """
    # Save the controller and options in module globals so later callbacks
    # (e.g. runXule) can reach them.
    global _cntlr
    _cntlr = cntlr
    global _options
    _options = options

    cntlr.addToLog("Xule version: %s" % __version__, 'info')

    # Throwaway parser used only for error(), which prints the message and exits.
    parser = OptionParser()

    if getattr(options, "xule_version", False):
        cntlr.addToLog("Xule version: %s" % __version__, 'xule')
        cntlr.close()

    # --- option-combination checks -------------------------------------
    if getattr(options, "xule_cpu", None) is not None and not getattr(options, 'xule_multi', None):
        parser.error(_("--xule-multi is required with --xule_cpu."))

    if getattr(options, "xule_server", None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule_server."))

    from os import name
    if getattr(options, "xule_multi", False) and name == 'nt':
        parser.error(_("--xule-multi can't be used in Windows"))

    # Default the thread count. The attribute name is hyphenated
    # ("xule-numthreads") so it is only reachable via getattr/setattr; kept
    # as-is because other code may look it up by this exact name.
    if getattr(options, "xule-numthreads", None) is None:  # fixed: was "== None"
        setattr(options, "xule-numthreads", 1)

    if getattr(options, 'xule_add_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-add-packages."))

    if getattr(options, 'xule_remove_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-remove-packages."))

    if getattr(options, 'xule_show_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-show-packages."))

    # At most one of the three rule-set-map maintenance options may be used.
    if sum(1 for x in (getattr(options, "xule_update_rule_set_map", False),
                       getattr(options, "xule_replace_rule_set_map", False),
                       getattr(options, "xule_reset_rule_set_map", False)) if x) > 1:
        parser.error(_("Cannot use --xule-update-rule-set-map or --xule-replace-rule-set-map or --xule-reset-rule-set-map the same time."))

    if getattr(options, 'xule_validate', None) is not None and getattr(options, 'xule_rule_set', None) is None:
        parser.error(_("--xule-validate requires a Xule ruleset. Use option --xule-rule-set."))

    if getattr(options, 'xule_filing_list', None) is not None and getattr(options, 'entrypointFile', None) is not None:
        parser.error(_("--xule-filing-list cannot be used with -f"))

    # --- one-shot ruleset operations -----------------------------------

    # compile rules
    if getattr(options, "xule_compile", None):
        compile_destination = getattr(options, "xule_rule_set", "xuleRules")
        # NOTE(review): getattr without a default raises AttributeError if
        # xule_compile_type was never registered -- TODO confirm the option
        # is always defined before relying on it here.
        xp.parseRules(options.xule_compile.split("|"), compile_destination,
                      getattr(options, "xule_compile_type"))

    # add packages
    if getattr(options, "xule_add_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        rule_set.manage_packages(options.xule_add_packages.split('|'), 'add')

    # remove packages
    if getattr(options, "xule_remove_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        rule_set.manage_packages(options.xule_remove_packages.split('|'), 'del')

    # show packages
    if getattr(options, "xule_show_packages", False):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        print("Packages in rule set:")
        for package_info in rule_set.get_packages_info():
            print('\t' + package_info.get('name') + ' (' + os.path.basename(package_info.get('URL')) + ')')

    # update rule set map
    if getattr(options, 'xule_update_rule_set_map', None):
        xu.update_rule_set_map(cntlr, getattr(options, 'xule_update_rule_set_map'), _xule_rule_set_map_name)

    # replace rule set map
    if getattr(options, 'xule_replace_rule_set_map', None):
        xu.update_rule_set_map(cntlr, getattr(options, 'xule_replace_rule_set_map'), _xule_rule_set_map_name, overwrite=True)

    # reset rule set map
    if getattr(options, 'xule_reset_rule_set_map', False):
        xu.reset_rule_set_map(cntlr, _xule_rule_set_map_name)

    # display the rule set map
    if getattr(options, 'xule_display_rule_set_map', False):
        displayValidatorRulesetMap(cntlr, 'Xule', _xule_rule_set_map_name)

    # validate ruleset
    if getattr(options, 'xule_validate', False):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(options.xule_rule_set, open_packages=not getattr(options, 'xule_bypass_packages', False))
        xv.XuleValidate(cntlr, rule_set, options.xule_rule_set)

    if getattr(options, "xule_server", None):
        from threading import Thread
        try:
            rule_set = xr.XuleRuleSet()
            rule_set.open(options.xule_rule_set, False)
        except xr.XuleRuleSetError:
            raise

        # Create the global evaluation context shared by the server workers.
        global_context = XuleGlobalContext(rule_set, cntlr=cntlr, options=options)
        global_context.message_queue.print("Using %d processors" % (global_context.num_processors))

        # Start the output message queue thread when running multi-process.
        if getattr(options, "xule_multi", False):
            t = Thread(target=xm.output_message_queue, args=(global_context,))
            t.start()

        global_context.message_queue.logging("Building Constant and Rule Groups")
        global_context.all_constants = rule_set.get_grouped_constants()
        global_context.all_rules = rule_set.get_grouped_rules()

        for g in global_context.all_constants:
            global_context.message_queue.logging("Constants: %s - %d" % (g, len(global_context.all_constants[g])))
        for g in global_context.all_rules:
            global_context.message_queue.logging("Rules: %s - %d" % (g, len(global_context.all_rules[g])))

        # evaluate valid constants (no dependency, rules taxonomy)
        global_context.message_queue.logging("Calculating and Storing Constants")
        xm.run_constant_group(global_context, 'c', 'rtc')

        # Add precalculated information to the cntlr to pass to XuleServer
        setattr(cntlr, "xule_options", options)
        setattr(cntlr, "rule_set", global_context.rule_set)
        setattr(cntlr, "constant_list", global_context._constants)
        setattr(cntlr, "all_constants", global_context.all_constants)
        setattr(cntlr, "all_rules", global_context.all_rules)

        global_context.message_queue.logging("Finished Server Initialization")

        # stop message_queue (and wait for its thread when multi)
        global_context.message_queue.stop()
        if getattr(options, "xule_multi", False):
            t.join()
    else:
        if getattr(options, 'xule_filing_list', None) is not None:
            # process filing list; the inner truthiness check also skips an
            # empty-string file name.
            if getattr(options, "xule_filing_list", None):
                try:
                    with open(options.xule_filing_list, 'r') as filing_list_file:
                        # Try JSON first ...
                        try:
                            filing_list = json.load(filing_list_file, object_pairs_hook=collections.OrderedDict)
                        except Exception:  # fixed: was a bare except
                            # ... then fall back to a flat list of file names.
                            try:
                                filing_list_file.seek(0)  # reset the file pointer
                                filing_list = [{"file": file_name} for file_name in filing_list_file]
                            except Exception:  # fixed: was a bare except
                                cntlr.addToLog(_("Unable to open Filing listing file '%s'." % options.xule_filing_list), 'xule')
                                raise
                except FileNotFoundError:
                    cntlr.addToLog(_("Filing listing file '%s' is not found" % options.xule_filing_list), 'xule')
                    raise

                if isinstance(filing_list, list):
                    for file_info in filing_list:
                        if isinstance(file_info, dict):
                            input_file_name = file_info.get('file')
                            if input_file_name is not None:
                                input_file_name = input_file_name.strip()
                                print("Processing filing", input_file_name)
                                filing_filesource = FileSource.openFileSource(input_file_name, cntlr)
                                modelManager = ModelManager.initialize(cntlr)
                                modelXbrl = modelManager.load(filing_filesource)
                                # Copy options so per-filing overrides do not leak
                                # into subsequent filings.
                                new_options = copy.copy(options)
                                delattr(new_options, 'xule_filing_list')
                                for k, v in file_info.items():
                                    # Only xule-specific options may be overridden per filing.
                                    if k != 'file' and k.strip().lower().startswith('xule'):
                                        setattr(new_options, k.strip().lower(), v)
                                # fixed: getattr defaults added so a missing option
                                # attribute no longer raises AttributeError.
                                if getattr(new_options, 'xule_run', False):
                                    xuleCmdXbrlLoaded(cntlr, new_options, modelXbrl)
                                elif getattr(new_options, 'validate', False):
                                    for xule_validator in _xule_validators:
                                        runXule(_cntlr, new_options, modelXbrl, xule_validator['map_name'])
                                modelXbrl.close()
        else:
            if options.entrypointFile is None:
                # try running the xule processor - this is when rules are run
                # without an instance document
                xuleCmdXbrlLoaded(cntlr, options, None)

    # Only register xule as a validator if the xule plugin was directly added
    # in the --plugin options.
    if isXuleDirect():
        xuleRegisterValidators('Xule', _xule_rule_set_map_name)
def xuleCmdUtilityRun(cntlr, options, **kwargs):
    """Handle Xule command-line utility options.

    Checks option combinations (exiting via ``parser.error`` when invalid),
    runs the requested ruleset operations (compile, add/remove/show packages,
    rule-set-map maintenance, ruleset validation), and then either starts the
    Xule server, processes a filing list, or runs the processor without an
    instance document.

    :param cntlr: Arelle controller; also saved in module global ``_cntlr``
    :param options: parsed command-line options; also saved in ``_options``
    """
    # Save the controller and options in the module global variables.
    global _cntlr
    _cntlr = cntlr
    global _options
    _options = options

    cntlr.addToLog("Xule version: %s" % __version__, 'info')

    # parser is used only for error(), which prints the message and exits.
    parser = optparse.OptionParser()

    if getattr(options, "xule_version", False):
        cntlr.addToLog("Xule version: %s" % __version__, 'xule')
        cntlr.close()

    # check option combinations
    if getattr(options, "xule_cpu", None) is not None and not getattr(options, 'xule_multi', None):
        parser.error(_("--xule-multi is required with --xule_cpu."))

    if getattr(options, "xule_server", None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule_server."))

    from os import name
    if getattr(options, "xule_multi", False) and name == 'nt':
        parser.error(_("--xule-multi can't be used in Windows"))

    # Default the thread count; the hyphenated attribute name is only
    # reachable via getattr/setattr and is kept for compatibility.
    if getattr(options, "xule-numthreads", None) is None:  # fixed: was "== None"
        setattr(options, "xule-numthreads", 1)

    if getattr(options, 'xule_add_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-add-packages."))

    if getattr(options, 'xule_remove_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-remove-packages."))

    if getattr(options, 'xule_show_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-show-packages."))

    # The three rule-set-map maintenance options are mutually exclusive.
    if sum(1 for x in (getattr(options, "xule_update_rule_set_map", False),
                       getattr(options, "xule_replace_rule_set_map", False),
                       getattr(options, "xule_reset_rule_set_map", False)) if x) > 1:
        parser.error(_("Cannot use --xule-update-rule-set-map or --xule-replace-rule-set-map or --xule-reset-rule-set-map the same time."))

    if getattr(options, 'xule_validate', None) is not None and getattr(options, 'xule_rule_set', None) is None:
        parser.error(_("--xule-validate requires a Xule ruleset. Use option --xule-rule-set."))

    if getattr(options, 'xule_filing_list', None) is not None and getattr(options, 'entrypointFile', None) is not None:
        parser.error(_("--xule-filing-list cannot be used with -f"))

    # compile rules
    if getattr(options, "xule_compile", None):
        compile_destination = getattr(options, "xule_rule_set", "xuleRules")
        # NOTE(review): getattr without default raises AttributeError when
        # xule_compile_type is not registered -- TODO confirm it always is.
        xp.parseRules(options.xule_compile.split("|"), compile_destination,
                      getattr(options, "xule_compile_type"))

    # add packages
    if getattr(options, "xule_add_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        rule_set.manage_packages(options.xule_add_packages.split('|'), 'add')

    # remove packages
    if getattr(options, "xule_remove_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        rule_set.manage_packages(options.xule_remove_packages.split('|'), 'del')

    # show packages
    if getattr(options, "xule_show_packages", False):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        print("Packages in rule set:")
        for package_info in rule_set.get_packages_info():
            print('\t' + package_info.get('name') + ' (' + os.path.basename(package_info.get('URL')) + ')')

    # update rule set map
    if getattr(options, 'xule_update_rule_set_map', None):
        xu.update_rule_set_map(cntlr, getattr(options, 'xule_update_rule_set_map'), _xule_rule_set_map_name)

    # replace rule set map
    if getattr(options, 'xule_replace_rule_set_map', None):
        xu.update_rule_set_map(cntlr, getattr(options, 'xule_replace_rule_set_map'), _xule_rule_set_map_name, overwrite=True)

    # reset rule set map
    if getattr(options, 'xule_reset_rule_set_map', False):
        xu.reset_rule_set_map(cntlr, _xule_rule_set_map_name)

    # display the rule set map
    if getattr(options, 'xule_display_rule_set_map', False):
        displayValidatorRulesetMap(cntlr, 'Xule', _xule_rule_set_map_name)

    # validate ruleset
    if getattr(options, 'xule_validate', False):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(options.xule_rule_set,
                      open_packages=not getattr(options, 'xule_bypass_packages', False))
        xv.XuleValidate(cntlr, rule_set, options.xule_rule_set)

    if getattr(options, "xule_server", None):
        from threading import Thread
        try:
            rule_set = xr.XuleRuleSet()
            rule_set.open(options.xule_rule_set, False)
        except xr.XuleRuleSetError:
            raise

        # Create the global evaluation context shared by server workers.
        global_context = XuleGlobalContext(rule_set, cntlr=cntlr, options=options)
        global_context.message_queue.print("Using %d processors" % (global_context.num_processors))

        # Start the output message queue thread for multi-process runs.
        if getattr(options, "xule_multi", False):
            t = Thread(target=xm.output_message_queue, args=(global_context,))
            t.start()

        global_context.message_queue.logging("Building Constant and Rule Groups")
        global_context.all_constants = rule_set.get_grouped_constants()
        global_context.all_rules = rule_set.get_grouped_rules()

        for g in global_context.all_constants:
            global_context.message_queue.logging("Constants: %s - %d" % (g, len(global_context.all_constants[g])))
        for g in global_context.all_rules:
            global_context.message_queue.logging("Rules: %s - %d" % (g, len(global_context.all_rules[g])))

        # evaluate valid constants (no dependency, rules taxonomy)
        global_context.message_queue.logging("Calculating and Storing Constants")
        xm.run_constant_group(global_context, 'c', 'rtc')

        # Add precalculated information to the cntlr to pass to XuleServer
        setattr(cntlr, "xule_options", options)
        setattr(cntlr, "rule_set", global_context.rule_set)
        setattr(cntlr, "constant_list", global_context._constants)
        setattr(cntlr, "all_constants", global_context.all_constants)
        setattr(cntlr, "all_rules", global_context.all_rules)

        global_context.message_queue.logging("Finished Server Initialization")

        # stop message_queue and wait for its thread when multi
        global_context.message_queue.stop()
        if getattr(options, "xule_multi", False):
            t.join()
    else:
        if getattr(options, 'xule_filing_list', None) is not None:
            # process filing list; inner truthiness check also skips "".
            if getattr(options, "xule_filing_list", None):
                try:
                    with open(options.xule_filing_list, 'r') as filing_list_file:
                        # Try JSON first ...
                        try:
                            filing_list = json.load(filing_list_file, object_pairs_hook=collections.OrderedDict)
                        except Exception:  # fixed: was a bare except
                            # ... then fall back to a flat list of file names.
                            try:
                                filing_list_file.seek(0)  # reset the file pointer
                                filing_list = [{"file": file_name} for file_name in filing_list_file]
                            except Exception:  # fixed: was a bare except
                                cntlr.addToLog(_("Unable to open Filing listing file '%s'." % options.xule_filing_list), 'xule')
                                raise
                except FileNotFoundError:
                    cntlr.addToLog(_("Filing listing file '%s' is not found" % options.xule_filing_list), 'xule')
                    raise

                if isinstance(filing_list, list):
                    for file_info in filing_list:
                        if isinstance(file_info, dict):
                            input_file_name = file_info.get('file')
                            if input_file_name is not None:
                                input_file_name = input_file_name.strip()
                                print("Processing filing", input_file_name)
                                filing_filesource = FileSource.openFileSource(input_file_name, cntlr)
                                modelManager = ModelManager.initialize(cntlr)
                                modelXbrl = modelManager.load(filing_filesource)
                                # Copy options so per-filing overrides stay local.
                                new_options = copy.copy(options)
                                delattr(new_options, 'xule_filing_list')
                                for k, v in file_info.items():
                                    # Only change xule options
                                    if k != 'file' and k.strip().lower().startswith('xule'):
                                        setattr(new_options, k.strip().lower(), v)
                                # fixed: getattr defaults so a missing attribute
                                # no longer raises AttributeError.
                                if getattr(new_options, 'xule_run', False):
                                    xuleCmdXbrlLoaded(cntlr, new_options, modelXbrl)
                                elif getattr(new_options, 'validate', False):
                                    for xule_validator in _xule_validators:
                                        runXule(_cntlr, new_options, modelXbrl, xule_validator['map_name'])
                                modelXbrl.close()
        else:
            if options.entrypointFile is None:
                # try running the xule processor - this is when rules are run
                # without an instance document
                xuleCmdXbrlLoaded(cntlr, options, None)

    # Only register xule as a validator if the xule plugin was directly added
    # in the --plugin options.
    if isXuleDirect():
        xuleRegisterValidators('Xule', _xule_rule_set_map_name)
def xuleCmdUtilityRun(cntlr, options, **kwargs):
    """Process Xule command-line options.

    Checks option combinations (exiting via ``parser.error`` when invalid),
    runs the requested one-shot ruleset operations (compile, add/remove/show
    packages), then either initializes the Xule server or runs the processor
    directly, including a flat filing-list mode.

    NOTE(review): this file contains more than one definition of
    ``xuleCmdUtilityRun``; in Python the last executed definition wins --
    confirm which copy is intended to be live.
    """
    # check option combinations; parser is used only for error(), which
    # prints the message and exits.
    parser = OptionParser()

    if getattr(options, "xule_version", False):
        # NOTE(review): execution continues after cntlr.close() -- confirm
        # whether --xule-version is meant to be a terminal action.
        cntlr.addToLog("Xule version: %s" % __version__)
        cntlr.close()

    if getattr(options, "xule_cpu", None) is not None and not getattr(options, 'xule_multi', None):
        parser.error(_("--xule-multi is required with --xule_cpu."))

    if getattr(options, "xule_run", None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-run."))

    if getattr(options, "xule_server", None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule_server."))

    # Default the thread count; the hyphenated attribute name is only
    # reachable via getattr/setattr.
    # NOTE(review): "== None" should be "is None" (PEP 8).
    if getattr(options, "xule-numthreads", None) == None:
        setattr(options, "xule-numthreads", 1)

    if getattr(options, 'xule_add_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-add-packages."))

    if getattr(options, 'xule_remove_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-remove-packages."))

    if getattr(options, 'xule_show_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-show-packages."))

    from os import name
    if getattr(options, "xule_multi", False) and name == 'nt':
        parser.error(_("--xule-multi can't be used in Windows"))

    if not getattr(options, "xule_multi", False) and getattr(options, "xule_cpu", None) is not None:
        parser.error(_("--xule-cpu can only be used with --xule-multi enabled"))

    # compile rules
    if getattr(options, "xule_compile", None):
        compile_destination = getattr(options, "xule_rule_set", "xuleRules")
        from .XuleParser import parseRules
        parseRules(options.xule_compile.split("|"), compile_destination)

    # add packages
    if getattr(options, "xule_add_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        packages = options.xule_add_packages.split('|')
        rule_set.manage_packages(packages, 'add')

    # remove packages
    if getattr(options, "xule_remove_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        packages = options.xule_remove_packages.split('|')
        rule_set.manage_packages(packages, 'del')

    # show packages
    if getattr(options, "xule_show_packages", False):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        print("Packages in rule set:")
        for package_info in rule_set.get_packages_info():
            print('\t' + package_info.get('name') + ' (' + os.path.basename(package_info.get('URL')) + ')')

    if getattr(options, "xule_server", None):
        from .XuleMultiProcessing import run_constant_group, output_message_queue
        from threading import Thread

        try:
            rule_set = xr.XuleRuleSet()
            rule_set.open(options.xule_rule_set, False)
        except xr.XuleRuleSetError:
            # NOTE(review): catch-and-reraise is a no-op; kept as an explicit
            # marker of where ruleset-open errors surface.
            raise

        # Create global Context shared by the server workers.
        global_context = XuleGlobalContext(rule_set, cntlr=cntlr, options=options)

        global_context.message_queue.print("Using %d processors" % (global_context.num_processors))

        # Start Output message queue thread when running multi-process.
        if getattr(options, "xule_multi", False):
            t = Thread(target=output_message_queue, args=(global_context,))
            t.start()

        global_context.message_queue.logging("Building Constant and Rule Groups")
        global_context.all_constants = rule_set.get_grouped_constants()
        global_context.all_rules = rule_set.get_grouped_rules()

        for g in global_context.all_constants:
            global_context.message_queue.logging("Constants: %s - %d" % (g, len(global_context.all_constants[g])))
        for g in global_context.all_rules:
            global_context.message_queue.logging("Rules: %s - %d" % (g, len(global_context.all_rules[g])))

        # evaluate valid constants (no dependency, rules taxonomy)
        global_context.message_queue.logging("Calculating and Storing Constants")
        run_constant_group(global_context, 'c', 'rtc')

        # Add precalculated information to the cntlr to pass to XuleServer
        setattr(cntlr, "xule_options", options)
        setattr(cntlr, "rule_set", global_context.rule_set)
        setattr(cntlr, "constant_list", global_context._constants)
        setattr(cntlr, "all_constants", global_context.all_constants)
        setattr(cntlr, "all_rules", global_context.all_rules)

        global_context.message_queue.logging("Finished Server Initialization")

        # stop message_queue (and wait for its thread when multi)
        global_context.message_queue.stop()

        if getattr(options, "xule_multi", False):
            t.join()
    else:
        if options.entrypointFile is None:
            # try running the xule processor without an instance document
            xuleCmdXbrlLoaded(cntlr, options, None)

        # process filing list: one filing file name per line
        if getattr(options, "xule_filing_list", None):
            try:
                with open(options.xule_filing_list, "r") as filing_list:
                    for line in filing_list:
                        filing = line.strip()
                        print("Processing filing", filing)
                        filing_filesource = FileSource.openFileSource(filing, cntlr)
                        modelManager = ModelManager.initialize(cntlr)
                        modelXbrl = modelManager.load(filing_filesource)
                        xuleCmdXbrlLoaded(cntlr, options, modelXbrl)
                        modelXbrl.close()
            except FileNotFoundError:
                print("Filing listing file '%s' is not found" % options.xule_filing_list)
def xuleCmdUtilityRun(cntlr, options, **kwargs):
    """Process Xule command-line options.

    Validates option combinations (exiting via ``parser.error`` on invalid
    ones), performs requested ruleset operations (compile, package
    management, rule-set-map maintenance), then either initializes the Xule
    server or runs the processor directly, including a flat filing-list mode.

    NOTE(review): this file contains more than one definition of
    ``xuleCmdUtilityRun``; in Python the last executed definition wins --
    confirm which copy is intended to be live.
    """
    # Save the controller and options in the module global variables so
    # later callbacks can reach them.
    global _cntlr
    _cntlr = cntlr
    global _options
    _options = options

    # check option combinations; parser is used only for error(), which
    # prints the message and exits.
    parser = OptionParser()

    if getattr(options, "xule_version", False):
        # NOTE(review): execution continues after cntlr.close() -- confirm
        # whether --xule-version is meant to be a terminal action.
        cntlr.addToLog("Xule version: %s" % __version__)
        cntlr.close()

    if getattr(options, "xule_cpu", None) is not None and not getattr(options, 'xule_multi', None):
        parser.error(_("--xule-multi is required with --xule_cpu."))

    if getattr(options, "xule_server", None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule_server."))

    # Default the thread count; the hyphenated attribute name is only
    # reachable via getattr/setattr.
    # NOTE(review): "== None" should be "is None" (PEP 8).
    if getattr(options, "xule-numthreads", None) == None:
        setattr(options, "xule-numthreads", 1)

    if getattr(options, 'xule_add_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-add-packages."))

    if getattr(options, 'xule_remove_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-remove-packages."))

    if getattr(options, 'xule_show_packages', None) is not None and not getattr(options, 'xule_rule_set', None):
        parser.error(_("--xule-rule-set is required with --xule-show-packages."))

    from os import name
    if getattr(options, "xule_multi", False) and name == 'nt':
        parser.error(_("--xule-multi can't be used in Windows"))

    if not getattr(options, "xule_multi", False) and getattr(options, "xule_cpu", None) is not None:
        parser.error(_("--xule-cpu can only be used with --xule-multi enabled"))

    # The three rule-set-map maintenance options are mutually exclusive.
    if len([x for x in (getattr(options, "xule_update_rule_set_map", False),
                        getattr(options, "xule_replace_rule_set_map", False),
                        getattr(options, "xule_reset_rule_set_map", False)) if x]) > 1:
        parser.error(_("Cannot use --xule-update-rule-set-map or --xule-replace-rule-set-map or --xule-reset-rule-set-map the same time."))

    # compile rules
    if getattr(options, "xule_compile", None):
        compile_destination = getattr(options, "xule_rule_set", "xuleRules")
        from .XuleParser import parseRules
        parseRules(options.xule_compile.split("|"), compile_destination)

    # add packages
    if getattr(options, "xule_add_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        packages = options.xule_add_packages.split('|')
        rule_set.manage_packages(packages, 'add')

    # remove packages
    if getattr(options, "xule_remove_packages", None):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        packages = options.xule_remove_packages.split('|')
        rule_set.manage_packages(packages, 'del')

    # show packages
    if getattr(options, "xule_show_packages", False):
        rule_set = xr.XuleRuleSet(cntlr)
        rule_set.open(getattr(options, "xule_rule_set"), open_packages=False, open_files=False)
        print("Packages in rule set:")
        for package_info in rule_set.get_packages_info():
            print('\t' + package_info.get('name') + ' (' + os.path.basename(package_info.get('URL')) + ')')

    # update rule set map
    if getattr(options, 'xule_update_rule_set_map', None):
        xu.update_rule_set_map(cntlr, getattr(options, 'xule_update_rule_set_map'))

    # replace rule set map
    if getattr(options, 'xule_replace_rule_set_map', None):
        xu.update_rule_set_map(cntlr, getattr(options, 'xule_replace_rule_set_map'), overwrite=True)

    # reset rule set map
    if getattr(options, 'xule_reset_rule_set_map', False):
        xu.reset_rule_set_map(cntlr)

    if getattr(options, "xule_server", None):
        from .XuleMultiProcessing import run_constant_group, output_message_queue
        from threading import Thread

        try:
            rule_set = xr.XuleRuleSet()
            rule_set.open(options.xule_rule_set, False)
        except xr.XuleRuleSetError:
            # NOTE(review): catch-and-reraise is a no-op; kept as an explicit
            # marker of where ruleset-open errors surface.
            raise

        # Create global Context shared by the server workers.
        global_context = XuleGlobalContext(rule_set, cntlr=cntlr, options=options)

        global_context.message_queue.print("Using %d processors" % (global_context.num_processors))

        # Start Output message queue thread when running multi-process.
        if getattr(options, "xule_multi", False):
            t = Thread(target=output_message_queue, args=(global_context,))
            t.start()

        global_context.message_queue.logging("Building Constant and Rule Groups")
        global_context.all_constants = rule_set.get_grouped_constants()
        global_context.all_rules = rule_set.get_grouped_rules()

        for g in global_context.all_constants:
            global_context.message_queue.logging("Constants: %s - %d" % (g, len(global_context.all_constants[g])))
        for g in global_context.all_rules:
            global_context.message_queue.logging("Rules: %s - %d" % (g, len(global_context.all_rules[g])))

        # evaluate valid constants (no dependency, rules taxonomy)
        global_context.message_queue.logging("Calculating and Storing Constants")
        run_constant_group(global_context, 'c', 'rtc')

        # Add precalculated information to the cntlr to pass to XuleServer
        setattr(cntlr, "xule_options", options)
        setattr(cntlr, "rule_set", global_context.rule_set)
        setattr(cntlr, "constant_list", global_context._constants)
        setattr(cntlr, "all_constants", global_context.all_constants)
        setattr(cntlr, "all_rules", global_context.all_rules)

        global_context.message_queue.logging("Finished Server Initialization")

        # stop message_queue (and wait for its thread when multi)
        global_context.message_queue.stop()

        if getattr(options, "xule_multi", False):
            t.join()
    else:
        if options.entrypointFile is None:
            # try running the xule processor without an instance document
            xuleCmdXbrlLoaded(cntlr, options, None)

        # process filing list: one filing file name per line
        if getattr(options, "xule_filing_list", None):
            try:
                with open(options.xule_filing_list, "r") as filing_list:
                    for line in filing_list:
                        filing = line.strip()
                        print("Processing filing", filing)
                        filing_filesource = FileSource.openFileSource(filing, cntlr)
                        modelManager = ModelManager.initialize(cntlr)
                        modelXbrl = modelManager.load(filing_filesource)
                        xuleCmdXbrlLoaded(cntlr, options, modelXbrl)
                        modelXbrl.close()
            except FileNotFoundError:
                print("Filing listing file '%s' is not found" % options.xule_filing_list)
def _get_rule_set_file_object(self):
    """Open the rule set at ``self.location`` and return its binary stream.

    Uses Arelle's FileSource so taxonomy-package mappings and archive
    locations are handled transparently.
    """
    from arelle import FileSource
    source = FileSource.openFileSource(self.location, self._cntlr)
    # FileSource.file(...) returns a tuple; the first element is the
    # opened binary file object for the requested location.
    return source.file(self.location, binary=True)[0]
def evaluateTableIndex(modelXbrl):
    """Build an EFM rendering-compatible table index over presentation link roles.

    For an EFM disclosure system, classifies each presentation ELR into a
    financial-report group (Cover, Statements, Notes, Policies, Tables,
    Details, Uncategorized), flow-allocates facts to the role they are
    reported in, links parent/child tables by name matching, and (when the
    ugt-topics resource loads) scores roles against UGT topics.  Results are
    attached to the role types as ``_tableIndex``, ``_tableFacts``,
    ``_tableChildren``, ``_tableParent`` and ``_tableTopic*`` attributes.

    Returns the link role URI of the first cover table (or first
    document-type role) when the index was built, else None for non-EFM
    disclosure systems.
    """
    disclosureSystem = modelXbrl.modelManager.disclosureSystem
    if disclosureSystem.EFM:
        # Group labels are prefixed with a digit so they sort in report order.
        COVER = "1Cover"
        STMTS = "2Financial Statements"
        NOTES = "3Notes to Financial Statements"
        POLICIES = "4Accounting Policies"
        TABLES = "5Notes Tables"
        DETAILS = "6Notes Details"
        UNCATEG = "7Uncategorized"
        # EFM role definitions look like "0nn - Statement - name".
        roleDefinitionPattern = re.compile(
            r"([0-9]+) - (Statement|Disclosure|Schedule|Document) - (.+)")
        # build EFM rendering-compatible index
        definitionElrs = dict(
            (roleType.definition, roleType)
            for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
            for roleType in modelXbrl.roleTypes.get(roleURI, ()))
        # Risk/return (rr) filings are all treated as cover tables.
        isRR = any(
            ns.startswith("http://xbrl.sec.gov/rr/")
            for ns in modelXbrl.namespaceDocs.keys())
        tableGroup = None
        firstTableLinkroleURI = None
        firstDocumentLinkroleURI = None
        sortedRoleTypes = sorted(definitionElrs.items(), key=lambda item: item[0])
        # State machine: the group of each role depends on the previous
        # role's group plus keywords in the role name.
        for roleDefinition, roleType in sortedRoleTypes:
            roleType._tableChildren = []
            match = roleDefinitionPattern.match(roleDefinition) if roleDefinition else None
            if not match:
                roleType._tableIndex = (UNCATEG, "", roleType.roleURI)
                continue
            seq, tblType, tblName = match.groups()
            if isRR:
                tableGroup = COVER
            elif not tableGroup:
                tableGroup = ("Paren" in tblName and COVER or
                              tblType == "Statement" and STMTS or
                              "(Polic" in tblName and NOTES or
                              "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or COVER)
            elif tableGroup == COVER:
                tableGroup = (tblType == "Statement" and STMTS or
                              "Paren" in tblName and COVER or
                              "(Polic" in tblName and NOTES or
                              "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == STMTS:
                tableGroup = ((tblType == "Statement" or "Paren" in tblName) and STMTS or
                              "(Polic" in tblName and NOTES or
                              "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == NOTES:
                tableGroup = ("(Polic" in tblName and POLICIES or
                              "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or
                              tblType == "Disclosure" and NOTES or UNCATEG)
            elif tableGroup == POLICIES:
                tableGroup = ("(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or
                              ("Paren" in tblName or "(Polic" in tblName) and POLICIES or UNCATEG)
            elif tableGroup == TABLES:
                tableGroup = ("(Detail" in tblName and DETAILS or
                              ("Paren" in tblName or "(Table" in tblName) and TABLES or UNCATEG)
            elif tableGroup == DETAILS:
                tableGroup = (("Paren" in tblName or "(Detail" in tblName) and DETAILS or UNCATEG)
            else:
                tableGroup = UNCATEG
            if firstTableLinkroleURI is None and tableGroup == COVER:
                firstTableLinkroleURI = roleType.roleURI
            if tblType == "Document" and not firstDocumentLinkroleURI:
                firstDocumentLinkroleURI = roleType.roleURI
            roleType._tableIndex = (tableGroup, seq, tblName)

        # flow allocate facts to roles (SEC presentation groups)
        if not modelXbrl.qnameDimensionDefaults:  # may not have run validation yet
            from arelle import ValidateXbrlDimensions
            ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
        reportedFacts = set()  # facts which were shown in a higher-numbered ELR table
        reportingPeriods = set()
        nextEnd = None
        deiFact = {}
        # Seed the reporting periods from the DEI facts.
        for conceptName in ("DocumentPeriodEndDate", "DocumentType",
                            "CurrentFiscalPeriodEndDate"):
            for concept in modelXbrl.nameConcepts[conceptName]:
                for fact in modelXbrl.factsByQname(concept.qname):
                    deiFact[conceptName] = fact
                    if fact.context is not None:
                        reportingPeriods.add((None, fact.context.endDatetime))  # for instant
                        reportingPeriods.add((fact.context.startDatetime,
                                              fact.context.endDatetime))  # for startEnd
                        nextEnd = fact.context.startDatetime
                        duration = (fact.context.endDatetime -
                                    fact.context.startDatetime).days + 1
                        break
        if "DocumentType" in deiFact:
            fact = deiFact["DocumentType"]
            # NOTE(review): assumes the DocumentType fact has a string xValue;
            # the "in" test would raise on None — confirm upstream validation.
            if "-Q" in fact.xValue:  # need quarterly and yr to date durations
                endDatetime = fact.context.endDatetime
                # if within 2 days of end of month use last day of month
                endDatetimeMonth = endDatetime.month
                if (endDatetime + timedelta(2)).month != endDatetimeMonth:
                    # near end of month
                    endOfMonth = True
                    while endDatetime.month == endDatetimeMonth:
                        endDatetime += timedelta(1)  # go forward to next month
                else:
                    endOfMonth = False
                startYr = endDatetime.year
                startMo = endDatetime.month - 3
                if startMo <= 0:
                    startMo += 12
                    startYr -= 1
                startDatetime = datetime(startYr, startMo, endDatetime.day,
                                         endDatetime.hour, endDatetime.minute,
                                         endDatetime.second)
                if endOfMonth:
                    startDatetime -= timedelta(1)
                    endDatetime -= timedelta(1)
                reportingPeriods.add((startDatetime, endDatetime))
                duration = 91
        # find preceding compatible default context periods
        # (walk backwards from the document period, accepting contexts whose
        # duration is within +/-10% of the document duration)
        while (nextEnd is not None):
            thisEnd = nextEnd
            prevMaxStart = thisEnd - timedelta(duration * .9)
            prevMinStart = thisEnd - timedelta(duration * 1.1)
            nextEnd = None
            for cntx in modelXbrl.contexts.values():
                if cntx is not None:
                    if (cntx.isStartEndPeriod and not cntx.qnameDims and
                            thisEnd == cntx.endDatetime and
                            prevMinStart <= cntx.startDatetime <= prevMaxStart):
                        reportingPeriods.add((None, cntx.endDatetime))
                        reportingPeriods.add((cntx.startDatetime, cntx.endDatetime))
                        nextEnd = cntx.startDatetime
                        break
                    elif (cntx.isInstantPeriod and not cntx.qnameDims and
                          thisEnd == cntx.endDatetime):
                        reportingPeriods.add((None, cntx.endDatetime))
        stmtReportingPeriods = set(reportingPeriods)
        sortedRoleTypes.reverse()  # now in descending order
        for i, roleTypes in enumerate(sortedRoleTypes):
            roleDefinition, roleType = roleTypes
            # find defined non-default axes in pre hierarchy for table
            tableFacts = set()
            tableGroup, tableSeq, tableName = roleType._tableIndex
            roleURIdims, priItemQNames = EFMlinkRoleURIstructure(
                modelXbrl, roleType.roleURI)
            for priItemQName in priItemQNames:
                for fact in modelXbrl.factsByQname(priItemQName):
                    cntx = fact.context
                    # non-explicit dims must be default
                    if (cntx is not None and
                            all(dimQn in modelXbrl.qnameDimensionDefaults
                                for dimQn in (roleURIdims.keys() - cntx.qnameDims.keys())) and
                            all(mdlDim.memberQname in roleURIdims[dimQn]
                                for dimQn, mdlDim in cntx.qnameDims.items()
                                if dimQn in roleURIdims)):
                        # the flow-up part, drop
                        cntxStartDatetime = cntx.startDatetime
                        cntxEndDatetime = cntx.endDatetime
                        if (tableGroup != STMTS or
                                (cntxStartDatetime, cntxEndDatetime) in stmtReportingPeriods and
                                (fact not in reportedFacts or
                                 all(dimQn not in cntx.qnameDims  # unspecified dims are all defaulted if reported elsewhere
                                     for dimQn in (cntx.qnameDims.keys() - roleURIdims.keys())))):
                            tableFacts.add(fact)
                            reportedFacts.add(fact)
            roleType._tableFacts = tableFacts
            # find parent if any: the later-sorted role with the longest
            # name-prefix match becomes the parent
            closestParentType = None
            closestParentMatchLength = 0
            for _parentRoleDefinition, parentRoleType in sortedRoleTypes[i + 1:]:
                matchLen = parentNameMatchLen(tableName, parentRoleType)
                if matchLen > closestParentMatchLength:
                    closestParentMatchLength = matchLen
                    closestParentType = parentRoleType
            if closestParentType is not None:
                closestParentType._tableChildren.insert(0, roleType)
            # remove lesser-matched children if there was a parent match
            unmatchedChildRoles = set()
            longestChildMatchLen = 0
            numChildren = 0
            for childRoleType in roleType._tableChildren:
                matchLen = parentNameMatchLen(tableName, childRoleType)
                if matchLen < closestParentMatchLength:
                    unmatchedChildRoles.add(childRoleType)
                elif matchLen > longestChildMatchLen:
                    longestChildMatchLen = matchLen
                    numChildren += 1
            if numChildren > 1:
                # remove children that don't have the full match pattern length to parent
                for childRoleType in roleType._tableChildren:
                    if (childRoleType not in unmatchedChildRoles and
                            parentNameMatchLen(tableName, childRoleType) < longestChildMatchLen):
                        unmatchedChildRoles.add(childRoleType)
            for unmatchedChildRole in unmatchedChildRoles:
                roleType._tableChildren.remove(unmatchedChildRole)
            for childRoleType in roleType._tableChildren:
                childRoleType._tableParent = roleType
            unmatchedChildRoles = None  # dereference
        global UGT_TOPICS
        if UGT_TOPICS is None:
            # Lazy-load the UGT topics resource once per process.
            try:
                from arelle import FileSource
                fh = FileSource.openFileStream(
                    modelXbrl.modelManager.cntlr,
                    os.path.join(modelXbrl.modelManager.cntlr.configDir,
                                 "ugt-topics.zip/ugt-topics.json"),
                    'r', 'utf-8')
                UGT_TOPICS = json.load(fh)
                fh.close()
                for topic in UGT_TOPICS:
                    topic[6] = set(topic[6])  # change concept abstracts list into concept abstracts set
                    topic[7] = set(topic[7])  # change concept text blocks list into concept text blocks set
                    topic[8] = set(topic[8])  # change concept names list into concept names set
            except Exception as ex:
                # Best-effort: topic matching is simply skipped when the
                # resource cannot be loaded.
                UGT_TOPICS = None
        if UGT_TOPICS is not None:
            def roleUgtConcepts(roleType):
                # Collect all concept names in this role's presentation tree,
                # recursing through its child tables.
                roleConcepts = set()
                for rel in modelXbrl.relationshipSet(
                        XbrlConst.parentChild, roleType.roleURI).modelRelationships:
                    if rel.toModelObject is not None:
                        roleConcepts.add(rel.toModelObject.name)
                    if rel.fromModelObject is not None:
                        roleConcepts.add(rel.fromModelObject.name)
                if hasattr(roleType, "_tableChildren"):
                    for _tableChild in roleType._tableChildren:
                        roleConcepts |= roleUgtConcepts(_tableChild)
                return roleConcepts
            topicMatches = {}  # topicNum: (best score, roleType)
            for roleDefinition, roleType in sortedRoleTypes:
                roleTopicType = 'S' if roleDefinition.startswith('S') else 'D'
                if getattr(roleType, "_tableParent", None) is None:
                    # rooted tables in reverse order
                    concepts = roleUgtConcepts(roleType)
                    for i, ugtTopic in enumerate(UGT_TOPICS):
                        if ugtTopic[0] == roleTopicType:
                            countAbstracts = len(concepts & ugtTopic[6])
                            countTextBlocks = len(concepts & ugtTopic[7])
                            countLineItems = len(concepts & ugtTopic[8])
                            if countAbstracts or countTextBlocks or countLineItems:
                                # Text blocks weigh most, then abstracts, then
                                # the fraction of matching line items.
                                _score = (10 * countAbstracts +
                                          1000 * countTextBlocks +
                                          countLineItems / len(concepts))
                                if i not in topicMatches or _score > topicMatches[i][0]:
                                    topicMatches[i] = (_score, roleType)
            for topicNum, scoredRoleType in topicMatches.items():
                _score, roleType = scoredRoleType
                if _score > getattr(roleType, "_tableTopicScore", 0):
                    ugtTopic = UGT_TOPICS[topicNum]
                    roleType._tableTopicScore = _score
                    roleType._tableTopicType = ugtTopic[0]
                    roleType._tableTopicName = ugtTopic[3]
                    roleType._tableTopicCode = ugtTopic[4]
                    # print ("Match score {:.2f} topic {} preGrp {}".format(_score, ugtTopic[3], roleType.definition))
        return firstTableLinkroleURI or firstDocumentLinkroleURI  # did build _tableIndex attributes
    return None
def run(self, options):
    """Load an XBRL document per command-line options, optionally validate,
    and emit the requested CSV views.

    Sequence: select disclosure system, configure calc/UTR validation and
    formula trace options, load the entry point, optionally run a versioning
    diff, then write the CSV outputs.  Output/IO errors are logged rather
    than propagated.
    """
    self.filename = options.filename
    filesource = FileSource.openFileSource(self.filename, self)
    # Disclosure system selection: --efm wins over --gfm when both given.
    if options.validateEFM:
        if options.gfmName:
            self.addToLog(_("both --efm and --gfm validation are requested, proceeding with --efm only"),
                          messageCode="info", file=self.filename)
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select("efm")
    elif options.gfmName:
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select(options.gfmName)
    else:
        self.modelManager.disclosureSystem.select(None)  # just load ordinary mappings
    # Calc linkbase checking: --calcDecimals wins over --calcPrecision.
    if options.calcDecimals:
        if options.calcPrecision:
            self.addToLog(_("both --calcDecimals and --calcPrecision validation are requested, proceeding with --calcDecimals only"),
                          messageCode="info", file=self.filename)
        self.modelManager.validateInferDecimals = True
        self.modelManager.validateCalcLB = True
    elif options.calcPrecision:
        self.modelManager.validateInferDecimals = False
        self.modelManager.validateCalcLB = True
    if options.utrValidate:
        self.modelManager.validateUtr = True
    # Map each formula trace command-line flag onto FormulaOptions.
    fo = FormulaOptions()
    if options.formulaParamExprResult:
        fo.traceParameterExpressionResult = True
    if options.formulaParamInputValue:
        fo.traceParameterInputValue = True
    if options.formulaCallExprSource:
        fo.traceCallExpressionSource = True
    if options.formulaCallExprCode:
        fo.traceCallExpressionCode = True
    if options.formulaCallExprEval:
        fo.traceCallExpressionEvaluation = True
    if options.formulaCallExprResult:
        fo.traceCallExpressionResult = True
    if options.formulaVarSetExprEval:
        fo.traceVariableSetExpressionEvaluation = True
    if options.formulaVarSetExprResult:
        fo.traceVariableSetExpressionResult = True
    if options.formulaAsserResultCounts:
        fo.traceAssertionResultCounts = True
    if options.formulaFormulaRules:
        fo.traceFormulaRules = True
    if options.formulaVarsOrder:
        fo.traceVariablesOrder = True
    if options.formulaVarExpressionSource:
        fo.traceVariableExpressionSource = True
    if options.formulaVarExpressionCode:
        fo.traceVariableExpressionCode = True
    if options.formulaVarExpressionEvaluation:
        fo.traceVariableExpressionEvaluation = True
    if options.formulaVarExpressionResult:
        fo.traceVariableExpressionResult = True
    if options.formulaVarFiltersResult:
        fo.traceVariableFiltersResult = True
    self.modelManager.formulaOptions = fo
    timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
    startedAt = time.time()
    modelXbrl = self.modelManager.load(filesource, _("views loading"))
    self.addToLog(format_string(self.modelManager.locale,
                                _("loaded in %.2f secs at %s"),
                                (time.time() - startedAt, timeNow)),
                  messageCode="info", file=self.filename)
    if options.diffFilename and options.versReportFilename:
        # NOTE(review): the condition tests options.diffFilename but this
        # reads self.diffFilename — confirm it is assigned elsewhere.
        diffFilesource = FileSource.FileSource(self.diffFilename, self)
        startedAt = time.time()
        modelXbrl = self.modelManager.load(diffFilesource, _("views loading"))
        self.addToLog(format_string(self.modelManager.locale,
                                    _("diff comparison DTS loaded in %.2f secs"),
                                    time.time() - startedAt),
                      messageCode="info", file=self.filename)
        startedAt = time.time()
        self.modelManager.compareDTSes(options.versReportFilename)
        self.addToLog(format_string(self.modelManager.locale,
                                    _("compared in %.2f secs"),
                                    time.time() - startedAt),
                      messageCode="info", file=self.filename)
    try:
        if options.validate:
            startedAt = time.time()
            self.modelManager.validate()
            self.addToLog(format_string(self.modelManager.locale,
                                        _("validated in %.2f secs"),
                                        time.time() - startedAt),
                          messageCode="info", file=self.filename)
            if (options.csvTestReport and
                    self.modelManager.modelXbrl.modelDocument.type in
                    (ModelDocument.Type.TESTCASESINDEX,
                     ModelDocument.Type.TESTCASE,
                     ModelDocument.Type.REGISTRY)):
                ViewCsvTests.viewTests(self.modelManager.modelXbrl,
                                       options.csvTestReport)
        if options.csvDTS:
            ViewCsvDTS.viewDTS(modelXbrl, options.csvDTS)
        if options.csvFactList:
            ViewCsvFactList.viewFacts(modelXbrl, options.csvFactList,
                                      cols=options.csvFactListCols)
        if options.csvConcepts:
            ViewCsvConcepts.viewConcepts(modelXbrl, options.csvConcepts)
        if options.csvPre:
            ViewCsvRelationshipSet.viewRelationshipSet(modelXbrl, options.csvPre, "Presentation", "http://www.xbrl.org/2003/arcrole/parent-child")
        if options.csvCal:
            ViewCsvRelationshipSet.viewRelationshipSet(modelXbrl, options.csvCal, "Calculation", "http://www.xbrl.org/2003/arcrole/summation-item")
        if options.csvDim:
            ViewCsvRelationshipSet.viewRelationshipSet(modelXbrl, options.csvDim, "Dimension", "XBRL-dimensions")
    except (IOError, EnvironmentError) as err:
        self.addToLog(_("[IOError] Failed to save output:\n {0}").format(err))
    except Exception as err:
        self.addToLog(_("[Exception] Failed to complete validation: \n{0} \n{1}").format(
            err, traceback.format_tb(sys.exc_info()[2])))
def run(self, options):
    """Load an XBRL document per options, optionally validate, emit CSV views,
    and (when --logFile is given) write accumulated messages to the log file.

    Fix: the log-file error handler concatenated a str with the exception
    object (``"..." + err``), which raises TypeError and masks the real
    error; it now converts the exception to str first.
    """
    # When a log file is requested, addToLog accumulates into self.messages.
    if options.logFile:
        self.messages = []
    else:
        self.messages = None
    self.filename = options.filename
    filesource = FileSource.FileSource(self.filename, self)
    # Disclosure system selection: --efm wins over --gfm when both given.
    if options.validateEFM:
        if options.gfmName:
            self.addToLog(
                _("[info] both --efm and --gfm validation are requested, proceeding with --efm only"))
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select("efm")
    elif options.gfmName:
        self.modelManager.validateDisclosureSystem = True
        self.modelManager.disclosureSystem.select(options.gfmName)
    else:
        self.modelManager.disclosureSystem.select(None)  # just load ordinary mappings
    # Calc linkbase checking: --calcDecimals wins over --calcPrecision.
    if options.calcDecimals:
        if options.calcPrecision:
            self.addToLog(
                _("[info] both --calcDecimals and --calcPrecision validation are requested, proceeding with --calcDecimals only"))
        self.modelManager.validateInferDecimals = True
        self.modelManager.validateCalcLB = True
    elif options.calcPrecision:
        self.modelManager.validateInferDecimals = False
        self.modelManager.validateCalcLB = True
    if options.utrValidate:
        self.modelManager.validateUtr = True
    # Map each formula trace command-line flag onto FormulaOptions.
    fo = FormulaOptions()
    if options.formulaParamExprResult:
        fo.traceParameterExpressionResult = True
    if options.formulaParamInputValue:
        fo.traceParameterInputValue = True
    if options.formulaCallExprSource:
        fo.traceCallExpressionSource = True
    if options.formulaCallExprCode:
        fo.traceCallExpressionCode = True
    if options.formulaCallExprEval:
        fo.traceCallExpressionEvaluation = True
    if options.formulaCallExprResult:
        fo.traceCallExpressionResult = True
    if options.formulaVarSetExprEval:
        fo.traceVariableSetExpressionEvaluation = True
    if options.formulaVarSetExprResult:
        fo.traceVariableSetExpressionResult = True
    if options.formulaAsserResultCounts:
        fo.traceAssertionResultCounts = True
    if options.formulaFormulaRules:
        fo.traceFormulaRules = True
    if options.formulaVarsOrder:
        fo.traceVariablesOrder = True
    if options.formulaVarExpressionSource:
        fo.traceVariableExpressionSource = True
    if options.formulaVarExpressionCode:
        fo.traceVariableExpressionCode = True
    if options.formulaVarExpressionEvaluation:
        fo.traceVariableExpressionEvaluation = True
    if options.formulaVarExpressionResult:
        fo.traceVariableExpressionResult = True
    if options.formulaVarFiltersResult:
        fo.traceVariableFiltersResult = True
    self.modelManager.formulaOptions = fo
    timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
    startedAt = time.time()
    modelXbrl = self.modelManager.load(filesource, _("views loading"))
    self.addToLog(
        format_string(self.modelManager.locale,
                      _("[info] loaded in %.2f secs at %s"),
                      (time.time() - startedAt, timeNow)))
    if options.diffFilename and options.versReportFilename:
        # NOTE(review): the condition tests options.diffFilename but this
        # reads self.diffFilename — confirm it is assigned elsewhere.
        diffFilesource = FileSource.FileSource(self.diffFilename, self)
        startedAt = time.time()
        modelXbrl = self.modelManager.load(diffFilesource, _("views loading"))
        self.addToLog(
            format_string(self.modelManager.locale,
                          _("[info] diff comparison DTS loaded in %.2f secs"),
                          time.time() - startedAt))
        startedAt = time.time()
        self.modelManager.compareDTSes(options.versReportFilename)
        self.addToLog(
            format_string(self.modelManager.locale,
                          _("[info] compared in %.2f secs"),
                          time.time() - startedAt))
    try:
        if options.validate:
            startedAt = time.time()
            self.modelManager.validate()
            self.addToLog(
                format_string(self.modelManager.locale,
                              _("[info] validated in %.2f secs"),
                              time.time() - startedAt))
        if (options.csvTestReport and
                self.modelManager.modelXbrl.modelDocument.type in
                (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.REGISTRY)):
            ViewCsvTests.viewTests(self.modelManager.modelXbrl,
                                   options.csvTestReport)
        if options.csvDTS:
            ViewCsvDTS.viewDTS(modelXbrl, options.csvDTS)
        if options.csvFactList:
            ViewCsvFactList.viewFacts(modelXbrl, options.csvFactList)
        if options.csvConcepts:
            ViewCsvConcepts.viewConcepts(modelXbrl, options.csvConcepts)
        if options.csvPre:
            ViewCsvRelationshipSet.viewRelationshipSet(
                modelXbrl, options.csvPre, "Presentation",
                "http://www.xbrl.org/2003/arcrole/parent-child")
        if options.csvCal:
            ViewCsvRelationshipSet.viewRelationshipSet(
                modelXbrl, options.csvCal, "Calculation",
                "http://www.xbrl.org/2003/arcrole/summation-item")
        if options.csvDim:
            ViewCsvRelationshipSet.viewRelationshipSet(
                modelXbrl, options.csvDim, "Dimension", "XBRL-dimensions")
    except (IOError, EnvironmentError) as err:
        self.addToLog(
            _("[IOError] Failed to save output:\n {0}").format(err))
    if self.messages:
        try:
            with open(options.logFile, "w", encoding="utf-8") as fh:
                fh.writelines(self.messages)
        except (IOError, EnvironmentError) as err:
            # was: "..." + err — str + Exception raises TypeError
            print("Unable to save log to file: " + str(err))
def func_csv_data(xule_context, *args):
    """Read a csv file/url.

    Arguments:
        file_url (string or url)
        has_header (boolean) - determines if the first line of the csv file has headers
        type list (list) - list of xule types in the order of the columns of the
            csv file. This is optional. If not provided, then all the data will be
            treated as strings.
        as_dictionary (boolean) - return the row as a dictionary instead of a
            list. This is optional.

    Returns a XuleValue list of rows; each row is a list (or a dictionary keyed
    by header name when as_dictionary is true).

    Fixes: the header-count overflow raised ``xule_context`` (not an exception
    class) and referenced the undefined name ``mappedUrl`` — it now raises
    XuleProcessingError with the mapped-url text; the max-argument message said
    3 instead of 4; several message typos corrected; the FileSource is closed
    after reading.
    """
    if len(args) < 2:
        raise XuleProcessingError(
            _("The csv-data() function requires at least 2 arguments (file url, has headers), found {} arguments."
              .format(len(args))), xule_context)
    if len(args) > 4:
        # message fixed: the limit checked and the argument list are 4, not 3
        raise XuleProcessingError(
            _("The csv-data() function takes no more than 4 arguments (file url, has headers, column types, as dictionary), found {} arguments."
              .format(len(args))), xule_context)

    file_url = args[0]
    has_headers = args[1]

    if file_url.type not in ('string', 'uri'):
        raise XuleProcessingError(
            _("The file url argument (1st argument) of the csv-data() function must be a string or uri, found '{}'."
              .format(file_url.value)), xule_context)
    if has_headers.type != 'bool':
        raise XuleProcessingError(
            _("The has headers argument (2nd argument) of the csv-data() function must be a boolean, found '{}'."
              .format(has_headers.type)), xule_context)

    # Optional 3rd argument: per-column xule type names.
    if len(args) >= 3:
        column_types = args[2]
        if column_types.type == 'none':
            ordered_cols = None
        elif column_types.type == 'list':
            ordered_cols = list()
            for col in column_types.value:
                if col.type != 'string':
                    raise XuleProcessingError(
                        _("The type list argument (3rd argument) of the csv-data() function must be a list of strings, found '{}'."
                          .format(col.type)), xule_context)
                ordered_cols.append(col.value)
        else:
            raise XuleProcessingError(
                _("The type list argument (3rd argument) of the csv-data() function must be list, found '{}'."
                  .format(column_types.type)), xule_context)
    else:
        ordered_cols = None

    # Optional 4th argument: return rows as dictionaries instead of lists.
    if len(args) == 4:
        if args[3].type != 'bool':
            raise XuleProcessingError(
                _("The as dictionary argument (4th argument) of the csv-data() function must be a boolean, found '{}'."
                  .format(args[3].type)), xule_context)
        return_row_type = 'dictionary' if args[3].value else 'list'
    else:
        return_row_type = 'list'

    if return_row_type == 'dictionary' and not has_headers.value:
        raise XuleProcessingError(
            _("When the csv-data() function is returning the rows as dictionaries (4th argument), the has headers argument (2nd argument) must be true."),
            xule_context)

    result = list()
    result_shadow = list()

    from arelle import PackageManager
    mapped_file_url = PackageManager.mappedUrl(file_url.value)

    # Using the FileSource object in arelle. This will open the file and handle
    # taxonomy package mappings.
    from arelle import FileSource
    file_source = FileSource.openFileSource(file_url.value,
                                            xule_context.global_context.cntlr)
    file = file_source.file(file_url.value, binary=True)
    # file is a tuple of one item as a BytesIO stream. Since this is in bytes, it
    # needs to be converted to text via a decoder. Assuming the file is in utf-8.
    data_source = [x.decode('utf-8') for x in file[0].readlines()]
    file_source.close()  # release the underlying file/archive handle

    import csv
    reader = csv.reader(data_source)
    first_line = True
    row_num = 0
    for line in reader:
        row_num += 1
        if first_line and has_headers.value:
            first_line = False  # skip the headers line
            if return_row_type == 'dictionary':
                # Need to get the names from the first row
                column_names = [x for x in line]
                if len(column_names) != len(set(column_names)):
                    raise XuleProcessingError(
                        _("There are duplicate column names in the csv file. This is not allowed when return rows as dictionaries. File: {}"
                          .format(file_url.value)), xule_context)
            continue

        if return_row_type == 'list':
            result_line = list()
            result_line_shadow = list()
        else:  # dictionary
            result_line = dict()
            result_line_shadow = dict()

        for col_num, item in enumerate(line):
            if ordered_cols is not None and col_num >= len(ordered_cols):
                raise XuleProcessingError(
                    _("The number of columns on row {} is greater than the number of column types provided in the third argument of the csv-data() function. File: {}"
                      .format(row_num, file_url.value)), xule_context)
            item_value = convert_file_data_item(
                item,
                ordered_cols[col_num] if ordered_cols is not None else None,
                xule_context)
            if return_row_type == 'list':
                result_line.append(item_value)
                result_line_shadow.append(item_value.value)
            else:  # dictionary
                if col_num >= len(column_names):
                    # was: raise xule_context(...) — xule_context is not an
                    # exception class, so the real error was masked
                    raise XuleProcessingError(
                        _("The number of columns on row {} is greater than the number of headers in the csv file. File: {}"
                          .format(row_num,
                                  file_url.value if mapped_file_url == file_url.value
                                  else file_url.value + ' --> ' + mapped_file_url)),
                        xule_context)
                result_line[xv.XuleValue(xule_context, column_names[col_num],
                                         'string')] = item_value
                result_line_shadow[column_names[col_num]] = item_value.value

        if return_row_type == 'list':
            result.append(
                xv.XuleValue(xule_context, tuple(result_line), 'list',
                             shadow_collection=tuple(result_line_shadow)))
            result_shadow.append(tuple(result_line_shadow))
        else:  # dictionary
            result.append(
                xv.XuleValue(xule_context, frozenset(result_line.items()),
                             'dictionary',
                             shadow_collection=frozenset(result_line_shadow.items())))
            result_shadow.append(frozenset(result_line_shadow.items()))

    return xv.XuleValue(xule_context, tuple(result), 'list',
                        shadow_collection=tuple(result_shadow))
def run(self, options, sourceZipStream=None): """Process command line arguments or web service request, such as to load and validate an XBRL document, or start web server. When a web server has been requested, this method may be called multiple times, once for each web service (REST) request that requires processing. Otherwise (when called for a command line request) this method is called only once for the command line arguments request. :param options: OptionParser options from parse_args of main argv arguments (when called from command line) or corresponding arguments from web service (REST) request. :type options: optparse.Values """ if options.showOptions: # debug options for optName, optValue in sorted(options.__dict__.items(), key=lambda optItem: optItem[0]): self.addToLog("Option {0}={1}".format(optName, optValue), messageCode="info") self.addToLog("sys.argv {0}".format(sys.argv), messageCode="info") if options.uiLang: # set current UI Lang (but not config setting) self.setUiLanguage(options.uiLang) if options.proxy: if options.proxy != "show": proxySettings = proxyTuple(options.proxy) self.webCache.resetProxies(proxySettings) self.config["proxySettings"] = proxySettings self.saveConfig() self.addToLog(_("Proxy configuration has been set."), messageCode="info") useOsProxy, urlAddr, urlPort, user, password = self.config.get("proxySettings", proxyTuple("none")) if useOsProxy: self.addToLog(_("Proxy configured to use {0}.").format( _('Microsoft Windows Internet Settings') if sys.platform.startswith("win") else (_('Mac OS X System Configuration') if sys.platform in ("darwin", "macos") else _('environment variables'))), messageCode="info") elif urlAddr: self.addToLog(_("Proxy setting: http://{0}{1}{2}{3}{4}").format( user if user else "", ":****" if password else "", "@" if (user or password) else "", urlAddr, ":{0}".format(urlPort) if urlPort else ""), messageCode="info") else: self.addToLog(_("Proxy is disabled."), messageCode="info") if options.plugins: from 
arelle import PluginManager resetPlugins = False savePluginChanges = True showPluginModules = False for pluginCmd in options.plugins.split('|'): cmd = pluginCmd.strip() if cmd == "show": showPluginModules = True elif cmd == "temp": savePluginChanges = False elif cmd.startswith("+"): moduleInfo = PluginManager.addPluginModule(cmd[1:]) if moduleInfo: self.addToLog(_("Addition of plug-in {0} successful.").format(moduleInfo.get("name")), messageCode="info", file=moduleInfo.get("moduleURL")) resetPlugins = True else: self.addToLog(_("Unable to load plug-in."), messageCode="info", file=cmd[1:]) elif cmd.startswith("~"): if PluginManager.reloadPluginModule(cmd[1:]): self.addToLog(_("Reload of plug-in successful."), messageCode="info", file=cmd[1:]) resetPlugins = True else: self.addToLog(_("Unable to reload plug-in."), messageCode="info", file=cmd[1:]) elif cmd.startswith("-"): if PluginManager.removePluginModule(cmd[1:]): self.addToLog(_("Deletion of plug-in successful."), messageCode="info", file=cmd[1:]) resetPlugins = True else: self.addToLog(_("Unable to delete plug-in."), messageCode="info", file=cmd[1:]) else: # assume it is a module or package savePluginChanges = False moduleInfo = PluginManager.addPluginModule(cmd) if moduleInfo: self.addToLog(_("Activation of plug-in {0} successful.").format(moduleInfo.get("name")), messageCode="info", file=moduleInfo.get("moduleURL")) resetPlugins = True else: self.addToLog(_("Unable to load {0} as a plug-in or {0} is not recognized as a command. 
").format(cmd), messageCode="info", file=cmd) if resetPlugins: PluginManager.reset() if savePluginChanges: PluginManager.save(self) if showPluginModules: self.addToLog(_("Plug-in modules:"), messageCode="info") for i, moduleItem in enumerate(sorted(PluginManager.pluginConfig.get("modules", {}).items())): moduleInfo = moduleItem[1] self.addToLog(_("Plug-in: {0}; author: {1}; version: {2}; status: {3}; date: {4}; description: {5}; license {6}.").format( moduleItem[0], moduleInfo.get("author"), moduleInfo.get("version"), moduleInfo.get("status"), moduleInfo.get("fileDate"), moduleInfo.get("description"), moduleInfo.get("license")), messageCode="info", file=moduleInfo.get("moduleURL")) # run utility command line options that don't depend on entrypoint Files hasUtilityPlugin = False for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Utility.Run"): hasUtilityPlugin = True pluginXbrlMethod(self, options) # if no entrypointFile is applicable, quit now if options.proxy or options.plugins or hasUtilityPlugin: if not options.entrypointFile: return True # success self.username = options.username self.password = options.password self.entrypointFile = options.entrypointFile if self.entrypointFile: filesource = FileSource.openFileSource(self.entrypointFile, self, sourceZipStream) else: filesource = None if options.validateEFM: if options.disclosureSystemName: self.addToLog(_("both --efm and --disclosureSystem validation are requested, proceeding with --efm only"), messageCode="info", file=self.entrypointFile) self.modelManager.validateDisclosureSystem = True self.modelManager.disclosureSystem.select("efm") elif options.disclosureSystemName: self.modelManager.validateDisclosureSystem = True self.modelManager.disclosureSystem.select(options.disclosureSystemName) elif options.validateHMRC: self.modelManager.validateDisclosureSystem = True self.modelManager.disclosureSystem.select("hmrc") else: self.modelManager.disclosureSystem.select(None) # just load ordinary mappings 
self.modelManager.validateDisclosureSystem = False if options.utrUrl: # override disclosureSystem utrUrl self.modelManager.disclosureSystem.utrUrl = options.utrUrl # can be set now because the utr is first loaded at validation time # disclosure system sets logging filters, override disclosure filters, if specified by command line if options.logLevelFilter: self.setLogLevelFilter(options.logLevelFilter) if options.logCodeFilter: self.setLogCodeFilter(options.logCodeFilter) if options.calcDecimals: if options.calcPrecision: self.addToLog(_("both --calcDecimals and --calcPrecision validation are requested, proceeding with --calcDecimals only"), messageCode="info", file=self.entrypointFile) self.modelManager.validateInferDecimals = True self.modelManager.validateCalcLB = True elif options.calcPrecision: self.modelManager.validateInferDecimals = False self.modelManager.validateCalcLB = True if options.utrValidate: self.modelManager.validateUtr = True if options.infosetValidate: self.modelManager.validateInfoset = True if options.abortOnMajorError: self.modelManager.abortOnMajorError = True if options.collectProfileStats: self.modelManager.collectProfileStats = True if options.internetConnectivity == "offline": self.webCache.workOffline = True elif options.internetConnectivity == "online": self.webCache.workOffline = False if options.internetTimeout is not None: self.webCache.timeout = (options.internetTimeout or None) # use None if zero specified to disable timeout fo = FormulaOptions() if options.parameters: parameterSeparator = (options.parameterSeparator or ',') fo.parameterValues = dict(((qname(key, noPrefixIsNoNamespace=True),(None,value)) for param in options.parameters.split(parameterSeparator) for key,sep,value in (param.partition('='),) ) ) if options.formulaParamExprResult: fo.traceParameterExpressionResult = True if options.formulaParamInputValue: fo.traceParameterInputValue = True if options.formulaCallExprSource: fo.traceCallExpressionSource = True if 
options.formulaCallExprCode: fo.traceCallExpressionCode = True if options.formulaCallExprEval: fo.traceCallExpressionEvaluation = True if options.formulaCallExprResult: fo.traceCallExpressionResult = True if options.formulaVarSetExprEval: fo.traceVariableSetExpressionEvaluation = True if options.formulaVarSetExprResult: fo.traceVariableSetExpressionResult = True if options.formulaAsserResultCounts: fo.traceAssertionResultCounts = True if options.formulaFormulaRules: fo.traceFormulaRules = True if options.formulaVarsOrder: fo.traceVariablesOrder = True if options.formulaVarExpressionSource: fo.traceVariableExpressionSource = True if options.formulaVarExpressionCode: fo.traceVariableExpressionCode = True if options.formulaVarExpressionEvaluation: fo.traceVariableExpressionEvaluation = True if options.formulaVarExpressionResult: fo.traceVariableExpressionResult = True if options.timeVariableSetEvaluation: fo.timeVariableSetEvaluation = True if options.formulaVarFilterWinnowing: fo.traceVariableFilterWinnowing = True if options.formulaVarFiltersResult: fo.traceVariableFiltersResult = True if options.formulaVarFiltersResult: fo.traceVariableFiltersResult = True self.modelManager.formulaOptions = fo timeNow = XmlUtil.dateunionValue(datetime.datetime.now()) firstStartedAt = startedAt = time.time() modelDiffReport = None success = True modelXbrl = None try: if filesource: modelXbrl = self.modelManager.load(filesource, _("views loading")) except ModelDocument.LoadingException: pass except Exception as err: self.addToLog(_("[Exception] Failed to complete request: \n{0} \n{1}").format( err, traceback.format_tb(sys.exc_info()[2]))) success = False # loading errors, don't attempt to utilize loaded DTS if modelXbrl and modelXbrl.modelDocument: loadTime = time.time() - startedAt modelXbrl.profileStat(_("load"), loadTime) self.addToLog(format_string(self.modelManager.locale, _("loaded in %.2f secs at %s"), (loadTime, timeNow)), messageCode="info", file=self.entrypointFile) if 
options.importFiles: for importFile in options.importFiles.split("|"): fileName = importFile.strip() if sourceZipStream is not None and not (fileName.startswith('http://') or os.path.isabs(fileName)): fileName = os.path.dirname(modelXbrl.uri) + os.sep + fileName # make relative to sourceZipStream ModelDocument.load(modelXbrl, fileName) loadTime = time.time() - startedAt self.addToLog(format_string(self.modelManager.locale, _("import in %.2f secs at %s"), (loadTime, timeNow)), messageCode="info", file=importFile) modelXbrl.profileStat(_("import"), loadTime) if modelXbrl.errors: success = False # loading errors, don't attempt to utilize loaded DTS if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES: for pluginXbrlMethod in pluginClassMethods("Testcases.Start"): pluginXbrlMethod(self, options, modelXbrl) else: # not a test case, probably instance or DTS for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Xbrl.Loaded"): pluginXbrlMethod(self, options, modelXbrl) else: success = False if success and options.diffFile and options.versReportFile: try: diffFilesource = FileSource.FileSource(options.diffFile,self) startedAt = time.time() modelXbrl2 = self.modelManager.load(diffFilesource, _("views loading")) if modelXbrl2.errors: if not options.keepOpen: modelXbrl2.close() success = False else: loadTime = time.time() - startedAt modelXbrl.profileStat(_("load"), loadTime) self.addToLog(format_string(self.modelManager.locale, _("diff comparison DTS loaded in %.2f secs"), loadTime), messageCode="info", file=self.entrypointFile) startedAt = time.time() modelDiffReport = self.modelManager.compareDTSes(options.versReportFile) diffTime = time.time() - startedAt modelXbrl.profileStat(_("diff"), diffTime) self.addToLog(format_string(self.modelManager.locale, _("compared in %.2f secs"), diffTime), messageCode="info", file=self.entrypointFile) except ModelDocument.LoadingException: success = False except Exception as err: success = False 
self.addToLog(_("[Exception] Failed to doad diff file: \n{0} \n{1}").format( err, traceback.format_tb(sys.exc_info()[2]))) if success: try: modelXbrl = self.modelManager.modelXbrl hasFormulae = modelXbrl.hasFormulae if options.validate: startedAt = time.time() if options.formulaAction: # don't automatically run formulas modelXbrl.hasFormulae = False self.modelManager.validate() if options.formulaAction: # restore setting modelXbrl.hasFormulae = hasFormulae self.addToLog(format_string(self.modelManager.locale, _("validated in %.2f secs"), time.time() - startedAt), messageCode="info", file=self.entrypointFile) if options.formulaAction in ("validate", "run"): # do nothing here if "none" from arelle import ValidateXbrlDimensions, ValidateFormula startedAt = time.time() if not options.validate: ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl) # setup fresh parameters from formula optoins modelXbrl.parameters = fo.typedParameters() ValidateFormula.validate(modelXbrl, compileOnly=(options.formulaAction != "run")) self.addToLog(format_string(self.modelManager.locale, _("formula validation and execution in %.2f secs") if options.formulaAction == "run" else _("formula validation only in %.2f secs"), time.time() - startedAt), messageCode="info", file=self.entrypointFile) if options.testReport: ViewFileTests.viewTests(self.modelManager.modelXbrl, options.testReport, options.testReportCols) if options.rssReport: ViewFileRssFeed.viewRssFeed(self.modelManager.modelXbrl, options.rssReport, options.rssReportCols) if options.DTSFile: ViewFileDTS.viewDTS(modelXbrl, options.DTSFile) if options.factsFile: ViewFileFactList.viewFacts(modelXbrl, options.factsFile, labelrole=options.labelRole, lang=options.labelLang, cols=options.factListCols) if options.factTableFile: ViewFileFactTable.viewFacts(modelXbrl, options.factTableFile, labelrole=options.labelRole, lang=options.labelLang) if options.conceptsFile: ViewFileConcepts.viewConcepts(modelXbrl, options.conceptsFile, 
labelrole=options.labelRole, lang=options.labelLang) if options.preFile: ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.preFile, "Presentation Linkbase", "http://www.xbrl.org/2003/arcrole/parent-child", labelrole=options.labelRole, lang=options.labelLang) if options.calFile: ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.calFile, "Calculation Linkbase", "http://www.xbrl.org/2003/arcrole/summation-item", labelrole=options.labelRole, lang=options.labelLang) if options.dimFile: ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.dimFile, "Dimensions", "XBRL-dimensions", labelrole=options.labelRole, lang=options.labelLang) if options.formulaeFile: ViewFileFormulae.viewFormulae(modelXbrl, options.formulaeFile, "Formulae", lang=options.labelLang) if options.viewArcrole and options.viewFile: ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.viewFile, os.path.basename(options.viewArcrole), options.viewArcrole, labelrole=options.labelRole, lang=options.labelLang) for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Xbrl.Run"): pluginXbrlMethod(self, options, modelXbrl) except (IOError, EnvironmentError) as err: self.addToLog(_("[IOError] Failed to save output:\n {0}").format(err)) success = False except Exception as err: self.addToLog(_("[Exception] Failed to complete request: \n{0} \n{1}").format( err, traceback.format_tb(sys.exc_info()[2]))) success = False if modelXbrl: modelXbrl.profileStat(_("total"), time.time() - firstStartedAt) if options.collectProfileStats and modelXbrl: modelXbrl.logProfileStats() if not options.keepOpen: if modelDiffReport: self.modelManager.close(modelDiffReport) elif modelXbrl: self.modelManager.close(modelXbrl) self.username = self.password = None #dereference password return success
def func_csv_data(xule_context, *args):
    """Read a csv file/url and return its rows as a xule list.

    Arguments:
        file_url (string or uri) - location of the csv file
        has_headers (boolean) - determines if the first line of the csv file has headers
        type list (list) - list of xule type names in the order of the columns of the
            csv file. Optional; if not provided, all the data is treated as strings.
        as_dictionary (boolean) - return each row as a dictionary (keyed by header name)
            instead of a list. Optional; requires has_headers to be true.

    Returns a XuleValue of type 'list' whose items are the converted rows.
    Raises XuleProcessingError on bad arguments or malformed csv content.
    """
    # Validate the argument count BEFORE indexing into args. (The original read
    # args[0]/args[1] first, so a short call raised IndexError instead of the
    # intended processing error.)
    if len(args) < 2:
        raise XuleProcessingError(_("The csv-data() function requires at least 2 arguments (file url, has headers), found {} arguments.".format(len(args))), xule_context)
    if len(args) > 4:
        raise XuleProcessingError(_("The csv-data() function takes no more than 4 arguments (file url, has headers, column types, as dictionary), found {} arguments.".format(len(args))), xule_context)

    file_url = args[0]
    has_headers = args[1]

    if file_url.type not in ('string', 'uri'):
        raise XuleProcessingError(_("The file url argument (1st argument) of the csv-data() function must be a string or uri, found '{}'.".format(file_url.value)), xule_context)
    if has_headers.type != 'bool':
        raise XuleProcessingError(_("The has headers argument (2nd argument) of the csv-data() function must be a boolean, found '{}'.".format(has_headers.type)), xule_context)

    # Optional 3rd argument: per-column xule type names used to convert each cell.
    if len(args) >= 3:
        column_types = args[2]
        if column_types.type == 'none':
            ordered_cols = None
        elif column_types.type == 'list':
            ordered_cols = list()
            for col in column_types.value:
                if col.type != 'string':
                    raise XuleProcessingError(_("The type list argument (3rd argument) of the csv-data() function must be a list of strings, found '{}'.".format(col.type)), xule_context)
                ordered_cols.append(col.value)
        else:
            raise XuleProcessingError(_("The type list argument (3rd argument) of the csv-data() function must be list, found '{}'.".format(column_types.type)), xule_context)
    else:
        ordered_cols = None

    # Optional 4th argument: return rows as dictionaries keyed by header name.
    if len(args) == 4:
        if args[3].type != 'bool':
            raise XuleProcessingError(_("The as dictionary argument (4th argument) of the csv-data() function must be a boolean, found '{}'.".format(args[3].type)), xule_context)
        return_row_type = 'dictionary' if args[3].value else 'list'
    else:
        return_row_type = 'list'

    if return_row_type == 'dictionary' and not has_headers.value:
        raise XuleProcessingError(_("When the csv-data() function is returning the rows as dictionaries (4th argument), the has headers argument (2nd argument) must be true."), xule_context)

    result = list()
    result_shadow = list()

    from arelle import PackageManager
    mapped_file_url = PackageManager.mappedUrl(file_url.value)

    # Use Arelle's FileSource so taxonomy package mappings are honored when
    # opening the file (local path or url).
    from arelle import FileSource
    file_source = FileSource.openFileSource(file_url.value, xule_context.global_context.cntlr)
    file = file_source.file(file_url.value, binary=True)
    # file is a tuple of one item as a BytesIO stream. Since this is in bytes,
    # it needs to be decoded to text. Assuming the file is in utf-8.
    data_source = [x.decode('utf-8') for x in file[0].readlines()]

    import csv
    reader = csv.reader(data_source)
    first_line = True
    row_num = 0
    column_names = None  # bound when a header row is read in dictionary mode
    for line in reader:
        row_num += 1
        if first_line and has_headers.value:
            first_line = False  # skip the headers line
            if return_row_type == 'dictionary':
                # Capture the header names from the first row.
                column_names = [x for x in line]
                if len(column_names) != len(set(column_names)):
                    raise XuleProcessingError(_("There are duplicate column names in the csv file. This is not allowed when return rows as dictionaries. File: {}".format(file_url.value)), xule_context)
            continue
        if return_row_type == 'list':
            result_line = list()
            result_line_shadow = list()
        else:  # dictionary
            result_line = dict()
            result_line_shadow = dict()
        for col_num, item in enumerate(line):
            if ordered_cols is not None and col_num >= len(ordered_cols):
                raise XuleProcessingError(_("The number of columns on row {} is greater than the number of column types provided in the third argument of the csv-data() function. File: {}".format(row_num, file_url.value)), xule_context)
            item_value = convert_file_data_item(item,
                                                ordered_cols[col_num] if ordered_cols is not None else None,
                                                xule_context)
            if return_row_type == 'list':
                result_line.append(item_value)
                result_line_shadow.append(item_value.value)
            else:  # dictionary
                if col_num >= len(column_names):
                    # Fix: the original raised by calling xule_context(...) and
                    # referenced an undefined name (mappedUrl). Raise the
                    # processing error with the resolved file name instead.
                    raise XuleProcessingError(_("The number of columns on row {} is greater than the number of headers in the csv file. File: {}".format(row_num, file_url.value if mapped_file_url == file_url.value else file_url.value + ' --> ' + mapped_file_url)), xule_context)
                result_line[xv.XuleValue(xule_context, column_names[col_num], 'string')] = item_value
                result_line_shadow[column_names[col_num]] = item_value.value
        if return_row_type == 'list':
            result.append(xv.XuleValue(xule_context, tuple(result_line), 'list',
                                       shadow_collection=tuple(result_line_shadow)))
            result_shadow.append(result_line_shadow)
        else:  # dictionary
            result.append(xv.XuleValue(xule_context, frozenset(result_line.items()), 'dictionary',
                                       shadow_collection=frozenset(result_line_shadow.items())))
            result_shadow.append(frozenset(result_line_shadow.items()))

    return xv.XuleValue(xule_context, tuple(result), 'list', shadow_collection=tuple(result_shadow))
def tdNetLoader(modelXbrl, mappedUri, filepath, **kwargs):
    """Custom document loader for TDnet disclosure index pages.

    Scrapes a TDnet index html page (https://www.release.tdnet.info/inbs/I_*.html)
    and returns a ModelRssObject whose rssItems are TDnetItem entries, one per
    instance document found inside each filing's zip. Returns None when the uri
    does not match, the page can't be parsed, or no date heading is found, so
    Arelle falls back to its ordinary loader.
    """
    if not (mappedUri.startswith("https://www.release.tdnet.info/inbs/I_")
            and mappedUri.endswith(".html")):
        return None # not a td net info file
    rssObject = ModelRssObject(modelXbrl, uri=mappedUri, filepath=filepath)
    hasMoreSections = True
    # Paginated index: each iteration parses one screen; filepath/mappedUri are
    # re-pointed at the next screen's cached file at the bottom of the loop.
    while hasMoreSections: # treat tdnet as an RSS feed object
        try:
            tdInfoDoc = html.parse(filepath)
        except (IOError, EnvironmentError):
            return None # give up, use ordinary loader
        # find date: scan document text preceding the first <table> for a date heading
        date = None
        for elt in tdInfoDoc.iter():
            if elt.tag == "table":
                break # no date portion, probably wrong document
            if elt.text and datePattern.match(elt.text):
                g = datePattern.match(elt.text).groups()
                date = datetime.date(int(g[0]), int(g[1]), int(g[2]))
                break
        if not date:
            return None # give up, not a TDnet index document
        urlDir = os.path.dirname(mappedUri)
        # find <table> with <a>Download in it (the filings listing table)
        for tableElt in tdInfoDoc.iter(tag="table"):
            useThisTableElt = False
            for aElt in tableElt.iterdescendants(tag="a"):
                if "download" in aElt.text.lower():
                    useThisTableElt = True
                    break
            if useThisTableElt:
                # cols maps both column index -> field name and field name -> index,
                # built from the header row (class="tableh").
                cols = {}
                for trElt in tableElt.iter(tag="tr"):
                    col = 0
                    rowData = {}
                    for tdElt in trElt.iter(tag="td"):
                        text = ''.join(t.strip() for t in tdElt.itertext())
                        if tdElt.get("class") == "tableh": #header
                            # Japanese header captions mapped to field names:
                            # time, code, companyName, title, zipUrl, stockExchange, changeLog
                            type = {
                                "時刻": "time",
                                "コード": "code",
                                "会社名": "companyName",
                                "表題": "title",
                                "XBRL": "zipUrl",
                                "上場取引所": "stockExchange",
                                "更新履歴": "changeLog"
                            }.get(text, None)
                            if type:
                                cols[col] = type
                                cols[type] = col
                        elif col == cols["title"]:
                            # title cell also carries the pdf link
                            rowData["title"] = text
                            rowData["pdfUrl"] = descendantAttr(
                                tdElt, "a", "href")
                        elif col == cols["zipUrl"]:
                            rowData["zipUrl"] = descendantAttr(
                                tdElt, "a", "href")
                        elif col in cols: # body
                            rowData[cols[col]] = text
                        # advance by colspan so multi-column cells keep alignment
                        col += int(tdElt.get("colspan", 1))
                    if rowData:
                        time = rowData.get("time", "")
                        if timePattern.match(time):
                            g = timePattern.match(time).groups()
                            dateTime = datetime.datetime(
                                date.year, date.month, date.day,
                                int(g[0]), int(g[1]))
                        else:
                            dateTime = datetime.datetime.now()
                        filingCode = rowData.get("code")
                        companyName = rowData.get("companyName")
                        stockExchange = rowData.get("stockExchange")
                        title = rowData.get("title")
                        pdfUrl = rowData.get("pdfUrl")
                        if pdfUrl:
                            pdfUrl = urlDir + "/" + pdfUrl
                        zipUrl = rowData.get("zipUrl")
                        if zipUrl:
                            zipUrl = urlDir + "/" + zipUrl
                        changeLog = rowData.get("changeLog")
                        # find instance doc in file: open the filing zip from the
                        # web cache and collect likely instance documents
                        instanceUrls = []
                        if zipUrl:
                            try:
                                normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(
                                    zipUrl)
                                filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(
                                    normalizedUri)
                                filesource = FileSource.FileSource(filepath)
                                dir = filesource.dir
                                filesource.close()
                                if dir:
                                    for file in dir:
                                        if "ixbrl" in file or file.endswith(
                                                ".xbrl") or "instance" in file:
                                            instanceUrls.append(zipUrl + "/" + file)
                            except:
                                continue # forget this filing
                        for instanceUrl in instanceUrls:
                            rssObject.rssItems.append(
                                TDnetItem(modelXbrl, date, dateTime,
                                          filingCode, companyName, title,
                                          pdfUrl, instanceUrl, stockExchange))
        # next screen if continuation: look for the 次画面 ("next screen") button
        # and re-point filepath/mappedUri at the next index page via the web cache
        hasMoreSections = False
        for elt in tdInfoDoc.iter(tag="input"):
            if elt.value == "次画面": # next screen button
                nextLocation = elt.get("onclick")
                if nextLocation and nextLocationPattern.match(nextLocation):
                    hasMoreSections = True
                    nextUrl = urlDir + "/" + nextLocationPattern.match(
                        nextLocation).groups()[0]
                    mappedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(
                        nextUrl)
                    filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(
                        mappedUri)
    return rssObject