Example #1
0
 def outRtnReferenceDict(self):
     """Dump the routine reference dictionary to Routine-Ref.json.

     Writes only when self._rtnRefDict is non-empty; the file is
     created under self.outdir.
     """
     # The explanatory string used to sit inside the `if` body where it
     # was a no-op statement; it is now a proper method docstring.
     if len(self._rtnRefDict):
         with open(os.path.join(self.outdir, "Routine-Ref.json"),
                   'w') as output:
             logger.info("Generate File: %s" % output.name)
             json.dump(self._rtnRefDict, output)
示例#2
0
    def generateRoutineCallGraph(self, isCalled=True):
        """Generate call (or caller) graphs for every routine.

        Collects all routines from all packages -- expanding
        platform-generic routines into their platform-dependent variants
        when building dependency graphs -- plus every other package
        component, then renders the graphs on a small thread pool.
        """
        logger.info("Start Routine generating call graph......")
        self._isDependency = isCalled

        # Collect every routine to be processed.
        worklist = []
        for pkg in self._allPackages.itervalues():
            for rtn in pkg.getAllRoutines().itervalues():
                genericRoutine = self._crossRef.isPlatformGenericRoutineByName(rtn.getName())
                if self._isDependency and genericRoutine:
                    # Substitute the concrete platform-dependent routines.
                    for info in rtn.getAllPlatformDepRoutines().itervalues():
                        worklist.append(info[0])
                else:
                    worklist.append(rtn)

        # Add other package components too
        # TODO: This logic is copied from
        #       WebPageGenerator::generatePackageInformationPages(),
        #       could be improved in both places
        for componentKey in PACKAGE_COMPONENT_MAP:
            for pkg in self._allPackages.itervalues():
                worklist.extend(pkg.getAllPackageComponents(componentKey).itervalues())

        # Render each graph on a worker thread, then wait for completion.
        pool = ThreadPool(4)
        pool.map(self._generateRoutineDependencyGraph, worklist)
        pool.close()
        pool.join()

        logger.info("End of generating call graph......")
示例#3
0
    def generateRoutineCallGraph(self, isCalled=True):
        """Generate call (or caller) graphs for all routines.

        Builds a flat work list of routines (platform-generic routines
        are replaced by their platform-dependent variants when building
        dependency graphs) and of all other package components, then
        draws every graph using a pool of four worker threads.
        """
        logger.info("Start Routine generating call graph......")
        self._isDependency = isCalled

        # Flatten all routines from all packages into one work list.
        workItems = []
        for pkg in itervalues(self._allPackages):
            for rtn in itervalues(pkg.getAllRoutines()):
                genericRoutine = self._crossRef.isPlatformGenericRoutineByName(
                    rtn.getName())
                if self._isDependency and genericRoutine:
                    # Substitute the concrete platform-dependent routines.
                    workItems.extend(
                        info[0]
                        for info in itervalues(rtn.getAllPlatformDepRoutines()))
                else:
                    workItems.append(rtn)

        # Add other package components too
        # TODO: This logic is copied from
        #       WebPageGenerator::generatePackageInformationPages(),
        #       could be improved in both places
        for componentKey in PACKAGE_COMPONENT_MAP:
            for pkg in itervalues(self._allPackages):
                workItems.extend(
                    itervalues(pkg.getAllPackageComponents(componentKey)))

        # Fan the rendering work out to four threads and wait for it.
        pool = ThreadPool(4)
        pool.map(self._generateRoutineDependencyGraph, workItems)
        pool.close()
        pool.join()

        logger.info("End of generating call graph......")
示例#4
0
 def _generateDataTableHtml(self, fileManData, fileNo, outDir):
     """Generate the data-list HTML page for one FileMan file.

     Large files (> 4500 entries) get a page whose table is populated
     via Ajax from a companion "<fileNo>_array.txt" JSON file; smaller
     files have their rows written inline into the page.
     """
     isLargeFile = len(fileManData.dataEntries) > 4500
     tName = normalizePackageName(fileManData.name)
     # Note: We are not normalizing fileNo here
     with open(os.path.join(outDir, "%s.html" % fileNo), 'w') as output:
         output.write("<html>\n")
         if isLargeFile:
             # Rows will be fetched asynchronously from this file.
             ajexSrc = "%s_array.txt" % fileNo
             outputLargeDataListTableHeader(output, ajexSrc, tName)
         else:
             outputDataListTableHeader(output, tName)
         output.write("<body id=\"dt_example\">")
         output.write("""<div id="container" style="width:80%">""")
         output.write("<h1>File %s(%s) Data List</h1>" % (tName, fileNo))
         writeTableListInfo(output, tName)
         if not isLargeFile:
             # Inline table rows; large files leave the table body empty
             # and rely on the Ajax data file written below.
             output.write("<tbody>\n")
             rows = self._getTableRows(fileManData, fileNo)
             for tableRow in rows:
                 output.write("<tr>\n")
                 for item in tableRow:
                     output.write("<td>%s</td>\n" % item)
                 output.write("</tr>\n")
         # NOTE(review): the closing tags below presumably match tags
         # opened by the *TableHeader/writeTableListInfo helpers -- confirm.
         output.write("</tbody>\n")
         output.write("</table>\n")
         output.write("</div>\n")
         output.write("</div>\n")
         output.write("</body></html>\n")
     if isLargeFile:
         logger.info("Writing Ajax file: %s" % ajexSrc)
         """ Write out the data file in JSON format """
         outJson = {"aaData": self._getTableRows(fileManData, fileNo)}
         with open(os.path.join(outDir, ajexSrc), 'w') as output:
             json.dump(outJson, output)
 def findPackagesAndRoutinesBySource(self, dirName, pattern):
   """Scan source files matching pattern under dirName and register
   each routine with its package in the cross reference.

   The package name is derived from the path segment between
   "Packages/" and "/Routines".
   """
   matchedFiles = glob.glob(os.path.join(dirName, pattern))
   logger.info("Total Search Files are %d " % len(matchedFiles))
   allRoutines = self.crossRef.getAllRoutines()
   allPackages = self.crossRef.getAllPackages()
   xRef = self.crossRef
   for srcFile in matchedFiles:
     rtnName = os.path.basename(srcFile).split(".")[0]
     rename = xRef.routineNeedRename(rtnName)
     if rename:
       originalName = rtnName
       rtnName = xRef.getRenamedRoutineName(rtnName)
     if xRef.isPlatformDependentRoutineByName(rtnName):
       continue
     # Extract the package name from .../Packages/<pkg>/Routines/...
     pkgDir = os.path.dirname(srcFile)
     pkgName = pkgDir[pkgDir.index("Packages") + 9:pkgDir.index("Routines") - 1]
     xRef.addRoutineToPackageByName(rtnName, pkgName)
     if rename:
       rtn = xRef.getRoutineByName(rtnName)
       assert(rtn)
       rtn.setOriginalName(originalName)
     # NOTE(review): this branch is a no-op; presumably a placeholder
     # for exempting "A" routines -- confirm before removing.
     if A_ROUTINE_EX.search(rtnName):
       pass
   logger.info("Total package is %d and Total Routines are %d" %
               (len(allPackages), len(allRoutines)))
 def parsePackagesFile(self, packageFilename):
   """Parse the packages CSV file and populate the cross reference.

   A row with a 'Directory Name' starts a new package; rows without
   one add extra prefixes/globals to the most recent package.
   """
   crossRef = self.crossRef
   currentPackage = None
   # Fix: the CSV file handle used to be opened inline and never
   # closed; a context manager closes it deterministically.  The
   # unused local 'index' was also removed.
   with open(packageFilename, 'rb') as packageFile:
     for row in csv.DictReader(packageFile):
       packageName = row['Directory Name']
       if packageName:
         currentPackage = crossRef.getPackageByName(packageName)
         if not currentPackage:
           crossRef.addPackageByName(packageName)
         currentPackage = crossRef.getPackageByName(packageName)
         currentPackage.setOriginalName(row['Package Name'])
         vdlId = row['VDL ID']
         if vdlId and len(vdlId):
           currentPackage.setDocLink(getVDLHttpLinkByID(vdlId))
       else:
         if not currentPackage:
           logger.warn("row is not under any package: %s" % row)
           continue
       if len(row['Prefixes']):
         currentPackage.addNamespace(row['Prefixes'])
       if len(row['Globals']):
         currentPackage.addGlobalNamespace(row['Globals'])
   logger.info("Total # of Packages is %d" % (len(crossRef.getAllPackages())))
示例#7
0
 def parsePackagesFile(self, packageFilename):
     """Parse the packages CSV file and populate the cross reference.

     A row with a 'Directory Name' starts a new package; rows without
     one add extra prefixes/globals to the most recent package.
     """
     crossRef = self.crossRef
     currentPackage = None
     # Fix: the CSV file handle used to be opened inline and never
     # closed; a context manager closes it deterministically.  The
     # unused local 'index' was also removed.
     with open(packageFilename, 'rb') as packageFile:
         for row in csv.DictReader(packageFile):
             packageName = row['Directory Name']
             if len(packageName) > 0:
                 currentPackage = crossRef.getPackageByName(packageName)
                 if not currentPackage:
                     logger.debug("Package [%s] not found" % packageName)
                     crossRef.addPackageByName(packageName)
                 currentPackage = crossRef.getPackageByName(packageName)
                 currentPackage.setOriginalName(row['Package Name'])
                 vdlId = row['VDL ID']
                 if vdlId and len(vdlId):
                     currentPackage.setDocLink(getVDLHttpLinkByID(vdlId))
             else:
                 if not currentPackage:
                     logger.warn("row is not under any package: %s" % row)
                     continue
             if len(row['Prefixes']):
                 currentPackage.addNamespace(row['Prefixes'])
             if len(row['Globals']):
                 currentPackage.addGlobalNamespace(row['Globals'])
     logger.info("Total # of Packages is %d" %
                 (len(crossRef.getAllPackages())))
示例#8
0
 def findPackagesAndRoutinesBySource(self, dirName, pattern):
     """Scan source files matching pattern under dirName and register
     every routine with its package in the cross reference.

     The package name is taken from the path segment between
     "Packages/" and "/Routines".
     """
     matchedFiles = glob.glob(os.path.join(dirName, pattern))
     logger.info("Total Search Files are %d " % len(matchedFiles))
     allRoutines = self.crossRef.getAllRoutines()
     allPackages = self.crossRef.getAllPackages()
     xRef = self.crossRef
     for srcFile in matchedFiles:
         rtnName = os.path.basename(srcFile).split(".")[0]
         rename = xRef.routineNeedRename(rtnName)
         if rename:
             originalName = rtnName
             rtnName = xRef.getRenamedRoutineName(rtnName)
         if xRef.isPlatformDependentRoutineByName(rtnName):
             continue
         # Extract the package name from .../Packages/<pkg>/Routines/...
         pkgDir = os.path.dirname(srcFile)
         pkgName = pkgDir[pkgDir.index("Packages") +
                          9:pkgDir.index("Routines") - 1]
         xRef.addRoutineToPackageByName(rtnName, pkgName)
         if rename:
             rtn = xRef.getRoutineByName(rtnName)
             assert (rtn)
             rtn.setOriginalName(originalName)
         if ARoutineEx.search(rtnName):
             logger.debug("A Routines %s should be exempted" % rtnName)
             pass
     logger.info("Total package is %d and Total Routines are %d" %
                 (len(allPackages), len(allRoutines)))
示例#9
0
 def parseAllCallerGraphLog(self, dirName, pattern):
     """Parse every XINDEX caller-graph log file matching pattern under
     dirName into the cross reference."""
     callerGraphLogFile = os.path.join(dirName, pattern)
     allFiles = glob.glob(callerGraphLogFile)
     xindexParser = XINDEXLogFileParser(self._crossRef)
     for logFileName in allFiles:
         # Fixed typo in log message: "paring" -> "parsing".
         logger.info("Start parsing log file [%s]" % logFileName)
         xindexParser.parseXindexLogFile(logFileName)
示例#10
0
 def _getAllFileManZWRFiles(self):
     """Find all FileMan ZWR global files under Packages/*/Globals.

     Returns a dict keyed by file number; 'DD.zwr' is mapped to the
     schema file under key '0'.  Files with a '-<n>' suffix
     (continuation parts) are logged and skipped.
     """
     searchDir = os.path.join(self.MRepositDir, 'Packages')
     zwrFiles = glob.glob(os.path.join(searchDir, "*/Globals/*.zwr"))
     results = {}
     for zwrFile in zwrFiles:
         baseName = os.path.basename(zwrFile)
         if baseName == 'DD.zwr':
             # The data dictionary becomes the schema file entry.
             results['0'] = {
                 'name': 'Schema File',
                 'path': os.path.normpath(os.path.abspath(zwrFile))
             }
             continue
         match = re.search(
             "(?P<fileNo>^[0-9.]+)(-[1-9])?\+(?P<des>.*)\.zwr$", baseName)
         if not match:
             continue
         if match.groups()[1]:
             # A '-<n>' part file; only the primary file is indexed.
             logger.info("Ignore file %s" % baseName)
             continue
         fileNo = match.group('fileNo')
         # Strip a single leading zero from the file number.
         if fileNo.startswith('0'):
             fileNo = fileNo[1:]
         results[fileNo] = {
             'name': match.group('des'),
             'path': os.path.normpath(os.path.abspath(zwrFile))
         }
     return results
示例#11
0
 def parseAllCallerGraphLog(self, dirName, pattern):
     """Parse every XINDEX caller-graph log file matching pattern under
     dirName into the cross reference."""
     callerGraphLogFile = os.path.join(dirName, pattern)
     allFiles = glob.glob(callerGraphLogFile)
     xindexParser = XINDEXLogFileParser(self._crossRef)
     for logFileName in allFiles:
         # Fixed typo in log message: "paring" -> "parsing".
         logger.info("Start parsing log file [%s]" % logFileName)
         xindexParser.parseXindexLogFile(logFileName)
示例#12
0
 def __outputIndividualSubFile__(self, outDir, subFile):
     """Write a single sub-file as SubFile_<fileNo>.json in outDir."""
     logger.info("Writing SubFile %s" % subFile)
     # Fix: use a context manager so the file is closed even if the
     # encoder or a write raises.
     with open(
             os.path.join(outDir, "SubFile_%s.json" % (subFile.getFileNo())),
             'wb') as outputFile:
         outputFile.write(self._fileManFileEncoder.outputSubFile(subFile))
         outputFile.write("\n")
示例#13
0
def processFiles(glbDataParser, htmlGen, files):
  """Parse each FileMan file's ZWR globals, emit its HTML pages, then
  drop the parsed data to keep memory bounded."""
  for fileNo in files:
    for zwrPath in glbDataParser.allFiles[fileNo]['path']:
      logger.info("Parsing file: %s at %s" % (fileNo, zwrPath))
      glbDataParser.parseZWRGlobalFileBySchemaV2(zwrPath, fileNo)
    htmlGen.outputFileManDataAsHtml(fileNo, glbDataParser)
    # Free the parsed data before moving to the next file.
    glbDataParser.outFileManData.pop(fileNo)
    gc.collect()
示例#14
0
def processFiles(glbDataParser, htmlGen, files):
    """For each requested FileMan file: parse its ZWR globals, write
    the HTML pages, then release the parsed data."""
    for fileManFile in files:
        for globalFile in glbDataParser.allFiles[fileManFile]['path']:
            logger.info("Parsing file: %s at %s" % (fileManFile, globalFile))
            glbDataParser.parseZWRGlobalFileBySchemaV2(globalFile, fileManFile)
        htmlGen.outputFileManDataAsHtml(fileManFile, glbDataParser)
        # Drop parsed data so memory stays bounded across files.
        glbDataParser.outFileManData.pop(fileManFile)
        gc.collect()
示例#15
0
def run(args):
    """Drive FileMan global-data parsing and HTML generation for the
    file numbers requested in args.

    When the requested files are all in schemaParser.isolatedFiles (or
    args.all is unset), each file is parsed independently; otherwise
    the schema's strongly-connected components determine the full set
    of files that must be parsed together.
    """
    from InitCrossReferenceGenerator import parseCrossRefGeneratorWithArgs
    from FileManDataToHtml import FileManDataToHtml

    crossRef = parseCrossRefGeneratorWithArgs(args)
    _doxURL = getDOXURL(args.local)
    _vivianURL = getViViaNURL(False)
    glbDataParser = FileManGlobalDataParser(args.MRepositDir, crossRef)
    # Files '0' and '1' must exist, and every requested file number
    # must be known to the parser.
    assert '0' in glbDataParser.allFiles and '1' in glbDataParser.allFiles and set(
        args.fileNos).issubset(glbDataParser.allFiles)

    # Populate glbDataParser.globalLocationMap
    glbDataParser.parseZWRGlobalFileBySchemaV2(
        glbDataParser.allFiles['1']['path'], '1', '^DIC(')
    for fileNo in args.fileNos:
        assert fileNo in glbDataParser.globalLocationMap
    # File '1' was only parsed to build the location map; drop its data.
    del glbDataParser.outFileManData['1']

    glbDataParser.outdir = args.outDir

    glbDataParser.patchDir = args.patchRepositDir
    htmlGen = FileManDataToHtml(crossRef, args.outDir, _doxURL, _vivianURL)
    isolatedFiles = glbDataParser.schemaParser.isolatedFiles
    if not args.all or set(args.fileNos).issubset(isolatedFiles):
        # Fast path: parse each requested file on its own.
        for fileNo in args.fileNos:
            gdFile = glbDataParser.allFiles[fileNo]['path']
            logger.info("Parsing file: %s at %s" % (fileNo, gdFile))
            glbDataParser.parseZWRGlobalFileBySchemaV2(gdFile, fileNo)

            htmlGen.outputFileManDataAsHtml(glbDataParser)

            # Release parsed data before the next file to bound memory.
            del glbDataParser.outFileManData[fileNo]
    else:
        # Generate all required files
        sccSet = glbDataParser.schemaParser.sccSet
        fileSet = set(args.fileNos)
        # Find the last SCC index needed to cover all requested files.
        # NOTE(review): 'idx' is unbound if sccSet is empty -- presumably
        # it never is on this path; confirm.
        for idx, value in enumerate(sccSet):
            fileSet.difference_update(value)
            if not fileSet:
                break
        for i in xrange(0, idx + 1):
            fileSet = fileSet.union(sccSet[i])
            fileSet &= set(glbDataParser.allFiles.keys())
            fileSet.discard('757')
        if len(fileSet) > 1:
            for file in fileSet:
                zwrFile = glbDataParser.allFiles[file]['path']
                globalSub = glbDataParser.allFiles[file]['name']
                glbDataParser.generateFileIndex(zwrFile, file)
        for file in fileSet:
            zwrFile = glbDataParser.allFiles[file]['path']
            globalSub = glbDataParser.allFiles[file]['name']
            logger.info("Parsing file: %s at %s" % (file, zwrFile))
            glbDataParser.parseZWRGlobalFileBySchemaV2(zwrFile, file)
            htmlGen.outputFileManDataAsHtml(glbDataParser)
            del glbDataParser.outFileManData[file]

    glbDataParser.outRtnReferenceDict()
示例#16
0
    def _generatePackageDependencyGraph(self, package):
        """Render a package dependency/dependent graph as a dot file,
        then convert it to a .png image plus a .cmapx image map.

        Skipped when the package has no dependencies or more than
        MAX_DEPENDENCY_LIST_SIZE of them.
        """
        # merge the routine and package list
        depPackages, depPackageMerged = mergeAndSortDependencyListByPackage(package, self._isDependency)
        packageName = package.getName()
        totalPackage = len(depPackageMerged)
        if (totalPackage == 0) or (totalPackage > MAX_DEPENDENCY_LIST_SIZE):
            logger.info("Nothing to do exiting... Package: %s Total: %d " %
                         (packageName, totalPackage))
            return
        dirName = os.path.join(self._outDir, packageName)
        if self._isDependency:
            packageSuffix = "_dependency"
        else:
            packageSuffix = "_dependent"
        normalizedName = normalizePackageName(packageName)
        dotFilename = os.path.join(dirName, "%s%s.dot" % (normalizedName, packageSuffix))
        with open(dotFilename, 'w') as output:
            output.write("digraph %s%s {\n" % (normalizedName, packageSuffix))
            output.write("\tnode [shape=box fontsize=14];\n") # set the node shape to be box
            output.write("\tnodesep=0.35;\n") # set the node sep to be 0.35
            # BUG FIX: previously wrote "\transsep" (i.e. TAB + "ranssep"),
            # which is not a graphviz attribute; the intended one is ranksep.
            output.write("\tranksep=0.55;\n") # set the rank sep to be 0.55
            output.write("\tedge [fontsize=12];\n") # set the edge label and size props
            output.write("\t%s [style=filled fillcolor=orange label=\"%s\"];\n" % (normalizedName,
                                                                                   packageName))
            for depPackage in depPackages:
                depPackageName = depPackage.getName()
                normalizedDepPackName = normalizePackageName(depPackageName)
                output.write("\t%s [label=\"%s\" URL=\"%s\"];\n" % (normalizedDepPackName,
                                                                    depPackageName,
                                                                    getPackageHtmlFileName(depPackageName)))
                depMetricsList = depPackageMerged[depPackage]
                # Edge weight is the sum of the even-indexed metrics 0,2,4,6.
                edgeWeight = sum(depMetricsList[0:7:2])
                edgeLinkURL = getPackageDependencyHtmlFileName(normalizedName, normalizedDepPackName)
                # Edge direction (and tooltip wording) flips between the
                # dependency and dependent variants of the graph.
                if self._isDependency:
                    edgeStartNode = normalizedName
                    edgeEndNode = normalizedDepPackName
                    edgeLinkArch = packageName
                    toolTipStartPackage = packageName
                    toolTipEndPackage = depPackageName
                else:
                    edgeStartNode = normalizedDepPackName
                    edgeEndNode = normalizedName
                    edgeLinkArch = depPackageName
                    toolTipStartPackage = depPackageName
                    toolTipEndPackage = packageName
                (edgeLabel, edgeToolTip, edgeStyle) = getPackageGraphEdgePropsByMetrics(depMetricsList,
                                                                                        toolTipStartPackage,
                                                                                        toolTipEndPackage)
                output.write("\t%s->%s [label=\"%s\" weight=%d URL=\"%s#%s\" style=\"%s\" labeltooltip=\"%s\" edgetooltip=\"%s\"];\n" %
                                (edgeStartNode, edgeEndNode, edgeLabel,
                                 edgeWeight, edgeLinkURL, edgeLinkArch,
                                 edgeStyle, edgeToolTip, edgeToolTip))
            output.write("}\n")

        pngFilename = os.path.join(dirName, "%s%s.png" % (normalizedName, packageSuffix))
        cmapxFilename = os.path.join(dirName, "%s%s.cmapx" % (normalizedName, packageSuffix))
        self._generateImagesFromDotFile(pngFilename, cmapxFilename, dotFilename)
示例#17
0
 def parse(self, inputFilename, outputFilename):
     """Parse an ICR flat-text export into JSON records.

     Reads inputFilename line by line, building one dict per ICR record
     (with nested sub-files and multi-line word-processing fields), and
     dumps the accumulated list of records to outputFilename as JSON.
     """
     with open(inputFilename, 'r') as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo += 1
             """ get rid of lines that are ignored """
             if self.isIgnoredLine(line):
                 continue
             match = START_OF_RECORD.match(line)
             if match:
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
             if match and match.group('name') in ICR_FILE_KEYWORDS:
                 fieldName = match.group('name')
                 if isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug('field name is: %s', fieldName)
                     logger.debug('cur field is: %s', self._curField)
                     """ Check to see if fieldName is already in the out list """
                     if isWordProcessingField(self._curField):
                         if self._ignoreKeywordInWordProcessingFields(
                                 fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack()
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 # Continuation line for the field currently being read.
                 if len(line.strip()) == 0 and not isWordProcessingField(
                         self._curField):
                     logger.warn(
                         'Ignore blank line for current field: [%s]',
                         self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     print 'No field associated with line %s: %s ' % (
                         self._curLineNo, line)
     logger.info('End of file now')
     # Flush any remaining sub-file context and the final record.
     if len(self._curStack) > 0:
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info('Add last record: %s', self._curRecord)
         self._outObject.append(self._curRecord)
     # pprint.pprint(self._outObject);
     with open(outputFilename, 'w') as out_file:
         json.dump(self._outObject, out_file, indent=4)
示例#18
0
 def __outputIndividualPackage__(self, outDir, Package):
     """Write one package as Package_<name>.json in outDir; spaces and
     dashes in the package name become underscores."""
     logger.info("Writing Package %s" % Package)
     # Fix: use a context manager so the file is closed even if the
     # encoder or a write raises.
     with open(
             os.path.join(
                 outDir, "Package_%s.json" %
                 (Package.getName().replace(' ', '_').replace('-', '_'))),
             'wb') as outputFile:
         outputFile.write(self._packageEncoder.outputPackage(Package))
         outputFile.write("\n")
示例#19
0
 def _updateInstallReference(self):
   """Build install_information.json from the INSTALL file (9.7) data,
   grouping install entries by owning package and annotating each with
   build dependencies, field data, and package-switch hints."""
   if not os.path.exists(self.outdir+"/9_7"):
     os.mkdir(self.outdir+"/9_7")
   installData = self._glbData['9.7']
   output = os.path.join(self.outdir, "install_information.json")
   installJSONData = {}
   packageList = self._crossRef.getAllPackages()
   with open(output, 'w') as installDataOut:
     # Iterate installs in numeric ien order.
     for ien in sorted(installData.dataEntries.keys(), key=lambda x: float(x)):
       installItem = {}
       installEntry = installData.dataEntries[ien]
       package = self._findInstallPackage(packageList, installEntry.name)
       # if this is the first time the package is found, add an entry in the install JSON data.
       if package not in installJSONData:
         installJSONData[package]={}
       if installEntry.name:
         installItem['name'] = installEntry.name
         installItem['ien'] = installEntry.ien
         installItem['label'] = installEntry.name
         installItem['value'] = installEntry.name
         installItem['parent']= package
         # Attach build dependency info when this install is known in
         # INSTALL_DEPENDENCY_DICT (ien, optional multi flag, children).
         if installEntry.name in INSTALL_DEPENDENCY_DICT:
           installItem['BUILD_ien'] = INSTALL_DEPENDENCY_DICT[installEntry.name]["ien"]
           installchildren = []
           if 'multi' in INSTALL_DEPENDENCY_DICT[installEntry.name].keys():
             installItem['multi'] = INSTALL_DEPENDENCY_DICT[installEntry.name]['multi']
           if 'builds' in INSTALL_DEPENDENCY_DICT[installEntry.name].keys():
               for child in INSTALL_DEPENDENCY_DICT[installEntry.name]['builds']:
                 childPackage = self._findInstallPackage(packageList,child)
                 childEntry = {"name": child, "package": childPackage}
                 if child in INSTALL_DEPENDENCY_DICT.keys():
                     if 'multi' in INSTALL_DEPENDENCY_DICT[child].keys():
                       childEntry['multi'] = INSTALL_DEPENDENCY_DICT[child]['multi']
                 installchildren.append(childEntry);
               installItem['children'] = installchildren
         # Copy selected fields: '11' install date, '1' package link,
         # '40' routine count, '14' file count (per the keys set below).
         if '11' in installEntry.fields:
           installItem['installDate'] = installEntry.fields['11'].value.strftime("%Y-%m-%d")
         if '1' in installEntry.fields:
           installItem['packageLink'] = installEntry.fields['1'].value
         if '40' in installEntry.fields:
           installItem['numRoutines'] = len(installEntry.fields['40'].value.dataEntries)
         if '14' in installEntry.fields:
           installItem['numFiles'] = len(installEntry.fields['14'].value.dataEntries)
         # Checks for the absence of asterisks which usually denotes a package change, also make it more specific to
         # eliminate the multibuilds that are being marked as package changes
         testMatch = PACKAGE_CHANGE_REGEX.search(installEntry.name)
         if testMatch is None:
           # Assume a package switch name will be just a package name and a version
           capture = PACKAGE_NAME_VAL_REGEX.match(installEntry.name)
           if capture:
                 checkPackage = self._findInstallPackage(packageList, capture.groups()[0],False)
                 if (not (checkPackage == "Unknown") or (len(capture.groups()[0]) <= 4 )):
                   installItem['packageSwitch'] = True
         installJSONData[package][installEntry.name] = installItem
     logger.info("About to dump data into %s" % output)
     json.dump(installJSONData,installDataOut)
示例#20
0
 def _updateInstallReference(self):
   """Build install_information.json from the INSTALL file (9.7) data,
   grouping install entries by owning package and annotating each with
   build dependencies, field data, and package-switch hints."""
   if not os.path.exists(self.outdir+"/9_7"):
     os.mkdir(self.outdir+"/9_7")
   installData = self._glbData['9.7']
   output = os.path.join(self.outdir, "install_information.json")
   installJSONData = {}
   packageList = self._crossRef.getAllPackages()
   with open(output, 'w') as installDataOut:
     # Iterate installs in numeric ien order.
     for ien in sorted(installData.dataEntries.keys(), key=lambda x: float(x)):
       installItem = {}
       installEntry = installData.dataEntries[ien]
       package = self._findInstallPackage(packageList, installEntry.name)
       # if this is the first time the package is found, add an entry in the install JSON data.
       if package not in installJSONData:
         installJSONData[package]={}
       if installEntry.name:
         installItem['name'] = installEntry.name
         installItem['ien'] = installEntry.ien
         installItem['label'] = installEntry.name
         installItem['value'] = installEntry.name
         installItem['parent']= package
         # Attach build dependency info when this install is known in
         # INSTALL_DEPENDENCY_DICT (ien, optional multi flag, children).
         if installEntry.name in INSTALL_DEPENDENCY_DICT:
           installItem['BUILD_ien'] = INSTALL_DEPENDENCY_DICT[installEntry.name]["ien"]
           installchildren = []
           if 'multi' in INSTALL_DEPENDENCY_DICT[installEntry.name].keys():
             installItem['multi'] = INSTALL_DEPENDENCY_DICT[installEntry.name]['multi']
           if 'builds' in INSTALL_DEPENDENCY_DICT[installEntry.name].keys():
               for child in INSTALL_DEPENDENCY_DICT[installEntry.name]['builds']:
                 childPackage = self._findInstallPackage(packageList,child)
                 childEntry = {"name": child, "package": childPackage}
                 if child in INSTALL_DEPENDENCY_DICT.keys():
                     if 'multi' in INSTALL_DEPENDENCY_DICT[child].keys():
                       childEntry['multi'] = INSTALL_DEPENDENCY_DICT[child]['multi']
                 installchildren.append(childEntry);
               installItem['children'] = installchildren
         # Copy selected fields: '11' install date, '1' package link,
         # '40' routine count, '14' file count (per the keys set below).
         if '11' in installEntry.fields:
           installItem['installDate'] = installEntry.fields['11'].value.strftime("%Y-%m-%d")
         if '1' in installEntry.fields:
           installItem['packageLink'] = installEntry.fields['1'].value
         if '40' in installEntry.fields:
           installItem['numRoutines'] = len(installEntry.fields['40'].value.dataEntries)
         if '14' in installEntry.fields:
           installItem['numFiles'] = len(installEntry.fields['14'].value.dataEntries)
         # Checks for the absence of asterisks which usually denotes a package change, also make it more specific to
         # eliminate the multibuilds that are being marked as package changes
         testMatch = PACKAGE_CHANGE_REGEX.search(installEntry.name)
         if testMatch is None:
           # Assume a package switch name will be just a package name and a version
           capture = PACKAGE_NAME_VAL_REGEX.match(installEntry.name)
           if capture:
                 checkPackage = self._findInstallPackage(packageList, capture.groups()[0],False)
                 if (not (checkPackage == "Unknown") or (len(capture.groups()[0]) <= 4 )):
                   installItem['packageSwitch'] = True
         installJSONData[package][installEntry.name] = installItem
     logger.info("About to dump data into %s" % output)
     json.dump(installJSONData,installDataOut)
示例#21
0
File: ICRParser.py  Project: montge/VistA
    def _startOfNewItem(self, matchObj, line):
        """Finish the record currently being built (if any) and start a
        new top-level ICR record from the matched line."""
        logger.debug('Starting of new item: %s', self._curStack)
        logger.info('Starting of new item: %s', line)
        self._curField = None

        # Unwind any nested sub-file context back to the top level.
        self._rewindStack()
        if self._curRecord:
            self._outObject.append(self._curRecord)
        self._curRecord = {}
        self._findKeyValueInLine(matchObj, line, self._curRecord)
示例#22
0
 def __outputIndividualGlobal__(self, outDir, Global):
     """Write one global as Global_<urlsafe-b64(name)>.json in outDir."""
     logger.info("Writing Global %s" % Global)
     # Fix: use a context manager so the file is closed even if the
     # encoder or a write raises.
     with open(
             os.path.join(
                 outDir,
                 "Global_%s.json" % base64.urlsafe_b64encode(Global.getName())),
             'wb') as outputFile:
         outputFile.write(self._globalEncoder.outputResult(Global))
         outputFile.write("\n")
示例#23
0
 def __outputIndividualRoutine__(self, outDir, routine):
     """Write one routine as Routine_<name>.json; for platform-generic
     routines, recursively write each platform-dependent variant too."""
     logger.info("Writing Routine %s" % routine)
     # Fix: use a context manager so the file is closed even if the
     # encoder or a write raises.
     with open(os.path.join(outDir, "Routine_%s.json" % routine.getName()),
               'wb') as outputFile:
         outputFile.write(self._routineEncoder.outputRoutine(routine))
         outputFile.write("\n")
     if isinstance(routine, PlatformDependentGenericRoutine):
         routineList = [x[0] for x in routine.getAllPlatformDepRoutines().itervalues()]
         for depRoutine in routineList:
             self.__outputIndividualRoutine__(outDir, depRoutine)
示例#24
0
File: ICRParser.py  Project: OSEHRA/VistA
    def _startOfNewItem(self, matchObj, line):
        """Finish the record currently being built (if any) and start a
        new top-level ICR record from the matched line."""
        logger.debug("Starting of new item: %s", self._curStack)
        logger.info("Starting of new item: %s", line)
        self._curField = None

        # Unwind any nested sub-file context back to the top level.
        self._rewindStack()
        if self._curRecord:
            self._outObject.append(self._curRecord)
        self._curRecord = {}
        self._findKeyValueInLine(matchObj, line, self._curRecord)
示例#25
0
 def generateRoutineCallGraph(self, isCalled=True):
     """Generate a call (or caller) graph for every routine in every
     package; platform-generic routines get the platform-generic
     dependency graph when generating call graphs."""
     logger.info("Start Routine generating call graph......")
     for pkg in self._allPackages.itervalues():
         for rtn in pkg.getAllRoutines().itervalues():
             genericRoutine = self._crossRef.isPlatformGenericRoutineByName(rtn.getName())
             if isCalled and genericRoutine:
                 self.generatePlatformGenericDependencyGraph(rtn, isCalled)
             else:
                 self.generateRoutineDependencyGraph(rtn, isCalled)
     logger.info("End of generating call graph......")
示例#26
0
 def _generateMenuDependency(self, allMenuList, allOptionList, outDir):
     """Build the menu parent/child dependency graph and write one
     VistAMenu-<ien>.json tree file per root (parentless) menu."""
     # Lookup tables: option by ien, and an (initially empty) child
     # set per menu.
     menuDict = dict((x.ien, x) for x in allOptionList)
     menuDepDict = dict((x, set()) for x in allMenuList)
     for dataEntry in allMenuList:
         if '10' in dataEntry.fields:
             menuData = dataEntry.fields['10'].value
             if menuData and menuData.dataEntries:
                 for subIen in menuData.dataEntries:
                     subEntry = menuData.dataEntries[subIen]
                     if not ".01" in subEntry.fields:
                         continue
                     value = subEntry.name
                     # The child's ien is the second '^'-piece of the name.
                     childIen = value.split('^')[1]
                     if '2' in subEntry.fields:
                         # Field '2' is recorded as the synonym shown on
                         # the parent menu.
                         self.synonymMap[(
                             dataEntry.name, menuDict[childIen].name
                         )] = "[" + subEntry.fields['2'].value + "]"
                     if childIen in menuDict:
                         menuDepDict[dataEntry].add(menuDict[childIen])
                     else:
                         logger.error("Could not find %s: value: %s" %
                                      (childIen, value))
     """ discard any menu does not have any child """
     leafMenus = set()
     for entry in menuDepDict:
         if len(menuDepDict[entry]) == 0:
             leafMenus.add(entry)
     for entry in leafMenus:
         del menuDepDict[entry]
     """ find the top level menu, menu without any parent """
     allChildSet = reduce(set.union, menuDepDict.itervalues())
     rootSet = set(allMenuList) - allChildSet
     leafSet = allChildSet - set(allMenuList)
     """ generate the json file based on root menu """
     for item in rootSet:
         outJson = {}
         outJson['name'] = item.name
         outJson['option'] = item.name
         outJson['ien'] = item.ien
         # Explicitly exclude the ZZSERVERMENU from having a link generated for it.
         outJson['hasLink'] = False if item.name == "ZZSERVERMENU" else True
         if '1' in item.fields:
             outJson['name'] = item.fields['1'].value
         if '3' in item.fields:
             outJson['lock'] = item.fields['3'].value
         if '4' in item.fields:
             outJson['type'] = item.fields['4'].value
         if item in menuDepDict:
             self._addChildMenusToJson(menuDepDict[item], menuDepDict,
                                       outJson, item)
         with open(os.path.join(outDir, "VistAMenu-%s.json" % item.ien),
                   'w') as output:
             logger.info("Generate File: %s" % output.name)
             json.dump(outJson, output)
def testOutput(DDFileParser):
    """Dump a fixed set of globals and report every collected field-note title."""
    globalsToPrint = (
        "^DIBT",
        "^DPT",
        "^GECS(2100",
        "^ONCO(165.5",
        "^ORD(100.99",
        "^ICD9",
        "^YSD(627.8",
    )
    for globalName in globalsToPrint:
        DDFileParser.printGlobal(globalName)
    logger.info("Total FileMan Field Note Title is %d" %
                len(FileManFieldSectionParser.totalFieldNotes))
    for noteTitle in FileManFieldSectionParser.totalFieldNotes:
        logger.info(noteTitle)
示例#28
0
def testOutput(DDFileParser):
    """Dump the ^DPT global and report every collected field-note title.

    Other globals are intentionally disabled here; see the note below.
    """
    # Disabled globals kept for quick re-enabling:
    # "^DIBT", "^GECS(2100", "^ONCO(165.5", "^ORD(100.99", "^ICD9", "^YSD(627.8"
    DDFileParser.printGlobal("^DPT")
    totalNotes = FileManFieldSectionParser.totalFieldNotes
    logger.info("Total FileMan Field Note Title is %d" % len(totalNotes))
    for noteTitle in totalNotes:
        logger.info(noteTitle)
示例#29
0
 def __outputIndividualFileManFile__(self, outDir, Global):
     """Write one FileMan file's JSON encoding under two file names.

     The same payload is written twice: once keyed by the (base64-encoded)
     global name and once keyed by the FileMan file number.
     """
     logger.info("Writing Global %s" % Global)
     jsonResult = self._fileManFileEncoder.outputResult(Global)
     globalPath = os.path.join(
         outDir,
         "Global_%s.json" % base64.urlsafe_b64encode(Global.getName()))
     # 'with' guarantees the handle is closed even if a write fails
     # (the original open/close pair leaked the handle on error).
     with open(globalPath, 'wb') as outputFile:
         outputFile.write(jsonResult)
         outputFile.write("\n")
     logger.info("Writing FileManFile %s" % Global.getFileNo())
     fileManPath = os.path.join(outDir,
                                "FileManFile_%s.json" % Global.getFileNo())
     with open(fileManPath, 'wb') as outputFile:
         outputFile.write(jsonResult)
         outputFile.write("\n")
 def __findTotalSubFileds__(self):
     """Scan the parsed lines for field-note titles and record new ones
     in the class-level totalFieldNotes set, logging each new title.
     """
     # Guard: nothing to scan.  The original used 'pass' here, which made
     # the emptiness check a no-op; an early return is the clear intent.
     if not self._lines:
         return
     indentValue = self.__getDefaultIndentLevel__(self._curFile,
                                                  self.DEFAULT_NAME_INDENT)
     for line in self._lines:
         result = re.search(
             "^ {%d,%d}(?P<Name>[A-Z][^:]+):" %
             (self.DEFAULT_NAME_INDENT, indentValue), line)
         if result:
             name = result.group('Name')
             # All numbered "SCREEN ON FILE n" variants collapse to one title.
             if name.startswith("SCREEN ON FILE "):
                 name = "SCREEN ON FILE"
             if name not in self.totalFieldNotes:
                 logger.info("NEW FIELD NOTE TITLE: [%s]" % name)
                 self.totalFieldNotes.add(name)
示例#31
0
File: ICRParser.py  Project: OSEHRA/VistA
 def parse(self, inputFilename, outputFilename):
     """Parse an ICR text export into a list of record dicts saved as JSON.

     Reads inputFilename line by line, building one dict per ICR record
     in self._outObject, then dumps the whole list to outputFilename.
     Parsing is stateful: self._curRecord / self._curField / self._curStack
     track the record, field, and sub-file nesting currently being filled.
     """
     with open(inputFilename, "r") as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo += 1
             """ get rid of lines that are ignored """
             if self.isIgnoredLine(line):
                 continue
             match = START_OF_RECORD.match(line)
             if match:
                 # A new top-level record begins here.
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
             if match and match.group("name") in ICR_FILE_KEYWORDS:
                 fieldName = match.group("name")
                 if isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug("field name is: %s", fieldName)
                     logger.debug("cur field is: %s", self._curField)
                     """ Check to see if fieldName is already in the out list """
                     # Inside a word-processing field a keyword-looking line
                     # may just be text; keep it as field content.
                     if isWordProcessingField(self._curField):
                         if self._ignoreKeywordInWordProcessingFields(fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack()
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 # Continuation line of the field currently being collected.
                 if len(line.strip()) == 0 and not isWordProcessingField(self._curField):
                     logger.warn("Ignore blank line for current field: [%s]", self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     # NOTE: Python 2 print statement.
                     print "No field associated with line %s: %s " % (self._curLineNo, line)
     logger.info("End of file now")
     # Unwind any sub-file nesting still open at end of file.
     if len(self._curStack) > 0:
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info("Add last record: %s", self._curRecord)
         self._outObject.append(self._curRecord)
     # pprint.pprint(self._outObject);
     with open(outputFilename, "w") as out_file:
         json.dump(self._outObject, out_file, indent=4)
示例#32
0
 def generateRoutineCallGraph(self, isCalled=True):
     """Generate a call (isCalled=True) or caller graph for every routine
     in every package.
     """
     logger.info("Start Routine generating call graph......")
     crossRef = self._crossRef
     for package in self._allPackages.itervalues():
         for routine in package.getAllRoutines().itervalues():
             platformGeneric = crossRef.isPlatformGenericRoutineByName(
                 routine.getName())
             # Platform-generic routines get a combined graph, but only on
             # the "called" (dependency) side.
             if isCalled and platformGeneric:
                 handler = self.generatePlatformGenericDependencyGraph
             else:
                 handler = self.generateRoutineDependencyGraph
             handler(routine, isCalled)
     logger.info("End of generating call graph......")
示例#33
0
 def __outputIndividualRoutine__(self, outDir, routine):
     """Write one routine's JSON encoding to Routine_<name>.json.

     For a platform-dependent generic routine, recurse into each
     platform-specific variant so every concrete routine gets a file.
     """
     logger.info("Writing Routine %s" % routine)
     outPath = os.path.join(outDir, "Routine_%s.json" % routine.getName())
     # 'with' closes the file even if the encoder or a write raises
     # (the original open/close pair leaked the handle on error).
     with open(outPath, 'wb') as outputFile:
         outputFile.write(self._routineEncoder.outputRoutine(routine))
         outputFile.write("\n")
     if isinstance(routine, PlatformDependentGenericRoutine):
         routineList = [
             x[0] for x in routine.getAllPlatformDepRoutines().itervalues()
         ]
         for depRoutine in routineList:
             self.__outputIndividualRoutine__(outDir, depRoutine)
示例#34
0
def run(args):
    """Acquire the requirements spreadsheet and past-data JSON (local file
    or download from code.osehra.org), convert them to JSON, and render
    HTML pages under args.outDir.
    """
    if args.localReq:
      logger.info("Using local Requirements file: %s" % args.localReq)
      xlsfileName = args.localReq
    else:
      # First, acquire pages from http://code.osehra.org
      xlsfileName="Open Needs_Epics with BFFs (for Open Source)_Feb2018.xlsx"
      logger.info("Downloading %s from http://code.osehra.org" % xlsfileName)
      quotedURL = urllib.quote("code.osehra.org/files/requirements/"+xlsfileName)
      urllib.urlretrieve("http://%s" % quotedURL,xlsfileName)
    if args.localPast:
      logger.info("Using local pastData file: %s" % args.localPast)
      pastDataFileName = args.localPast
    else:
      pastDataURL= "code.osehra.org/files/requirements/requirements_July_2017/Requirements.json"
      logger.info("Downloading %s" % pastDataURL)
      quotedURL = urllib.quote(pastDataURL)
      urllib.urlretrieve("http://%s" % quotedURL,"oldRequirements.json")
      pastDataFileName = "oldRequirements.json"

    args.ReqJsonFile = os.path.join(args.outDir, "requirements.json")
    requirementsDir = os.path.join(args.outDir, "requirements")
    if not os.path.exists(requirementsDir):
        os.mkdir(requirementsDir)

    filename = os.path.basename(xlsfileName)[:-5] # strip the 5-char '.xlsx' extension
    # The date is encoded after the last underscore in the name (e.g. Feb2018).
    curDate = filename[filename.rfind("_")+1:]
    RequirementsXLStoJSON.convertExcelToJson(xlsfileName, args.ReqJsonFile, pastDataFileName, curDate)
    converter = RequirementsJSONtoHTML.RequirementsConverter(requirementsDir)
    converter.convertJsonToHtml(args.ReqJsonFile)
示例#35
0
def run(args):
    """Acquire the requirements spreadsheet and past-data JSON (local file
    or download from code.osehra.org), convert them to JSON, and render
    HTML pages under args.outDir.
    """
    if args.localReq:
        logger.info("Using local Requirements file: %s" % args.localReq)
        xlsfileName = args.localReq
    else:
        # First, acquire pages from http://code.osehra.org
        xlsfileName = "Open Needs_Epics with BFFs (for Open Source)_Feb2018.xlsx"
        logger.info("Downloading %s from http://code.osehra.org" % xlsfileName)
        quotedURL = urllib.quote("code.osehra.org/files/requirements/" +
                                 xlsfileName)
        urllib.urlretrieve("http://%s" % quotedURL, xlsfileName)
    if args.localPast:
        logger.info("Using local pastData file: %s" % args.localPast)
        pastDataFileName = args.localPast
    else:
        pastDataURL = "code.osehra.org/files/requirements/requirements_July_2017/Requirements.json"
        logger.info("Downloading %s" % pastDataURL)
        quotedURL = urllib.quote(pastDataURL)
        urllib.urlretrieve("http://%s" % quotedURL, "oldRequirements.json")
        pastDataFileName = "oldRequirements.json"

    args.ReqJsonFile = os.path.join(args.outDir, "requirements.json")
    filename = os.path.basename(xlsfileName)[:-5]  # strip the 5-char '.xlsx' extension
    # The date is encoded after the last underscore in the name (e.g. Feb2018).
    curDate = filename[filename.rfind("_") + 1:]
    RequirementsXLStoJSON.convertExcelToJson(xlsfileName, args.ReqJsonFile,
                                             pastDataFileName, curDate)
    converter = RequirementsJSONtoHTML.RequirementsConverter(
        os.path.join(args.outDir, "requirements"))
    converter.convertJsonToHtml(args.ReqJsonFile)
示例#36
0
def loadCrossRefFromMongoDB():
    """Rebuild a CrossReference object from the 'vista' MongoDB database.

    Decodes, in order, the packages, routines, globals and subfiles
    collections into a fresh CrossReference (the decoders presumably
    attach to entries created earlier — order kept as in the original).
    """
    vistadb = pymongo.Connection().vista
    crossRef = CrossReference()
    packageDecoder = PackageJSONDecoder(crossRef)
    for doc in vistadb.packages.find():
        logger.info("decoding Package: %s" % doc['name'])
        packageDecoder.decodePackage(doc)
    routineDecoder = RoutineJSONDecode(crossRef)
    for doc in vistadb.routines.find():
        logger.info("decoding Routine: %s" % doc['name'])
        routineDecoder.decodeRoutine(doc)
    fileManFileDecoder = FileManFileDecode(crossRef)
    for doc in vistadb.globals.find():
        logger.info("decoding global: %s" % doc['name'])
        fileManFileDecoder.decodeGlobal(doc)
    for doc in vistadb.subfiles.find():
        logger.info("decoding subfile: %s" % doc['file_no'])
        fileManFileDecoder.decodeSubFile(doc)
    return crossRef
 def _generateNoPointerToFileList(self):
     """Compute the set of isolated root files.

     A file is "isolated" when it does not point to any other file and is
     not pointed to by any file; the result is stored in
     self._isolatedFile.  self._fileDep maps a file number to the set of
     file numbers it points to.
     """
     depDict = self._fileDep
     # All top-level (non-sub-file) file numbers in the schema.
     allFiles = [
         x for x in self._allSchema if self._allSchema[x].isRootFile()
     ]
     logger.info("Total # of Files: %s" % len(allFiles))
     # Root files that appear as keys in the dependency dict, i.e. files
     # that have outgoing pointers recorded.
     allFilesWithPointedTo = [
         x for x in depDict if self._allSchema[x].isRootFile()
     ]
     allFilesWithoutPointedTo = sorted(set(allFiles) -
                                       set(allFilesWithPointedTo),
                                       key=lambda x: float(x))
     logger.info(
         "Total # of Files that is not pointed to by any files: %s" %
         len(allFilesWithoutPointedTo))
     # Union of every pointed-to set = all files with incoming pointers.
     allFilePointerTo = reduce(set.union, depDict.itervalues())
     allFileNoPointerTo = set(allFiles) - allFilePointerTo
     logger.info("Total # of files that does not have file pointer: %s" %
                 len(allFileNoPointerTo))
     self._isolatedFile = set(allFilesWithoutPointedTo) & allFileNoPointerTo
     logger.info("Total # of Isolated Files: %s" % len(self._isolatedFile))
示例#38
0
def loadCrossRefFromMongoDB():
    """Rebuild a CrossReference object from the 'vista' MongoDB database.

    Decodes, in order, the packages, routines, globals and subfiles
    collections into a fresh CrossReference and returns it.
    """
    conn = pymongo.Connection()
    vistadb = conn.vista
    packageTable = vistadb.packages
    packages = packageTable.find()
    crossRef = CrossReference()
    packageDecoder = PackageJSONDecoder(crossRef)
    for packageJson in packages:
        logger.info("decoding Package: %s" % packageJson['name'])
        packageDecoder.decodePackage(packageJson)
    routinesTable = vistadb.routines
    routines = routinesTable.find()
    routineDecoder = RoutineJSONDecode(crossRef)
    for routineJson in routines:
        logger.info("decoding Routine: %s" % routineJson['name'])
        routineDecoder.decodeRoutine(routineJson)
    # One decoder handles both globals and subfiles.
    fileManFileDecoder = FileManFileDecode(crossRef)
    globalsTable = vistadb.globals
    globalFiles = globalsTable.find()
    for globalFileJson in globalFiles:
        logger.info("decoding global: %s" % globalFileJson['name'])
        fileManFileDecoder.decodeGlobal(globalFileJson)
    subFilesTable = vistadb.subfiles
    subFiles = subFilesTable.find()
    for subFileJson in subFiles:
        logger.info("decoding subfile: %s" % subFileJson['file_no'])
        fileManFileDecoder.decodeSubFile(subFileJson)
    return crossRef
示例#39
0
def insertJSONFileToMongdb(jsonFilePath, connection, filePattern,
                           dbName, tableName, key, isUnique=True):
    """Insert every JSON file under jsonFilePath matching filePattern into
    the MongoDB collection dbName.tableName via the given connection.

    If key is given, an index on it is ensured first (unique per isUnique).
    Does nothing (returns None) when no file matches.
    """
    globFiles = glob.glob(os.path.join(jsonFilePath, filePattern))
    if not globFiles:  # an empty list is already falsy; no len() needed
        return
    database = connection[dbName]
    table = database[tableName]
    if key:
        table.ensure_index(key, unique=isUnique)
    # 'jsonFileName' instead of 'file' — the original shadowed the builtin.
    for jsonFileName in globFiles:
        # 'with' closes the handle even if json.load or insert raises
        # (the original open/close pair leaked the handle on error).
        with open(jsonFileName, 'rb') as fileHandle:
            logger.info("Reading file %s" % jsonFileName)
            result = table.insert(json.load(fileHandle))
        logger.info("inserting result is %s" % result)
示例#40
0
 def __findTotalSubFileds__(self):
     """Scan the parsed lines for field-note titles and record new ones
     in the class-level totalFieldNotes set, logging each new title.
     """
     # Guard: nothing to scan.  The original used 'pass' here, which made
     # the emptiness check a no-op; an early return is the clear intent.
     if not self._lines:
         return
     indentValue = self.__getDefaultIndentLevel__(self._curFile,
                                                  self.DEFAULT_NAME_INDENT)
     for line in self._lines:
         result = re.search(
             "^ {%d,%d}(?P<Name>[A-Z][^:]+):" %
             (self.DEFAULT_NAME_INDENT, indentValue), line)
         if result:
             name = result.group('Name')
             # All numbered "SCREEN ON FILE n" variants collapse to one title.
             if name.startswith("SCREEN ON FILE "):
                 name = "SCREEN ON FILE"
             if name not in self.totalFieldNotes:
                 logger.info("NEW FIELD NOTE TITLE: [%s]" % name)
                 self.totalFieldNotes.add(name)
示例#41
0
def readIntoDictionary(infileName):
    """Parse a CSV file and group its data rows by their second column.

    The first row is always skipped as a header (regardless of what the
    sniffer reports).  Returns {second-column value: [row, ...]}.
    """
    values = {}
    with open(infileName, "r") as templateData:
        sniffer = csv.Sniffer()
        # Sniff dialect and header from a single sample instead of
        # reading the prefix twice with an intervening seek.
        sample = templateData.read(1024)
        dialect = sniffer.sniff(sample)
        hasHeader = sniffer.has_header(sample)
        logger.info("hasHeader: %s" % hasHeader)
        templateData.seek(0)
        for index, line in enumerate(csv.reader(templateData, dialect)):
            if index == 0:
                continue  # skip the header row
            # setdefault replaces the 'not in values.keys()' double lookup.
            values.setdefault(line[1], []).append(line)
    return values
示例#42
0
def readIntoDictionary(infileName):
    """Read a CSV file and group its data rows by their second column.

    The first row is skipped as a header; returns {col2 value: [row, ...]}.
    """
    grouped = {}
    with open(infileName,"r") as csvData:
        sniffer = csv.Sniffer()
        dialect = sniffer.sniff(csvData.read(1024))
        csvData.seek(0)
        hasHeader = sniffer.has_header(csvData.read(1024))
        logger.info ("hasHeader: %s" % hasHeader)
        csvData.seek(0)
        reader = csv.reader(csvData,dialect)
        for rowIndex, row in enumerate(reader):
            if rowIndex == 0:
                continue  # header row
            groupKey = row[1]
            if groupKey not in grouped.keys():
                grouped[groupKey] = []
            grouped[groupKey].append(row)
    return grouped
示例#43
0
 def parseFileManDbJSONFile(self, dbJsonFile):
     """Load a per-package FileMan DB-call JSON file and attach each
     routine's FileMan globals and FileMan calls to the matching routine
     in the cross reference.
     """
     logger.info("Start parsing JSON file [%s]" % dbJsonFile)
     with open(dbJsonFile, 'r') as jsonFile:
         dbCallJson = json.load(jsonFile)
     for pkgItem in dbCallJson:
         # Walk every routine recorded under this package.
         for rtn in pkgItem['routines']:
             rtnName = rtn['name']
             routine = self._crossRef.getRoutineByName(rtnName)
             if not routine:
                 logger.warn("Cannot find routine [%s]" % rtnName)
                 continue
             self._addFileManGlobals(routine, rtn['Globals'])
             self._addFileManDBCalls(routine, rtn['FileMan calls'])
示例#44
0
 def parseFileManDbJSONFile(self, dbJsonFile):
     """Load a per-package FileMan DB-call JSON file and attach each
     routine's FileMan globals and FileMan calls to the matching routine
     in the cross reference.
     """
     logger.info("Start parsing JSON file [%s]" % dbJsonFile)
     with open(dbJsonFile, 'r') as jsonFile:
         dbCallJson = json.load(jsonFile)
         for pkgItem in dbCallJson:
             """ find all the routines under that package """
             routines = pkgItem['routines']
             for rtn in routines:
                 rtnName = rtn['name']
                 routine = self._crossRef.getRoutineByName(rtnName)
                 if not routine:
                     # Routine listed in the JSON but unknown to the
                     # cross reference; skip it.
                     logger.warn("Can not find routine [%s]" % rtnName)
                     continue
                 fileManGlobals = rtn['Globals']
                 self._addFileManGlobals(routine, fileManGlobals)
                 fileManFileNos = rtn['FileMan calls']
                 self._addFileManDBCalls(routine, fileManFileNos)
示例#45
0
 def __outputIndividualFileManFile__(self, outDir, Global):
     """Write one FileMan file's JSON encoding under two file names.

     The same payload is written twice: once keyed by the (base64-encoded)
     global name and once keyed by the FileMan file number.
     """
     logger.info("Writing Global %s" % Global)
     jsonResult = self._fileManFileEncoder.outputResult(Global)
     globalPath = os.path.join(
         outDir,
         "Global_%s.json" % base64.urlsafe_b64encode(Global.getName()))
     # 'with' guarantees the handle is closed even if a write fails
     # (the original open/close pair leaked the handle on error).
     with open(globalPath, 'wb') as outputFile:
         outputFile.write(jsonResult)
         outputFile.write("\n")
     logger.info("Writing FileManFile %s" % Global.getFileNo())
     fileManPath = os.path.join(outDir,
                                "FileManFile_%s.json" % Global.getFileNo())
     with open(fileManPath, 'wb') as outputFile:
         outputFile.write(jsonResult)
         outputFile.write("\n")
示例#46
0
def insertJSONFileToMongdb(jsonFilePath,
                           connection,
                           filePattern,
                           dbName,
                           tableName,
                           key,
                           isUnique=True):
    """Insert every JSON file under jsonFilePath matching filePattern into
    the MongoDB collection dbName.tableName via the given connection.

    If key is given, an index on it is ensured first (unique per isUnique).
    Does nothing (returns None) when no file matches.
    """
    globFiles = glob.glob(os.path.join(jsonFilePath, filePattern))
    if not globFiles:  # an empty list is already falsy; no len() needed
        return
    database = connection[dbName]
    table = database[tableName]
    if key:
        table.ensure_index(key, unique=isUnique)
    # 'jsonFileName' instead of 'file' — the original shadowed the builtin.
    for jsonFileName in globFiles:
        # 'with' closes the handle even if json.load or insert raises
        # (the original open/close pair leaked the handle on error).
        with open(jsonFileName, 'rb') as fileHandle:
            logger.info("Reading file %s" % jsonFileName)
            result = table.insert(json.load(fileHandle))
        logger.info("inserting result is %s" % result)
示例#47
0
 def generatePackageDependencyGraph(self, package, dependencyList=True):
     """Prepare the dependency (or dependent) graph output for one package.

     Skips packages whose merged dependency list is empty or larger than
     MAX_DEPENDENCY_LIST_SIZE.  NOTE(review): this excerpt ends right
     after creating the output directory; the graph-drawing steps
     presumably follow outside this view.
     """
     # merge the routine and package list
     depPackages, depPackageMerged = mergeAndSortDependencyListByPackage(
         package, dependencyList)
     if dependencyList:
         packageSuffix = "_dependency"
     else:
         packageSuffix = "_dependent"
     packageName = package.getName()
     normalizedName = normalizePackageName(packageName)
     totalPackage = len(depPackageMerged)
     # Nothing to draw, or the graph would be too large to be useful.
     if (totalPackage == 0) or (totalPackage > MAX_DEPENDENCY_LIST_SIZE):
         logger.info("Nothing to do exiting... Package: %s Total: %d " %
                     (packageName, totalPackage))
         return
     try:
         dirName = os.path.join(self._outDir, packageName)
         if not os.path.exists(dirName):
             os.makedirs(dirName)
     # Python 2 except syntax; bail out if the directory cannot be made.
     except OSError, e:
         logger.error("Error making dir %s : Error: %s" % (dirName, e))
         return
示例#48
0
 def generatePackageDependencyGraph(self, package, dependencyList=True):
     """Prepare the dependency (or dependent) graph output for one package.

     Skips packages whose merged dependency list is empty or larger than
     MAX_DEPENDENCY_LIST_SIZE.  NOTE(review): this excerpt ends right
     after creating the output directory; the graph-drawing steps
     presumably follow outside this view.
     """
     # merge the routine and package list
     depPackages, depPackageMerged = mergeAndSortDependencyListByPackage(
                                                                   package,
                                                                   dependencyList)
     if dependencyList:
         packageSuffix = "_dependency"
     else:
         packageSuffix = "_dependent"
     packageName = package.getName()
     normalizedName = normalizePackageName(packageName)
     totalPackage = len(depPackageMerged)
     # Nothing to draw, or the graph would be too large to be useful.
     if  (totalPackage == 0) or (totalPackage > MAX_DEPENDENCY_LIST_SIZE):
         logger.info("Nothing to do exiting... Package: %s Total: %d " %
                      (packageName, totalPackage))
         return
     try:
         dirName = os.path.join(self._outDir, packageName)
         if not os.path.exists(dirName):
             os.makedirs(dirName)
     # Python 2 except syntax; bail out if the directory cannot be made.
     except OSError, e:
         logger.error("Error making dir %s : Error: %s" % (dirName, e))
         return
示例#49
0
def run(args):
    """Build the cross reference from the ICR JSON plus the three
    template-dependency CSV files, then generate all graphs into
    args.outDir.
    """
    logger.info("Parsing ICR JSON file....")
    icrJsonFile = os.path.abspath(args.icrJsonFile)
    parsedICRJSON = parseICRJson(icrJsonFile)
    logger.info("Building cross reference....")
    doxDir = os.path.join(args.patchRepositDir, 'Utilities/Dox')
    # Fold the template-dependency CSVs into the cross reference build.
    templateKwargs = {
        'inputTemplateDeps': readIntoDictionary(args.inputTemplateDep),
        'sortTemplateDeps': readIntoDictionary(args.sortTemplateDep),
        'printTemplateDeps': readIntoDictionary(args.printTemplateDep),
    }
    crossRef = CrossReferenceBuilder().buildCrossReferenceWithArgs(
        args, icrJson=parsedICRJSON, **templateKwargs)
    logger.info("Starting generating graphs....")
    graphGenerator = GraphGenerator(crossRef, args.outDir, doxDir, args.dot)
    graphGenerator.generateGraphs()
    logger.info("End of generating graphs")
示例#50
0
def run(args):
    """Build the cross reference from the ICR JSON plus the three
    template-dependency CSV files, then generate all graphs into
    args.outDir.
    """
    logger.info ("Parsing ICR JSON file....")
    icrJsonFile = os.path.abspath(args.icrJsonFile)
    parsedICRJSON = parseICRJson(icrJsonFile)
    logger.info ("Building cross reference....")
    doxDir = os.path.join(args.patchRepositDir, 'Utilities/Dox')
    # Template-dependency CSVs are folded into the cross reference build.
    crossRef = CrossReferenceBuilder().buildCrossReferenceWithArgs(args,
                                                                   icrJson=parsedICRJSON,
                                                                   inputTemplateDeps=readIntoDictionary(args.inputTemplateDep),
                                                                   sortTemplateDeps=readIntoDictionary(args.sortTemplateDep),
                                                                   printTemplateDeps=readIntoDictionary(args.printTemplateDep)
                                                                   )
    logger.info ("Starting generating graphs....")
    graphGenerator = GraphGenerator(crossRef, args.outDir, doxDir, args.dot)
    graphGenerator.generateGraphs()

    logger.info ("End of generating graphs")
示例#51
0
 def generatePackageDependenciesGraph(self, isDependency=True):
     """Generate dependency (or dependent) graphs for every package.

     isDependency selects the log label here and is forwarded to
     generatePackageDependencyGraph for each package.
     """
     graphKind = "dependencies" if isDependency else "dependents"
     logger.info("Start generating package %s......" % graphKind)
     logger.info("Total Packages: %d" % len(self._allPackages))
     for package in self._allPackages.values():
         self.generatePackageDependencyGraph(package, isDependency)
     logger.info("End of generating package %s......" % graphKind)
示例#52
0
 def generatePackageDependenciesGraph(self, isDependency=True):
     """Generate dependency (or dependent) graphs for every package.

     isDependency selects the log label here and is forwarded to
     generatePackageDependencyGraph for each package.
     """
     # generate all dot files and use dot to generate the image file format
     if isDependency:
         name = "dependencies"
     else:
         name = "dependents"
     logger.info("Start generating package %s......" % name)
     logger.info("Total Packages: %d" % len(self._allPackages))
     for package in self._allPackages.values():
         self.generatePackageDependencyGraph(package, isDependency)
     logger.info("End of generating package %s......" % name)
示例#53
0
 def printAllNamespaces(self):
     """Log every package namespace and every exclusion entry, then warn
     about exclusions whose base namespace is never declared.
     """
     namespaces = set()
     excludeNamespace = set()
     for package in self._crossRef.getAllPackages().itervalues():
         for namespace in package.getNamespaces():
             # A leading "!" marks an exclusion entry.
             bucket = excludeNamespace if namespace.startswith("!") else namespaces
             bucket.add(namespace)
     sortedSet = sorted(namespaces)
     sortedExclude = sorted(excludeNamespace)
     logger.info("Total # of namespace: %d" % len(sortedSet))
     logger.info("Total # of excluded namespaces: %d" % len(sortedExclude))
     logger.info(sortedSet)
     logger.info(sortedExclude)
     for item in excludeNamespace:
         # Strip the "!" and check the base namespace exists.
         if item[1:] not in sortedSet:
             logger.warn("item: %s not in the namespace set" % item[1:])
示例#54
0
    def generatePackageDependenciesGraph(self, isDependency=True):
        """Generate dependency (or dependent) graphs for all packages,
        fanning the per-package work out over a small thread pool.
        """
        # generate all dot files and use dot to generate the image file format
        self._isDependency = isDependency
        if self._isDependency:
            name = "dependencies"
        else:
            name = "dependents"
        logger.info("Start generating package %s......" % name)
        logger.info("Total Packages: %d" % len(self._allPackages))

        # Make the Pool of workers
        pool = ThreadPool(4)
        # Create graphs in their own threads
        pool.map(self._generatePackageDependencyGraph, self._allPackages.values())
        # close the pool and wait for the work to finish
        pool.close()
        pool.join()

        logger.info("End of generating package %s......" % name)
示例#55
0
 def __outputIndividualPackage__(self, outDir, Package):
     """Write one package's JSON encoding to Package_<name>.json."""
     logger.info("Writing Package %s" % Package)
     # Package names may contain spaces or dashes; normalize them to
     # underscores for the output file name.
     safeName = Package.getName().replace(' ', '_').replace('-', '_')
     # 'with' guarantees the handle is closed even if a write fails
     # (the original open/close pair leaked the handle on error).
     with open(os.path.join(outDir, "Package_%s.json" % safeName),
               'wb') as outputFile:
         outputFile.write(self._packageEncoder.outputPackage(Package))
         outputFile.write("\n")
示例#56
0
 def printResult(self):
     """Log the total number of routines known to the cross reference."""
     totalRoutines = len(self._crossRef.getAllRoutines())
     logger.info("Total Routines are %d" % totalRoutines)
示例#57
0
 def _setFieldSpecificData(self, zeroFields, fileField, rootNode,
                          fileSchema, filePointedTo, subFile):
   """Populate type-specific attributes of fileField from its 0-node data.

   Handles four field types: file pointer, sub-file pointer, set of
   codes, and variable file pointer.  Placeholder schema entries are
   created in self._allSchema for files referenced before their own
   definition is seen, and file-to-file pointer edges are recorded via
   self._addToFileDepDict.  zeroFields holds the pieces of the field's
   0-node (presumably caret-split — TODO confirm); rootNode is the
   field's data-dictionary node, whose "V" subtree describes variable
   pointers.
   """
   if fileField.getType() == FileManField.FIELD_TYPE_FILE_POINTER:
     # Piece 3 of the 0-node, when present, is the pointed-to global root.
     fileGlobalRoot = ""
     if len(zeroFields) >= 3:
       fileGlobalRoot = zeroFields[2]
     if filePointedTo:
       if filePointedTo not in self._allSchema:
         """ create a new fileman file """
         self._allSchema[filePointedTo] = Global(fileGlobalRoot,
                                                 filePointedTo,
                                                 "")
       pointedToFile = self._allSchema[filePointedTo]
       assert pointedToFile.isRootFile()
       fileField.setPointedToFile(pointedToFile)
       globalName = pointedToFile.getName()
       # Dependency edges are always recorded from the root file, even
       # when this field lives in a sub-file.
       fileNo = fileSchema.getFileNo()
       if fileSchema.isSubFile():
         fileNo = fileSchema.getRootFile().getFileNo()
       self._addToFileDepDict(fileNo,
                              pointedToFile.getFileNo())
       if fileGlobalRoot:
         # Backfill or sanity-check the global root of the target file.
         if not globalName:
           pointedToFile.setName(fileGlobalRoot)
         elif globalName != fileGlobalRoot:
           logger.warning("%s: FileMan global root mismatch '%s' : '%s'" %
                         (zeroFields, globalName, fileGlobalRoot))
       else:
         logger.info("@TODO, find file global root for # %s" % filePointedTo)
     elif fileGlobalRoot:
       # Only a global root is known; remember it without a file number.
       self._noPointedToFiles[fileGlobalRoot] = Global(fileGlobalRoot)
       logger.info("@TODO, set the file number for %s" % fileGlobalRoot)
     else:
       logger.warn("No pointed to file set for file:%s: field:%r 0-index:%s" %
                    (fileSchema.getFileNo(), fileField, zeroFields))
   elif fileField.getType() == FileManField.FIELD_TYPE_SUBFILE_POINTER:
     if subFile:
       if subFile not in self._allSchema:
         self._allSchema[subFile] = FileManFile(subFile, "", fileSchema)
       subFileSchema = self._allSchema[subFile]
       # Link the sub-file into its parent both ways.
       subFileSchema.setParentFile(fileSchema)
       fileSchema.addFileManSubFile(subFileSchema)
       fileField.setPointedToSubFile(subFileSchema)
     else:
       logger.warn("No subfile is set for file:%s, field:%r 0-index:%s" %
                    (fileSchema.getFileNo(), fileField, zeroFields))
   elif fileField.getType() == FileManField.FIELD_TYPE_SET and not subFile:
     # e.g. "0:NO;1:YES" -> {"0": "NO", "1": "YES"}
     setDict = dict([x.split(':') for x in zeroFields[2].rstrip(';').split(';')])
     fileField.setSetMembers(setDict)
   elif fileField.getType() == FileManField.FIELD_TYPE_VARIABLE_FILE_POINTER:
     if "V" in rootNode: # parsing variable pointer
       vptrs = parsingVariablePointer(rootNode['V'])
       vpFileSchemas = []
       if vptrs:
         for x in vptrs:
           if x not in self._allSchema:
             self._allSchema[x] = Global("", x, "")
           pointedToFile = self._allSchema[x]
           # A variable pointer should target root files only.
           if pointedToFile.isSubFile():
             logger.error("Field: %r point to subFile: %s, parent: %s" %
                          (fileField, pointedToFile,
                           pointedToFile.getParentFile()))
           else:
             fileNo = fileSchema.getFileNo()
             if fileSchema.isSubFile():
               fileNo = fileSchema.getRootFile().getFileNo()
             self._addToFileDepDict(fileNo,
                                    pointedToFile.getFileNo())
           vpFileSchemas.append(self._allSchema[x])
         fileField.setPointedToFiles(vpFileSchemas)
示例#58
0
    # Script entry fragment: parse command-line arguments and build the
    # cross reference from caller-graph log files.  Several alternative
    # flows (XML input, JSON export, MongoDB save/load) are kept below in
    # commented-out form.
    crossRefArgParse = createCrossReferenceLogArgumentParser()
    parser = argparse.ArgumentParser(
                      description='VistA Cross Reference Externalization',
                      parents=[crossRefArgParse])
    parser.add_argument('-O', '--outputDir', required=True,
                        help='Output Directory')
#    parser.add_argument('-I', required=False, dest='inputXMLFile',
#                        help='Input XML File')
    result = parser.parse_args();
#    if result['inputXMLFile']:
#        crossRefXML = CrossRefXMLEncoder()
#        crossRefXML.loadFromXML(result['inputXMLFile'])
#        crossRef = crossRefXML.getCrossReference()
#        crossRef.getPackageByName("Kernel").printResult()
#        exit()
    logger.info( "Starting parsing caller graph log file....")
    crossRefA = CrossReferenceBuilder().buildCrossReference(result)
#    logger.info("Generating Cross-Reference JSON output files")
#    crossRefEncoder = CrossRefJSONEncoder(crossRef)
#    crossRefEncoder.outputCrossRefAsJSON(result.outputDir)
    # now save to mongodb
#    logger.info("Save to mongodb")
#    insertAllJSONFilestoMongodb(result['outputDir'])
    logger.info("Loading CrossRef from mongodb")
    #crossRefB = loadCrossRefFromMongoDB()
    #crossRefB.generateAllPackageDependencies()
    #package = crossRefA.getPackageByName("VA FileMan")
    #package.printResult()
    #package = crossRefB.getPackageByName("VA FileMan")
    #package.printResult()
#    routineA = crossRefA.getRoutineByName("DIC")
示例#59
0
 def __outputIndividualSubFile__(self, outDir, subFile):
     """Write one sub-file's JSON encoding to SubFile_<fileNo>.json."""
     logger.info("Writing SubFile %s" % subFile)
     outPath = os.path.join(outDir, "SubFile_%s.json" % subFile.getFileNo())
     # 'with' guarantees the handle is closed even if a write fails
     # (the original open/close pair leaked the handle on error).
     with open(outPath, 'wb') as outputFile:
         outputFile.write(self._fileManFileEncoder.outputSubFile(subFile))
         outputFile.write("\n")
示例#60
0
 def __outputIndividualGlobal__(self, outDir, Global):
     """Write one global's JSON encoding to Global_<b64 name>.json."""
     logger.info("Writing Global %s" % Global)
     outPath = os.path.join(
         outDir,
         "Global_%s.json" % base64.urlsafe_b64encode(Global.getName()))
     # 'with' guarantees the handle is closed even if a write fails
     # (the original open/close pair leaked the handle on error).
     with open(outPath, 'wb') as outputFile:
         outputFile.write(self._globalEncoder.outputResult(Global))
         outputFile.write("\n")