def generateRoutineDependencyGraph(self, routine, isDependency=True):
     if not routine.getPackage():
         return
     routineName = routine.getName()
     packageName = routine.getPackage().getName()
     if isDependency:
         depRoutines = routine.getCalledRoutines()
         routineSuffix = "_called"
         totalDep = routine.getTotalCalled()
     else:
         depRoutines = routine.getCallerRoutines()
         routineSuffix = "_caller"
         totalDep = routine.getTotalCaller()
     # do not generate a graph if there are no dependency routines or
     # there are more than MAX_DEPENDENCY_LIST_SIZE of them
     if not depRoutines or totalDep > MAX_DEPENDENCY_LIST_SIZE:
         logger.debug("No dependency routines to graph for routine:%s"
                      " package:%s" % (routineName, packageName))
         return
     try:
         dirName = os.path.join(self._outDir, packageName)
         if not os.path.exists(dirName):
             os.makedirs(dirName)
     except OSError as e:
         logger.error("Error making dir %s : Error: %s" % (dirName, e))
         return
 def parsePackagesFile(self, packageFilename):
     result = csv.DictReader(open(packageFilename, "rb"))
     crossRef = self.crossRef
     currentPackage = None
     for row in result:
         packageName = row["Directory Name"]
         if len(packageName) > 0:
             currentPackage = crossRef.getPackageByName(packageName)
             if not currentPackage:
                 logger.debug("Package [%s] not found, adding it" % packageName)
                 crossRef.addPackageByName(packageName)
                 currentPackage = crossRef.getPackageByName(packageName)
             currentPackage.setOriginalName(row["Package Name"])
             vdlId = row["VDL ID"]
             if vdlId:
                 currentPackage.setDocLink(getVDLHttpLinkByID(vdlId))
         else:
             if not currentPackage:
                 logger.warn("row is not under any package: %s" % row)
                 continue
         if len(row["Prefixes"]):
             currentPackage.addNamespace(row["Prefixes"])
         if len(row["Globals"]):
             currentPackage.addGlobalNamespace(row["Globals"])
     logger.info("Total # of Packages is %d" % (len(crossRef.getAllPackages())))
Example #3
 def _appendWordsFieldLine(self, line):
     logger.debug("append line [%s] to word processing field: [%s]", line, self._curField)
     if not isinstance(self._curRecord[self._curField], list):
         preVal = self._curRecord[self._curField]
         self._curRecord[self._curField] = []
         self._curRecord[self._curField].append(preVal)
     self._curRecord[self._curField].append(line.strip())
 def findPackagesAndRoutinesBySource(self, dirName, pattern):
     searchFiles = glob.glob(os.path.join(dirName, pattern))
     logger.info("Total Search Files are %d " % len(searchFiles))
     allRoutines = self.crossRef.getAllRoutines()
     allPackages = self.crossRef.getAllPackages()
     crossReference = self.crossRef
     for file in searchFiles:
         routineName = os.path.basename(file).split(".")[0]
         needRename = crossReference.routineNeedRename(routineName)
         if needRename:
             origName = routineName
             routineName = crossReference.getRenamedRoutineName(routineName)
         if crossReference.isPlatformDependentRoutineByName(routineName):
             continue
         packageName = os.path.dirname(file)
         # extract the package name between "Packages/" and "/Routines" in the path
         packageName = packageName[packageName.index("Packages") + 9 : packageName.index("Routines") - 1]
         crossReference.addRoutineToPackageByName(routineName, packageName)
         if needRename:
             routine = crossReference.getRoutineByName(routineName)
             assert routine
             routine.setOriginalName(origName)
         if ARoutineEx.search(routineName):
             logger.debug("A routine %s should be exempted" % routineName)
     logger.info("Total package is %d and Total Routines are %d" % (len(allPackages), len(allRoutines)))
 def __parseDataDictionaryLogFile__(self, logFileName):
     if not os.path.exists(logFileName):
         logger.error("File: %s does not exist" % logFileName)
         return
     logFileHandle = open(logFileName, "rb")
     baseName = os.path.basename(logFileName)
     fileNo = baseName[:-len(".schema")]
     self._curGlobal = self._crossRef.getGlobalByFileNo(fileNo)
     if not self._curGlobal:
         logger.error("Could not find global based on file# %d" % fileNo)
         return
     for line in logFileHandle:
         # handle the empty line
         line = line.rstrip("\r\n")
         if len(line) == 0: # ignore the empty line
             continue
         section = self.__isSectionHeader__(line)
         if section:
             if section != self.FILEMAN_FIELD_SECTION:
                 logger.debug("Current Section is %d [%s]" % (section, line))
             if self._curSect and self._curParser:
                 self._curParser.onSectionEnd(line, self._curSect, self._curGlobal, self._crossRef)
             self._curSect = section
             self._curParser = self._sectionParserDict.get(self._curSect)
             if self._curParser:
                 self._curParser.onSectionStart(line, self._curSect, self._curGlobal, self._crossRef)
         elif self._curSect and self._curParser:
             self._curParser.parseLine(line, self._curGlobal, self._crossRef)
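The section parsers dispatched above share a small protocol: onSectionStart and onSectionEnd bracket a section, and parseLine handles each body line. A minimal no-op parser satisfying that protocol (the class itself is illustrative; only the method names and signatures are taken from the calls above):

class NoOpSectionParser(object):
    def onSectionStart(self, line, section, Global, CrossReference):
        pass
    def onSectionEnd(self, line, section, Global, CrossReference):
        pass
    def parseLine(self, line, Global, CrossReference):
        pass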
 def __stripFieldAttributes__(self, fType):
     outType = fType
     for nameAttr in self.FieldAttributesInfoList:
         if outType.find(nameAttr[0]) != -1:
             outType = outType.replace(nameAttr[0],"")
     logger.debug("[%s]" % outType)
     return outType.strip()
 def __parseFieldDetails__(self):
     if not self._lines:
         return
     curCaption = None
     curValues = None
     for line in self._lines:
         found = False
         for caption in self.FileFieldCaptionList:
             result = re.search(" +%s ?(?P<Value>.*)" % caption, line)
             if result:
                 if curCaption:
                     self._field.addProp(curCaption, curValues)
                     logger.debug("Add Prop: %s value: [%s]" % (curCaption, curValues))
                 curCaption = caption
                 curValues = []
                 if result.group('Value'):
                     curValues.append(result.group('Value').strip())
                 else:
                     curValues.append("")
                 found = True
                 break
         if not found and curCaption:
             if not curValues: curValues = []
             curValues.append(line.strip())
     if curCaption:
         self._field.addProp(curCaption, curValues)
         logger.debug("Add Prop: %s value: [%s]" % (curCaption, curValues))
Example #8
 def _ignoreKeywordInWordProcessingFields(self, fieldName):
     """ This is a HACK to circuvent the case that there is a keyword value like pair
         in the sub file word processing fields
         the keyword is not part of the subFile, we assume it is part of word processing field
         if any of the parent field already has that field.
     """
     logger.debug("current field is [%s]", self._curField)
     if self._curRecord and fieldName in self._curRecord:
         logger.warn("fieldName: [%s] is already parsed, ignore fields", fieldName)
         return True
     """ This is some special logic to ignore some of the fields in word processing field """
     if fieldName == "ROUTINE":
         recordToCheck = self._curRecord
         if self._curStack:  # we are in subfile mode and this is a word-processing field
             recordToCheck = self._curStack[0][0]
         if "REMOTE PROCEDURE" in recordToCheck:
             logger.warn("Ignore ROUTINE field as it is a REMOTE PROCEDURE type")
             return True
     for stackItem in self._curStack:
         if fieldName in stackItem[0]:
             logger.warn(
                 "fieldName: [%s] is already parsed in [%s], ignore the words fields", fieldName, stackItem[1]
             )
             return True
     return False
 def __parseFieldAttributes__(self, fType):
     for nameAttr in self.FieldAttributesInfoList:
         if fType.find(nameAttr[0]) != -1:
             fType = fType.replace(nameAttr[0],"")
             self._field.__setattr__(nameAttr[1], True)
     fType = fType.strip()
     logger.debug("Final Type Name is %s" % fType)
     self._field.setTypeName(fType)
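FieldAttributesInfoList is not shown in this excerpt; assuming each entry pairs a marker substring with an attribute name, the strip-and-flag behavior of the two methods above reduces to this standalone sketch:

# Hypothetical attribute table; the real FieldAttributesInfoList is
# defined elsewhere in the parser.
FIELD_ATTRIBUTES = [("(Required)", "required"), ("(audited)", "audited")]

def parse_field_attributes(f_type):
    attrs = {}
    for marker, attr_name in FIELD_ATTRIBUTES:
        if marker in f_type:
            f_type = f_type.replace(marker, "")  # strip the marker
            attrs[attr_name] = True              # remember the flag
    return f_type.strip(), attrs

print(parse_field_attributes("FREE TEXT (Required)"))
# -> ('FREE TEXT', {'required': True})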
Example #10
def addToPackageMap(icrEntry, pkgName):
    if 'CUSTODIAL PACKAGE' in icrEntry:
        icrPkg = icrEntry['CUSTODIAL PACKAGE']
        if icrPkg not in pkgMap:
            pkgMap[icrPkg] = pkgName
            logger.debug('[%s] ==> [%s]', icrPkg, pkgName)
        elif pkgMap[icrPkg] != pkgName:
            logger.debug('[%s] mapped to [%s] and [%s]', icrPkg, pkgMap[icrPkg], pkgName)
 def parseLine(self, line, Global, CrossReference):
     logger.debug("Current Description line is [%s]" % line)
     if len(line.strip()) == 0: # assume this is a paragraph break
         logger.debug("Found Break in Description %s" % Global)
         if len(self._curLine) > 0:
             self._lines.append(self._curLine)
             self._curLine = ""
     else:
         self._curLine += " " + line.strip()
 def generateColorLegend(self, isCalled=True):
     command = "\"%s\" -Tpng -o\"%s\" -Tcmapx -o\"%s\" \"%s\"" % (self._dot,
                                                 os.path.join(self._outDir,"colorLegend.png"),
                                                 os.path.join(self._outDir,"colorLegend.cmapx"),
                                                 os.path.join(self._docRepDir,'callerGraph_color_legend.dot'))
     logger.debug("command is %s" % command)
     retCode = subprocess.call(command, shell=True)
     if retCode != 0:
         logger.error("calling dot with command[%s] returns %d" % (command, retCode))
Example #13
    def _startOfNewItem(self, matchObj, line):
        logger.debug("Starting of new item: %s", self._curStack)
        logger.info("Starting of new item: %s", line)
        self._curField = None

        self._rewindStack()
        if self._curRecord:
            self._outObject.append(self._curRecord)
        self._curRecord = {}
        self._findKeyValueInLine(matchObj, line, self._curRecord)
Example #14
 def __handleSuspiousCases__(self, Routine, CrossReference):
     if not self._suspiousLine:
         return
     logger.debug("Handling [%s] with value field [%d] in Routine:[%s]" % (self._varName, self._valueStartIdx, Routine))
     self._varValue = self._varName[self._valueStartIdx - DEFAULT_NAME_FIELD_START_INDEX:]
     self._varName = self._varName[:self._valueStartIdx - DEFAULT_NAME_FIELD_START_INDEX]
     if self._addVarToRoutine:
         self._addVarToRoutine(Routine, CrossReference)
     if self._postParsingRoutine:
         self._postParsingRoutine(Routine, CrossReference)
Example #15
 def _rewindStack(self):
     logger.debug("rewindStack is called")
     while len(self._curStack) > 0:  # we are in subFile Mode
         if not isSubFileField(self._curStack[-1][1], self._curField):
             preStack = self._curStack.pop()
             # logger.debug('pop previous stack item: %s', preStack)
             preStack[0].setdefault(preStack[1], []).append(self._curRecord)
             # logger.debug('reset current record: %s', preStack)
             self._curRecord = preStack[0]
         else:
             logger.debug("in subFile Fields: %s, record: %s", self._curField, self._curRecord)
             break
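Each stack entry is a (parentRecord, subFileFieldName) tuple, so a rewind step reattaches the in-progress record to its parent under the sub-file field. A runnable illustration of a single pop (record contents are made up):

cur_stack = [({"NAME": "ICR-1"}, "GLOBAL REFERENCE")]
cur_record = {"FIELD NUMBER": ".01"}

parent, sub_file_field = cur_stack.pop()
parent.setdefault(sub_file_field, []).append(cur_record)
cur_record = parent
print(cur_record)
# -> {'NAME': 'ICR-1', 'GLOBAL REFERENCE': [{'FIELD NUMBER': '.01'}]}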
Example #16
 def parse(self, inputFilename, outputFilename):
     with open(inputFilename, "r") as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo += 1
             """ get rid of lines that are ignored """
             if self.isIgnoredLine(line):
                 continue
             match = START_OF_RECORD.match(line)
             if match:
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
             if match and match.group("name") in ICR_FILE_KEYWORDS:
                 fieldName = match.group("name")
                 if isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug("field name is: %s", fieldName)
                     logger.debug("cur field is: %s", self._curField)
                     """ Check to see if fieldName is already in the out list """
                     if isWordProcessingField(self._curField):
                         if self._ignoreKeywordInWordProcessingFields(fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack()
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 if len(line.strip()) == 0 and not isWordProcessingField(self._curField):
                     logger.warn("Ignore blank line for current field: [%s]", self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     print "No field associated with line %s: %s " % (self._curLineNo, line)
     logger.info("End of file now")
     if len(self._curStack) > 0:
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info("Add last record: %s", self._curRecord)
         self._outObject.append(self._curRecord)
     # pprint.pprint(self._outObject);
     with open(outputFilename, "w") as out_file:
         json.dump(self._outObject, out_file, indent=4)
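A minimal usage sketch, assuming this method lives on a parser class constructed without arguments (the class name and file names here are hypothetical):

parser = ICRParser()                 # hypothetical class name
parser.parse("ICR.txt", "ICR.json")  # plain-text ICR listing in, JSON out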
Example #17
 def __addVarToRoutine__(self, Routine, CrossReference):
     globalVar = CrossReference.getGlobalByName(self._varName)
     if not globalVar:
        # this is to fix a problem with the naming convention of a top-level global;
        # e.g. ICD9 can be referred to as either ICD9 or ICD9(
        altName = getAlternateGlobalName(self._varName)
        globalVar = CrossReference.getGlobalByName(altName)
        if globalVar:
           logger.debug("Changing global name from %s to %s" % (self._varName, altName))
           self._varName = altName
     Routine.addGlobalVariables(GlobalVariable(self._varName,
                                               self._varPrefix,
                                               self._varValue))
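getAlternateGlobalName is not shown in this excerpt; judging from the comment above and the trailing-parenthesis handling in _addFileManGlobals below, it presumably toggles the trailing "(" of a top-level global name. A hypothetical sketch:

def get_alternate_global_name(name):
    # assumption: flip between the two spellings of a top-level global
    return name[:-1] if name.endswith("(") else name + "("

print(get_alternate_global_name("ICD9"))   # -> ICD9(
print(get_alternate_global_name("ICD9("))  # -> ICD9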
 def _addFileManGlobals(self, routine, fileManGlobals):
     for fileManGbl in fileManGlobals:
         fileManFile = self._crossRef.getGlobalByName(fileManGbl)
         if not fileManFile and fileManGbl[-1] == '(':
             fileManGblAlt = fileManGbl[:-1]
             fileManFile = self._crossRef.getGlobalByName(fileManGblAlt)
         if fileManFile:
             logger.debug("Classic: Adding fileMan:[%s] to routine:[%s]" %
                 (fileManFile, routine.getName()))
             routine.addFilemanDbCallGlobal(fileManFile)
         else: # ignore non-fileman global, could be a false positive
             logger.error("global [%s] is not a valid Fileman file for"
                          " routine %s" % (fileManGbl, routine))
             continue
Example #19
 def _icrSubFileToHtml(self, output, icrJson, subFile):
     logger.debug('subFile is %s', subFile)
     # TODO: Is 'icrJson' the correct name for this variable?
     logger.debug('icrJson is %s', icrJson)
     fieldList = SUBFILE_FIELDS[subFile]
     if subFile not in fieldList:
         fieldList.append(subFile)
     for icrEntry in icrJson:
         output.write ("<li>\n")
         for field in fieldList:
             if field in icrEntry: # we have this field
                 value = icrEntry[field]
                 logger.debug('current field is %s', field)
                 if isSubFile(field) and field != subFile: # avoid recursive subfile for now
                     logger.debug('field is a subfile %s', field)
                     output.write ("<dl><dt>%s:</dt>\n" % field)
                     output.write ("<dd>\n")
                     output.write ("<ol>\n")
                     self._icrSubFileToHtml(output, value, field)
                     output.write ("</ol>\n")
                     output.write ("</dd></dl>\n")
                     continue
                 value = self._convertIndividualFieldValue(field, icrEntry, value)
                 output.write ("<dt>%s:  &nbsp;&nbsp;%s</dt>\n" % (field, value))
         output.write ("</li>\n")
Example #20
def getFileManFileHRefLink(fileNo, icrEntry, **kargs):
    crossRef = None
    if 'crossRef' in kargs:
        crossRef = kargs['crossRef']
    if crossRef:
        fileInfo = crossRef.getGlobalByFileNo(fileNo)
        if fileInfo:
            linkName = getGlobalHtmlFileNameByName(fileInfo.getName())
            logger.debug('link is [%s]', linkName)
            # addToPackageMap(icrEntry, fileInfo.getPackage().getName())
            return '<a href=\"%s%s\">%s</a>' % (DOX_URL, linkName, fileNo)
        else:
            logger.debug('Can not find file: [%s]', fileNo)
    return fileNo
def main():
    crossRefParse = createCrossReferenceLogArgumentParser()
    parser = argparse.ArgumentParser(
          description='VistA Cross-Reference Builder',
          parents=[crossRefParse])
    parser.add_argument('-pj', '--pkgDepJson',
                        help='Output JSON file for package dependencies')
    result = parser.parse_args()

    initLogging(result.logFileDir, "GeneratePackageDep.log")
    logger.debug(result)

    crossRefBlder = CrossReferenceBuilder()
    crossRef = crossRefBlder.buildCrossReferenceWithArgs(result)
    crossRef.generateAllPackageDependencies()
    outputAllPackageDependency(crossRef, result.pkgDepJson)
 def __parsingSubFileDescription__(self):
     description = None
     desPos = -1
     indentValue = self.__getDefaultIndentLevel__(self._pointedToSubFile,
                                                  self.DEFAULT_VALUE_INDENT)
     for index in range(len(self._lines)):
         logger.debug("%s " % self._lines[index])
         if desPos == -1:
             desPos = self._lines[index].find("DESCRIPTION:")
         else:
             if re.search("^ {%d,%d}[^ ]" % (self.DEFAULT_VALUE_INDENT, indentValue), self._lines[index]):
                 if not description: description = []
                 description.append(self._lines[index].strip())
             else:
                 break
     self._pointedToSubFile.setDescription(description)
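The regex built above accepts description lines whose indentation falls between DEFAULT_VALUE_INDENT and the computed indent level; a line outside that window ends the description. A quick check with assumed bounds:

import re

lo, hi = 10, 15  # assumed DEFAULT_VALUE_INDENT and indentValue
pattern = "^ {%d,%d}[^ ]" % (lo, hi)
print(bool(re.search(pattern, " " * 12 + "continuation text")))  # True
print(bool(re.search(pattern, " " * 4 + "NEXT SECTION:")))       # False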
Example #23
 def _parseSchemaField(self, fieldNo, rootNode, fileSchema):
   if '0' not in rootNode:
     logger.warn('%s does not have a 0 subscript' % rootNode)
     return None
   zeroFields = rootNode["0"].value
   if not zeroFields:
     logger.warn("No value: %s for %s" % (zeroFields, rootNode['0']))
     return None
   zeroFields = zeroFields.split('^')
   if len(zeroFields) < 2:
     return FileManFieldFactory.createField(fieldNo, zeroFields[0],
                                            FileManField.FIELD_TYPE_NONE, None)
   types, specifier, filePointedTo, subFile = \
       self.parseFieldTypeSpecifier(zeroFields[1])
   location = None
   if len(zeroFields) >= 4 and zeroFields[3]:
     location = zeroFields[3].strip(' ')
     if location == ';': # No location information
       location = None
     elif location.split(';')[-1] == '0': # 0 means multiple
       multipleType = FileManField.FIELD_TYPE_SUBFILE_POINTER
       if not types:
         types = [multipleType]
       if multipleType in types and types[0] != multipleType:
         types.remove(multipleType)
         types.insert(0, multipleType)
         if not subFile: subFile = filePointedTo
   if not types:
     logger.debug('Cannot determine the type for %s, fn: %s, file:%s' %
                  (zeroFields, fieldNo, fileSchema.getFileNo()))
     types = [FileManField.FIELD_TYPE_NONE]
   if types and types[0]  == FileManField.FIELD_TYPE_SUBFILE_POINTER:
     if subFile and subFile == fileSchema.getFileNo():
       logger.warning("Recursive subfile pointer for %s" % subFile)
       types = [FileManField.FIELD_TYPE_NONE]
   fileField = FileManFieldFactory.createField(fieldNo, zeroFields[0],
                                               types[0], location)
   if specifier:
     fileField.setSpecifier(specifier)
   self._setFieldSpecificData(zeroFields, fileField, rootNode,
                             fileSchema, filePointedTo, subFile)
   return fileField
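The "0" node consumed above is the standard FileMan field descriptor: a "^"-delimited string whose pieces supply the field name (piece 1), the type specifier (piece 2), and the location (piece 4). A worked split on an illustrative value:

zero_node = "NAME^RF^^0;1"  # illustrative ^DD zero-node value
pieces = zero_node.split("^")
print(pieces[0])  # field name     -> NAME
print(pieces[1])  # type specifier -> RF (required free text)
print(pieces[3])  # location       -> 0;1 (node;piece)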
 def _addFileManDBCalls(self, routine, callLists):
     for callDetail in callLists:
         if self.isFunctionIgnored(callDetail):
             logger.debug("Ignore call detail %s" % callDetail)
             continue
         fnIdx = callDetail.find('(')
         if fnIdx < 0:
             logger.error("Can not extract fileman number from %s" %
                 callDetail)
             continue
         callTag = callDetail[:fnIdx]
         fileNo = callDetail[fnIdx+1:]
         fileManFile = self._crossRef.getGlobalByFileNo(fileNo)
         if fileManFile:
             logger.debug("FileMan: Adding fileMan:[%s] to routine:[%s]" %
                 (fileNo, routine.getName()))
             routine.addFilemanDbCallGlobal(fileManFile, callTag)
         else:
             if self._crossRef.isFileManSubFileByFileNo(fileNo): # subfile
                 subFile = self._crossRef.getFileManSubFileByFileNo(fileNo)
                 rootFile = self._crossRef.getSubFileRootByFileNo(fileNo)
                 assert rootFile
                 logger.debug("FileMan: Adding subFile:[%s] to routine:[%s]" %
                     (subFile, routine.getName()))
                 routine.addFilemanDbCallGlobal(subFile, callTag)
             else:
                 logger.error("file #%s[%s] is not a valid fileman file, for"
                     " routine [%s]" % (fileNo, callDetail, routine))
Example #25
def getRoutineHRefLink(rtnName, icrEntry, **kargs):
    crossRef = None
    if 'crossRef' in kargs:
        crossRef = kargs['crossRef']
    if crossRef:
        routine = crossRef.getRoutineByName(rtnName)
        if routine:
            logger.debug('Routine Name is %s, package: %s', routine.getName(), routine.getPackage())
            # addToPackageMap(icrEntry, routine.getPackage().getName())
            return '<a href=\"%s%s\">%s</a>' % (dox_url, getRoutineHtmlFileName(routine.getName()), rtnName)
        else:
            logger.debug('Can not find routine [%s]', rtnName)
            logger.debug('After Categorization: routine: [%s], info: [%s]', rtnName, crossRef.categorizeRoutineByNamespace(rtnName))
    return rtnName
 def __createFieldByType__(self, fieldNo, fType, fName, fLocation, line, Global, CrossReference):
     logger.debug("Current Type is [%s]" % fType)
     result = self.UNDEFINED_POINTER.search(fType)
     if result:
         self._field = FileManFieldFactory.createField(fieldNo, fName,
                            FileManField.FIELD_TYPE_FILE_POINTER, fLocation)
         return
     result = self.POINTER_TO_REGEX.search(fType)
     if result:
         fileNo = result.group('File')
         filePointedTo = CrossReference.getGlobalByFileNo(fileNo)
         self._field = FileManFieldFactory.createField(fieldNo, fName,
                            FileManField.FIELD_TYPE_FILE_POINTER, fLocation)
         if not filePointedTo:
             logger.error("Could not find file pointed to [%s], [%s], line:[%s]" % (fileNo, self._curFile, line))
         else:
             self._field.setPointedToFile(filePointedTo)
         return
     # deal with file pointer to subFiles
     result = self.SUBFILE_REGEX.search(fType)
     if result:
         # create a field for sub file type
         self._field = FileManFieldFactory.createField(fieldNo, fName,
                             FileManField.FIELD_TYPE_SUBFILE_POINTER, fLocation)
         fileNo = result.group('File')
         logger.debug("Pointer to subFile %s" % fileNo)
         subFile = Global.getSubFileByFileNo(fileNo)
         if not subFile: # this is a new subfile
             subFile = FileManFile(fileNo, fName, self._curFile)
             self._curFile.addFileManSubFile(subFile)
             logger.debug("Added subFile %s to File %s" % (fileNo, self._curFile.getFileNo()))
             if self._isSubFile:
                 Global.addFileManSubFile(subFile)
         self._field.setPointedToSubFile(subFile)
         CrossReference.addFileManSubFile(subFile)
         return
     for (key, value) in self.StringTypeMappingDict.iteritems():
         if fType.startswith(key):
             self._field = FileManFieldFactory.createField(fieldNo, fName, value, fLocation)
             break
     if not self._field:
       # double check the loc and type
       if line.find(fType) > self.MAXIMIUM_TYPE_START_INDEX:
           fType = line[self.MAXIMIUM_TYPE_START_INDEX:]
           if fLocation:
               fLocation = line[line.find(fLocation):self.MAXIMIUM_TYPE_START_INDEX]
           logger.warn("new Type is [%s], loc is [%s]" % (fType, fLocation))
           self.__createFieldByType__(fieldNo, fType, fName, fLocation, line, Global, CrossReference)
     assert self._field, "Could not find the right type for %s, %s, %s, %s, %s" % (fType, fLocation, fieldNo, line, self._curFile.getFileNo())
 def parseLine(self, line, Global, CrossReference):
     assert self._global
     strippedLine = line.rstrip(" ")
     if len(strippedLine) == 0:
         return
     value = strippedLine[self.POINTED_TO_BY_VALUE_INDEX:]
     logger.debug("Parsing line [%s]" % value)
     result = self.POINTED_TO_BY_VALUE.search(value)
     if result:
         fileManNo = result.group("FileNo")
         fieldNo = result.group('fieldNo')
         subFileNo = result.group('subFieldNo')
         logger.debug("File # %s, field # %s, sub-field # %s" % (fileManNo, fieldNo, subFileNo))
         pointedByGlobal = CrossReference.getGlobalByFileNo(fileManNo)
         if pointedByGlobal:
             self._global.addPointedToByFile(pointedByGlobal, fieldNo, subFileNo)
             logger.debug("added global to pointed list: %s, %s, %s" %
                         (fileManNo, fieldNo, subFileNo))
         else:
             logger.warning("Could not find global based on %s, %s" %
                            (fileManNo, result.group("Name")))
     else:
         logger.error("Could not parse pointer reference [%s] in file [%s]" % (line, self._global.getFileNo()))
Example #28
def _generateICRSummaryPageImpl(inputJson, listName, pkgName, date, outDir,
                                crossRef, isForAll=False):
    listName = listName.strip()
    pkgName = pkgName.strip()
    pkgHtmlName = pkgName
    outFilename = os.path.join(outDir, "%s-%s.html" % (pkgName, listName))
    if not isForAll:
        mappedPkgName = crossRef.getMappedPackageName(pkgName)
        if mappedPkgName is not None:
            pkgName = mappedPkgName
        pkgHtmlName = pkgName + '-ICR.html'
        outFilename = "%s/%s" % (outDir, pkgHtmlName)
    with open(outFilename, 'w+') as output:
        output.write("<html>\n")
        tName = "%s-%s" % (listName.replace(' ', '_'), pkgName.replace(' ', '_'))
        useAjax = _useAjaxDataTable(len(inputJson))
        columnNames = [x[0] for x in SUMMARY_LIST_FIELDS]
        searchColumns = ['IA #', 'Name', 'Custodial Package',
                         'Date Created', 'File #', 'Remote Procedure',
                         'Routine', 'Date Activated', 'General Description']
        hideColumns = ['General Description']
        if useAjax:
            ajaxSrc = '%s_array.txt' % pkgName
            outputLargeDataListTableHeader(output, ajaxSrc, tName,
                                           columnNames, searchColumns,
                                           hideColumns)
        else:
            outputDataListTableHeader(output, tName, columnNames,
                                      searchColumns, hideColumns)
        output.write("<body id=\"dt_example\">")
        output.write("""<div id="container" style="width:80%">""")

        if isForAll:
            output.write("<title id=\"pageTitle\">%s %s</title>" % (pkgName, listName))
        else:
            output.write("<h2 align=\"right\"><a href=\"./All-%s.html\">"
                         "All %s</a></h2>" % (listName, listName))
            output.write("<h1>Package: %s %s</h1>" % (pkgName, listName))
        # pkgLinkName = _getPackageHRefLink(pkgName)
        outputDataTableHeader(output, columnNames, tName)
        outputDataTableFooter(output, columnNames, tName)
        """ table body """
        output.write("<tbody>\n")
        if not useAjax:
            """ Now convert the ICR Data to Table data """
            for icrSummary in inputJson:
                output.write("<tr>\n")
                for item in icrSummary:
                    #output.write("<td class=\"ellipsis\">%s</td>\n" % item)
                    output.write("<td>%s</td>\n" % item)
                output.write("</tr>\n")
        else:
            logger.debug("Ajax source file: %s" % ajaxSrc)
            """ Write out the data file in JSON format """
            outJson = {"aaData": []}
            with open(os.path.join(outDir, ajaxSrc), 'w') as ajaxOut:
                outArray =  outJson["aaData"]
                for icrSummary in inputJson:
                    outArray.append(icrSummary)
                json.dump(outJson, ajaxOut)
        output.write("</tbody>\n")
        output.write("</table>\n")
        if date is not None:
            link = "https://foia-vista.osehra.org/VistA_Integration_Agreement/"
            output.write("<a href=\"%s\">Generated from %s IA Listing Descriptions</a>" % (link, date))
        output.write("</div>\n")
        output.write("</div>\n")
        output.write ("</body></html>\n")
  def findGlobalsBySourceV2(self, dirName, pattern):
    searchFiles = glob.glob(os.path.join(dirName, pattern))
    logger.info("Total Search Files are %d " % len(searchFiles))
    crossReference = self.crossRef
    allGlobals = crossReference.getAllGlobals()
    allPackages = crossReference.getAllPackages()
    skipFile = []
    fileNoSet = set()
    for file in searchFiles:
      packageName = os.path.dirname(file)
      # extract the package name between "Packages/" and "/Globals" in the path
      packageName = packageName[packageName.index("Packages") + 9:packageName.index("Globals") - 1]
      if not crossReference.hasPackage(packageName):
        crossReference.addPackageByName(packageName)
      package = allPackages.get(packageName)
      zwrFile = open(file, 'r')
      lineNo = 0
      fileName = os.path.basename(file)
      result = ZWR_FILENO_REGEX.search(fileName)
      if result:
        fileNo = result.group('fileNo')
        if fileNo.startswith('0'): fileNo = fileNo[1:]
        globalDes = result.group('des')
      else:
        result = ZWR_NAMESPACE_REGEX.search(fileName)
        if result:
            namespace = result.group('namespace')
#                    package.addGlobalNamespace(namespace)
            continue
        else:
            continue
      globalName = "" # find out the global name by parsing the global file
      logger.debug("Parsing file: %s" % file)
      for line in zwrFile:
        if lineNo == 0:
          globalDes = line.strip()
          # Removing the extra text in the header of the ZWR file
          # to tell if it needs to be added or skipped
          globalDes = globalDes.replace("OSEHRA ZGO Export: ",'')
          if globalDes.startswith("^"):
            logger.info("No Description: Skip this file: %s" % file)
            skipFile.append(file)
            namespace = globalDes[1:]
            package.addGlobalNamespace(namespace)
            break
        if lineNo >= 2:
          info = line.strip().split('=')
          globalName = info[0]
          detail = info[1].strip("\"")
          if globalName.find(',') > 0:
              result = globalName.split(',')
              if len(result) == 2 and result[1] == "0)":
                  globalName = result[0]
                  break
          elif globalName.endswith("(0)"):
              globalName = globalName.split('(')[0]
              break
          else:
              continue
        lineNo = lineNo + 1
      if not fileNo:
        if file not in skipFile:
          logger.warn("Warning: No FileNo found for file %s" % file)
        continue
      globalVar = Global(globalName, fileNo, globalDes,
                         allPackages.get(packageName))
      try:
        fileNum = float(globalVar.getFileNo())
      except ValueError as es:
        logger.error("error: %s, globalVar:%s file %s" % (es, globalVar, file))
        continue
#            crossReference.addGlobalToPackage(globalVar, packageName)
      # only add to allGlobals dict as we have to change the package later on
      if globalVar.getName() not in allGlobals:
        allGlobals[globalVar.getName()] = globalVar
      if fileNo not in fileNoSet:
        fileNoSet.add(fileNo)
      else:
        logger.error("Duplicated file No [%s,%s,%s,%s] file:%s " %
                      (fileNo, globalName, globalDes, packageName, file))
      zwrFile.close()
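The global name is recovered from the first "(0)" node in the ZWR body; a worked example of that split logic on an illustrative line:

line = '^DIC(0)="FILE^9.6I^^"'  # illustrative ZWR data line
info = line.strip().split("=")
global_name = info[0]
if global_name.endswith("(0)"):
    global_name = global_name.split("(")[0]
print(global_name)  # -> ^DIC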
Example #30
            gitCommand = "\"" + result.git + "\"" + " rev-parse --verify HEAD"
            result = subprocess.check_output(gitCommand, shell=True)
            sha1Key = result.strip()
        else:
            sha1Key = "Non-Git Directory"
        file.write("""{
        "date": "%s",
        "sha1": "%s"
        }""" % (datetime.today().date(), sha1Key))


def createArgParser():
    import argparse
    parser = argparse.ArgumentParser(description='Generate Repository Info')
    parser.add_argument('-mr', '--MRepositDir', required=True,
                        help='VistA M Component Git Repository Directory')
    parser.add_argument('-outputfile', required=True,
                        help='Full path to output file')
    parser.add_argument('-lf', '--logFileDir', required=True,
                        help='Logfile directory')
    parser.add_argument('-git', required=True, help='Git executable')
    return parser


if __name__ == '__main__':
    parser = createArgParser()
    result = parser.parse_args()
    initLogging(result.logFileDir, "GenerateRepoInfo.log")
    logger.debug(result)
    run(result)
Example #31
 def _startOfSubFile(self, match, line):
     """
         for start of the sub file, we need to add a list element to the current record if it not there
         reset _curRecord to be a new one, and push old one into the stack
     """
     subFile = match.group("name")
     logger.debug("Start parsing subFile: %s, %s", subFile, line)
     while len(self._curStack) > 0:  # we are in subfile mode
         prevSubFile = self._curStack[-1][1]
         if prevSubFile == subFile:  # just continue with more of the same subfile
             self._curStack[-1][0].setdefault(subFile, []).append(self._curRecord)  # append the previous result
             logger.debug("append previous record the current stack")
             break
         else:  # this is a different subfile # now check if it is a nested subfile
             if isSubFileField(prevSubFile, subFile):  # this is a nested subFile, push to stack
                 logger.debug("Nested subFile, push to the stack")
                 self._curStack.append((self._curRecord, subFile))
                 logger.debug("Nested subFile, stack is %s", self._curStack)
                 break
             else:  # this is a different subFile now:
                 logger.debug("different subFile")
                 preStack = self._curStack.pop()
                 logger.debug("Pop stack")
                 preStack[0].setdefault(preStack[1], []).append(self._curRecord)
                 self._curRecord = preStack[0]
                 logger.debug("different subFile, stack is %s", self._curStack)
     if len(self._curStack) == 0:
         self._curStack.append(
             (self._curRecord, subFile)
         )  # push a tuple, the first is the record, the second is the subFile field
         # logger.debug('push to stack: %s', self._curStack)
     self._curRecord = {}
     self._findKeyValueInLine(match, line, self._curRecord)
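A runnable mini-trace of the "same sub-file continues" branch above (record contents made up): when another record of the same sub-file starts, the finished record is appended under the parent's sub-file field before a fresh one begins.

stack = []
record = {"NAME": "ICR-1"}

# start of sub-file "GLOBAL REFERENCE": push the parent, start a new record
stack.append((record, "GLOBAL REFERENCE"))
record = {"GLOBAL REFERENCE": "DIC("}

# a second "GLOBAL REFERENCE" starts: append the finished record to its parent
stack[-1][0].setdefault("GLOBAL REFERENCE", []).append(record)
record = {"GLOBAL REFERENCE": "DPT("}
print(stack[-1][0])
# -> {'NAME': 'ICR-1', 'GLOBAL REFERENCE': [{'GLOBAL REFERENCE': 'DIC('}]}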
Example #32
def main():
    parser = createArgParser()
    result = parser.parse_args()
    initLogging(result.logFileDir, "FileManGlobalDataParser.log")
    logger.debug(result)
    run(result)