Exemplo n.º 1
0
 def parsePackagesFile(self, packageFilename):
   """Parse the packages CSV file and populate self.crossRef.

   Each row with a 'Directory Name' value starts (or reuses) a package;
   rows without one contribute prefixes/globals to the most recently
   seen package.  Rows seen before any package are logged and skipped.

   Fixes: the CSV file handle was never closed (now a with-block) and an
   unused 'index' local was removed.
   """
   crossRef = self.crossRef
   currentPackage = None
   # 'rb': the csv module under Python 2 expects a binary-mode file object
   with open(packageFilename, 'rb') as packageFile:
     for row in csv.DictReader(packageFile):
       packageName = row['Directory Name']
       if packageName:
         currentPackage = crossRef.getPackageByName(packageName)
         if not currentPackage:
           crossRef.addPackageByName(packageName)
         currentPackage = crossRef.getPackageByName(packageName)
         currentPackage.setOriginalName(row['Package Name'])
         vdlId = row['VDL ID']
         if vdlId and len(vdlId):
           currentPackage.setDocLink(getVDLHttpLinkByID(vdlId))
       else:
         if not currentPackage:
           logger.warn("row is not under any package: %s" % row)
           continue
       if len(row['Prefixes']):
         currentPackage.addNamespace(row['Prefixes'])
       if len(row['Globals']):
         currentPackage.addGlobalNamespace(row['Globals'])
   logger.info("Total # of Packages is %d" % (len(crossRef.getAllPackages())))
Exemplo n.º 2
0
 def __createFieldByType__(self, fieldNo, fType, fName, fLocation, line,
                           Global, CrossReference):
     """Set self._field based on the textual field type *fType*.

     Tries, in order: an "undefined pointer" pattern, a pointer-to-file
     pattern, a subfile pattern, then prefix matches against
     StringTypeMappingDict.  If nothing matched and the type text appears
     past MAXIMIUM_TYPE_START_INDEX in *line*, the line is re-split and
     the method retries itself recursively.  Asserts that a field was
     ultimately created.
     """
     logger.debug("Current Type is [%s]" % fType)
     result = self.UNDEFINED_POINTER.search(fType)
     if result:
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_FILE_POINTER,
             fLocation)
         return
     result = self.POINTER_TO_REGEX.search(fType)
     if result:
         fileNo = result.group('File')
         filePointedTo = CrossReference.getGlobalByFileNo(fileNo)
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_FILE_POINTER,
             fLocation)
         if not filePointedTo:
             # pointer target unknown: keep the field but log the gap
             logger.error(
                 "Could not find file pointed to [%s], [%s], line:[%s]" %
                 (fileNo, self._curFile, line))
         else:
             self._field.setPointedToFile(filePointedTo)
         return
     # deal with file pointer to subFiles
     result = self.SUBFILE_REGEX.search(fType)
     if result:
         # create a field for sub file type
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_SUBFILE_POINTER,
             fLocation)
         fileNo = result.group('File')
         logger.debug("Pointer to subFile %s" % fileNo)
         subFile = Global.getSubFileByFileNo(fileNo)
         if not subFile:
             # first time this subfile is seen: register it on the parent
             subFile = FileManFile(fileNo, fName, self._curFile)
             self._curFile.addFileManSubFile(subFile)
             logger.debug("Added subFile %s to File %s" %
                          (fileNo, self._curFile.getFileNo()))
             if self._isSubFile:
                 Global.addFileManSubFile(subFile)
         self._field.setPointedToSubFile(subFile)
         return
     # fall back to prefix matching for the simple (string-like) types
     for (key, value) in self.StringTypeMappingDict.iteritems():
         if fType.startswith(key):
             self._field = FileManFieldFactory.createField(
                 fieldNo, fName, value, fLocation)
             break
     if not self._field:
         # double check the loc and type
         if line.find(fType) > self.MAXIMIUM_TYPE_START_INDEX:
             fType = line[self.MAXIMIUM_TYPE_START_INDEX:]
             if fLocation:
                 fLocation = line[line.find(fLocation):self.
                                  MAXIMIUM_TYPE_START_INDEX]
             logger.warn("new Type is [%s], loc is [%s]" %
                         (fType, fLocation))
             # retry with the re-split type/location (sets self._field)
             self.__createFieldByType__(fieldNo, fType, fName, fLocation,
                                        line, Global, CrossReference)
     assert self._field, "Could not find the right type for %s, %s, %s, %s, %s" % (
         fType, fLocation, fieldNo, line, self._curFile.getFileNo())
Exemplo n.º 3
0
 def parsePackagesFile(self, packageFilename):
     """Parse the packages CSV file and populate self.crossRef.

     Rows carrying a 'Directory Name' start (or reuse) a package; rows
     without one add prefixes/globals to the most recent package, and are
     logged and skipped if no package has been seen yet.

     Fixes: the CSV file handle was never closed (now a with-block) and
     the unused 'index' local was removed.
     """
     crossRef = self.crossRef
     currentPackage = None
     # 'rb': the csv module under Python 2 expects a binary-mode file object
     with open(packageFilename, 'rb') as packageFile:
         for row in csv.DictReader(packageFile):
             packageName = row['Directory Name']
             if len(packageName) > 0:
                 currentPackage = crossRef.getPackageByName(packageName)
                 if not currentPackage:
                     logger.debug("Package [%s] not found" % packageName)
                     crossRef.addPackageByName(packageName)
                 currentPackage = crossRef.getPackageByName(packageName)
                 currentPackage.setOriginalName(row['Package Name'])
                 vdlId = row['VDL ID']
                 if vdlId and len(vdlId):
                     currentPackage.setDocLink(getVDLHttpLinkByID(vdlId))
             else:
                 if not currentPackage:
                     logger.warn("row is not under any package: %s" % row)
                     continue
             if len(row['Prefixes']):
                 currentPackage.addNamespace(row['Prefixes'])
             if len(row['Globals']):
                 currentPackage.addGlobalNamespace(row['Globals'])
     logger.info("Total # of Packages is %d" %
                 (len(crossRef.getAllPackages())))
Exemplo n.º 4
0
def getPackageHRefLink(pkgName, icrEntry, **kargs):
    """Return an HTML anchor linking *pkgName* to its package page.

    Falls back to the bare package name when no mapping can be found.
    When a mapping is discovered through the cross-reference it is also
    recorded via addToPackageMap for later lookups.
    """
    global pgkUpperCaseNameDict
    if pkgName in pkgMap:
        # already mapped: link straight to the known package page
        return '<a href=\"%s%s\">%s</a>' % (
            DOX_URL, getPackageHtmlFileName(pkgMap[pkgName]), pkgName)
    crossRef = kargs.get('crossRef')
    if crossRef:
        # lazily build the upper-cased name lookup on first use
        if not pgkUpperCaseNameDict:
            for name in crossRef.getAllPackages().iterkeys():
                pgkUpperCaseNameDict[name.upper()] = name
        upperName = normalizeName(pkgName).upper()
        if upperName in pgkUpperCaseNameDict:
            realName = pgkUpperCaseNameDict[upperName]
            addToPackageMap(icrEntry, realName)
            return '<a href=\"%s%s\">%s</a>' % (
                DOX_URL, getPackageHtmlFileName(realName), pkgName)
        # try the raw name, then progressively normalized variants
        pkg = crossRef.getPackageByName(pkgName)
        if not pkg:
            pkg = crossRef.getPackageByName(normalizeName(pkgName).title())
        if not pkg:
            pkg = crossRef.getPackageByName(normalizeName(pkgName))
        if pkg:
            addToPackageMap(icrEntry, pkg.getName())
            return '<a href=\"%s%s\">%s</a>' % (
                DOX_URL, getPackageHtmlFileName(pkg.getName()), pkgName)
        logger.warn('Can not find mapping for package: [%s]', pkgName)
    return pkgName
Exemplo n.º 5
0
 def _updateHL7Reference(self):
   """Categorize protocol (file 101) entries by namespace.

   Non-HL7 protocols are appended to their package's ``protocol`` list;
   'event driver'/'subscriber' entries go to the package's ``hl7`` list,
   and any MUMPS routines referenced by fields 771/772 are recorded in
   self._rtnRefDict under file '101'.

   Fix: the local previously named ``type`` shadowed the builtin; the
   redundant if/elif pair (the elif condition was always true in the
   else-path) is restructured as guard clauses.  Behavior is unchanged.
   """
   protocol = self._glbData['101']
   for ien in sorted(list(protocol.dataEntries), key=lambda x: float(x)):
     protocolEntry = protocol.dataEntries[ien]
     if '4' not in protocolEntry.fields:
       continue
     entryType = protocolEntry.fields['4'].value
     entryName = protocolEntry.name
     namespace, package = \
       self._crossRef.__categorizeVariableNameByNamespace__(entryName)
     if entryType != 'event driver' and entryType != 'subscriber':
       if package:
         package.protocol.append(protocolEntry)
       continue
     # only care about the event driver and subscriber type
     if package:
       package.hl7.append(protocolEntry)
     elif '12' in protocolEntry.fields: # check the package it belongs to
       pass
     else:
       logger.warn("Cannot find a package for HL7: %s" % entryName)
     for field in ('771', '772'):
       if field not in protocolEntry.fields:
         continue
       hl7Rtn = protocolEntry.fields[field].value
       if not hl7Rtn:
         continue
       # record every routine referenced by the HL7 entry/exit routine field
       for rtn, tag, pos in getMumpsRoutine(hl7Rtn):
         hl7Info = {"name": entryName,
                    "ien": ien}
         if tag:
           hl7Info['tag'] = tag
         self._rtnRefDict.setdefault(rtn, {}).setdefault('101', []).append(hl7Info)
Exemplo n.º 6
0
def getPackageHRefLink(pkgName, icrEntry, **kargs):
    """Return an HTML anchor for *pkgName*, or the bare name when unmapped.

    Resolution order: the pkgMap cache, the upper-cased name index (built
    lazily from the cross-reference), then getPackageByName on the raw,
    title-cased and normalized spellings.  Successful cross-reference hits
    are recorded through addToPackageMap.
    """
    if pkgName in pkgMap:
        return '<a href=\"%s%s\">%s</a>' % (
            dox_url, getPackageHtmlFileName(pkgMap[pkgName]), pkgName)
    crossRef = kargs.get('crossRef')
    if crossRef:
        if not pgkUpperCaseNameDict:
            # first call: index every package name by its upper-cased form
            for name in crossRef.getAllPackages().iterkeys():
                pgkUpperCaseNameDict[name.upper()] = name
        upperName = normalizeName(pkgName).upper()
        if upperName in pgkUpperCaseNameDict:
            matched = pgkUpperCaseNameDict[upperName]
            addToPackageMap(icrEntry, matched)
            return '<a href=\"%s%s\">%s</a>' % (
                dox_url, getPackageHtmlFileName(matched), pkgName)
        pkg = crossRef.getPackageByName(pkgName)
        if not pkg:
            pkg = crossRef.getPackageByName(normalizeName(pkgName).title())
        if not pkg:
            pkg = crossRef.getPackageByName(normalizeName(pkgName))
        if pkg:
            addToPackageMap(icrEntry, pkg.getName())
            return '<a href=\"%s%s\">%s</a>' % (
                dox_url, getPackageHtmlFileName(pkg.getName()), pkgName)
        logger.warn('Can not find mapping for package: [%s]', pkgName)
    return pkgName
Exemplo n.º 7
0
 def filterResult(self, line):
   """
     Process one line of a global listing, tracking the current root
     subscript and common-subscript prefix on self.
     return None to stop reading more information
     return True to keep reading more information
       (NOTE(review): the code returns True for this case, although the
       original doc said False)
     return GlobalNode to generate the result
   """
   retNode = None
   subscripts, value, rootSub = findSubscriptValue(line)
   if not subscripts: # must have some subscripts
     return None
   if not self.rootSub:
     # first line seen: adopt its root subscript
     self.rootSub = rootSub
   if rootSub != self.rootSub: # not under the same root, ignore
     retNode = self.curRoot
     if self.glbLoc:
       # NOTE(review): retNode is discarded on this path — the previously
       # accumulated root is dropped when the root changes; confirm intended
       logger.warn("Different root, expected: %s, real: %s, ignore for now" %
                     (self.rootSub, rootSub))
       self.curRoot = None
       return True
     else:
       # switch to the new root and start a fresh node tree
       self.rootSub = rootSub
       self.curCommonSub = subscripts[0:self.index+1]
       self.curRoot = createGlobalNode(subscripts, value, rootSub)
       if retNode:
         # emit the finished previous tree, narrowed to the common prefix
         retNode = retNode.getRootNode()
         for sub in self.curCommonSub:
           retNode = retNode[sub]
         return retNode
       else:
         return True
   if self.commonSubscript and subscripts[0:self.index] != self.commonSubscript:
     logger.warn("Different subsript, expected: %s, real: %s, ignore for now" %
                     (self.commonSubscript, subscripts[0:self.index]))
     retNode = self.curRoot
     self.curRoot = None
     if retNode:
       # emit what was accumulated under the expected common subscript
       retNode = retNode.getRootNode()
       for sub in self.commonSubscript:
         retNode = retNode[sub]
       return retNode
     else:
       return True
   if self.curCommonSub is None:
     # first node under this root: remember its common-subscript prefix
     self.curCommonSub = subscripts[0:self.index+1]
     self.curRoot = createGlobalNode(subscripts, value, rootSub, self.curRoot)
     return True
   # commonprefix works element-wise on the two subscript lists
   curCommonScript = os.path.commonprefix([subscripts, self.curCommonSub])
   if self.curCommonSub == curCommonScript:
     # still under the same prefix: extend the current tree
     self.curRoot = createGlobalNode(subscripts, value, rootSub, self.curRoot)
     return True
   else:
     # prefix shrank: emit the finished subtree and start a new one
     retNode = self.curRoot
     if retNode:
       retNode = retNode.getRootNode()
       for subscript in curCommonScript:
         retNode = retNode[subscript]
     self.curRoot = createGlobalNode(subscripts, value, rootSub)
     self.curCommonSub = curCommonScript + subscripts[len(curCommonScript):self.index+1]
     return retNode
Exemplo n.º 8
0
def convertJson(inputJsonFile, date, MRepositDir, patchRepositDir,
                generateHTML, generatePDF, outDir=None, pdfOutDir=None,
                local=False):
    """Convert an ICR JSON dump into per-entry HTML/PDF pages and summaries.

    Raises Exception when neither output format is requested, or when the
    directory for a requested format is not supplied.  Entries lacking a
    'NUMBER' key are logged and skipped.
    """
    if not (generateHTML or generatePDF):
        raise Exception("Nothing to generate!")

    # resolve the site URLs once for use by the page generators
    global DOX_URL
    global VIVIAN_URL
    DOX_URL = getDOXURL(local)
    VIVIAN_URL = getViViaNURL(local)

    if generateHTML:
        if not outDir:
            raise Exception("Must specify Output directory")
        if not os.path.exists(outDir):
            # Will also create intermediate directories if needed
            os.makedirs(outDir)

    if generatePDF:
        if not pdfOutDir:
            raise Exception("Must specify PDF Output directory")
        if not os.path.exists(pdfOutDir):
            # Will also create intermediate directories if needed
            os.makedirs(pdfOutDir)

    crossRef = parseCrossReferenceGeneratorArgs(MRepositDir, patchRepositDir)
    global RPC_NAME_TO_IEN_MAPPING
    RPC_NAME_TO_IEN_MAPPING = generateSingleFileFieldToIenMappingBySchema(
        MRepositDir, crossRef, RPC_FILE_NO, RPC_NAME_FIELD_NO)

    with open(inputJsonFile, 'r') as fin:
        icrJson = json.load(fin)
        summariesByPkg = {}  # summaries grouped by custodial package
        allSummaries = []
        for icrEntry in icrJson:
            if 'NUMBER' not in icrEntry:
                logger.error("Could not parse entry: " + str(icrEntry))
                continue
            if generatePDF:
                _generateICRIndividualPagePDF(icrEntry, date, pdfOutDir)
            if generateHTML:
                _generateICRIndividualPage(icrEntry, date, outDir, crossRef)
                summaryInfo = _convertICREntryToSummaryInfo(icrEntry, crossRef)
                allSummaries.append(summaryInfo)
                if 'CUSTODIAL PACKAGE' in icrEntry:
                    summariesByPkg.setdefault(
                        icrEntry['CUSTODIAL PACKAGE'], []).append(summaryInfo)
        if generateHTML:
            _generateICRSummaryPageImpl(allSummaries, 'ICR List', 'All', date,
                                        outDir, isForAll=True)
            for pkgName, pkgSummaries in summariesByPkg.iteritems():
                _generateICRSummaryPageImpl(pkgSummaries, 'ICR List', pkgName,
                                            date, outDir)
            logger.warn('Total # entry in PACKAGE_MAP is [%s]', len(PACKAGE_MAP))
            logger.warn('Total # entry in pkgJson is [%s]', len(summariesByPkg))
            _generatePkgDepSummaryPage(icrJson, date, outDir, crossRef)
Exemplo n.º 9
0
 def parse(self, inputFilename, outputFilename):
     """Parse an ICR flat file into records and dump them as JSON.

     Reads *inputFilename* line by line, accumulating records on
     self._curRecord / self._curStack via the _startOfNewItem,
     _startOfSubFile, _findKeyValueInLine and _appendWordsFieldLine
     helpers, then writes self._outObject to *outputFilename* as
     indented JSON.
     """
     with open(inputFilename, 'r') as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo += 1
             """ get rid of lines that are ignored """
             if self.isIgnoredLine(line):
                 continue
             # a line that opens a brand-new ICR record
             match = START_OF_RECORD.match(line)
             if match:
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
             if match and match.group('name') in ICR_FILE_KEYWORDS:
                 fieldName = match.group('name')
                 if isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug('field name is: %s', fieldName)
                     logger.debug('cur field is: %s', self._curField)
                     """ Check to see if fieldName is already in the out list """
                     if isWordProcessingField(self._curField):
                         # keyword text inside a word-processing field is data,
                         # not a new field
                         if self._ignoreKeywordInWordProcessingFields(
                                 fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack()
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 # continuation line for the field currently being filled
                 if len(line.strip()) == 0 and not isWordProcessingField(
                         self._curField):
                     logger.warn(
                         'Ignore blank line for current field: [%s]',
                         self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     print 'No field associated with line %s: %s ' % (
                         self._curLineNo, line)
     logger.info('End of file now')
     if len(self._curStack) > 0:
         # still inside a subfile: unwind back to the top-level record
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info('Add last record: %s', self._curRecord)
         self._outObject.append(self._curRecord)
     # pprint.pprint(self._outObject);
     with open(outputFilename, 'w') as out_file:
         json.dump(self._outObject, out_file, indent=4)
Exemplo n.º 10
0
def convertJson(inputJsonFile, date, MRepositDir, patchRepositDir,
                generateHTML, generatePDF, outDir=None, pdfOutDir=None,
                local=False):
    """Convert an ICR JSON dump into per-entry HTML/PDF pages and summaries.

    Raises Exception when neither output format is requested, or when the
    directory for a requested format is missing.

    Fix: entries without a 'NUMBER' key are now logged and skipped (matching
    the sibling implementation in this file) instead of being passed to the
    page generators, which rely on that key.
    """
    if not generateHTML and not generatePDF:
        raise Exception("Nothing to generate!")

    # resolve the site URLs once for use by the page generators
    global DOX_URL
    global VIVIAN_URL
    DOX_URL = getDOXURL(local)
    VIVIAN_URL = getViViaNURL(local)

    if generateHTML:
        if not outDir:
            raise Exception("Must specify Output directory")
        if not os.path.exists(outDir):
            # Will also create intermediate directories if needed
            os.makedirs(outDir)

    if generatePDF:
        if not pdfOutDir:
            raise Exception("Must specify PDF Output directory")
        # Will also create intermediate directories if needed
        if not os.path.exists(pdfOutDir):
            os.makedirs(pdfOutDir)

    crossRef = parseCrossReferenceGeneratorArgs(MRepositDir,
                                                patchRepositDir)
    global RPC_NAME_TO_IEN_MAPPING
    RPC_NAME_TO_IEN_MAPPING = generateSingleFileFieldToIenMappingBySchema(
        MRepositDir, crossRef, RPC_FILE_NO, RPC_NAME_FIELD_NO)

    with open(inputJsonFile, 'r') as inputFile:
        pkgJson = {} # group by package
        allpkgJson = []
        inputJson = json.load(inputFile)
        for icrEntry in inputJson:
            if 'NUMBER' not in icrEntry:
                # malformed entry: log and skip rather than crash downstream
                logger.error("Could not parse entry: " + str(icrEntry))
                continue
            if generatePDF:
                _generateICRIndividualPagePDF(icrEntry, date, pdfOutDir)
            if generateHTML:
                _generateICRIndividualPage(icrEntry, date, outDir, crossRef)
                summaryInfo = _convertICREntryToSummaryInfo(icrEntry, crossRef)
                allpkgJson.append(summaryInfo)
                if 'CUSTODIAL PACKAGE' in icrEntry:
                    pkgJson.setdefault(icrEntry['CUSTODIAL PACKAGE'],[]).append(summaryInfo)
        if generateHTML:
            _generateICRSummaryPageImpl(allpkgJson, 'ICR List', 'All', date,
                                        outDir, isForAll=True)
            for pkgName, outJson in pkgJson.iteritems():
                _generateICRSummaryPageImpl(outJson, 'ICR List', pkgName, date,
                                            outDir)
            logger.warn('Total # entry in PACKAGE_MAP is [%s]', len(PACKAGE_MAP))
            logger.warn('Total # entry in pkgJson is [%s]', len(pkgJson))
            _generatePkgDepSummaryPage(inputJson, date, outDir, crossRef)
Exemplo n.º 11
0
 def parse(self, inputFilename, outputFilename):
     """Parse an ICR flat file into records and dump them as JSON.

     Walks *inputFilename* line by line: record starts, sub-file starts,
     key/value fields and word-processing continuation lines are routed
     to the appropriate helper; the finished self._outObject is written
     to *outputFilename* as indented JSON.
     """
     with open(inputFilename, "r") as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo += 1
             """ get rid of lines that are ignored """
             if self.isIgnoredLine(line):
                 continue
             # does this line open a brand-new ICR record?
             match = START_OF_RECORD.match(line)
             if match:
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
             if match and match.group("name") in ICR_FILE_KEYWORDS:
                 fieldName = match.group("name")
                 if isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug("field name is: %s", fieldName)
                     logger.debug("cur field is: %s", self._curField)
                     """ Check to see if fieldName is already in the out list """
                     if isWordProcessingField(self._curField):
                         # keyword-looking text inside a word-processing field
                         # is content, not a new field
                         if self._ignoreKeywordInWordProcessingFields(fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack()
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 # continuation line for the field currently being filled
                 if len(line.strip()) == 0 and not isWordProcessingField(self._curField):
                     logger.warn("Ignore blank line for current field: [%s]", self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     print "No field associated with line %s: %s " % (self._curLineNo, line)
     logger.info("End of file now")
     if len(self._curStack) > 0:
         # still inside a subfile: unwind back to the top-level record
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info("Add last record: %s", self._curRecord)
         self._outObject.append(self._curRecord)
     # pprint.pprint(self._outObject);
     with open(outputFilename, "w") as out_file:
         json.dump(self._outObject, out_file, indent=4)
Exemplo n.º 12
0
 def _convertIndividualFieldValue(self, field, icrEntry, value):
     """Render one ICR field value for HTML output.

     Word-processing fields are joined, HTML-escaped and wrapped in <pre>;
     fields with a registered converter are passed through it; everything
     else is returned unchanged.

     Fix: use isinstance() instead of the non-idiomatic `type(x) is list`.
     """
     if isWordProcessingField(field):
         if isinstance(value, list):
             value = "\n".join(value)
         # escape and wrap so the text renders verbatim in HTML
         value = '<pre>\n' + cgi.escape(value) + '\n</pre>\n'
         return value
     if field in field_convert_map:
         if isinstance(value, list):
             # converters expect a scalar; log and return the list untouched
             logger.warn('field: [%s], value:[%s], icrEntry: [%s]', field, value, icrEntry)
             return value
         value = field_convert_map[field](value, icrEntry, crossRef=self._crossRef)
         return value
     return value
Exemplo n.º 13
0
 def _convertIndividualFieldValue(self, field, icrEntry, value):
     """Render one ICR field value for HTML output.

     Word-processing fields are joined, escaped and wrapped in <pre>;
     fields with a registered converter are passed through it; anything
     else comes back unchanged.
     """
     isListValue = type(value) is list
     if isWordProcessingField(field):
         text = "\n".join(value) if isListValue else value
         return '<pre>\n' + cgi.escape(text) + '\n</pre>\n'
     if field not in field_convert_map:
         return value
     if isListValue:
         # converters expect a scalar value; log and pass the list through
         logger.warn('field: [%s], value:[%s], icrEntry: [%s]', field, value, icrEntry)
         return value
     return field_convert_map[field](value, icrEntry, crossRef=self._crossRef)
Exemplo n.º 14
0
 def __createFieldByType__(self, fieldNo, fType, fName, fLocation, line, Global, CrossReference):
     """Create self._field by matching the textual field type *fType*.

     Tries, in order: the undefined-pointer pattern, pointer-to-file,
     subfile pointer, then prefix matches from StringTypeMappingDict.
     If nothing matched and the type text starts past
     MAXIMIUM_TYPE_START_INDEX in *line*, re-splits the line and retries
     recursively.  Asserts that a field was ultimately created.
     """
     logger.debug("Current Type is [%s]" % fType)
     if self.UNDEFINED_POINTER.search(fType):
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_FILE_POINTER, fLocation)
         return
     pointerMatch = self.POINTER_TO_REGEX.search(fType)
     if pointerMatch:
         fileNo = pointerMatch.group('File')
         pointedTo = CrossReference.getGlobalByFileNo(fileNo)
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_FILE_POINTER, fLocation)
         if pointedTo:
             self._field.setPointedToFile(pointedTo)
         else:
             logger.error("Could not find file pointed to [%s], [%s], line:[%s]" % (fileNo, self._curFile, line))
         return
     # deal with file pointer to subFiles
     subFileMatch = self.SUBFILE_REGEX.search(fType)
     if subFileMatch:
         # create a field for sub file type
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_SUBFILE_POINTER, fLocation)
         fileNo = subFileMatch.group('File')
         logger.debug("Pointer to subFile %s" % fileNo)
         subFile = Global.getSubFileByFileNo(fileNo)
         if not subFile: # this is a new subfile
             subFile = FileManFile(fileNo, fName, self._curFile)
             self._curFile.addFileManSubFile(subFile)
             logger.debug("Added subFile %s to File %s" % (fileNo, self._curFile.getFileNo()))
             if self._isSubFile:
                 Global.addFileManSubFile(subFile)
         self._field.setPointedToSubFile(subFile)
         CrossReference.addFileManSubFile(subFile)
         return
     # simple (string-like) types: match by prefix
     for prefix, fieldType in self.StringTypeMappingDict.iteritems():
         if fType.startswith(prefix):
             self._field = FileManFieldFactory.createField(fieldNo, fName, fieldType, fLocation)
             break
     if not self._field:
         # the type column may have started past the expected column;
         # re-split the raw line and retry once
         if line.find(fType) > self.MAXIMIUM_TYPE_START_INDEX:
             fType = line[self.MAXIMIUM_TYPE_START_INDEX:]
             if fLocation:
                 fLocation = line[line.find(fLocation):self.MAXIMIUM_TYPE_START_INDEX]
             logger.warn("new Type is [%s], loc is [%s]" % (fType, fLocation))
             self.__createFieldByType__(fieldNo, fType, fName, fLocation, line, Global, CrossReference)
     assert self._field, "Could not find the right type for %s, %s, %s, %s, %s" % (fType, fLocation, fieldNo, line, self._curFile.getFileNo())
Exemplo n.º 15
0
 def _getTableRows(self, fileManData, fileNo):
     """Build [htmlLink, ien] rows for every named data entry of a file.

     Entries without a name are logged and skipped; file-pointer entries
     have their display name converted first.
     """
     rows = []
     fileNoPath = fileNo.replace(".", "_")
     for ien in getKeys(fileManData.dataEntries.keys(), float):
         dataEntry = fileManData.dataEntries[ien]
         name = dataEntry.name
         if not name:
             logger.warn("No name for %s" % dataEntry)
             continue
         if isFilePointerType(dataEntry):
             link, name = convertFilePointerToHtml(name)
         # strip non-breaking spaces from the display text
         displayName = str(name).replace("\xa0", "")
         dataHtmlLink = "<a href=\"../%s/%s\">%s</a>" % (
             fileNoPath, getDataEntryHtmlFile(ien, fileNo), displayName)
         rows.append([dataHtmlLink, ien])
     return rows
Exemplo n.º 16
0
 def _generateICRSummaryPage(self, inputJson, date):
     """Generate per-entry ICR pages plus the 'All' and per-package summaries."""
     byPackage = {}  # summaries grouped by custodial package
     allSummaries = []
     for icrEntry in inputJson:
         self._generateICRIndividualPage(icrEntry, date)
         info = self._convertICREntryToSummaryInfo(icrEntry)
         allSummaries.append(info)
         if 'CUSTODIAL PACKAGE' in icrEntry:
             byPackage.setdefault(icrEntry['CUSTODIAL PACKAGE'], []).append(info)
     self._generateICRSummaryPageImpl(allSummaries, 'ICR List', 'All', date, True)
     for pkgName, pkgSummaries in byPackage.iteritems():
         self._generateICRSummaryPageImpl(pkgSummaries, 'ICR List', pkgName, date)
     logger.warn('Total # entry in pkgMap is [%s]', len(pkgMap))
     logger.warn('Total # entry in pkgJson is [%s]', len(byPackage))
     self._generatePkgDepSummaryPage(inputJson, date)
Exemplo n.º 17
0
 def parseFileManDbJSONFile(self, dbJsonFile):
     """Load a FileMan DB-call JSON dump and attach each routine's
     FileMan globals and FileMan calls to the cross-reference routine."""
     logger.info("Start parsing JSON file [%s]" % dbJsonFile)
     with open(dbJsonFile, 'r') as jsonFile:
         dbCallJson = json.load(jsonFile)
         for pkgItem in dbCallJson:
             # walk every routine recorded under this package
             for rtn in pkgItem['routines']:
                 rtnName = rtn['name']
                 routine = self._crossRef.getRoutineByName(rtnName)
                 if not routine:
                     logger.warn("Can not find routine [%s]" % rtnName)
                     continue
                 self._addFileManGlobals(routine, rtn['Globals'])
                 self._addFileManDBCalls(routine, rtn['FileMan calls'])
Exemplo n.º 18
0
 def parseFileManDbJSONFile(self, dbJsonFile):
     """Parse the FileMan DB-call JSON file, recording the FileMan globals
     and FileMan calls of every routine found in the cross-reference."""
     logger.progress("Start parsing JSON file [%s]" % dbJsonFile)
     with open(dbJsonFile, 'r') as fileHandle:
         packageItems = json.load(fileHandle)
         for packageItem in packageItems:
             # find all the routines under that package
             for routineInfo in packageItem['routines']:
                 routineName = routineInfo['name']
                 routine = self._crossRef.getRoutineByName(routineName)
                 if not routine:
                     logger.warn("Cannot find routine [%s]" % routineName)
                     continue
                 self._addFileManGlobals(routine, routineInfo['Globals'])
                 self._addFileManDBCalls(routine, routineInfo['FileMan calls'])
Exemplo n.º 19
0
    def visitRoutine(self, routine, outputDir):
        """Ensure <outputDir>/<packageName> exists for a routine's output.

        Bails out (with a log message) when the routine calls nothing, has
        no owning package, or the directory cannot be created.

        Fixes: routineName was referenced before assignment when there were
        no called routines (NameError instead of the intended warning);
        Py3-compatible 'except ... as e' syntax; dirName hoisted out of the
        try so the except handler can never see it unbound.
        """
        routineName = routine.getName()
        calledRoutines = routine.getCalledRoutines()
        if not calledRoutines:
            logger.warn("No called Routines found! for package:%s" % routineName)
            return
        if not routine.getPackage():
            logger.error("ERROR: package: %s does not belongs to a package" % routineName)
            return

        packageName = routine.getPackage().getName()
        dirName = os.path.join(outputDir, packageName)
        try:
            if not os.path.exists(dirName):
                os.makedirs(dirName)
        except OSError as e:
            logger.error("Error making dir %s : Error: %s" % (dirName, e))
            return
Exemplo n.º 20
0
    def visitRoutine(self, routine, outputDir):
        """Ensure <outputDir>/<packageName> exists for a routine's output.

        Bails out (with a log message) when the routine calls nothing, has
        no owning package, or the directory cannot be created.

        Fixes: routineName was referenced before assignment when there were
        no called routines (NameError instead of the intended warning);
        Py3-compatible 'except ... as e' syntax; dirName hoisted out of the
        try so the except handler can never see it unbound.
        """
        routineName = routine.getName()
        calledRoutines = routine.getCalledRoutines()
        if not calledRoutines:
            logger.warn("No called Routines found! for package:%s" % routineName)
            return
        if not routine.getPackage():
            logger.error("ERROR: package: %s does not belongs to a package" % routineName)
            return

        packageName = routine.getPackage().getName()
        dirName = os.path.join(outputDir, packageName)
        try:
            if not os.path.exists(dirName):
                os.makedirs(dirName)
        except OSError as e:
            logger.error("Error making dir %s : Error: %s" % (dirName, e))
            return
Exemplo n.º 21
0
 def _generateICRSummaryPage(self, inputJson, date):
     """Generate individual ICR pages (HTML and, when enabled, PDF) plus the
     'All' and per-custodial-package summary pages."""
     self.failures = []
     groupedByPkg = {}  # summaries grouped by custodial package
     everySummary = []
     for entry in inputJson:
         self._generateICRIndividualPage(entry, date)
         if self._generatePDF:
             self._generateICRIndividualPagePDF(entry, date)
         summary = self._convertICREntryToSummaryInfo(entry)
         everySummary.append(summary)
         if 'CUSTODIAL PACKAGE' in entry:
             groupedByPkg.setdefault(entry['CUSTODIAL PACKAGE'], []).append(summary)
     self._generateICRSummaryPageImpl(everySummary, 'ICR List', 'All', date, True)
     for pkgName, pkgEntries in groupedByPkg.iteritems():
         self._generateICRSummaryPageImpl(pkgEntries, 'ICR List', pkgName, date)
     logger.warn('Total # entry in pkgMap is [%s]', len(pkgMap))
     logger.warn('Total # entry in pkgJson is [%s]', len(groupedByPkg))
     self._generatePkgDepSummaryPage(inputJson, date)
Exemplo n.º 22
0
 def _generateICRSummaryPage(self, inputJson):
     """Generate the individual ICR pages and the summary listings.

     Produces one page per ICR entry, an overall 'All' summary page,
     one summary page per custodial package, and finally the package
     dependency summary page.
     """
     pkgJson = {} # group by package
     allpgkJson = []
     for icrEntry in inputJson:
         self._generateICRIndividualPage(icrEntry)
         summaryInfo = self._convertICREntryToSummaryInfo(icrEntry)
         allpgkJson.append(summaryInfo)
         # Group the summary rows by custodial package when one is present.
         if 'CUSTODIAL PACKAGE' in icrEntry:
             pkgJson.setdefault(icrEntry['CUSTODIAL PACKAGE'],[]).append(summaryInfo)
     self._generateICRSummaryPageImpl(allpgkJson, 'ICR List', 'All', True)
     for pkgName, outJson in pkgJson.iteritems():
         self._generateICRSummaryPageImpl(outJson, 'ICR List', pkgName)
     # NOTE(review): pkgMap and pgkUpperCaseNameDict are not defined in this
     # method -- presumably module-level globals; confirm they exist.
     logger.warn('Total # entry in pkgMap is [%s]', len(pkgMap))
     logger.warn('Total # entry in pkgJson is [%s]', len(pkgJson))
     # Debug output: packages seen here that are unknown to the global maps.
     pprint.pprint(set(pkgJson.keys()) - set(pkgMap.keys()))
     pprint.pprint(set(pgkUpperCaseNameDict.values()) - set(pkgMap.values()))
     # pprint.pprint(pkgMap)
     self._generatePkgDepSummaryPage(inputJson)
Exemplo n.º 23
0
 def _parseSchemaField(self, fieldNo, rootNode, fileSchema):
     """Build a FileMan field object from a schema node's "0" subscript.

     Returns None when the node lacks a usable zero node; otherwise a
     field created via FileManFieldFactory with its type, location and
     any type-specific data filled in.
     """
     if '0' not in rootNode:
         logger.warn('%s does not have a 0 subscript' % rootNode)
         return None
     zeroNode = rootNode["0"].value
     if not zeroNode:
         logger.warn("No value: %s for %s" % (zeroNode, rootNode['0']))
         return None
     pieces = zeroNode.split('^')
     if len(pieces) < 2:
         # Only a name is present; no type information at all.
         return FileManFieldFactory.createField(
             fieldNo, pieces[0], FileManField.FIELD_TYPE_NONE, None)
     types, specifier, filePointedTo, subFile = \
         self.parseFieldTypeSpecifier(pieces[1])
     location = None
     if len(pieces) >= 4 and pieces[3]:
         location = pieces[3].strip(' ')
         if location == ';':
             # A bare ';' carries no real location information.
             location = None
         elif location.split(';')[-1] == '0':
             # A trailing 0 in the location marks a multiple (subfile).
             subFileType = FileManField.FIELD_TYPE_SUBFILE_POINTER
             if not types:
                 types = [subFileType]
             if subFileType in types and types[0] != subFileType:
                 # Promote the subfile pointer type to the front.
                 types.remove(subFileType)
                 types.insert(0, subFileType)
                 if not subFile:
                     subFile = filePointedTo
     if not types:
         logger.debug('Cannot determine the type for %s, fn: %s, file:%s' %
                      (pieces, fieldNo, fileSchema.getFileNo()))
         types = [FileManField.FIELD_TYPE_NONE]
     if types and types[0] == FileManField.FIELD_TYPE_SUBFILE_POINTER:
         if subFile and subFile == fileSchema.getFileNo():
             # A subfile must not point back at its own file.
             logger.warning("Recursive subfile pointer for %s" % subFile)
             types = [FileManField.FIELD_TYPE_NONE]
     field = FileManFieldFactory.createField(fieldNo, pieces[0],
                                             types[0], location)
     if specifier:
         field.setSpecifier(specifier)
     self._setFieldSpecificData(pieces, field, rootNode, fileSchema,
                                filePointedTo, subFile)
     return field
Exemplo n.º 24
0
 def _parseSchemaField(self, fieldNo, rootNode, fileSchema):
   """Build a FileMan field object from a schema node's "0" subscript.

   Returns None when the node lacks a usable zero node; otherwise a field
   created via FileManFieldFactory with its type, location and any
   type-specific data filled in.
   """
   if '0' not in rootNode:
     logger.warn('%s does not have a 0 subscript' % rootNode)
     return None
   zeroFields = rootNode["0"].value
   if not zeroFields:
     logger.warn("No value: %s for %s" % (zeroFields, rootNode['0']))
     return None
   # Zero node layout: name ^ type specifier ^ ... ^ location
   zeroFields = zeroFields.split('^')
   if len(zeroFields) < 2:
     # Only a name is present; no type information at all.
     return FileManFieldFactory.createField(fieldNo, zeroFields[0],
                                            FileManField.FIELD_TYPE_NONE, None)
   types, specifier, filePointedTo, subFile = \
       self.parseFieldTypeSpecifier(zeroFields[1])
   location = None
   if len(zeroFields) >= 4 and zeroFields[3]:
     location = zeroFields[3].strip(' ')
     if location == ';': # No location information
       location = None
     elif location.split(';')[-1] == '0': # 0 means multiple
       multipleType = FileManField.FIELD_TYPE_SUBFILE_POINTER
       if not types:
         types = [multipleType]
       if multipleType in types and types[0] != multipleType:
         # Promote the subfile pointer type to the front of the list.
         types.remove(multipleType)
         types.insert(0, multipleType)
         if not subFile: subFile = filePointedTo
   if not types:
     logger.debug('Cannot determine the type for %s, fn: %s, file:%s' %
                  (zeroFields, fieldNo, fileSchema.getFileNo()))
     types = [FileManField.FIELD_TYPE_NONE]
   if types and types[0]  == FileManField.FIELD_TYPE_SUBFILE_POINTER:
     # A subfile must not point back at its own file.
     if subFile and subFile == fileSchema.getFileNo():
       logger.warning("Recursive subfile pointer for %s" % subFile)
       types = [FileManField.FIELD_TYPE_NONE]
   fileField = FileManFieldFactory.createField(fieldNo, zeroFields[0],
                                               types[0], location)
   if specifier:
     fileField.setSpecifier(specifier)
   self._setFieldSpecificData(zeroFields, fileField, rootNode,
                             fileSchema, filePointedTo, subFile)
   return fileField
Exemplo n.º 25
0
 def printAllNamespaces(self):
     """Log every package namespace, split into included and excluded sets.

     Namespaces beginning with '!' are exclusions; each exclusion is also
     checked against the included set and a warning is logged when its
     bare name (without the '!') is missing there.
     """
     allPackages = self._crossRef.getAllPackages()
     included = set()
     excluded = set()
     for pkg in allPackages.itervalues():
         for ns in pkg.getNamespaces():
             if ns.startswith("!"):
                 excluded.add(ns)
             else:
                 included.add(ns)
     sortedIncluded = sorted(included)
     sortedExcluded = sorted(excluded)
     logger.info("Total # of namespace: %d" % len(sortedIncluded))
     logger.info("Total # of excluded namespaces: %d" % len(sortedExcluded))
     logger.info(sortedIncluded)
     logger.info(sortedExcluded)
     for ns in excluded:
         if ns[1:] not in sortedIncluded:
             logger.warn("item: %s not in the namespace set" % ns[1:])
Exemplo n.º 26
0
 def printAllNamespaces(self):
     """Log every package namespace, split into included and excluded sets.

     Namespaces beginning with '!' are exclusions; each exclusion is also
     checked against the included set and a warning is logged when its
     bare name (without the '!') is missing there.
     """
     crossRef = self._crossRef
     allPackages = crossRef.getAllPackages()
     namespaces = set()
     excludeNamespace = set()
     for package in allPackages.itervalues():
         for namespace in package.getNamespaces():
             # A leading '!' marks the namespace as excluded.
             if (namespace.startswith("!")):
                 excludeNamespace.add(namespace)
             else:
                 namespaces.add(namespace)
     sortedSet = sorted(namespaces)
     sortedExclude = sorted(excludeNamespace)
     logger.info("Total # of namespace: %d" % len(sortedSet))
     logger.info("Total # of excluded namespaces: %d" % len(sortedExclude))
     logger.info(sortedSet)
     logger.info(sortedExclude)
     for item in excludeNamespace:
         # Every '!FOO' exclusion should have a matching 'FOO' namespace.
         if item[1:] not in sortedSet:
             logger.warn("item: %s not in the namespace set" % item[1:])
Exemplo n.º 27
0
 def _ignoreKeywordInWordProcessingFields(self, fieldName):
     """Decide whether a keyword-looking line inside a word-processing
     field should be treated as plain text.

     HACK: a word-processing (free-text) field may contain a line that
     looks like a "KEYWORD: value" pair.  When the keyword was already
     parsed for the current record -- or for any parent record on the
     parse stack -- assume it belongs to the free text and return True.
     """
     logger.debug("current field is [%s]", self._curField)
     record = self._curRecord
     if record and fieldName in record:
         logger.warn("fieldName: [%s] is already parsed, ignore fields",
                     fieldName)
         return True
     if fieldName == "ROUTINE":
         # In subfile mode, check the top-level record instead;
         # a REMOTE PROCEDURE entry carries no real ROUTINE field.
         target = self._curStack[0][0] if self._curStack else record
         if "REMOTE PROCEDURE" in target:
             logger.warn("Ignore ROUTINE field as it is a REMOTE PROCEDURE type")
             return True
     for item in self._curStack:
         if fieldName in item[0]:
             logger.warn(
                 "fieldName: [%s] is already parsed in [%s], ignore the words fields",
                 fieldName, item[1])
             return True
     return False
Exemplo n.º 28
0
 def _ignoreKeywordInWordProcessingFields(self, fieldName):
     """Decide whether a keyword-looking line inside a word-processing
     field should be treated as plain text.

     HACK to circumvent the case where a "KEYWORD: value"-like pair
     appears inside a subfile's word-processing field: the keyword is
     not part of the subfile, so we assume it is part of the
     word-processing text when the current record or any parent record
     on the parse stack already has that field.
     """
     logger.debug('current field is [%s]', self._curField)
     if self._curRecord and fieldName in self._curRecord:
         logger.warn('fieldName: [%s] is already parsed, ignore fields',
                     fieldName)
         return True
     """ This is some special logic to ignore some of the fields in word processing field """
     if fieldName == 'ROUTINE':
         recordToCheck = self._curRecord
         if self._curStack and len(
                 self._curStack
         ) > 0:  # we are in subfile mode and it is a world processing field
             # Check the top-level record rather than the subfile record.
             recordToCheck = self._curStack[0][0]
         if 'REMOTE PROCEDURE' in recordToCheck:
             logger.warn(
                 'Ignore ROUTINE field as it is a REMOTE PROCEDURE type')
             return True
     # Each stack item is (record, fieldName-of-subfile).
     for stackItem in self._curStack:
         if fieldName in stackItem[0]:
             logger.warn(
                 'fieldName: [%s] is already parsed in [%s], ignore the words fields',
                 fieldName, stackItem[1])
             return True
     return False
Exemplo n.º 29
0
def _generateICRIndividualPagePDF(icrJson, date, pdfOutDir):
    """Render a single ICR entry as a PDF under pdfOutDir.

    The PDF is written to <pdfOutDir>/<package>/ICR-<ien>.pdf when the
    entry has a CUSTODIAL PACKAGE, otherwise directly under pdfOutDir.
    Build/write failures are recorded in the module-level FAILURES list
    instead of propagating.

    Bug fixes vs. the original:
      * PDF bytes are now written in binary mode ('wb'); text mode 'w'
        corrupts output on Windows and fails under Python 3.
      * The bare 'except:' was narrowed to 'except Exception:' so that
        KeyboardInterrupt/SystemExit still propagate.
    """
    ien = icrJson['NUMBER']
    if 'CUSTODIAL PACKAGE' in icrJson:
        packageName = icrJson['CUSTODIAL PACKAGE']
        pdfOutDir = os.path.join(pdfOutDir, normalizePackageName(packageName))
        if not os.path.exists(pdfOutDir):
            os.mkdir(pdfOutDir)
    else:
        # TODO: PDF will not be included in a package bundle and will not be
        #       accessible from the Dox pages
        logger.warn("Could not find package for: ICR %s" % ien)
    pdfFile = os.path.join(pdfOutDir, 'ICR-' + ien + '.pdf')

    # Build the document in memory first so a failed build leaves no
    # partial file on disk.
    buf = io.BytesIO()
    doc = SimpleDocTemplate(
        buf,
        rightMargin=old_div(inch, 2),
        leftMargin=old_div(inch, 2),
        topMargin=old_div(inch, 2),
        bottomMargin=old_div(inch, 2),
        pagesize=letter,
    )
    pdf = []
    # Title
    pdf.append(
        Paragraph("%s %s (%s)" % (icrJson['NAME'], 'ICR', ien),
                  STYLES['Heading1']))

    # Table
    _icrDataEntryToPDF(pdf, icrJson, doc)

    try:
        doc.build(pdf)
        with open(pdfFile, 'wb') as fd:
            fd.write(buf.getvalue())
    except Exception:
        # Record the failure for later reporting; best-effort by design.
        global FAILURES
        FAILURES.append(pdfFile)
Exemplo n.º 30
0
 def _convertFileManDataToHtml(self, fileManData):
     """Write one HTML page per data entry of the given FileMan data set.

     Each page goes to <outDir>[/<fileNo with '.' as '_'>]/<entry html
     file> and shows the entry as a record table.  Entries without a
     name are skipped with a warning.
     """
     fileManDataFileNo = fileManData.fileNo
     # File numbers may contain '.', which is not wanted in paths/ids.
     pathSafeFileManDataFileNo = fileManDataFileNo.replace(".", "_")
     # getKeys presumably returns the IENs sorted numerically -- confirm.
     for ien in getKeys(fileManData.dataEntries.keys(), float):
         dataEntry = fileManData.dataEntries[ien]
         name = dataEntry.name
         if not name:
             logger.warn("no name for %s" % dataEntry)
             continue
         outDir = self.outDir
         fileNo = dataEntry.fileNo
         if fileNo:
             outDir = os.path.join(self.outDir, fileNo.replace(".", "_"))
         tName = "%s-%s" % (pathSafeFileManDataFileNo, ien)
         if isFilePointerType(dataEntry):
             # NOTE(review): the returned link is unused here.
             link, name = convertFilePointerToHtml(name)
         outHtmlFileName = getDataEntryHtmlFile(ien, fileManDataFileNo)
         with open(os.path.join(outDir, outHtmlFileName), 'w') as output:
             output.write("<html>")
             outputDataRecordTableHeader(output, tName)
             output.write("<body id=\"dt_example\">")
             output.write("""<div id="container" style="width:80%">""")
             output.write("<h1>%s (%s) &nbsp;&nbsp;  %s (%s)</h1>\n" %
                          (name, ien, fileManData.name, fileManDataFileNo))
             # Files 19 (OPTION) and 101 (PROTOCOL) get a ViViaN menu link.
             if fileNo in ['19', '101']:
                 # Todo: Check if the object exists in options/menus first.
                 output.write(
                     "<a style='font-size: 15px;' href='%s../vista_menus.php#%s?name=%s'>View in ViViaN Menu</a>"
                     % (VIV_URL, fileNo, urllib.quote_plus(name)))
             outputFileEntryTableList(output, tName)
             """ table body """
             output.write("<tbody>\n")
             self._fileManDataEntryToHtml(output, dataEntry, True)
             output.write("</tbody>\n")
             output.write("</table>\n")
             output.write("</div>\n")
             output.write("</div>\n")
             output.write("</body></html>")
Exemplo n.º 31
0
def _generateICRIndividualPagePDF(icrJson, date, pdfOutDir):
    """Render a single ICR entry as a PDF under pdfOutDir.

    The PDF is written to <pdfOutDir>/<package>/ICR-<ien>.pdf when the
    entry has a CUSTODIAL PACKAGE, otherwise directly under pdfOutDir.
    Failures are collected in the module-level FAILURES list instead of
    propagating.
    """
    ien = icrJson['NUMBER']
    if 'CUSTODIAL PACKAGE' in icrJson:
        packageName = icrJson['CUSTODIAL PACKAGE']
        pdfOutDir = os.path.join(pdfOutDir, normalizePackageName(packageName))
        if not os.path.exists(pdfOutDir):
            os.mkdir(pdfOutDir)
    else:
        # TODO: PDF will not be included in a package bundle and will not be
        #       accessible from the Dox pages
        logger.warn("Could not find package for: ICR %s" % ien)
    pdfFile = os.path.join(pdfOutDir, 'ICR-' + ien + '.pdf')

    # Setup the pdf document (built in memory, written to disk after build)
    buf = io.BytesIO()
    doc = SimpleDocTemplate(
        buf,
        rightMargin=inch/2,
        leftMargin=inch/2,
        topMargin=inch/2,
        bottomMargin=inch/2,
        pagesize=letter,
    )
    pdf = []
    # Title
    pdf.append(Paragraph("%s %s (%s)" % (icrJson['NAME'], 'ICR', ien),
                         STYLES['Heading1']))

    # Table
    _icrDataEntryToPDF(pdf, icrJson, doc)

    try:
        doc.build(pdf)
        # NOTE(review): PDF content is binary -- mode 'w' corrupts output on
        # Windows and fails under Python 3; consider 'wb'.
        with open(pdfFile, 'w') as fd:
            fd.write(buf.getvalue())
    # NOTE(review): bare 'except:' also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to 'except Exception:'.
    except:
        global FAILURES
        FAILURES.append(pdfFile)
Exemplo n.º 32
0
 def _convertFileManDataToHtml(self, fileManData):
   """Write one HTML page per data entry of the given FileMan data set.

   Each page goes to <outDir>[/<fileNo with '.' as '_'>]/<entry html file>
   and shows the entry as a record table.  Entries without a name are
   skipped with a warning.
   """
   fileManDataFileNo = fileManData.fileNo
   # File numbers may contain '.', which is not wanted in paths/ids.
   pathSafeFileManDataFileNo = fileManDataFileNo.replace(".", "_")
   # getKeys presumably returns the IENs sorted numerically -- confirm.
   for ien in getKeys(fileManData.dataEntries.keys(), float):
     dataEntry = fileManData.dataEntries[ien]
     name = dataEntry.name
     if not name:
       logger.warn("no name for %s" % dataEntry)
       continue
     outDir = self.outDir
     fileNo = dataEntry.fileNo
     if fileNo:
       outDir = os.path.join(self.outDir, fileNo.replace(".","_"))
     tName = "%s-%s" % (pathSafeFileManDataFileNo, ien)
     if isFilePointerType(dataEntry):
       # NOTE(review): the returned link is unused here.
       link, name = convertFilePointerToHtml(name)
     outHtmlFileName = getDataEntryHtmlFileName(ien, fileManDataFileNo)
     with open(os.path.join(outDir, outHtmlFileName), 'w') as output:
       output.write ("<html>")
       outputDataRecordTableHeader(output, tName)
       output.write("<body id=\"dt_example\">")
       output.write("""<div id="container" style="width:80%">""")
       output.write ("<h1>%s (%s) &nbsp;&nbsp;  %s (%s)</h1>\n" % (name, ien,
                                                                   fileManData.name,
                                                                   fileManDataFileNo))
       # Files 19 (OPTION) and 101 (PROTOCOL) get a ViViaN menu link.
       if fileNo in ['19','101']:
         # Todo: Check if the object exists in options/menus first.
         output.write("<a style='font-size: 15px;' href='%s../vista_menus.php#%s?name=%s'>View in ViViaN Menu</a>" %
                         (VIV_URL, fileNo, urllib.quote_plus(name)))
       outputFileEntryTableList(output, tName)
       """ table body """
       output.write("<tbody>\n")
       self._fileManDataEntryToHtml(output, dataEntry, True)
       output.write("</tbody>\n")
       output.write("</table>\n")
       output.write("</div>\n")
       output.write("</div>\n")
       output.write ("</body></html>")
Exemplo n.º 33
0
 def _updateHL7Reference(self):
   """Classify PROTOCOL (file 101) entries into their owning packages.

   Non-HL7 entries are appended to package.protocol; 'event driver' and
   'subscriber' entries are appended to package.hl7, and their HL7
   routines (fields 771/772) are registered in self._rtnRefDict.
   """
   protocol = self._glbData['101']
   # NOTE(review): outJSON is never used in this method.
   outJSON = {}
   for ien in sorted(protocol.dataEntries.keys(), key=lambda x: float(x)):
     protocolEntry = protocol.dataEntries[ien]
     if '4' in protocolEntry.fields:  # field 4 holds the protocol TYPE
       type = protocolEntry.fields['4'].value
       if (type != 'event driver' and type != 'subscriber'):
         entryName = protocolEntry.name
         namespace, package = \
           self._crossRef.__categorizeVariableNameByNamespace__(entryName)
         if package:
           package.protocol.append(protocolEntry)
       # only care about the event drive and subscriber type
       elif (type == 'event driver' or type == 'subscriber'):
         entryName = protocolEntry.name
         namespace, package = \
           self._crossRef.__categorizeVariableNameByNamespace__(entryName)
         if package:
           package.hl7.append(protocolEntry)
         elif '12' in protocolEntry.fields: # check the packge it belongs
           pass
         else:
           logger.warn("Cannot find a package for HL7: %s" % entryName)
         # Fields 771/772 hold the HL7 routine references.
         for field in ('771', '772'):
           if field not in protocolEntry.fields:
             continue
           hl7Rtn = protocolEntry.fields[field].value
           if not hl7Rtn:
             continue
           # NOTE(review): pos from getMumpsRoutine is unused here.
           for rtn, tag, pos in getMumpsRoutine(hl7Rtn):
             hl7Info = {"name": entryName,
                        "ien": ien}
             if tag:
               hl7Info['tag'] = tag
             self._rtnRefDict.setdefault(rtn,{}).setdefault('101',[]).append(hl7Info)
Exemplo n.º 34
0
 def parse(self, inputFilename, outputFilename):
     """Parse an ICR text export line by line and dump the records as JSON.

     Maintains parser state across lines (current record, current field,
     subfile stack, and the DBA Comments / GENERAL DESCRIPTION word-
     processing modes).  The final record list is written to
     outputFilename as indented JSON.  The module-level `date` global is
     updated from the INTEGRATION REFERENCES LIST header line.
     """
     global date
     with open(inputFilename,'r') as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo +=1
             """ get rid of lines that are ignored """
             if self.isIgnoredLine(line):
                 continue
             # Header line: remember the export date.
             match = INTEGRATION_REFERENCES_LIST.match(line)
             if match:
                 date = match.group(1).strip()
                 continue
             # A new record starts only outside word-processing modes.
             match = START_OF_RECORD.match(line)
             if match and not self._DBAComments and not self._generalDescription:
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
                 if match:
                     self._DBAComments = True
             if match and match.group('name') in ICR_FILE_KEYWORDS:
                 fieldName = match.group('name')
                 if fieldName == 'DBA Comments':
                     self._DBAComments = True
                 elif fieldName == 'GENERAL DESCRIPTION':
                     self._generalDescription = True
                 # A recognized keyword terminates the DBA Comments mode;
                 # GENERAL DESCRIPTION mode ends at the STATUS: line.
                 if self._DBAComments:
                     if fieldName in ICR_FILE_KEYWORDS:
                         self._DBAComments = False
                 elif self._generalDescription:
                     if line.startswith("  STATUS:"):  # Starts with exactly 2 spaces
                         self._generalDescription = False
                 if self._DBAComments:
                     fieldName = 'DBA Comments'
                     if self._curField == fieldName:
                         self._appendWordsFieldLine(line)
                     else:
                         self._curField = fieldName
                         name = match.group('name') # this is the name part
                         restOfLine = line[match.end():]
                         self._curRecord[name] = restOfLine.strip()
                 elif self._generalDescription:
                     fieldName = 'GENERAL DESCRIPTION'
                     if self._curField == fieldName:
                         self._appendWordsFieldLine(line)
                     else:
                         self._curField = fieldName
                         name = match.group('name') # this is the name part
                         restOfLine = line[match.end():]
                         self._curRecord[name] = restOfLine.strip()
                 elif isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug('field name is: %s', fieldName)
                     logger.debug('cur field is: %s', self._curField)
                     """ Check to see if fieldName is already in the out list """
                     if isWordProcessingField(self._curField):
                         if self._ignoreKeywordInWordProcessingFields(fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack();
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 # Continuation line of the current (word-processing) field.
                 if len(line.strip()) == 0 and not isWordProcessingField(self._curField):
                     logger.warn('Ignore blank line for current field: [%s]', self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     print 'No field associated with line %s: %s ' % (self._curLineNo, line)
     logger.info('End of file now')
     # Flush any open subfiles and the final record.
     if len(self._curStack) > 0:
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info('Add last record: %s', self._curRecord)
         self._outObject.append(self._curRecord)
     # pprint.pprint(self._outObject);
     with open(outputFilename, 'w') as out_file:
         json.dump(self._outObject,out_file, indent=4)
Exemplo n.º 35
0
 def onSectionStart(self, line, section, Global, CrossReference):
     """Parse a FileMan field definition header line and create the field.

     Extracts the file number, field number, name, location and type from
     the section start line (falling back through several regexes when
     location or type is missing) and adds the resulting field to the
     current file or subfile.
     """
     logger.debug("[%s]" % line)
     self._lines = []
     result = DataDictionaryListFileLogParser.FILEMAN_FIELD_START.search(
         line)
     assert result
     fileNo = result.group('FileNo')
     fieldNo = result.group("FieldNo")
     # A file number different from the Global's means this is a subfile.
     self._isSubFile = float(fileNo) != float(Global.getFileNo())
     if self._isSubFile:
         logger.debug("%s is a subfile" % fileNo)
         self._curFile = Global.getSubFileByFileNo(fileNo)
         assert self._curFile, "Could not find subFile [%s] in file [%s] line [%s]" % (
             fileNo, Global.getFileNo(), line)
     else:
         self._curFile = Global
     restOfLineStart = line.find("," + fieldNo) + len(fieldNo)
     startIdent = self.DEFAULT_NAME_INDENT
     #if len(fileNo) + 4 > startIdent:
     #    startIdent = self
     defaultIdentLevel = self.__getDefaultIndentLevel__(
         self._curFile, self.DEFAULT_NAME_INDENT)
     if restOfLineStart > defaultIdentLevel:
         # The field number may have been misparsed; re-derive it from
         # the text between the ',' and the default indent column.
         logger.debug(
             "FileNo: %s, FieldNo: %s, line: %s, may not be a valid field no, %d, %d"
             % (fileNo, fieldNo, line, restOfLineStart, defaultIdentLevel))
         try:
             floatValue = float(fieldNo)
         except ValueError:
             logger.error("invalid fieldNo %s" % fieldNo)
             fieldNo = line[line.find(",") + 1:defaultIdentLevel]
             floatValue = float(fieldNo)
     restOfLine = line[line.find("," + fieldNo) + len(fieldNo) + 1:].strip()
     logger.debug("Parsing [%s]" % restOfLine)
     # Preferred case: name, location and type are all present.
     result = self.NAME_LOC_TYPE_REGEX.search(restOfLine)
     fName, fType, fLocation = None, None, None
     if result:
         logger.debug("FileNo: %s, Field#: %s, Name: %s, Loc %s, Type %s" %
                      (fileNo, fieldNo, result.group('Name').rstrip(),
                       result.group('Loc'), result.group('Type')))
         fName = result.group('Name').strip()
         fLocation = result.group('Loc').strip()
         if fLocation == ";":
             fLocation = None
         fType = result.group('Type').strip()
     else:
         # handle three cases, 1. no location info 2. no type info 3. Both
         if restOfLine.find(";") != -1:  # missing type info
             logger.warn("Missing Type information [%s]" % line)
             result = self.NAME_LOC_REGEX.search(restOfLine)
             if result:
                 logger.debug("Name: %s, Loc %s" %
                              (result.group('Name'), result.group('Loc')))
                 fName = result.group('Name').strip()
                 fLocation = result.group('Loc').strip()
             else:
                 logger.error("Could not parse [%s]" % restOfLine)
                 return
         else:  # missing location, assume at least two space seperate name and type
             logger.warn("Missing location information [%s]" % line)
             result = self.NAME_TYPE_REGEX.search(restOfLine)
             if result:
                 fName = result.group('Name').strip()
                 fType = result.group('Type').strip()
                 logger.debug("Name: %s, Type %s" %
                              (result.group('Name'), result.group('Type')))
             else:
                 logger.warn("Guessing Name: %s at line [%s]" %
                             (restOfLine.strip(), line))
     stripedType = ""
     if fType:
         stripedType = self.__stripFieldAttributes__(fType)
     if len(stripedType) > 0:
         self.__createFieldByType__(fieldNo, stripedType, fName, fLocation,
                                    line, Global, CrossReference)
     else:
         # No usable type information: record the field as typeless.
         self._field = FileManFieldFactory.createField(
             fieldNo, fName, FileManField.FIELD_TYPE_NONE, fLocation)
     logger.debug("Add field %s to File %s" %
                  (fName, self._curFile.getFileNo()))
     self._curFile.addFileManField(self._field)
     if len(stripedType) > 0:
         self.__parseFieldAttributes__(fType)
Exemplo n.º 36
0
 def _parseIndividualFieldDetail(self, value, fieldAttr, outDataEntry):
   """Convert one raw field value into its display detail.

   Resolves set-member codes, file/variable pointers (to
   'fileNo^ien[^index]' form) and FileMan date/time values, then adds a
   FileManDataField to outDataEntry (when given) and returns the detail.
   Returns None when the stripped value is empty.

   Bug fix vs. the original: corrected the 'Unknow Variable Pointer'
   placeholder string to 'Unknown Variable Pointer'.
   """
   value = value.strip(' ')
   if not value:
     return
   fieldDetail = value
   pointerFileNo = None
   if fieldAttr.isSetType():
     # Replace the stored code with its set-member display value.
     setDict = fieldAttr.getSetMembers()
     if setDict and value in setDict:
       fieldDetail = setDict[value]
   elif fieldAttr.isFilePointerType() or fieldAttr.isVariablePointerType():
     fileNo = None
     ien = None
     if fieldAttr.isFilePointerType():
       filePointedTo = fieldAttr.getPointedToFile()
       if filePointedTo:
         fileNo = filePointedTo.getFileNo()
         ien = value
       else:
         fieldDetail = 'No Pointed to File'
     else: # for variable pointer type, stored as 'ien;globalRoot'
       vpInfo = value.split(';')
       if len(vpInfo) != 2:
         logger.error("Unknown variable pointer format: %s" % value)
         fieldDetail = "Unknown Variable Pointer"
       else:
         fileNo = self.getFileNoByGlobalLocation(vpInfo[1])
         ien = vpInfo[0]
         if not fileNo:
           logger.warn("Could not find File for %s" % value)
           fieldDetail = 'Global Root: %s, IEN: %s' % (vpInfo[1], ien)
     if fileNo and ien:
       fieldDetail = '^'.join((fileNo, ien))
       # Append the key-index name (or its last piece) when available.
       idxName = self._getFileKeyIndex(fileNo, ien)
       if idxName:
         idxes = str(idxName).split('^')
         if len(idxes) == 1:
           fieldDetail = '^'.join((fieldDetail, str(idxName)))
         elif len(idxes) == 3:
           fieldDetail = '^'.join((fieldDetail, str(idxes[-1])))
       elif fileNo == self._curFileNo:
         # NOTE(review): self-referencing pointer without an index is
         # registered for later back-patching -- confirm intent.
         pointerFileNo = fileNo
   elif fieldAttr.getType() == FileManField.FIELD_TYPE_DATE_TIME: # datetime
     if value.find(',') >=0:
       fieldDetail = horologToDateTime(value)
     else:
       outDt = fmDtToPyDt(value)
       if outDt:
         fieldDetail = outDt
       else:
         logger.warn("Could not parse Date/Time: %s" % value)
   elif fieldAttr.getName().upper().startswith("TIMESTAMP"): # timestamp field
     if value.find(',') >=0:
       fieldDetail = horologToDateTime(value)
   if outDataEntry:
     dataField = FileManDataField(fieldAttr.getFieldNo(),
                                  fieldAttr.getType(),
                                  fieldAttr.getName(),
                                  fieldDetail)
     if pointerFileNo:
       self._addDataFieldToPointerRef(pointerFileNo, value, dataField)
     outDataEntry.addField(dataField)
     if fieldAttr.getFieldNo() == '.01':
       # The .01 field supplies the entry's name and type.
       outDataEntry.name = fieldDetail
       outDataEntry.type = fieldAttr.getType()
   return fieldDetail
Exemplo n.º 37
0
 def _generateDataTableHtml(self, fileManData, fileNo):
     """Write the data-list HTML page for one FileMan file.

     Small files get the rows inline; files with more than 4500 entries
     get an empty table plus a companion '<fileNo>_array.txt' JSON file
     that the page loads via Ajax.
     """
     outDir = self.outDir
     # Above this size the rows are served via Ajax instead of inline.
     isLargeFile = len(fileManData.dataEntries) > 4500
     tName = normalizePackageName(fileManData.name)
     outDir = os.path.join(self.outDir, fileNo.replace(".", "_"))
     if not os.path.exists(outDir):
         os.mkdir(outDir)
     with open("%s/%s.html" % (outDir, fileNo), 'w') as output:
         output.write("<html>\n")
         if isLargeFile:
             ajexSrc = "%s_array.txt" % fileNo
             outputLargeDataListTableHeader(output, ajexSrc, tName)
         else:
             outputDataListTableHeader(output, tName)
         output.write("<body id=\"dt_example\">")
         output.write("""<div id="container" style="width:80%">""")
         output.write("<h1>File %s(%s) Data List</h1>" % (tName, fileNo))
         writeTableListInfo(output, tName)
         if not isLargeFile:
             output.write("<tbody>\n")
             for ien in getKeys(fileManData.dataEntries.keys(), float):
                 dataEntry = fileManData.dataEntries[ien]
                 if not dataEntry.name:
                     logger.warn("no name for %s" % dataEntry)
                     continue
                 name = dataEntry.name
                 if isFilePointerType(dataEntry):
                     # NOTE(review): the returned link is unused here.
                     link, name = convertFilePointerToHtml(dataEntry.name)
                 dataHtmlLink = "<a href=\"../%s/%s\">%s</a>" % (
                     fileNo.replace(
                         ".", "_"), getDataEntryHtmlFile(ien, fileNo), name)
                 tableRow = [dataHtmlLink, dataEntry.ien]
                 output.write("<tr>\n")
                 """ table body """
                 for item in tableRow:
                     output.write("<td>%s</td>\n" % item)
                 output.write("</tr>\n")
         output.write("</tbody>\n")
         output.write("</table>\n")
         output.write("</div>\n")
         output.write("</div>\n")
         output.write("</body></html>\n")
     if isLargeFile:
         logger.info("Writing Ajax file: %s" % ajexSrc)
         """ Write out the data file in JSON format """
         outJson = {"aaData": []}
         with open(os.path.join(outDir, ajexSrc), 'w') as output:
             outArray = outJson["aaData"]
             for ien in getKeys(fileManData.dataEntries.keys(), float):
                 dataEntry = fileManData.dataEntries[ien]
                 if not dataEntry.name:
                     logger.warn("No name for %s" % dataEntry)
                     continue
                 name = dataEntry.name
                 if isFilePointerType(dataEntry):
                     link, name = convertFilePointerToHtml(dataEntry.name)
                 # Strip non-breaking spaces from the display name.
                 dataHtmlLink = "<a href=\"../%s/%s\">%s</a>" % (
                     fileNo.replace(
                         ".", "_"), getDataEntryHtmlFile(
                             ien, fileNo), str(name).replace("\xa0", ""))
                 outArray.append([dataHtmlLink, ien])
             json.dump(outJson, output)
Exemplo n.º 38
0
    def findGlobalsBySourceV2(self, dirName, pattern):
        """Scan ZWR export files under dirName matching pattern and register
        a Global for each one, grouped by package.

        The package name is taken from the path segment between "Packages"
        and "Globals"; the file number and description come from the file
        name; the global name is parsed out of the file body.  Afterwards,
        globals are (re)assigned to packages via fileNoPackageMappingDict
        and added to the cross reference.

        NOTE(review): assumes every path contains both "Packages" and
        "Globals" segments -- str.index() raises ValueError otherwise.
        """
        searchFiles = glob.glob(os.path.join(dirName, pattern))
        logger.info("Total Search Files are %d " % len(searchFiles))
        crossReference = self.crossRef
        allGlobals = crossReference.getAllGlobals()
        allPackages = crossReference.getAllPackages()
        skipFile = []
        fileNoSet = set()  # used to detect duplicated file numbers
        for file in searchFiles:
            # Package name is the path segment between "Packages/" and
            # "/Globals" ("Packages" is 8 chars, +1 for the separator).
            packageName = os.path.dirname(file)
            packageName = packageName[packageName.index("Packages") +
                                      9:packageName.index("Globals") - 1]
            if not crossReference.hasPackage(packageName):
                crossReference.addPackageByName(packageName)
            package = allPackages.get(packageName)
            zwrFile = codecs.open(file, 'r', encoding='utf-8', errors='ignore')
            lineNo = 0
            fileName = os.path.basename(file)
            # File names of the form "<fileNo>+<description>.zwr" carry the
            # FileMan file number and description.
            result = ZWR_FILENO_REGEX.search(fileName)
            if result:
                fileNo = result.group('fileNo')
                # Drop a single leading zero, e.g. "068" -> "68".
                if fileNo.startswith('0'): fileNo = fileNo[1:]
                globalDes = result.group('des')
            else:
                # Namespace-only exports (no file number) are skipped.
                result = ZWR_NAMESPACE_REGEX.search(fileName)
                if result:
                    namespace = result.group('namespace')
                    #                    package.addGlobalNamespace(namespace)
                    continue
                else:
                    continue
            globalName = ""  # find out the global name by parsing the global file
            logger.debug("Parsing file: %s" % file)
            for line in zwrFile:
                if lineNo == 0:
                    globalDes = line.strip()
                    # Removing the extra text in the header of the ZWR file
                    # to tell if it needs to be added or skipped
                    globalDes = globalDes.replace("OSEHRA ZGO Export: ", '')
                    if globalDes.startswith("^"):
                        # Header holds a bare global reference, not a
                        # description: record the namespace and skip.
                        logger.info("No Description: Skip this file: %s" %
                                    file)
                        skipFile.append(file)
                        namespace = globalDes[1:]
                        package.addGlobalNamespace(namespace)
                        break
                if lineNo >= 2:
                    # Data lines look like ^NAME(subscripts)="..." -- the
                    # global name is the part before "=", truncated at the
                    # zero node ("(0)" or ",0)").
                    info = line.strip().split('=')
                    globalName = info[0]
                    detail = info[1].strip("\"")
                    if globalName.find(',') > 0:
                        result = globalName.split(',')
                        if len(result) == 2 and result[1] == "0)":
                            globalName = result[0]
                            break
                    elif globalName.endswith("(0)"):
                        globalName = globalName.split('(')[0]
                        break
                    else:
                        continue
                lineNo = lineNo + 1
            if not fileNo:
                if file not in skipFile:
                    logger.warn("Warning: No FileNo found for file %s" % file)
                continue
            globalVar = Global(globalName, fileNo, globalDes,
                               allPackages.get(packageName))
            try:
                # Validate that the file number is numeric before keeping it.
                fileNum = float(globalVar.getFileNo())
            except ValueError as es:
                logger.error("error: %s, globalVar:%s file %s" %
                             (es, globalVar, file))
                continue


#            crossReference.addGlobalToPackage(globalVar, packageName)
# only add to allGlobals dict as we have to change the package later on
            if globalVar.getName() not in allGlobals:
                allGlobals[globalVar.getName()] = globalVar
            if fileNo not in fileNoSet:
                fileNoSet.add(fileNo)
            else:
                logger.error(
                    "Duplicated file No [%s,%s,%s,%s] file:%s " %
                    (fileNo, globalName, globalDes, packageName, file))
            zwrFile.close()
        logger.info(
            "Total # of Packages is %d and Total # of Globals is %d, Total Skip File %d, total FileNo is %d"
            %
            (len(allPackages), len(allGlobals), len(skipFile), len(fileNoSet)))

        # Reassign globals sorted by file number, applying the manual
        # file-number -> package corrections first.
        sortedKeyList = sorted(
            list(allGlobals.keys()),
            key=lambda item: float(allGlobals[item].getFileNo()))
        for key in sortedKeyList:
            globalVar = allGlobals[key]
            # fix the uncategoried item
            if globalVar.getFileNo() in fileNoPackageMappingDict:
                globalVar.setPackage(allPackages[fileNoPackageMappingDict[
                    globalVar.getFileNo()]])
            crossReference.addGlobalToPackage(globalVar,
                                              globalVar.getPackage().getName())
Exemplo n.º 39
0
 def isIgnoredLine(self, line):
     """Return True (after logging a warning) when *line* matches any
     pattern in LINES_TO_IGNORE; otherwise return False."""
     hit = next((rx for rx in LINES_TO_IGNORE if rx.match(line)), None)
     if hit is None:
         return False
     logger.warn('Ignore line %s', line)
     return True
Exemplo n.º 40
0
 def _setFieldSpecificData(self, zeroFields, fileField, rootNode,
                           fileSchema, filePointedTo, subFile):
     """Populate type-specific details on *fileField* from the parsed
     zero-node pieces (*zeroFields*) of a FileMan data dictionary entry.

     Handles file pointers, subfile pointers, set-of-codes and variable
     pointers; every other field type is left untouched.  Also records
     file-to-file dependencies via self._addToFileDepDict.
     """
     if fileField.getType() == FileManField.FIELD_TYPE_FILE_POINTER:
         # zeroFields[2], when present, is the global root of the file
         # this field points to.
         fileGlobalRoot = ""
         if len(zeroFields) >= 3:
             fileGlobalRoot = zeroFields[2]
         if filePointedTo:
             if filePointedTo not in self._allSchema:
                 """ create a new fileman file """
                 self._allSchema[filePointedTo] = Global(
                     fileGlobalRoot, filePointedTo, "")
             pointedToFile = self._allSchema[filePointedTo]
             assert pointedToFile.isRootFile()
             fileField.setPointedToFile(pointedToFile)
             globalName = pointedToFile.getName()
             fileNo = fileSchema.getFileNo()
             # Dependencies are recorded against the root file, not the
             # subfile, so resolve to the root's file number first.
             if fileSchema.isSubFile():
                 fileNo = fileSchema.getRootFile().getFileNo()
             self._addToFileDepDict(fileNo, pointedToFile.getFileNo())
             if fileGlobalRoot:
                 if not globalName:
                     pointedToFile.setName(fileGlobalRoot)
                 elif globalName != fileGlobalRoot:
                     logger.error(
                         "%s: FileMan global root mismatch '%s' : '%s'" %
                         (zeroFields, globalName, fileGlobalRoot))
             else:
                 logger.info("@TODO, find file global root for # %s" %
                             filePointedTo)
         elif fileGlobalRoot:
             # Pointer with a global root but no file number yet.
             self._noPointedToFiles[fileGlobalRoot] = Global(fileGlobalRoot)
             logger.info("@TODO, set the file number for %s" %
                         fileGlobalRoot)
         else:
             logger.warn(
                 "No pointed to file set for file:%s: field:%r 0-index:%s" %
                 (fileSchema.getFileNo(), fileField, zeroFields))
     elif fileField.getType() == FileManField.FIELD_TYPE_SUBFILE_POINTER:
         if subFile:
             # Create/attach the subfile schema and link it to the field.
             if subFile not in self._allSchema:
                 self._allSchema[subFile] = FileManFile(
                     subFile, "", fileSchema)
             subFileSchema = self._allSchema[subFile]
             subFileSchema.setParentFile(fileSchema)
             fileSchema.addFileManSubFile(subFileSchema)
             fileField.setPointedToSubFile(subFileSchema)
         else:
             logger.warn(
                 "No subfile is set for file:%s, field:%r 0-index:%s" %
                 (fileSchema.getFileNo(), fileField, zeroFields))
     elif fileField.getType(
     ) == FileManField.FIELD_TYPE_SET and not subFile:
         # zeroFields[2] holds "code:label;code:label;...".
         setDict = dict(
             [x.split(':') for x in zeroFields[2].rstrip(';').split(';')])
         fileField.setSetMembers(setDict)
     elif fileField.getType(
     ) == FileManField.FIELD_TYPE_VARIABLE_FILE_POINTER:
         if "V" in rootNode:  # parsing variable pointer
             vptrs = parsingVariablePointer(rootNode['V'])
             vpFileSchemas = []
             if vptrs:
                 for x in vptrs:
                     if x not in self._allSchema:
                         self._allSchema[x] = Global("", x, "")
                     pointedToFile = self._allSchema[x]
                     if pointedToFile.isSubFile():
                         # Variable pointers should target root files only.
                         logger.error(
                             "Field: %r point to subFile: %s, parent: %s" %
                             (fileField, pointedToFile,
                              pointedToFile.getParentFile()))
                     else:
                         fileNo = fileSchema.getFileNo()
                         if fileSchema.isSubFile():
                             fileNo = fileSchema.getRootFile().getFileNo()
                         self._addToFileDepDict(fileNo,
                                                pointedToFile.getFileNo())
                     vpFileSchemas.append(self._allSchema[x])
                 fileField.setPointedToFiles(vpFileSchemas)
Exemplo n.º 41
0
 def isIgnoredLine(self, line):
     """Check *line* against each LINES_TO_IGNORE pattern; on the first
     match, log a warning and report True."""
     for pattern in LINES_TO_IGNORE:
         if not pattern.match(line):
             continue
         logger.warn("Ignore line %s", line)
         return True
     return False
Exemplo n.º 42
0
    def findGlobalsBySourceV2(self, dirName, pattern):
        """Scan ZWR export files under dirName matching pattern and register
        a Global entry per file, grouped by package.

        The package name is the path segment between "Packages" and
        "Globals"; the file number and description come from the file name
        ("<fileNo>+<description>.zwr"); the global name is parsed from the
        file body.  Files whose header starts with '^' carry only a
        namespace and are recorded as skipped.
        """
        searchFiles = glob.glob(os.path.join(dirName, pattern))
        logger.info("Total Search Files are %d " % len(searchFiles))
        crossReference = self.crossRef
        allGlobals = crossReference.getAllGlobals()
        allPackages = crossReference.getAllPackages()
        skipFile = []
        fileNoSet = set()  # used to detect duplicated file numbers
        for file in searchFiles:
            # Package name is the path segment between "Packages/" and
            # "/Globals" ("Packages" is 8 chars, +1 for the separator).
            packageName = os.path.dirname(file)
            packageName = packageName[packageName.index("Packages") +
                                      9:packageName.index("Globals") - 1]
            if not crossReference.hasPackage(packageName):
                logger.info("Package: %s is new" % packageName)
                crossReference.addPackageByName(packageName)
            package = allPackages.get(packageName)
            fileName = os.path.basename(file)
            result = re.search("(?P<fileNo>^[0-9.]+)(-1)?\+(?P<des>.*)\.zwr$",
                               fileName)
            if result:
                fileNo = result.group('fileNo')
                # Drop a single leading zero, e.g. "068" -> "68".
                if fileNo.startswith('0'):
                    fileNo = fileNo[1:]
                globalDes = result.group('des')
            else:
                # Namespace-only exports ("<namespace>.zwr") carry no file
                # number; nothing to register.
                continue
            globalName = ""  # find out the global name by parsing the global file
            logger.debug("Parsing file: %s" % file)
            lineNo = 0
            # 'with' guarantees the handle is closed even on the early
            # 'continue' paths below (the original leaked it there).
            with open(file, 'r') as zwrFile:
                for line in zwrFile:
                    if lineNo == 0:
                        globalDes = line.strip()
                        # Removing the extra text in the header of the ZWR
                        # file to tell if it needs to be added or skipped
                        globalDes = globalDes.replace("OSEHRA ZGO Export: ", '')
                        if globalDes.startswith("^"):
                            logger.info("No Description: Skip this file: %s" %
                                        file)
                            skipFile.append(file)
                            namespace = globalDes[1:]
                            package.addGlobalNamespace(namespace)
                            break
                    if lineNo == 1:
                        # The second line of a ZWR export is the "ZWR" marker.
                        assert re.search('ZWR', line.strip())
                    if lineNo >= 2:
                        # Data lines look like ^NAME(subscripts)="..." -- the
                        # global name is the part before "=", truncated at
                        # the zero node ("(0)" or ",0)").
                        info = line.strip().split('=')
                        globalName = info[0]
                        detail = info[1].strip("\"")
                        if globalName.find(',') > 0:
                            result = globalName.split(',')
                            if len(result) == 2 and result[1] == "0)":
                                globalName = result[0]
                                break
                        elif globalName.endswith("(0)"):
                            globalName = globalName.split('(')[0]
                            break
                        else:
                            continue
                    lineNo = lineNo + 1
            logger.debug("globalName: %s, Des: %s, fileNo: %s, package: %s" %
                         (globalName, globalDes, fileNo, packageName))
            if len(fileNo) == 0:
                if file not in skipFile:
                    logger.warn("Warning: No FileNo found for file %s" % file)
                continue
            globalVar = Global(globalName, fileNo, globalDes,
                               allPackages.get(packageName))
            try:
                # Validate that the file number is numeric before keeping it.
                fileNum = float(globalVar.getFileNo())
            except ValueError as es:  # was py2-only "except ValueError, es"
                logger.error("error: %s, globalVar:%s file %s" %
                             (es, globalVar, file))
                continue
            # only add to allGlobals dict as we have to change the package
            # later on
            if globalVar.getName() not in allGlobals:
                allGlobals[globalVar.getName()] = globalVar
            if fileNo not in fileNoSet:
                fileNoSet.add(fileNo)
            else:
                logger.error(
                    "Error, duplicated file No [%s,%s,%s,%s] file:%s " %
                    (fileNo, globalName, globalDes, packageName, file))
Exemplo n.º 43
0
  def findGlobalsBySourceV2(self, dirName, pattern):
    """Scan ZWR export files under dirName matching pattern and register
    a Global entry per file, grouped by package.

    The package name is the path segment between "Packages" and "Globals";
    the file number and description come from the file name (matched by
    ZWR_FILENO_REGEX); the global name is parsed from the file body.
    Files whose header starts with '^' carry only a namespace and are
    recorded as skipped.
    """
    searchFiles = glob.glob(os.path.join(dirName, pattern))
    logger.info("Total Search Files are %d " % len(searchFiles))
    crossReference = self.crossRef
    allGlobals = crossReference.getAllGlobals()
    allPackages = crossReference.getAllPackages()
    skipFile = []
    fileNoSet = set()  # used to detect duplicated file numbers
    for file in searchFiles:
      # Package name is the path segment between "Packages/" and
      # "/Globals" ("Packages" is 8 chars, +1 for the separator).
      packageName = os.path.dirname(file)
      packageName = packageName[packageName.index("Packages") + 9:packageName.index("Globals") - 1]
      if not crossReference.hasPackage(packageName):
        crossReference.addPackageByName(packageName)
      package = allPackages.get(packageName)
      fileName = os.path.basename(file)
      result = ZWR_FILENO_REGEX.search(fileName)
      if result:
        fileNo = result.group('fileNo')
        # Drop a single leading zero, e.g. "068" -> "68".
        if fileNo.startswith('0'):
          fileNo = fileNo[1:]
        globalDes = result.group('des')
      else:
        # Namespace-only exports (ZWR_NAMESPACE_REGEX) carry no file
        # number; nothing to register.
        continue
      globalName = "" # find out the global name by parsing the global file
      logger.debug("Parsing file: %s" % file)
      lineNo = 0
      # 'with' guarantees the handle is closed even on the early 'continue'
      # paths below (the original leaked it there).
      with open(file, 'r') as zwrFile:
        for line in zwrFile:
          if lineNo == 0:
            globalDes = line.strip()
            # Removing the extra text in the header of the ZWR file
            # to tell if it needs to be added or skipped
            globalDes = globalDes.replace("OSEHRA ZGO Export: ", '')
            if globalDes.startswith("^"):
              logger.info("No Description: Skip this file: %s" % file)
              skipFile.append(file)
              namespace = globalDes[1:]
              package.addGlobalNamespace(namespace)
              break
          if lineNo >= 2:
            # Data lines look like ^NAME(subscripts)="..." -- the global
            # name is the part before "=", truncated at the zero node.
            info = line.strip().split('=')
            globalName = info[0]
            detail = info[1].strip("\"")
            if globalName.find(',') > 0:
                result = globalName.split(',')
                if len(result) == 2 and result[1] == "0)":
                    globalName = result[0]
                    break
            elif globalName.endswith("(0)"):
                globalName = globalName.split('(')[0]
                break
            else:
                continue
          lineNo = lineNo + 1
      if not fileNo:
        if file not in skipFile:
          logger.warn("Warning: No FileNo found for file %s" % file)
        continue
      globalVar = Global(globalName, fileNo, globalDes,
                         allPackages.get(packageName))
      try:
        # Validate that the file number is numeric before keeping it.
        fileNum = float(globalVar.getFileNo())
      except ValueError as es:  # was py2-only "except ValueError, es"
        logger.error("error: %s, globalVar:%s file %s" % (es, globalVar, file))
        continue
      # only add to allGlobals dict as we have to change the package later on
      if globalVar.getName() not in allGlobals:
        allGlobals[globalVar.getName()] = globalVar
      if fileNo not in fileNoSet:
        fileNoSet.add(fileNo)
      else:
        logger.error("Duplicated file No [%s,%s,%s,%s] file:%s " %
                      (fileNo, globalName, globalDes, packageName, file))
Exemplo n.º 44
0
 def _setFieldSpecificData(self, zeroFields, fileField, rootNode,
                          fileSchema, filePointedTo, subFile):
   """Populate type-specific details on *fileField* from the parsed
   zero-node pieces (*zeroFields*) of a FileMan data dictionary entry.

   Handles file pointers, subfile pointers, set-of-codes and variable
   pointers; every other field type is left untouched.  Also records
   file-to-file dependencies via self._addToFileDepDict.
   """
   if fileField.getType() == FileManField.FIELD_TYPE_FILE_POINTER:
     # zeroFields[2], when present, is the global root of the file this
     # field points to.
     fileGlobalRoot = ""
     if len(zeroFields) >= 3:
       fileGlobalRoot = zeroFields[2]
     if filePointedTo:
       if filePointedTo not in self._allSchema:
         """ create a new fileman file """
         self._allSchema[filePointedTo] = Global(fileGlobalRoot,
                                                 filePointedTo,
                                                 "")
       pointedToFile = self._allSchema[filePointedTo]
       assert pointedToFile.isRootFile()
       fileField.setPointedToFile(pointedToFile)
       globalName = pointedToFile.getName()
       fileNo = fileSchema.getFileNo()
       # Dependencies are recorded against the root file, not the
       # subfile, so resolve to the root's file number first.
       if fileSchema.isSubFile():
         fileNo = fileSchema.getRootFile().getFileNo()
       self._addToFileDepDict(fileNo,
                              pointedToFile.getFileNo())
       if fileGlobalRoot:
         if not globalName:
           pointedToFile.setName(fileGlobalRoot)
         elif globalName != fileGlobalRoot:
           logger.warning("%s: FileMan global root mismatch '%s' : '%s'" %
                         (zeroFields, globalName, fileGlobalRoot))
       else:
         logger.info("@TODO, find file global root for # %s" % filePointedTo)
     elif fileGlobalRoot:
       # Pointer with a global root but no file number yet.
       self._noPointedToFiles[fileGlobalRoot] = Global(fileGlobalRoot)
       logger.info("@TODO, set the file number for %s" % fileGlobalRoot)
     else:
       logger.warn("No pointed to file set for file:%s: field:%r 0-index:%s" %
                    (fileSchema.getFileNo(), fileField, zeroFields))
   elif fileField.getType() == FileManField.FIELD_TYPE_SUBFILE_POINTER:
     if subFile:
       # Create/attach the subfile schema and link it to the field.
       if subFile not in self._allSchema:
         self._allSchema[subFile] = FileManFile(subFile, "", fileSchema)
       subFileSchema = self._allSchema[subFile]
       subFileSchema.setParentFile(fileSchema)
       fileSchema.addFileManSubFile(subFileSchema)
       fileField.setPointedToSubFile(subFileSchema)
     else:
       logger.warn("No subfile is set for file:%s, field:%r 0-index:%s" %
                    (fileSchema.getFileNo(), fileField, zeroFields))
   elif fileField.getType() == FileManField.FIELD_TYPE_SET and not subFile:
     # zeroFields[2] holds "code:label;code:label;...".
     setDict = dict([x.split(':') for x in zeroFields[2].rstrip(';').split(';')])
     fileField.setSetMembers(setDict)
   elif fileField.getType() == FileManField.FIELD_TYPE_VARIABLE_FILE_POINTER:
     if "V" in rootNode: # parsing variable pointer
       vptrs = parsingVariablePointer(rootNode['V'])
       vpFileSchemas = []
       if vptrs:
         for x in vptrs:
           if x not in self._allSchema:
             self._allSchema[x] = Global("", x, "")
           pointedToFile = self._allSchema[x]
           if pointedToFile.isSubFile():
             # Variable pointers should target root files only.
             logger.error("Field: %r point to subFile: %s, parent: %s" %
                          (fileField, pointedToFile,
                           pointedToFile.getParentFile()))
           else:
             fileNo = fileSchema.getFileNo()
             if fileSchema.isSubFile():
               fileNo = fileSchema.getRootFile().getFileNo()
             self._addToFileDepDict(fileNo,
                                    pointedToFile.getFileNo())
           vpFileSchemas.append(self._allSchema[x])
         fileField.setPointedToFiles(vpFileSchemas)
Exemplo n.º 45
0
 def parse(self, inputFilename, outputFilename):
     """Parse an ICR flat-text export into record dicts and dump them as JSON.

     Reads *inputFilename* line by line, accumulating one dict per ICR item
     in self._outObject, then writes the list to *outputFilename*.  Also
     updates the module-level 'date' from the report header line.
     """
     global date
     with open(inputFilename, 'r') as ICRFile:
         for line in ICRFile:
             line = line.rstrip("\r\n")
             self._curLineNo += 1
             # get rid of lines that are ignored
             if self.isIgnoredLine(line):
                 continue
             match = INTEGRATION_REFERENCES_LIST.match(line)
             if match:
                 date = match.group(1).strip()
                 continue
             match = START_OF_RECORD.match(line)
             if match and not self._DBAComments and not self._generalDescription:
                 self._startOfNewItem(match, line)
                 continue
             match = GENERIC_START_OF_RECORD.search(line)
             if not match:
                 match = DBA_COMMENTS.match(line)
                 if match:
                     self._DBAComments = True
             if match and match.group('name') in ICR_FILE_KEYWORDS:
                 fieldName = match.group('name')
                 if fieldName == 'DBA Comments':
                     self._DBAComments = True
                 elif fieldName == 'GENERAL DESCRIPTION':
                     self._generalDescription = True
                 if self._DBAComments:
                     # Any recognized keyword terminates a DBA Comments block.
                     if fieldName in ICR_FILE_KEYWORDS:
                         self._DBAComments = False
                 elif self._generalDescription:
                     if line.startswith("  STATUS:"):  # Starts with exactly 2 spaces
                         self._generalDescription = False
                 if self._DBAComments:
                     fieldName = 'DBA Comments'
                     if self._curField == fieldName:
                         self._appendWordsFieldLine(line)
                     else:
                         self._curField = fieldName
                         name = match.group('name')  # this is the name part
                         restOfLine = line[match.end():]
                         self._curRecord[name] = restOfLine.strip()
                 elif self._generalDescription:
                     fieldName = 'GENERAL DESCRIPTION'
                     if self._curField == fieldName:
                         self._appendWordsFieldLine(line)
                     else:
                         # Starting to process general description
                         self._curField = fieldName
                         self._rewindStack()
                         self._findKeyValueInLine(match, line, self._curRecord)
                 elif isSubFile(fieldName):
                     self._curField = fieldName
                     self._startOfSubFile(match, line)
                 else:
                     logger.debug('field name is: %s', fieldName)
                     logger.debug('cur field is: %s', self._curField)
                     # Check to see if fieldName is already in the out list
                     if isWordProcessingField(self._curField):
                         if self._ignoreKeywordInWordProcessingFields(fieldName):
                             self._appendWordsFieldLine(line)
                             continue
                     # figure out where to store the record
                     self._curField = fieldName
                     self._rewindStack()
                     self._findKeyValueInLine(match, line, self._curRecord)
             elif self._curField and self._curField in self._curRecord:
                 if len(line.strip()) == 0 and not isWordProcessingField(self._curField):
                     logger.warn('Ignore blank line for current field: [%s]', self._curField)
                     continue
                 self._appendWordsFieldLine(line)
             else:
                 if self._curRecord:
                     if len(line.strip()) == 0:
                         continue
                     # Python 3 compatible (was a Python 2 print statement).
                     print('No field associated with line %s: %s ' % (self._curLineNo, line))
     logger.info('End of file now')
     if len(self._curStack) > 0:
         self._curField = None
         self._rewindStack()
     if self._curRecord:
         logger.info('Add last record: %s', self._curRecord)
         self._outObject.append(self._curRecord)
     with open(outputFilename, 'w') as out_file:
         json.dump(self._outObject, out_file, indent=4)
Exemplo n.º 46
0
 def onSectionStart(self, line, section, Global, CrossReference):
     """Handle the first line of a field-definition section in a FileMan
     data dictionary listing.

     Extracts the file/field number from *line*, resolves the target
     (sub)file into self._curFile, parses name/location/type from the rest
     of the line and creates the corresponding field via
     FileManFieldFactory, attaching it to the current file.
     """
     self._lines = []
     result = DataDictionaryListFileLogParser.FILEMAN_FIELD_START.search(line)
     assert result
     fileNo = result.group('FileNo')
     fieldNo = result.group("FieldNo")
     # A file number different from the global's marks a subfile section.
     self._isSubFile = float(fileNo) != float(Global.getFileNo())
     if self._isSubFile:
         self._curFile = Global.getSubFileByFileNo(fileNo)
         assert self._curFile, "Could not find subFile [%s] in file [%s] line [%s]" % (fileNo, Global.getFileNo(), line)
     else:
         self._curFile = Global
     restOfLineStart = line.find("," + fieldNo) + len(fieldNo)
     defaultIdentLevel = self.__getDefaultIndentLevel__(self._curFile, self.DEFAULT_NAME_INDENT)
     if restOfLineStart > defaultIdentLevel:
         # Field number ends past the expected name column; it may have
         # been misparsed -- validate it and re-extract if necessary.
         logger.warning("FileNo: %s, FieldNo: %s, line: %s, may not be a valid field no, %d, %d" %
                         (fileNo, fieldNo, line, restOfLineStart, defaultIdentLevel))
         try:
             floatValue = float(fieldNo)
         except ValueError:
             logger.error("invalid fieldNo %s" % fieldNo)
             fieldNo = line[line.find(",")+1:defaultIdentLevel]
             floatValue = float(fieldNo)  # raises if still not numeric
     restOfLine = line[line.find("," + fieldNo) + len(fieldNo)+1:].strip()
     result = NAME_LOC_TYPE_REGEX.search(restOfLine)
     fName, fType, fLocation = None, None, None
     if result:
         fName = result.group('Name').strip()
         fLocation = result.group('Loc').strip()
         if fLocation == ";":
             fLocation = None
         fType = result.group('Type').strip()
     else:
         # handle three cases, 1. no location info 2. no type info 3. Both
         if restOfLine.find(";") != -1: # missing type info
             logger.warn("Missing Type information [%s]" % line)
             result = NAME_LOC_REGEX.search(restOfLine)
             if result:
                 fName = result.group('Name').strip()
                 fLocation = result.group('Loc').strip()
             else:
                 logger.error("Could not parse [%s]" % restOfLine)
                 return
         else: # missing location, assume at least two space seperate name and type
             result = NAME_TYPE_REGEX.search(restOfLine)
             if result:
                 fName = result.group('Name').strip()
                 fType = result.group('Type').strip()
             else:
                 logger.warn("Guessing Name: %s at line [%s]" % (restOfLine.strip(), line))
     stripedType = ""
     if fType:
         stripedType = self.__stripFieldAttributes__(fType)
     if stripedType:
         self.__createFieldByType__(fieldNo, stripedType, fName, fLocation, line, Global, CrossReference)
     else:
         self._field = FileManFieldFactory.createField(fieldNo, fName, FileManField.FIELD_TYPE_NONE, fLocation)
     self._curFile.addFileManField(self._field)
     if stripedType:
         self.__parseFieldAttributes__(fType)
Exemplo n.º 47
0
 def _parseIndividualFieldDetail(self, value, fieldAttr, outDataEntry):
     """Convert a raw FileMan field *value* into a display detail string
     according to the field's type (*fieldAttr*), and attach it to
     *outDataEntry* as a FileManDataField when one is provided.

     Returns the formatted detail, or None for blank input.
     """
     value = value.strip(' ')
     if not value:
         return
     fieldDetail = value
     pointerFileNo = None
     if fieldAttr.isSetType():
         # Set-of-codes: map the stored code to its display member.
         setDict = fieldAttr.getSetMembers()
         if setDict and value in setDict:
             fieldDetail = setDict[value]
     elif fieldAttr.isFilePointerType() or fieldAttr.isVariablePointerType(
     ):
         fileNo = None
         ien = None
         if fieldAttr.isFilePointerType():
             filePointedTo = fieldAttr.getPointedToFile()
             if filePointedTo:
                 fileNo = filePointedTo.getFileNo()
                 ien = value
             else:
                 fieldDetail = 'No Pointed to File'
         else:  # for variable pointer type
             # Variable pointer values are "<ien>;<global location>".
             vpInfo = value.split(';')
             if len(vpInfo) != 2:
                 logger.error("Unknown variable pointer format: %s" % value)
                 fieldDetail = "Unknow Variable Pointer"
             else:
                 fileNo = self.getFileNoByGlobalLocation(vpInfo[1])
                 ien = vpInfo[0]
                 if not fileNo:
                     logger.warn("Could not find File for %s" % value)
                     fieldDetail = 'Global Root: %s, IEN: %s' % (vpInfo[1],
                                                                 ien)
         if fileNo and ien:
             # Render as "fileNo^ien", appending the key index name when
             # one is registered for the target record.
             fieldDetail = '^'.join((fileNo, ien))
             idxName = self._getFileKeyIndex(fileNo, ien)
             if idxName:
                 idxes = str(idxName).split('^')
                 if len(idxes) == 1:
                     fieldDetail = '^'.join((fieldDetail, str(idxName)))
                 elif len(idxes) == 3:
                     fieldDetail = '^'.join((fieldDetail, str(idxes[-1])))
             elif fileNo == self._curFileNo:
                 # Self-referencing pointer without an index yet; remember
                 # it so the display value can be resolved later.
                 pointerFileNo = fileNo
     elif fieldAttr.getType(
     ) == FileManField.FIELD_TYPE_DATE_TIME:  # datetime
         # Values containing ',' are $HOROLOG format; otherwise FileMan
         # date format.
         if value.find(',') >= 0:
             fieldDetail = horologToDateTime(value)
         else:
             outDt = fmDtToPyDt(value)
             if outDt:
                 fieldDetail = outDt
             else:
                 logger.warn("Could not parse Date/Time: %s" % value)
     elif fieldAttr.getName().upper().startswith(
             "TIMESTAMP"):  # timestamp field
         if value.find(',') >= 0:
             fieldDetail = horologToDateTime(value)
     if outDataEntry:
         dataField = FileManDataField(fieldAttr.getFieldNo(),
                                      fieldAttr.getType(),
                                      fieldAttr.getName(), fieldDetail)
         if pointerFileNo:
             self._addDataFieldToPointerRef(pointerFileNo, value, dataField)
         outDataEntry.addField(dataField)
         # Field .01 is the record's name field by FileMan convention.
         if fieldAttr.getFieldNo() == '.01':
             outDataEntry.name = fieldDetail
             outDataEntry.type = fieldAttr.getType()
     return fieldDetail
Exemplo n.º 48
0
def convertJson(inputJsonFile, date, MRepositDir, patchRepositDir,
                generateHTML, generatePDF, outDir=None, pdfOutDir=None,
                local=False):
    """Convert a JSON file of ICR entries into HTML and/or PDF output.

    Reads *inputJsonFile* (a JSON array of ICR entry dicts), generates an
    individual page per entry, and — for HTML output — per-package summary
    pages, an all-entries summary page, and a package-dependency summary page.

    Args:
        inputJsonFile: path to the JSON file containing the ICR entries.
        date: date value passed through to the page generators.
        MRepositDir: M repository directory, used to build the cross
            reference and the RPC name-to-IEN mapping.
        patchRepositDir: patch repository directory for the cross reference.
        generateHTML: if truthy, emit HTML pages into *outDir*.
        generatePDF: if truthy, emit PDF pages into *pdfOutDir*.
        outDir: output directory for HTML; required when generateHTML is set.
        pdfOutDir: output directory for PDFs; required when generatePDF is set.
        local: forwarded to getDOXURL/getViViaNURL to pick local vs remote URLs.

    Raises:
        Exception: if neither output format is requested, or a required
            output directory is missing.

    Side effects: sets the module globals DOX_URL, VIVIAN_URL and
    RPC_NAME_TO_IEN_MAPPING, and may create output directories on disk.
    """
    if not generateHTML and not generatePDF:
        raise Exception("Nothing to generate!")

    # Resolve the base URLs once for the whole run; the page generators
    # presumably read these module globals.
    global DOX_URL
    global VIVIAN_URL
    DOX_URL = getDOXURL(local)
    VIVIAN_URL = getViViaNURL(local)

    if generateHTML:
        if not outDir:
            raise Exception("Must specify Output directory")
        if not os.path.exists(outDir):
            # Will also create intermediate directories if needed
            os.makedirs(outDir)

    if generatePDF:
        if not pdfOutDir:
            raise Exception("Must specify PDF Output directory")
        # Will also create intermediate directories if needed
        if not os.path.exists(pdfOutDir):
            os.makedirs(pdfOutDir)

    # Deferred import; kept function-local in the original (possibly to
    # avoid an import cycle — TODO confirm).
    from InitCrossReferenceGenerator import parseCrossReferenceGeneratorArgs
    crossRef = parseCrossReferenceGeneratorArgs(MRepositDir,
                                                patchRepositDir)
    global RPC_NAME_TO_IEN_MAPPING
    RPC_NAME_TO_IEN_MAPPING = generateSingleFileFieldToIenMappingBySchema(MRepositDir,
                                                                          crossRef)


    with open(inputJsonFile, 'r') as inputFile:
        pkgJson = {} # group by package
        allpkgJson = []
        inputJson = json.load(inputFile)

        for icrEntry in inputJson:
            # Entries without a NUMBER field cannot be processed; log and skip.
            if 'NUMBER' not in icrEntry:
                logger.error("Could not parse entry: " + str(icrEntry))
                continue

            if 'CUSTODIAL PACKAGE' in icrEntry:
                # Register any package name the cross reference does not yet
                # know about, mapping it to a normalized title-cased name.
                pkgName = icrEntry['CUSTODIAL PACKAGE']
                if crossRef.getMappedPackageName(pkgName) is None:
                    crossRef.addMappedPackage(pkgName,
                                              crossRef.normalizePackageName(pkgName).title())
                    logger.warning("Adding package " + pkgName + " to package name map.")
            if generatePDF:
                _generateICRIndividualPagePDF(icrEntry, date, pdfOutDir)
            if generateHTML:
                _generateICRIndividualPage(icrEntry, date, outDir, crossRef)
                summaryInfo = _convertICREntryToSummaryInfo(icrEntry, crossRef)
                allpkgJson.append(summaryInfo)
                # Group summaries by custodial package for the per-package pages.
                if 'CUSTODIAL PACKAGE' in icrEntry:
                    pkgJson.setdefault(icrEntry['CUSTODIAL PACKAGE'], []).append(summaryInfo)
        if generateHTML:
            # One summary page covering all entries, then one page per package.
            _generateICRSummaryPageImpl(allpkgJson, 'ICR List', 'All', date,
                                        outDir, crossRef, isForAll=True)
            for pkgName, outJson in iteritems(pkgJson):
                _generateICRSummaryPageImpl(outJson, 'ICR List', pkgName, date,
                                            outDir, crossRef)
            # NOTE(review): logger.warn is a deprecated alias of
            # logger.warning, and %s is used for an int count — works, but
            # %d / logger.warning would be the conventional form.
            logger.warn('Total # entry in pkgJson is [%s]', len(pkgJson))
            _generatePkgDepSummaryPage(inputJson, date, outDir, crossRef)