def retrieveAllHistoryNames(self,rootName=None):
    """
      Function to create a list of all the HistorySet names present in an existing database
      @ In,  rootName, string, optional, root name; if provided, only the history names whose paths end with this root are returned
      @ Out, workingList, list, List of the HistorySet names
    """
    if rootName:
      rname = utils.toString(rootName)
    if not self.fileOpen:
      self.__createObjFromFile() # Create the "self.allGroupPaths" list from the existing database
    if not rootName:
      workingList = [utils.toString(k).split('/')[-1] for k, v in zip(self.allGroupPaths,self.allGroupEnds) if v ]
    else:
      workingList = [utils.toString(k).split('/')[-1] for k, v in zip(self.allGroupPaths,self.allGroupEnds) if v and utils.toString(k).endswith(rname)]

    return workingList
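
A minimal standalone sketch of the filtering idea above, using plain lists in place of the database's internal bookkeeping (all names below are hypothetical):

groupPaths = ['/rootA/hist_1', '/rootA/hist_2', '/rootB/hist_1']
groupEnds  = [True, True, True]   # True marks a terminal ("end") group

def historyNames(paths, ends, rootName=None):
  """Return the last path segment of every end group, optionally filtered by rootName."""
  names = []
  for path, isEnd in zip(paths, ends):
    if isEnd and (rootName is None or path.endswith(rootName)):
      names.append(path.split('/')[-1])
  return names

print(historyNames(groupPaths, groupEnds))            # ['hist_1', 'hist_2', 'hist_1']
print(historyNames(groupPaths, groupEnds, 'hist_2'))  # ['hist_2']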
Example #2
 def addGroupInit(self, groupName, attributes=None):
     """
   Function to add an empty group to the database
   This function is generally used when the user provides a rootname in the input.
   It uses the groupName + it appends the date and time.
   @ In, groupName, string, group name
   @ In, attributes, dict, optional, dictionary of attributes that must be added as metadata (None by default)
   @ Out, None
 """
     attribs = {} if attributes is None else attributes
     groupNameInit = groupName + "_" + datetime.now().strftime(
         "%m-%d-%Y-%H-%S")
     for index in range(len(self.allGroupPaths)):
         comparisonName = utils.toString(self.allGroupPaths[index])
         splittedPath = comparisonName.split('/')
         if len(splittedPath) > 0:
             if groupNameInit in splittedPath[0]:
                 alphabetCounter, movingCounter = 0, 0
                 asciiAlphabet = list(string.ascii_uppercase)
                 prefixLetter = ''
                 while True:
                     testGroup = groupNameInit + "_" + prefixLetter + asciiAlphabet[
                         alphabetCounter]
                     if testGroup not in self.allGroupPaths:
                         groupNameInit = utils.toString(testGroup)
                         break
                     alphabetCounter += 1
                     if alphabetCounter >= len(asciiAlphabet):
                          prefixLetter = asciiAlphabet[movingCounter]  # advance the prefix letter once the single-letter suffixes are exhausted
                         alphabetCounter = 0
                         movingCounter += 1
                 break
     self.parentGroupName = "/" + groupNameInit
     # Create the group
     grp = self.h5FileW.create_group(groupNameInit)
     # Add metadata
     grp.attrs.update(attribs)
     grp.attrs['rootname'] = True
     grp.attrs['endGroup'] = False
     grp.attrs[b'groupName'] = groupNameInit
     self.allGroupPaths.append(utils.toBytes("/" + groupNameInit))
     self.allGroupEnds.append(False)
     self.__updateFileLevelInfoDatasets()
     self.h5FileW.flush()
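
A small self-contained sketch of the naming scheme the loop above implements (timestamped base name, then _A.._Z, then two-letter suffixes); the function and variable names are illustrative only:

import string
from datetime import datetime

def uniqueGroupName(baseName, existingPaths):
  """Return baseName plus a timestamp, extended with the first unused letter suffix."""
  stamped = baseName + "_" + datetime.now().strftime("%m-%d-%Y-%H-%S")
  if not any(stamped in path for path in existingPaths):
    return stamped
  alphabet = string.ascii_uppercase
  prefixLetter = ''
  alphabetCounter, movingCounter = 0, 0
  while True:
    candidate = stamped + "_" + prefixLetter + alphabet[alphabetCounter]
    if candidate not in existingPaths:
      return candidate
    alphabetCounter += 1
    if alphabetCounter >= len(alphabet):    # single letters exhausted: move to two-letter suffixes
      prefixLetter = alphabet[movingCounter]
      alphabetCounter = 0
      movingCounter += 1

print(uniqueGroupName('myRoot', ['/myRoot_01-01-2024-12-30']))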
Example #3
    def printInput(self, outfile=None):
        """
      Method to print out the new input
      @ In, outfile, string, optional, output file root
      @ Out, None
    """

        # 4 sub levels maximum
        def printSubLevels(xmlnode, IOfile, indentMultiplier):
            IOfile.write('  ' * indentMultiplier + '[./' + xmlnode.tag + ']\n')
            for string in xmlnode.tail if xmlnode.tail else []:
                IOfile.write('    ' * indentMultiplier + string + '\n')
            for key in xmlnode.attrib.keys():
                IOfile.write('    ' * indentMultiplier + key + ' = ' +
                             toStrish(xmlnode.attrib[key]) + '\n')

        if outfile is None:
            outfile = self.inputfile
        IOfile = open(outfile, 'w')
        for child in self.root:
            IOfile.write('[' + child.tag + ']\n')
            if child.tail:
                for string in child.tail:
                    IOfile.write('  ' + string + '\n')
            for key in child.attrib.keys():
                IOfile.write('  ' + toString(key) + ' = ' +
                             toString(toStrish(child.attrib[key])) + '\n')
            for childChild in child:
                printSubLevels(childChild, IOfile, 1)
                for childChildChild in childChild:
                    printSubLevels(childChildChild, IOfile, 2)
                    for childChildChildChild in childChildChild:
                        printSubLevels(childChildChildChild, IOfile, 3)
                        IOfile.write('      [../]\n')
                    IOfile.write('    [../]\n')
                IOfile.write('  [../]\n')
            IOfile.write('[]\n')
        IOfile.close()
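
The nesting above stops at four sub-levels; a recursive variant of the same GetPot-style block printing (a hypothetical sketch, not the RAVEN implementation) looks like this:

import sys
import xml.etree.ElementTree as ET

def writeBlock(IOfile, node, level=0):
  """Recursively write a node and its children as GetPot-style [name] / [./name] ... [../] blocks."""
  indent = '  ' * level
  IOfile.write(indent + ('[' + node.tag + ']' if level == 0 else '[./' + node.tag + ']') + '\n')
  for key, value in node.attrib.items():
    IOfile.write(indent + '  ' + key + ' = ' + str(value) + '\n')
  for child in node:
    writeBlock(IOfile, child, level + 1)
  IOfile.write(indent + ('[]' if level == 0 else '[../]') + '\n')

root = ET.fromstring('<Executioner type="Transient"><TimeStepper dt="0.1"/></Executioner>')
writeBlock(sys.stdout, root)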
Example #4
   def write(IOfile, indent, xmlnode):
       """
   Method to print out the key and value pairs
  @ In, IOfile, file, the file to write to
  @ In, indent, string, the string to print before the key
  @ In, xmlnode, ElementNode, the node with the attributes
  @ Out, None
 """
       for key in sorted(xmlnode.attrib.keys()):
           value = xmlnode.attrib[key]
           if type(value) == float:
               valueStr = repr(value)
           else:
               valueStr = toStrish(value)
           IOfile.write(indent + toString(key) + ' = ' + valueStr + '\n')
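
repr is used for floats so that full precision survives the round trip through the text file; a quick standard-Python illustration (str() could truncate floats in Python 2, which this code still had to support):

x = 0.1 + 0.2
print(repr(x))               # 0.30000000000000004, full precision
print(float(repr(x)) == x)   # True: repr round-trips the value exactly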
Example #5
 def __returnGroupPath(self, parentName):
     """
   Function to return a group Path
   @ In, parentName, string, parent ID
   @ Out, parentGroupName, string, parent group path
 """
     parentGroupName = '-$'  # control variable
     if parentName != '/':
          # this loop takes ~0.2 seconds on a 100 million entry list (which is acceptable)
         for s in self.allGroupPaths:
             if utils.toString(s).endswith("/" + parentName.strip()):
                 parentGroupName = s
                 break
     else:
         parentGroupName = '/'
     return parentGroupName
Example #6
 def writeFile(self, asString=False, **kwargs):
     """
   Writes the input file to disk.
   @ In, asString, bool, optional, if indicated then return string instead of writing
   @ In, kwargs, dict, optional, additional arguments to pass to prettify
   @ Out, pretty, str, optional, only returned if asString is True
 """
     #prettify tree
     pretty = utils.toString(xmlUtils.prettify(self.tree, **kwargs))
     if asString:
         return pretty
     #make sure file is written cleanly and anew
     if self.isOpen():
         self.close()
     self.writelines(pretty, overwrite=True)
     self.close()
Example #7
      bl = next(genB)
    except StopIteration:
      return False,msg + ['file '+str(a)+' has more lines than '+str(b)]
    if al.rstrip('\n\r') != bl.rstrip('\n\r'):
      same = False
      print('al '+repr(al)+" != bl "+repr(bl))
      msg += ['line '+str(i)+' is not the same!']



#first test XML to XML
print('Testing XML to XML ...')
tree = TS.parse(open(os.path.join('parse','example_xml.xml'),'r'))
strTree = TS.tostring(tree)
fname = os.path.join('parse','fromXmltoXML.xml')
open(fname,'w').write(toString(strTree))
same,msg = checkSameFile(open(fname,'r'),open(os.path.join('gold',fname),'r'))
if same:
  results['passed']+=1
  print('  ... passed!')
else:
  results['failed']+=1
  print('  ... failures in XML to XML:')
  print('     ',msg[0])



getpot = open(os.path.join('parse','example_getpot.i'),'r')
gtree = TS.parse(getpot,dType='GetPot')
#third test GetPot to XML
print('Testing GetPot to XML ...')
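
The fragment at the top of this example is the tail of the line-comparison helper invoked as checkSameFile below; a self-contained sketch of such a helper, assuming it returns a (same, messages) pair, could look like this:

def checkSameFile(a, b):
  """Compare two open files line by line, ignoring trailing newline differences."""
  msg = []
  same = True
  genA, genB = iter(a), iter(b)
  for i, al in enumerate(genA):
    try:
      bl = next(genB)
    except StopIteration:
      return False, msg + ['file ' + str(a) + ' has more lines than ' + str(b)]
    if al.rstrip('\n\r') != bl.rstrip('\n\r'):
      same = False
      msg += ['line ' + str(i) + ' is not the same!']
  if next(genB, None) is not None:          # b still has lines left, so the files differ in length
    return False, msg + ['file ' + str(b) + ' has more lines than ' + str(a)]
  return same, msg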
Example #8
    def dynamicEventTreeForRELAP7(self, **Kwargs):
        """
      This method is used to create a list of dictionaries that can be interpreted by the input Parser
      in order to change the input file based on the information present in the Kwargs dictionary.
      This is specific for DET sampler
      @ In, **Kwargs, dict, kwared dictionary containing the values of the parameters to be changed
      @ Out, listDict, list, list of dictionaries used by the parser to change the input file
    """
        listDict = []
        if 'hybridsamplerCoordinate' in Kwargs.keys():
            for preconditioner in Kwargs['hybridsamplerCoordinate']:
                preconditioner['executable'] = Kwargs['executable']
                if 'MonteCarlo' in preconditioner['SamplerType']:
                    listDict = self.__genBasePointSampler(**preconditioner)[1]
                    listDict.extend(self.monteCarloForRELAP7(**preconditioner))
                elif 'Grid' in preconditioner['SamplerType']:
                    listDict.extend(self.gridForRELAP7(**preconditioner))
                elif 'Stratified' in preconditioner['SamplerType']:
                    listDict.extend(
                        self.latinHyperCubeForRELAP7(**preconditioner))
        # Check the initiator distributions and add the next threshold
        if 'initiatorDistribution' in Kwargs.keys():
            for i in range(len(Kwargs['initiatorDistribution'])):
                modifDict = {}
                modifDict['name'] = [
                    'Distributions', Kwargs['initiatorDistribution'][i]
                ]
                modifDict['ProbabilityThreshold'] = Kwargs['PbThreshold'][i]
                listDict.append(modifDict)
                del modifDict
        # add the initial time for this new branch calculation
        if 'startTime' in Kwargs.keys():
            if Kwargs['startTime'] != -sys.float_info.max:
                modifDict = {}
                startTime = Kwargs['startTime']
                modifDict['name'] = ['Executioner']
                modifDict['start_time'] = startTime
                listDict.append(modifDict)
                del modifDict
        # create the restart file name root from the parent branch calculation
        # in order to restart the calc from the last point in time
        if 'endTimeStep' in Kwargs.keys():
            #if Kwargs['endTimeStep'] != 0 or Kwargs['endTimeStep'] == 0:

            if Kwargs['startTime'] != -sys.float_info.max:
                modifDict = {}
                endTimeStepString = str(Kwargs['endTimeStep']).zfill(4)  # zero-pad to at least four digits
                splitted = Kwargs['outfile'].split('~')
                output_parent = splitted[0] + '~' + splitted[1]
                restartFileBase = os.path.join(
                    "..", utils.toString(Kwargs['RAVEN_parentID']),
                    output_parent + "_cp", endTimeStepString)
                modifDict['name'] = ['Executioner']
                modifDict['restart_file_base'] = restartFileBase
                #print(' Restart file name base is "' + restart_file_base + '"')
                listDict.append(modifDict)
                del modifDict
        # max simulation time (if present)
        if 'endTime' in Kwargs.keys():
            modifDict = {}
            endTime = Kwargs['endTime']
            modifDict['name'] = ['Executioner']
            modifDict['end_time'] = endTime
            listDict.append(modifDict)
            del modifDict

        # in this way we erase the whole block so that any older info is neglected;
        # note that this "command" must be added before giving the info that refills the block
        modifDict = {}
        modifDict['name'] = ['RestartInitialize']
        modifDict['special'] = set(['erase_block'])
        listDict.append(modifDict)
        del modifDict
        # check and add the variables that have been changed by a distribution trigger
        # add them into the RestartInitialize block
        if 'branchChangedParam' in Kwargs.keys():
            if Kwargs['branchChangedParam'][0] not in ('None', b'None', None):
                for i in range(len(Kwargs['branchChangedParam'])):
                    modifDict = {}
                    modifDict['name'] = [
                        'RestartInitialize', Kwargs['branchChangedParam'][i]
                    ]
                    modifDict['value'] = Kwargs['branchChangedParamValue'][i]
                    listDict.append(modifDict)
                    del modifDict
        return listDict
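
A minimal illustration of the shape of the dictionaries this method returns (toy values; the real Kwargs come from the DET sampler):

Kwargs = {'startTime': 0.05, 'endTime': 2.0}     # hypothetical sampler output

listDict = []
if 'startTime' in Kwargs:
  listDict.append({'name': ['Executioner'], 'start_time': Kwargs['startTime']})
if 'endTime' in Kwargs:
  listDict.append({'name': ['Executioner'], 'end_time': Kwargs['endTime']})
# each entry names the input block to touch ('name') plus the keys to (re)write inside it
print(listDict)
# [{'name': ['Executioner'], 'start_time': 0.05}, {'name': ['Executioner'], 'end_time': 2.0}]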
Example #9
 def XMLread(self,xmlNode,runInfoSkip = set(),xmlFilename=None):
   """
      parses the xml input file and instantiates the classes needed to represent all objects in the simulation
     @ In, xmlNode, ElementTree.Element, xml node to read in
     @ In, runInfoSkip, set, optional, nodes to skip
     @ In, xmlFilename, string, optional, xml filename for relative directory
     @ Out, None
   """
   #TODO update syntax to note that we read InputTrees not XmlTrees
   unknownAttribs = utils.checkIfUnknowElementsinList(['printTimeStamps','verbosity','color','profile'],list(xmlNode.attrib.keys()))
   if len(unknownAttribs) > 0:
     errorMsg = 'The following attributes are unknown:'
     for element in unknownAttribs:
       errorMsg += ' ' + element
     self.raiseAnError(IOError,errorMsg)
   self.verbosity = xmlNode.attrib.get('verbosity','all').lower()
   if 'printTimeStamps' in xmlNode.attrib.keys():
     self.raiseADebug('Setting "printTimeStamps" to',xmlNode.attrib['printTimeStamps'])
     self.messageHandler.setTimePrint(xmlNode.attrib['printTimeStamps'])
   if 'color' in xmlNode.attrib.keys():
     self.raiseADebug('Setting color output mode to',xmlNode.attrib['color'])
     self.messageHandler.setColor(xmlNode.attrib['color'])
   if 'profile' in xmlNode.attrib.keys():
     thingsToProfile = list(p.strip().lower() for p in xmlNode.attrib['profile'].split(','))
     if 'jobs' in thingsToProfile:
       self.jobHandler.setProfileJobs(True)
   self.messageHandler.verbosity = self.verbosity
   runInfoNode = xmlNode.find('RunInfo')
   if runInfoNode is None:
     self.raiseAnError(IOError,'The RunInfo node is missing!')
   self.__readRunInfo(runInfoNode,runInfoSkip,xmlFilename)
   ### expand variable groups before continuing ###
   ## build variable groups ##
   varGroupNode = xmlNode.find('VariableGroups')
   # init, read XML for variable groups
   if varGroupNode is not None:
     varGroups = xmlUtils.readVariableGroups(varGroupNode,self.messageHandler,self)
   else:
     varGroups={}
   # read other nodes
   for child in xmlNode:
     if child.tag=='VariableGroups':
       continue #we did these before the for loop
     if child.tag in list(self.whichDict.keys()):
       self.raiseADebug('-'*2+' Reading the block: {0:15}'.format(str(child.tag))+2*'-')
       Class = child.tag
       if len(child.attrib.keys()) == 0:
         globalAttributes = {}
       else:
         globalAttributes = child.attrib
         #if 'verbosity' in globalAttributes.keys(): self.verbosity = globalAttributes['verbosity']
       if Class not in ['RunInfo','OutStreams'] and "returnInputParameter" in self.addWhatDict[Class].__dict__:
         paramInput = self.addWhatDict[Class].returnInputParameter()
         paramInput.parseNode(child)
         for childChild in paramInput.subparts:
           childName = childChild.getName()
           if "name" not in childChild.parameterValues:
             self.raiseAnError(IOError,'not found name attribute for '+childName +' in '+Class)
           name = childChild.parameterValues["name"]
           if "needsRunInfo" in self.addWhatDict[Class].__dict__:
             self.whichDict[Class][name] = self.addWhatDict[Class].returnInstance(childName,self.runInfoDict,self)
           else:
             self.whichDict[Class][name] = self.addWhatDict[Class].returnInstance(childName,self)
           self.whichDict[Class][name].handleInput(childChild, self.messageHandler, varGroups, globalAttributes=globalAttributes)
       elif Class != 'RunInfo':
         for childChild in child:
           subType = childChild.tag
           if 'name' in childChild.attrib.keys():
             name = childChild.attrib['name']
             self.raiseADebug('Reading type '+str(childChild.tag)+' with name '+name)
             #place the instance in the proper dictionary (self.whichDict[Type]) under his name as key,
             #the type is the general class (sampler, data, etc) while childChild.tag is the sub type
             #if name not in self.whichDict[Class].keys():  self.whichDict[Class][name] = self.addWhatDict[Class].returnInstance(childChild.tag,self)
             if Class != 'OutStreams':
               if name not in self.whichDict[Class].keys():
                 if "needsRunInfo" in self.addWhatDict[Class].__dict__:
                   self.whichDict[Class][name] = self.addWhatDict[Class].returnInstance(childChild.tag,self.runInfoDict,self)
                 else:
                   self.whichDict[Class][name] = self.addWhatDict[Class].returnInstance(childChild.tag,self)
               else:
                 self.raiseAnError(IOError,'Redundant naming in the input for class '+Class+' and name '+name)
             else:
               if name not in self.whichDict[Class][subType].keys():
                 self.whichDict[Class][subType][name] = self.addWhatDict[Class][subType].returnInstance(childChild.tag,self)
               else:
                  self.raiseAnError(IOError,'Redundant naming in the input for class '+Class+' and sub type '+subType+' and name '+name)
             #now we can read the info for this object
             #if globalAttributes and 'verbosity' in globalAttributes.keys(): localVerbosity = globalAttributes['verbosity']
             #else                                                      : localVerbosity = self.verbosity
             if Class != 'OutStreams':
               self.whichDict[Class][name].readXML(childChild, self.messageHandler, varGroups, globalAttributes=globalAttributes)
             else:
               self.whichDict[Class][subType][name].readXML(childChild, self.messageHandler, globalAttributes=globalAttributes)
           else:
             self.raiseAnError(IOError,'not found name attribute for one '+Class)
     else:
       #tag not in whichDict, check if it's a documentation tag
       if child.tag not in ['TestInfo']:
         self.raiseAnError(IOError,'<'+child.tag+'> is not among the known simulation components '+repr(child))
   # If requested, duplicate input
   # ###NOTE: All substitutions to the XML input tree should be done BEFORE this point!!
   if self.runInfoDict.get('printInput',False):
     fileName = os.path.join(self.runInfoDict['WorkingDir'],self.runInfoDict['printInput'])
     self.raiseAMessage('Writing duplicate input file:',fileName)
     outFile = open(fileName,'w')
     outFile.writelines(utils.toString(TreeStructure.tostring(xmlNode))+'\n') #\n for no-end-of-line issue
     outFile.close()
   if not set(self.stepSequenceList).issubset(set(self.stepsDict.keys())):
     self.raiseAnError(IOError,'The step list: '+str(self.stepSequenceList)+' contains steps that have not been declared: '+str(list(self.stepsDict.keys())))
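
The attribute check at the top of XMLread is a whitelist diff (checkIfUnknowElementsinList is the RAVEN utility used above); a standalone sketch of the same idea, with illustrative names, is:

def unknownElements(allowed, found):
  """Return the entries of 'found' that are not in the 'allowed' whitelist."""
  return [element for element in found if element not in allowed]

allowed = ['printTimeStamps', 'verbosity', 'color', 'profile']
attribs = {'verbosity': 'debug', 'colour': 'true'}          # note the misspelled attribute
unknown = unknownElements(allowed, list(attribs.keys()))
if unknown:
  raise IOError('The following attributes are unknown: ' + ' '.join(unknown))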
Example #10
    try:
        os.remove(fName)
    except OSError:
        later.append(fName)
    return later


#establish test XML
xmlString = '<root ratr="root_attrib"><child catr1="child attrib 1" catr2="child attrib 2"><cchild ccatr="cc_attrib">cchildtext</cchild></child></root>'
inFileName = 'testXMLInput.xml'
open(inFileName, 'w').write(xmlString)
xmlTree = ET.parse(inFileName)
toRemove = attemptFileClear(inFileName, toRemove)

# test prettify
pretty = utils.toString(xmlUtils.prettify(xmlTree))
prettyFileName = 'xml/testXMLPretty.xml'
open(prettyFileName, 'w').writelines(pretty)
gold = ''.join(
    line.rstrip('\n\r') for line in open(
        os.path.join(os.path.dirname(__file__), 'gold', prettyFileName), 'r'))
test = ''.join(line.rstrip('\n\r') for line in open(prettyFileName, 'r'))
if gold == test:
    results['pass'] += 1
    toRemove = attemptFileClear(prettyFileName, toRemove)
else:
    print('ERROR: Test of "pretty" failed!  See', prettyFileName,
          '(below) vs gold/', prettyFileName)
    print('( START', prettyFileName, ')')
    for line in open(prettyFileName, 'r'):
        print(line[:-1])  #omit newline
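
The try/except at the top of this example is the tail of the cleanup helper invoked as attemptFileClear(fName, toRemove) above; a self-contained reconstruction consistent with those call sites (hypothetical, not the original) is:

import os

def attemptFileClear(fName, later):
  """Try to delete fName now; if the OS refuses, remember it in 'later' for a retry."""
  try:
    os.remove(fName)
  except OSError:
    later.append(fName)
  return later

toRemove = []
toRemove = attemptFileClear('testXMLInput.xml', toRemove)   # file name taken from the test above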
Example #11
  def __addGroupRootLevel(self,groupName,attributes,source,upGroup=False):
    """
      Function to add a group into the database (root level)
      @ In, groupName, string, group name
      @ In, attributes, dict, dictionary of attributes that must be added as metadata
      @ In, source, File object, source file
      @ In, upGroup, bool, optional, if True, update an existing group instead of creating a new one
      @ Out, None
    """
    # Check in the "self.allGroupPaths" list if a group is already present...
    # If so, raise an error (deleting information that is already present is not desirable)
    if not upGroup:
      for index in range(len(self.allGroupPaths)):
        comparisonName = self.allGroupPaths[index]
        splittedPath=comparisonName.split('/')
        for splgroup in splittedPath:
          if groupName == splgroup and splittedPath[0] == self.parentGroupName:
            self.raiseAnError(IOError,"Group named " + groupName + " already present in database " + self.name + ". new group " + groupName + " is equal to old group " + comparisonName)

    if source['type'] == 'csv':
      # Source in CSV format
      f = open(source['name'],'rb')
      # Retrieve the headers of the CSV file
      firstRow = f.readline().strip(b"\r\n")
      #firstRow = f.readline().translate(None,"\r\n")
      headers = firstRow.split(b",")
      # if there is the alias system, replace the variable name
      if 'alias' in attributes.keys():
        for aliasType in attributes['alias'].keys():
          for var in attributes['alias'][aliasType].keys():
            if attributes['alias'][aliasType][var].strip() in headers:
              headers[headers.index(attributes['alias'][aliasType][var].strip())] = var.strip()
            else:
              metadataPresent = True if 'metadata' in attributes.keys() and 'SampledVars' in attributes['metadata'].keys() else False
              if not (metadataPresent and var.strip() in attributes['metadata']['SampledVars'].keys()):
                self.raiseAWarning('the ' + aliasType +' alias"'+var.strip()+'" has been defined but has not been found among the variables!')
      # Load the csv into a numpy array(n time steps, n parameters)
      data = np.loadtxt(f,dtype='float',delimiter=',',ndmin=2)
      # First parent group is the root name
      parentName = self.parentGroupName.replace('/', '')
      # Create the group
      if parentName != '/':
        parentGroupName = self.__returnParentGroupPath(parentName)
        # Retrieve the parent group from the HDF5 database
        if parentGroupName in self.h5FileW:
          rootgrp = self.h5FileW.require_group(parentGroupName)
        else:
          self.raiseAnError(ValueError,'NOT FOUND group named "' + parentGroupName+'" for loading file '+str(source['name']))
        if upGroup:
          grp = rootgrp.require_group(groupName)
          del grp[groupName+"_data"]
        else:
          grp = rootgrp.create_group(groupName)
      else:
        if upGroup:
          grp = self.h5FileW.require_group(groupName)
        else:
          grp = self.h5FileW.create_group(groupName)
      self.raiseAMessage('Adding group named "' + groupName + '" in DataBase "'+ self.name +'"')
      # Create dataset in this newly added group
      grp.create_dataset(groupName+"_data", dtype="float", data=data)
      # Add metadata
      grp.attrs["outputSpaceHeaders"     ] = headers
      grp.attrs["nParams"                ] = data[0,:].size
      grp.attrs["parentID"               ] = "root"
      grp.attrs["startTime"              ] = data[0,0]
      grp.attrs["end_time"               ] = data[data[:,0].size-1,0]
      grp.attrs["nTimeSteps"             ] = data[:,0].size
      grp.attrs["EndGroup"               ] = True
      grp.attrs["sourceType"             ] = source['type']
      if source['type'] == 'csv':
        grp.attrs["sourceFile"] = source['name']
      for attr in attributes.keys():
        if attr == 'metadata':
          if 'SampledVars' in attributes['metadata'].keys():
            inpHeaders = []
            inpValues  = []
            for inkey, invalue in attributes['metadata']['SampledVars'].items():
              if inkey not in headers:
                inpHeaders.append(utils.toBytes(inkey))
                inpValues.append(invalue)
            if len(inpHeaders) > 0:
              grp.attrs[b'inputSpaceHeaders'] = inpHeaders
              grp.attrs[b'inputSpaceValues' ] = json.dumps(list(utils.toListFromNumpyOrC1arrayIterative(list( inpValues))))
        objectToConvert = mathUtils.convertNumpyToLists(attributes[attr])
        for o,obj in enumerate(objectToConvert):
          if isinstance(obj,Files.File):
            objectToConvert[o]=obj.getFilename()
        converted = json.dumps(objectToConvert)
        if converted and attr != 'name':
          grp.attrs[utils.toBytes(attr)]=converted
        #decoded = json.loads(grp.attrs[utils.toBytes(attr)])
      if "inputFile" in attributes.keys():
        grp.attrs[utils.toString("inputFile")] = utils.toString(" ".join(attributes["inputFile"])) if type(attributes["inputFile"]) == type([]) else utils.toString(attributes["inputFile"])
    else:
      self.raiseAnError(ValueError,source['type'] + " unknown!")
    # Add the group name into the list "self.allGroupPaths" and
    # set the relative bool flag into the dictionary "self.allGroupEnds"
    if parentGroupName != "/":
      self.allGroupPaths.append(parentGroupName + "/" + groupName)
      self.allGroupEnds[parentGroupName + "/" + groupName] = True
    else:
      self.allGroupPaths.append("/" + groupName)
      self.allGroupEnds["/" + groupName] = True
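
A condensed, self-contained sketch of the CSV-to-HDF5 step above, using only h5py and numpy (file names, headers and values are illustrative):

import io, json
import h5py
import numpy as np

csvText = "time,temperature\n0.0,300.0\n1.0,305.2\n"        # stand-in for the source CSV
headers = csvText.splitlines()[0].split(',')
data = np.loadtxt(io.StringIO(csvText), delimiter=',', skiprows=1, ndmin=2)

with h5py.File('example_database.h5', 'w') as h5FileW:
  grp = h5FileW.create_group('history_1')
  grp.create_dataset('history_1_data', dtype='float', data=data)
  grp.attrs['outputSpaceHeaders'] = headers
  grp.attrs['nParams'] = data[0, :].size
  grp.attrs['nTimeSteps'] = data[:, 0].size
  grp.attrs['startTime'] = data[0, 0]
  grp.attrs['end_time'] = data[-1, 0]
  # non-array metadata can be stored as a JSON string, as done above for 'SampledVars'
  grp.attrs['metadata'] = json.dumps({'SampledVars': {'temperature_bias': 0.5}})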
Example #12
def prettify(tree,
             doc=False,
             docLevel=0,
             startingTabs=0,
             addRavenNewlines=True):
    """
    Script for turning XML tree into something mostly RAVEN-preferred.  Does not align attributes as some devs like (yet).
    The output can be written directly to a file, as file('whatever.who','w').writelines(prettify(mytree))
    @ In, tree, xml.etree.ElementTree object, the tree form of an input file
    @ In, doc, bool, optional, if True treats the XML as being prepared for documentation instead of full printing
    @ In, docLevel, int, optional, if doc then only this many levels of tabs will use ellipses documentation
    @ In, startingTabs, int, optional, if provided determines the starting tab level for the prettified xml
    @ In, addRavenNewlines, bool, optional, if True then adds newline space between each main-level entity
    @Out, towrite, string, the entire contents of the desired file to write, including newlines
  """
    def prettifyNode(node, tabs=0, ravenNewlines=True):
        """
      "prettifies" a single node, and calls the same for its children
      adds whitespace to make node more human-readable
      @ In, node, ET.Element, node to prettify
      @ In, tabs, int, optional, indentation level for this node in the global scheme
      @ In, addRavenNewlines, bool, optional, if True then adds newline space between each main-level entity
      @ Out, None
    """
        linesep = "\n"  #os.linesep
        child = None  #putting it in namespace
        space = ' ' * 2 * tabs
        newlineAndTab = linesep + space
        if node.text is None:
            node.text = ''
        if len(node):
            node.text = node.text.strip()
            if doc and tabs < docLevel and node.text == '...':
                node.text = newlineAndTab + '  ' + node.text + newlineAndTab + '  '
            else:
                node.text = node.text + newlineAndTab + '  '
            for child in node:
                prettifyNode(child, tabs + 1, ravenNewlines=ravenNewlines)
            #remove extra tab from last child
            child.tail = child.tail[:-2]
        if node.tail is None:
            node.tail = ''
            if doc and tabs != 0 and tabs < docLevel + 1:
                node.tail = newlineAndTab + '...'
        else:
            node.tail = node.tail.strip()
            if doc and tabs < docLevel + 1:
                node.tail += newlineAndTab + '...'
        #custom: RAVEN likes spaces between first-level tab objects
        if ravenNewlines and tabs == 1 and not isComment(node):
            lines = linesep + linesep
        else:
            lines = linesep
        node.tail = node.tail + lines + space
        #custom: except if you're the last child
        if ravenNewlines and tabs == 0 and child is not None:
            child.tail = child.tail.replace(linesep + linesep, linesep)

    #end prettifyNode
    if isinstance(tree, ET.ElementTree):
        prettifyNode(tree.getroot(),
                     tabs=startingTabs,
                     ravenNewlines=addRavenNewlines)
        # NOTE must use utils.toString because ET.tostring returns bytestring in python3
        #  -- if ever we drop python2 support, can use ET.tostring(xml, encoding='unicode')
        return toString(ET.tostring(tree.getroot()))
    else:
        # NOTE must use utils.toString because ET.tostring returns bytestring in python3
        prettifyNode(tree, tabs=startingTabs, ravenNewlines=addRavenNewlines)
        return toString(ET.tostring(tree))
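
For comparison, newer Python (3.9+) ships a standard-library prettifier that covers the basic indentation part of what prettify does, without the RAVEN-specific blank lines or documentation ellipses; a minimal sketch:

import xml.etree.ElementTree as ET

tree = ET.ElementTree(ET.fromstring('<Simulation><RunInfo><JobName>test</JobName></RunInfo></Simulation>'))
ET.indent(tree, space='  ')                                  # in-place prettification, Python 3.9+
print(ET.tostring(tree.getroot(), encoding='unicode'))       # encoding='unicode' returns str, not bytes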