Example #1
    def initial(self):
        self.lc_parameters_module.initial()
        self.initial_condition_module.initial()
        self.root_zone_water_module.initial()
        self.snow_frost_module.initial()

        # self.drainage_module.initial()
        
        # self.evapotranspiration_module.initial()
        self.potential_et_module.initial()
        
        self.interception_module.initial()
        self.irrigation_demand_module.initial()
        self.irrigation_supply_module.initial()
        self.infiltration_module.initial()
        self.capillary_rise_module.initial()
        self.drainage_module.initial()
        
        self.actual_et_module.initial()
        
        self.crop_yield_module.initial()
        self.income_module.initial()
        self.investment_module.initial()
        self.accounting_module.initial()
        self.grid_cell_mean_module.initial()
        self.reporting_module = Reporting(
            self,
            self._configuration.outNCDir,
            self._configuration.NETCDF_ATTRIBUTES,
            self._configuration.irrNonPaddy,
            variable_list_crop,
            'irrNonPaddy')
Example #2
 def save_report(self):
   """Write a report of the results from the last fitting
   operation to a text file."""
   project = self.project
   dir = project.get_directory()
   projname = project.get_filename()
   name = os.path.splitext(projname)[0] + "_Report.txt"
   types = (
     ("Text Files", "*.txt"),
     ("All Files", "*"))
   path = tkFileDialog.asksaveasfilename(
     title="Save report as:",
     defaultextension=".txt", filetypes=types,
     initialdir=dir, initialfile=name)
   if path:
     Reporting.report(project, path)
Example #3
    def makeReport(self):

        dfTeamMap = pd.read_excel(self.listOfConfigFile[2], sheet_name='Sheet1')
        self.dfMainReport = self.dfMainReport.reset_index(drop=True)
        py.vlookup(self.dfMainReport, dfTeamMap, 'LAST_TASK', 'TASK', ['TEAM'], '')
        reportingData = R.Reporting(self.dfMainReport, self.EntireSheet)
        reportingData.setUp()
        reportingData.saveCount()
Example #4
def ProgramHeader():
    '''
    This is the program header that clears the console and then prints the current status.
    '''
    # Clear the console.

    cls()

    # Print the header
    print(pounds*31)
    print((tab*6), 'Current Brewery Status')
    print((tab*5),tab,'      ',  datetime.date.today().strftime("%m-%d-%Y"))
    print(new_line)

    # Show all brews currently in primary fermentation.
    Reporting.primary_state()
    # Show all brews currently in secondary fermentation.
    Reporting.secondary_state()
    # Show all brews currently in bottle conditioning.
    Reporting.bottled_state()
    # Show all brews that are ready to drink.
    Reporting.ready_to_drink()
    # Show all brews that are past their expiration date.
    Reporting.expired()

    print(pounds*31)
Example #5
"""
Created on Sun Apr 26 12:17:26 2020

@author: Barreneche_A
"""

import Figures
import Reporting

parea = [
    "Governance", "Public research system",
    "Innovation in firms and innovative entrepreneurship",
    "Science-industry knowledge transfer and sharing",
    "Human resources for research and innovation",
    "Research and innovation for society", "Emerging trends in STI policy"
]
themes = [['TH13', 'TH9', 'TH14', 'TH15', 'TH63'],
          [
              'TH18', 'TH19', 'TH20', 'TH27', 'TH22', 'TH10', 'TH24', 'TH25',
              'TH26', 'TH23', 'TH21', 'TH64'
          ],
          [
              'TH30', 'TH31', 'TH32', 'TH38', 'TH34', 'TH33', 'TH82', 'TH36',
              'TH35', 'TH37'
          ], ['TH41', 'TH42', 'TH47', 'TH43', 'TH44', 'TH46'],
          ['TH50', 'TH51', 'TH52', 'TH53', 'TH83', 'TH55', 'TH54'],
          ['TH58', 'TH61', 'TH65', 'TH66'],
          ['TH87', 'TH86', 'TH88', 'TH89', 'TH90', 'TH91']]

#Figures.main(parea, themes)
Reporting.main(parea)
Example #6
class ManagedLandWithFarmerBehaviour(LandCover):
    def __init__(self, var, config_section_name):
        super(ManagedLandWithFarmerBehaviour, self).__init__(
            var,
            config_section_name)
        
        self.lc_parameters_module = ManagedLandWithFarmerBehaviourParameters(self, config_section_name)
        self.initial_condition_module = InitialConditionManagedLand(self)
        self.root_zone_water_module = RootZoneWaterIrrigatedLand(self)
        self.snow_frost_module = SnowFrost(self)
        
        # self.drainage_module = Drainage(self)
        self.potential_et_module = PotentialEvapotranspiration(self)
        
        # self.evapotranspiration_module = Evapotranspiration(self)
        self.interception_module = Interception(self)
        self.irrigation_demand_module = IrrigationMultipleCrops(self)
        self.irrigation_supply_module = IrrigationSupply(self)
        self.infiltration_module = Infiltration(self)
        self.capillary_rise_module = CapillaryRise(self)
        self.drainage_module = Drainage(self)
        self.actual_et_module = ActualEvapotranspiration(self)
        
        self.crop_yield_module = CropYield(self)
        self.income_module = Income(self)
        self.investment_module = Investment(self)
        self.accounting_module = Accounting(self)
        self.grid_cell_mean_module = GridCellMean(self)
        self.add_dimensions()
        
    def initial(self):
        self.lc_parameters_module.initial()
        self.initial_condition_module.initial()
        self.root_zone_water_module.initial()
        self.snow_frost_module.initial()

        # self.drainage_module.initial()
        
        # self.evapotranspiration_module.initial()
        self.potential_et_module.initial()
        
        self.interception_module.initial()
        self.irrigation_demand_module.initial()
        self.irrigation_supply_module.initial()
        self.infiltration_module.initial()
        self.capillary_rise_module.initial()
        self.drainage_module.initial()
        
        self.actual_et_module.initial()
        
        self.crop_yield_module.initial()
        self.income_module.initial()
        self.investment_module.initial()
        self.accounting_module.initial()
        self.grid_cell_mean_module.initial()
        self.reporting_module = Reporting(
            self,
            self._configuration.outNCDir,
            self._configuration.NETCDF_ATTRIBUTES,
            self._configuration.irrNonPaddy,
            variable_list_crop,
            'irrNonPaddy')
        
    def dynamic(self):
        self.lc_parameters_module.dynamic()
        self.initial_condition_module.dynamic()
        
        self.root_zone_water_module.dynamic()
        self.snow_frost_module.dynamic()
        # self.drainage_module.dynamic()
        # self.evapotranspiration_module.dynamic()
        self.potential_et_module.dynamic()
        self.interception_module.dynamic()
        self.root_zone_water_module.dynamic()
        self.infiltration_module.compute_infiltration_capacity()        
        self.irrigation_demand_module.dynamic()        
        self.irrigation_supply_module.dynamic()

        # the order here (infiltration/cap rise/drainage)
        # is the same as in CWATM
        self.infiltration_module.dynamic()
        self.root_zone_water_module.dynamic()
        self.capillary_rise_module.dynamic()
        self.drainage_module.dynamic()
        
        self.root_zone_water_module.dynamic()
        self.actual_et_module.dynamic()
        self.crop_yield_module.dynamic()
        self.income_module.dynamic()
        self.investment_module.dynamic()
        self.accounting_module.dynamic()
        self.grid_cell_mean_module.dynamic()
        self.reporting_module.report()
Example #7
)
outdf = ActivityApplication.DoActivities(total_table, initout[0], activitylist,
                                         Generic.dict_activity, acdict,
                                         logfile, treatmask, dev, ug)

Helpers.pmes(
    '**FINISHED WITH ACTIVITY APPLICATION MODULE, ENTERING CARBON ACCOUNTING MODULE...'
)
templist = ApplyActions.ApplyGHG(outdf[0], activitylist, Generic.dict_activity,
                                 trt, ug, logfile, dev)

##Use this line below for debugging purposes

Helpers.pmes("**CREATING MULTIBENEFIT REPORTS...**")
Reporting.report(templist[0], outpath, gen, water, resistance, crop, nitrate,
                 air, cover14, cover30, Generic.lutables, acdict, oak, rre,
                 dev, cm, gra, cproc, terflag, ug, ucc, logfile, units,
                 watflag)
Helpers.pmes("**CREATING CARBON REPORTS...**")

#Finish other Reports
Reporting.emis_report(templist[0], outpath, activitylist, Generic.em14,
                      Generic.em30, acdict, dev, cm, ug, logfile)
Reporting.carbreport(templist[0], outpath, activitylist, Generic.Carbon2014,
                     Generic.Carbon2030, acdict, dev, cm, ug, logfile)

temptable = Reporting.report_acres(outdf[1], templist[0], activitylist,
                                   outpath, acdict, logfile)
Reporting.emissions(temptable, outpath, activitylist, acdict, logfile)
if plotlykey != 'None':
    import Create_Plots
    Create_Plots.Plots(outpath, acdict, activitylist, terflag, cproc,
Example #8
def change_state():
    '''
    prints the current state and allows the user to change the state to what they want.
    '''

    con = sqlite3.connect('brewery.db')
    with con:
        # Convert to dictionary cursor so that we can refer to the data by their column names.
        con.row_factory = sqlite3.Row

        c = con.cursor()
        # Print all the current brews so that the brewer can pick the one he/she needs to change.
        Reporting.show_all()
        print(new_line)
        updated_brew_number = input('What is the brew # that has changed state > ')
        print(new_line)
        # Print the brew states to pick from
        states = {
                    '1': 'primary',
                    '2': 'secondary',
                    '3': 'bottled',
                    '4': 'drinkable',
                    '5': 'expired'}
        while True:
            options = list(states.keys())
            options.sort()

            print('Possible States')
            for entry in options:
                print(entry, '.', states[entry])

            # Get the new state of the brew from the brewer
            print(new_line)
            selection = input('Choose a number from the list above: ')
            # Look the selection up directly in the states dict; anything else exits the loop.
            if selection in states:
                new_state = states[selection]
            else:
                break

            # Update the databank
            con.row_factory = sqlite3.Row
            c.execute("UPDATE beer SET state=? WHERE brew_number=?", (new_state, updated_brew_number))

            con.commit()

            # Print out the updated brew
            c.execute("SELECT * from beer WHERE brew_number=?", updated_brew_number)
            all_rows = c.fetchall()

            print('{0:<8}{1:^16}{2:^16}{3:^16}{4:^16}{5:^16}{6:^16}{7:^16}'.format("Brew #", "Name", "State", "Brew Date", "Rack Date", "Bottle Date", "Drink Date", "Expiration Date"))
            print('-'*120)
            # Fetch all the rows in a list of lists.
            for row in all_rows:
                print('{0:<8}{1:^16}{2:^16}{3:^16}{4:^16}{5:^16}{6:^16}{7:^16}'.format(row["brew_number"],
                                                                  row["name"],
                                                                  row["state"],
                                                                  row["brew_date"],
                                                                  row["rack_date"],
                                                                  row["bottle_date"],
                                                                  row["drink_date"],
                                                                  row["expiration_date"],))
            print(new_line*3)
            print('Successfully written to data bank.')
            time.sleep(3)
            print(new_line)
            print('Returning to Main Control Console.')
            time.sleep(3)
            break
Example #9
 def Report(self, rep):
     data = self.data
     rep = r.pushReport(data["properties"]["A"],
                        "A: Water permeability coefficient (L/h/m^2/bar)",
                        rep)
     rep = r.pushReport(data["properties"]["B"],
                        "B: Salt permeability coefficient (g/h/m^2)", rep)
     rep = r.pushReport(data["properties"]["S"], "S: Structural parameter",
                        rep)
     rep = r.pushReport(data["dimensions"]["Am"], "Area of membrane (m^2)",
                        rep)
     rep = r.pushReport(data["dimensions"]["Ld"],
                        "Length of membrane along draw direction (m)", rep)
     rep = r.pushReport(data["dimensions"]["Lf"],
                        "Length of membrane along feed direction (m)", rep)
     rep = r.pushReport(data["dimensions"]["Hc"], 'H: Channel height (m)',
                        rep)
     rep = r.pushReport(data["dimensions"]["Ss"], 'Ss: Spacer Spacing (m)',
                        rep)
     rep = r.pushReport(data["dimensions"]["s"],
                        "Approximate length of calculation element (m)",
                        rep)
     rep = r.pushReport(data["dimensions"]["ned"],
                        "Number of elements in draw direction", rep)
     rep = r.pushReport(data["dimensions"]["nef"],
                        "Number of elements in feed direction", rep)
     rep = r.pushReport(data["dimensions"]["dLd"],
                        "Length of element in draw direction (m)", rep)
     rep = r.pushReport(data["dimensions"]["dLf"],
                        "Length of element in feed direction (m)", rep)
     rep = r.pushReport(data["dimensions"]["dAd"],
                        "dAd: Area of channel in draw direction (m^2)", rep)
     rep = r.pushReport(data["dimensions"]["dAf"],
                        "dAf: Area of channel in feed direction (m^2)", rep)
     rep = r.pushReport(data["dimensions"]["dAm"],
                        "dAm: Area of element (m^2)", rep)
     return rep
Example #10
 def __init__(self, configuration, modelTime, initialState=None):
     DynamicModel.__init__(self)
     self.modelTime = modelTime
     self.model = FAO56(configuration, modelTime, initialState)
     self.model.initial()
     self.reporting = Reporting(configuration, self.model, modelTime)
Example #11
def compareResults(model1, model2, gui, dircount=None, tol=1e-3, fileOutput=sys.stdout, filewritehtml=None,resultfile=None,htmlfile=None,file1=None):
    def prepareMatrix(t, y):
        if t is None or y is None:
            print "Not supported to prepare None-vector/matrix."
            return None, None

        if len(t) != y.shape[0]:
            print "prepareMatrix: Length of time vector and number of rows of y have to be identical."
            return None, None
        yNew = numpy.ndarray((y.shape[0] * 2, y.shape[1]))
        tNew = numpy.ndarray((t.shape[0] * 2,))
        yNew[0, :] = y[0, :]
        tNew[0] = t[0]
        for i in xrange(y.shape[0] - 1):
            yNew[2 * i + 1, :] = y[i, :]
            yNew[2 * i + 2, :] = y[i + 1, :]
            tNew[2 * i + 1] = t[i + 1]
            tNew[2 * i + 2] = t[i + 1]
        yNew[-1, :] = y[-1, :]
        tNew[-1] = t[-1] + 1
        return tNew, yNew
    var1 = model1.integrationResults.getVariables()
    var1Name = var1.keys()
    var2 = model2.integrationResults.getVariables()
    var2Name = var2.keys()

    print "Start of comparing results ..."
    
    ## count the number of total variables in each result file
    model1var=str(len(var1))
    model2var=str(len(var2))
        
    allIdentical = True
    maxEstTol = 0.0

    allNamesBoth = set(var1Name) & set(var2Name)
    allNamesOnce1 = set(var1Name) - set(var2Name)
    allNamesOnce2 = set(var2Name) - set(var1Name)

    nPos = 0
    nNeg = 0

    pMatrix2 = [None] * model2.integrationResults.nTimeSeries
    timeSeries1Names = []
    timeSeries2Names = []
    
    for i in xrange(model1.integrationResults.nTimeSeries):
        timeSeries1Names.append([])
    
    for i in xrange(model2.integrationResults.nTimeSeries):
        timeSeries2Names.append([])


    for name in allNamesBoth:
        timeSeries1Names[var1[name].seriesIndex].append(name)
        timeSeries2Names[var2[name].seriesIndex].append(name)
    
    diff3=[]
    diff2=[]
    diff=[]
    for i in xrange(model1.integrationResults.nTimeSeries):
        if len(timeSeries1Names[i]) > 0:
            t1 = model1.integrationResults.timeSeries[i].independentVariable
            f1 = model1.integrationResults.timeSeries[i].data
           
            if model1.integrationResults.timeSeries[i].interpolationMethod == "constant" and t1 is not None:
                t1, f1 = prepareMatrix(t1, f1)
            for j in xrange(model2.integrationResults.nTimeSeries):
                if len(timeSeries2Names[j]) > 0:
                    check1 = set(timeSeries1Names[i])
                    check2 = set(timeSeries2Names[j])
                    namesBothSub = list(check1 & check2)
    
                    # These variable names are considered in the following:
                    if len(namesBothSub) > 0:
                        k = 0
                        i1 = numpy.ones((len(namesBothSub),), dtype=int) * (-1)
                        i2 = numpy.ones((len(namesBothSub),), dtype=int) * (-1)
                        s1 = numpy.ones((len(namesBothSub),), dtype=int)
                        s2 = numpy.ones((len(namesBothSub),), dtype=int)
                        
                        for variableName in namesBothSub:
                            i1[k] = var1[variableName].column
                            i2[k] = var2[variableName].column
                            s1[k] = var1[variableName].sign
                            s2[k] = var2[variableName].sign
                            k = k + 1
                                          
                        t2 = model2.integrationResults.timeSeries[j].independentVariable
                        f2 = model2.integrationResults.timeSeries[j].data
                        
                        if model2.integrationResults.timeSeries[j].interpolationMethod == "constant" and t2 is not None:
                            if pMatrix2[j] is None:
                                t2, f2 = prepareMatrix(t2, f2)
                                pMatrix2[j] = (t2, f2)
                            else:
                                t2 = pMatrix2[j][0]
                                f2 = pMatrix2[j][1]
                        
                        identical, estTol, error = Compare.Compare(t1, f1, i1, s1, t2, f2, i2, s2, tol)
                              
                        if error:
                            message = u"Error during comparison of results."
                            fileOutput.write(message + u"\n")
                            return
                    
                        maxEstTol = max(maxEstTol, estTol.max())
 
                        allIdentical = allIdentical and all(identical)
                        s = sum(identical)
                        nNeg = nNeg + (len(identical) - s)
                        nPos = nPos + s
                        '''Collect the variables that differ after comparison'''
                        for m in xrange(len(identical)):
                            if not identical[m]:
                                message = u"Results for " + namesBothSub[m] + u" are NOT identical within the tolerance " + unicode(tol) + u"; estimated Tolerance = " + unicode(estTol[m])
                                message2=namesBothSub[m]+'#'+unicode(estTol[m])
                                tupl = (namesBothSub[m], '%.30f' % float(unicode(estTol[m])))
                                diff.append(namesBothSub[m])
                                diff2.append(message2)
                                diff3.append(tupl)
                                fileOutput.write(message + u"\n")
    
    ## sort the differing variables by name
    diff1 = sorted(diff2)
    ## sort the differing variables by highest estimated error
    difftol = sorted(diff3, key=lambda x: float(x[1]), reverse=True)
    if (len(diff)!=0):
         Reporting.generatehtml(model1,model2,diff,htmlfile,resultfile,dircount)                   
                                  
#    if len(allNamesOnce1) > 0:
#        print "The following variables are not contained in file " + model2.integrationResults.fileName + ":"
#    for variableName in allNamesOnce1:
#        print variableName
#    if len(allNamesOnce2) > 0:
#        print "The following variables are not contained in file " + model1.integrationResults.fileName + ":"
#    for variableName in allNamesOnce2:
#        print variableName

    lenNamesOnce = len(allNamesOnce1) + len(allNamesOnce2)
    if lenNamesOnce > 0:
        messageOnce = u"; " + unicode(lenNamesOnce) + u" only in one of the two files."
    else:
        messageOnce = u"."
    message = u"Compared results of " + unicode(nPos + nNeg) + u" variables: " + unicode(nPos) + u" identical, " + unicode(nNeg) + u" differ" + messageOnce
    # print message
    fileOutput.write(message + u"\n")
    totalComparedvar= unicode(nPos + nNeg)
    if allIdentical:
        message = u"The results for all compared variables are identical up to the given tolerance = " + unicode(tol)
        # print message
        fileOutput.write(message + u"\n")
    message = u"Maximum estimated tolerance = " + unicode(maxEstTol)
    # print message
    fileOutput.write(message + u"\n")

    print "... done."
    ''' Function call to generate the overview report'''
    if htmlfile is not None:
        Reporting.htmloverview(filewritehtml,resultfile,htmlfile,file1,diff1,difftol,dircount,model1var,model2var,totalComparedvar,maxEstTol)

    return
Example #12
    def run(self):
      self.running = True
      
      try:
        import pydevd
        pydevd.connected = True
        pydevd.settrace(suspend=False)
      except:
        # do nothing, since error message only indicates we are not in debug mode
        pass
            
      workdir=os.getcwd()
      encoding = sys.getfilesystemencoding()
      dir1 = self.dir1
      files1 = os.listdir(dir1)
      
      modelName1 = []
      fileName1 = []
      
      ## list for counting the number of result files compared with baseline resultfiles
      listdirfilecounts=[]
      
      ## count variables to calculate the result files of baseline and listdirectories
      dir1filessize=0
      listdirsfilessize=0
      
      for fileName in files1:
          splits = fileName.rsplit('.', 1)
          #print splits
          if len(splits) > 1:
              if splits[1] in SimulationResult.fileExtension:
                   modelName1.append(splits[0])
                   fileName1.append(fileName)
                   fp = os.path.join(dir1, fileName)
                   dir1filessize += int(os.path.getsize(fp))      
      
      size=dir1filessize/(1024*1024.0)
      basedirfilessizes=round(size,1)
      
      if (len(fileName1)!=0): 
          
          subdir=self.logDir          

          ## clear the contents of the regression report directory if it already exists
          if os.path.exists(subdir):
              files = os.listdir(subdir)
              for file in files:
                 fileobj = os.path.join(subdir, file)
                 if(os.path.isfile(fileobj)):
                     os.remove(fileobj)
                 else:
                    shutil.rmtree(fileobj)
                            
          ### create a RegressionReport Directory in the current working directory ###
          if not os.path.exists(subdir): 
              os.mkdir(subdir)
                
          ### copy the dygraph script from /Plugins/Analysis/Testing/ to the result directory ###      
          dygraphpath=os.path.join(self.PySimulatorPath, 'Plugins/Analysis/Testing/dygraph-combined.js').replace('\\','/')
          if os.path.exists(dygraphpath):     
              shutil.copy(dygraphpath,self.logDir)
                                 
          listdirs=self.listdirs
          
          ## create a temp file for writing results and use it later to generate the regression report
          self.logFile=os.path.join(self.logDir, "index.log").replace('\\','/')
          
          fileOut = open(self.logFile, 'w')
          startTime = time.time()
          for dircount in xrange(len(listdirs)):
            dir2=listdirs[dircount]                               
            files2 = os.listdir(dir2)
            
            modelName2 = []
            fileName2 = []
            for fileName in files2:
                splits = fileName.rsplit('.', 1)
                if len(splits) > 1:
                    if splits[1] in SimulationResult.fileExtension:
                        modelName2.append(splits[0])
                        fileName2.append(fileName) 
                        
            '''create a html result file '''
            filename,fileExtension = os.path.splitext(self.logFile)
            logfile1=self.logFile.replace(fileExtension,'.html')                
            fileOuthtml= open(logfile1,'w')
               
            fileOut.write('Output file from comparison of list of simulation results within PySimulator\n')
            fileOut.write('  directory 1 (reference) : ' + dir1.encode(encoding) + '\n')
            fileOut.write('  directory 2 (comparison): ' + dir2.encode(encoding) + '\n')

            for index, name in enumerate(modelName1):            
                if self.stopRequest:
                    fileOut.write("Analysis canceled.")
                    fileOut.close()
                    print "... Comparing result files canceled."
                    self.running = False
                    return

                fileOut.write('\nCompare results from\n')            
                fileOut.write('  Directory 1: ' + fileName1[index].encode(encoding) + '\n')  # Print name of file1
                print "\nCompare results from "
                print "  Directory 1: " + fileName1[index].encode(encoding)

                try:
                    i = modelName2.index(name)
                except:
                    fileOut.write('  Directory 2: NO equivalent found\n')
                    print '  Directory 2: NO equivalent found'
                    ### codes to handle empty directory list in comparing results
                    model1 = Simulator.SimulatorBase.Model(None, None, None)
                    filepath = dir1 + '/' + fileName1[index]
                    model1.loadResultFile(filepath)
                    var = model1.integrationResults.getVariables()
                    message1= '<a href >' + fileName1[index].encode(encoding).replace('.mat','') +'-'+str(len(var))+'</a>' +' </td>'
                    emptyhref= "Not-Found"
                    s = '\n'.join(['<tr>','<td id=2>',message1,'<td id=2 bgcolor=#FFFFFF align="center">',emptyhref,'</td>','</tr>']) 
                    fileOuthtml.write(s)
                    fileOuthtml.write('\n')
                    i = -1
                if i >= 0:
                    fileOut.write('  Directory 2: ' + fileName2[i].encode(encoding) + '\n')  # Print name of file2
                    print "  Directory 2: " + fileName2[i].encode(encoding)
                    file1 = dir1 + '/' + fileName1[index]
                    file2 = dir2 + '/' + fileName2[i]
                    
                    ## calculate the filesize of the comparing directory result files
                    listdirsfilessize += int(os.path.getsize(file2))      
                    listdirfilecounts.append(file2)
                    
                    model1 = Simulator.SimulatorBase.Model(None, None, None)
                    model1.loadResultFile(file1)
                    model2 = Simulator.SimulatorBase.Model(None, None, None)
                    model2.loadResultFile(file2)
                    compareResults(model1, model2, dircount, self.tol, fileOut, fileOuthtml,self.logFile,file2,file1)
            
            fileOut.write('\n')    
            fileOut.write("******* Compare Analysis Completed   *******" + u"\n")
            fileOut.write('\n')                   
            fileOuthtml.close()
            green=[]
            red=[]
            
            '''open the html file to insert start html tags and add add headers of the directory name'''
            with open(logfile1) as myfile:
               htmldata=myfile.read()          
               m1="<table><tr><th id=0>Model</th><th id=0>"+os.path.basename(dir2)+'</th>'+'</tr>'
               soup = BeautifulSoup(open(logfile1))
               data=soup.find_all('td',{"bgcolor":["#00FF00","#FF0000"]})         
               for i in xrange(len(data)):
                  x=BeautifulSoup(str(data[i]))
                  tag=x.td
                  checkcolor=tag['bgcolor']
                  if(checkcolor=="#00FF00"):
                       green.append(checkcolor)
                  else:
                       red.append(checkcolor)

               message='\n'.join(['<html>',m1])
               f=open(logfile1,'w')
               if (len(green)==0 and len(red)==0): 
                 colorpercent=0
               else:
                 colorpercent=int((len(green))*100/(len(green)+len(red)))
                 
               if (colorpercent==100):
                   m1='<tr><td></td><td id=1 bgcolor="#00FF00" align="center">'+ str(len(green))+' passed'+' / '+str(len(red))+' failed'+'</td></tr>'
                   #percentage=str((len(green))*100/(len(green)+len(red)))+'%'+' passed'
                   percentage=str(colorpercent)+'%'+' passed'
                   m2='<tr><td></td><td id=100 bgcolor="#00FF00" align="center">'+percentage+'</td></tr>'
                   m3='\n'.join([message,m1,m2,htmldata,'</table>','</html>'])
                   f.write(m3)
                   f.write('\n')
               if(colorpercent>=51 and colorpercent<=99):
                   m1='<tr><td></td><td id=1 bgcolor="#FFA500" align="center">'+ str(len(green))+' passed'+' / '+str(len(red))+' failed'+'</td></tr>'
                   #percentage=str((len(green))*100/(len(green)+len(red)))+'%'+' passed'
                   percentage=str(colorpercent)+'%'+' passed'
                   m2='<tr><td></td><td id=100 bgcolor="#FFA500" align="center">'+percentage+'</td></tr>'
                   m3='\n'.join([message,m1,m2,htmldata,'</table>','</html>'])
                   f.write(m3)
                   f.write('\n')
               if(colorpercent<=50):
                   m1='<tr><td></td><td id=1 bgcolor="#FF0000" align="center">'+ str(len(green))+' passed'+' / '+str(len(red))+' failed'+'</td></tr>'
                   #percentage=str((len(green))*100/(len(green)+len(red)))+'%'+' passed'
                   percentage=str(colorpercent)+'%'+' passed'
                   m2='<tr><td></td><td id=100 bgcolor="#FF0000" align="center">'+percentage+'</td></tr>'
                   m3='\n'.join([message,m1,m2,htmldata,'</table>','</html>'])
                   f.write(m3)
                   f.write('\n')
               f.close()
           
            '''Save the data to prepare regression report'''
            
            newpath=os.path.dirname(logfile1)
            name=os.path.basename(logfile1)
            newname=''.join([str(dircount),'_',name])
            np1=os.path.join(newpath,'rfiles').replace('\\','/')
            np2=os.path.join(np1,newname).replace('\\','/')
            
            #create a new directory to store the result files for each run, to make parsing easy when user asks for regression chart 
            if not os.path.exists(np1): 
               os.mkdir(np1)
            shutil.copy(logfile1,np2)
                      
          print "... running the analysis done."
          ## final calculation of listdirfiles result size
          size1=listdirsfilessize/(1024*1024.0)
          Totallistdirfilessizes=round(size1,1)   
          
          elapsedTime = time.time() - startTime
          fileOut.close()
          totaldirfilescount=len(listdirfilecounts)
          basefilecount=len(fileName1)
          resultdirsize=basedirfilessizes+Totallistdirfilessizes
          Reporting.genregressionreport(self.logFile,totaldirfilescount,basefilecount,elapsedTime,resultdirsize,dir1,self.tol)
          
          ## remove the temporary rfiles directory after the Regression report generated          
          regressionfilesdir=os.path.join(os.path.dirname(self.logFile),'rfiles').replace('\\','/')
          if os.path.exists(regressionfilesdir): 
              shutil.rmtree(regressionfilesdir)
              
          ## change the directory to workdir after regression report
          os.chdir(workdir)
      else:
          print "No files to be compared found."
          print "... running the analysis done."
      
      self.running = False
Example #13
    if c.UseRemoteServer and c.SyncTime != 0:
        if last_sync is None or (datetime.now() - last_sync).seconds >= c.SyncTime:
            sources, active_calls = resync()
            last_sync = datetime.now()

    # Iterate the data sources and check the ones that need updating
    for src in sources.values():
        if Sources.needsUpdate(src) and Sources.canCheck(src):
            calls = Sources.check(src)

            # If data was returned, merge the new data with the existing
            if calls:
                result, added, removed, updates = Calls.merge(active_calls[src.id], calls)
                active_calls[src.id] = result.values()

                report = Reporting.SourceUpdateReport(src, result, added, removed, updates)

                # Show the changes
                if report.hasChanges():
                    report.printChanges()

                    # Update the server with new data
                    if c.UseRemoteServer and len(c.IngestUrl) > 0:
                        report.sendReport(c.IngestUrl, c.ClientKey)


    # Handle geocode requests
    if Geocoder.canHandleRequests(c):
        requests = Geocoder.getRequests(c.DataUrl)

        if len(requests) > 0:
Example #14
#     datefmt = '%Y-%m-%d %H:%M:%S'
#     # logging.basicConfig(level=logging.DEBUG)
#     # formatStr = '[%(asctime)s] - %(name)s - {%(pathname)s:%(lineno)d} ' + \
#     #            '%(levelname)s - %(message)s', datefmt
#     logging.basicConfig(format=formatStr,
#                         datefmt=datefmt,
#                         level=logging.DEBUG)

    logger = logging.getLogger(__name__)
    logger.info("fiurst log message!")
    # define what environment to work with
    env = 'PRD'

    # used to help with formatting information into a string that
    # can be included in an email.
    emailReporter = Reporting.EmailStrings()
    dataCache = Reporting.CachedStrings()
    dataUtil = DataUtil.GetData(env)

    # add a header to the email
    jenkinsUrl = dataUtil.getMiscParam(Constants.JENKINSURL)
    ln1 = "    ----  DataBC Replication Health Check  ----\n"
    ln2 = 'src: https://github.com/bcgov/replication_health_check\n'
    ln3 = f'jenkins: {jenkinsUrl}\n\n\n'
    emailHeaderString = ln1 + ln2 + ln3
    dataCache.setString(emailHeaderString)
    logger.debug(f"emailHeaderString: {emailHeaderString}")

    # get the disabled schedules string
    scheds = dataUtil.getFMESchedules()
    schedsEval = ScheduleEvaluation.EvaluateSchedule(scheds)
Example #15
    inp = input(prompt)
    # Verify user input
    if inp not in options:
        print('Select a number > ')
        continue
### View & Search ###
    if inp == '1':
        ProgramHeader()
        ViewHeader()
        data_requested = input('Select a number > ')
        while data_requested != "0":
            ProgramHeader()
            ViewHeader()
            if data_requested == '2':
                print(new_line)
                Reporting.show_all()
                print(new_line)
                print(tab*3, '\t0. Back to MAIN MENU')
                data_requested = input('Select a number > ')
                if data_requested == '0':
                    break
                else:
                    print('Please select a number from the menu.')




### Create & Update ###
    if inp == '2':
        ProgramHeader()
        CreateHeader()
Example #16
def main():
    print(args.url)
    Scraper.main(args.url, args.url)
    Reporting.create_reporting()
    print(Helper.broken_links_count())
Example #17
    def run(self):
      self.running = True
      workdir=os.getcwd()
      files1 = os.listdir(self.dir1)
      
      dir1filessize=0
      ComparablefileName = []
      for fileName in files1:
         splits = fileName.rsplit('.', 1)
         if len(splits) > 1:
            if splits[1] in SimulationResult.fileExtension:
               ComparablefileName.append(fileName)
               fp = os.path.join(self.dir1, fileName)
               dir1filessize += int(os.path.getsize(fp))      
      
      size=dir1filessize/(1024*1024.0)
      basedirfilessizes=round(size,1)
      
      if(len(ComparablefileName)!=0):      
          subdir=self.logDir    
          
          ## clear the contents of regression report directory if already exists
          if os.path.exists(subdir): 
               files = os.listdir(subdir)
               for file in files:
                  fileobj = os.path.join(subdir, file)
                  if(os.path.isfile(fileobj)):
                      os.remove(fileobj)
                  else:
                      shutil.rmtree(fileobj)
              
          ###  create a RegressionReport Directory in the current working directory ###
          if not os.path.exists(subdir): 
                os.mkdir(subdir)
          
          ### copy the dygraph script from /Plugins/Analysis/Testing/ to the result directory ###      
          dygraphpath=os.path.join(self.PySimulatorPath, 'Plugins/Analysis/Testing/dygraph-combined.js').replace('\\','/')
          if os.path.exists(dygraphpath):     
              shutil.copy(dygraphpath,self.logDir)
          
          ## create a temp file for writing results and use it later to generate the regression report
          self.logFile=os.path.join(self.logDir,'index.log').replace('\\','/')

          logfiles=[]   
          list1dir=[]
          tolerance=[]      
          dir1 = self.dir1
          listdirs=self.listdirs 
          list1dir.append(dir1)
          tolerance.append(self.tol)
          
          listdir1= list1dir*len(listdirs)
          logfiles.append(self.logFile)
          logfiles1=logfiles*len(listdirs)
          tol=tolerance*len(listdirs)
             
          ## create a directory for logging multiprocessing output from the terminal to a file
          logdir =os.path.join(os.getcwd(),'loginentries').replace('\\','/')
          if not os.path.exists(logdir):
              os.mkdir(logdir)

          processlog=[]
          dircount=[]
          resultfiles=[]      
          for i in xrange(len(logfiles1)):
              dir_name=os.path.dirname(logfiles1[i])
              filename=os.path.basename(logfiles1[i])
              newlogfile=str(i)+'_'+filename
              newlogfilepath=os.path.join(dir_name,newlogfile).replace('\\','/')
              processlogfilepath=os.path.join(logdir,newlogfile).replace('\\','/')
              resultfiles.append(newlogfilepath)
              processlog.append(processlogfilepath)
              dircount.append(i)
          
          ## Create a Pool of process and run the Compare Analysis in Parallel
          pool=Pool()
          startTime = time.time() 
          val=pool.map(ParallelCompareAnalysis, zip(listdir1,listdirs,resultfiles,dircount,tol,processlog))
          pool.close()
          pool.join()
          elapsedTime = time.time() - startTime
          #print elapsedTime
          ''' print the output to GUI after process completed '''
          for i in xrange(len(processlog)):
             f=open(processlog[i],'r')
             processlogentries=f.read()
             print processlogentries
             f.close()

          shutil.rmtree(logdir)
          print "Parallel Compare Analysis Completed"
         
          ## calculate the totalfiles compared and size of result files
          calspace=[]
          calfiles=[]
          for z in xrange(len(val)):
               calspace.append(val[z][0])
               calfiles.append(len(val[z][1]))
               
          size1=sum(calspace)/(1024*1024.0)
          Totallistdirfilessizes=round(size1,1)
          
          totaldirfilescount=sum(calfiles)
          basefilecount=len(ComparablefileName)
          resultdirsize=basedirfilessizes+Totallistdirfilessizes
          Reporting.genlogfilesreport(self.logFile)
          Reporting.genregressionreport(self.logFile,totaldirfilescount,basefilecount,elapsedTime,resultdirsize,dir1,self.tol)      
          
          ## Remove the temporary logfiles and rfiles directories after the regression report completed
          logfilesdir=os.path.join(os.path.dirname(self.logFile),'logfiles').replace('\\','/')
          if os.path.exists(logfilesdir): 
             shutil.rmtree(logfilesdir)
                   
          regressionfilesdir=os.path.join(os.path.dirname(self.logFile),'rfiles').replace('\\','/')
          if os.path.exists(regressionfilesdir): 
             shutil.rmtree(regressionfilesdir)
             
          ## change the directory to workdir after regression report
          os.chdir(workdir)     
      else:
          print "No files to be compared found."
          print "Parallel Compare Analysis Completed"
      
      self.running = False