Example #1
0
    def application(self):
        """Run the main menu loop, dispatching to the CRUD/report handlers.

        Loops until the user enters 'q'. On unrecognized input the loop
        simply re-prompts (the old second input() call was discarded when
        the loop restarted, so it has been removed).
        """
        input("Press ENTER to continue\n")
        while True:
            # Prompt the user as to what they would like to do.
            print("\nWhat would you like to do?")
            usr_input = input(
                "\nEnter: Insert - Delete - Modify - Report.\nTo Exit enter 'q'\n"
            ).lower()

            if usr_input == 'delete':
                # Open the delete class.
                b = Delete()
                b.checkTour()
            elif usr_input == 'insert':
                # Open the insert class.
                c = Add()
                c.usrInputForInsert()
            elif usr_input == 'modify':
                # Open modify class.
                d = Modify()
                d.updateInput()
            elif usr_input == 'report':
                a = Report()
                a.reportInput()
            elif usr_input == 'q':
                # Quit the program.
                print("End the Program")
                return
            else:
                print('Invalid choice\n')
Example #2
0
    def build_report(self):
        """Ask for a template and a target file, then render the report.

        Shows a splash screen while the (potentially slow) transform runs,
        and finally makes a best-effort attempt to copy the output path to
        the X primary selection via `xsel`.
        """
        file_choices = "Jinga template (*.jing)"

        source_name = QFileDialog.getOpenFileName(self, 'Open template', 'templates', file_choices)

        # getOpenFileName returns a (path, filter) tuple; empty path = cancel.
        if not (source_name and source_name[0]):
            return

        source_name = source_name[0]
        # Strip directory and extension to suggest a default report name.
        report_name = ".".join(source_name.split("/")[-1].split(".")[:-1])
        # BUG FIX: split() returns a list; join it back into a directory path
        # (previously the list's repr ended up in the suggested save path).
        report_dir = "/".join(source_name.split("/")[:-1])
        target_name = QFileDialog.getSaveFileName(self, 'Save report', "%s/%s" % (report_dir, report_name), 'Report file (%s)' % report_name)

        if not (target_name and target_name[0]):
            return

        target_name = target_name[0]
        splash_pix = QPixmap('splash_loading.png')
        splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
        splash.setMask(splash_pix.mask())
        splash.show()

        # Keep the UI responsive while the transform runs.
        self.app.processEvents()
        Report.to_transform(self, source_name, target_name)

        splash.close()
        try:
            # Best-effort: copy the saved path into the X primary selection.
            from subprocess import Popen, PIPE
            p = Popen(['xsel', '-pi'], stdin=PIPE)
            # BUG FIX: communicate() needs bytes on Python 3.
            p.communicate(input=target_name.encode())
            copied = True
        except Exception:
            # xsel may be absent; the clipboard copy is optional.
            copied = False
        QMessageBox.about(self, "Report generated", "Report saved into '%s'%s" % (target_name, "" if not copied else " (copied into clipboard)"))
def recheckIfUnreachable(host, ops):
    """Re-ping *host* after a 5s delay; log, persist and broadcast the result.

    ops: ping option string spliced into the command before the packet count.
    On success, updates the host's state dict and notifies every subscribed
    Telegram chat; on continued failure, also sends an e-mail report.
    """
    time.sleep(5)
    status = ip_dict_read(host)
    # Use 'msg' instead of shadowing the builtin 'str'.
    msg = '..............Second try..............\n'
    noofpackets = 2
    timeout = 500  # in milliseconds
    command = 'ping {0} {1} -w {2} {3}'.format(ops, noofpackets, timeout, host)
    stdout, stderr = systemCommandCheckIP(command)
    if stdout:
        msg += "Host [{}] is now reachable.\n".format(host)
        msg += '......................................\n'
        LogFile.newLog(host, "Reachable")
        # status[0] tracks the previous failure state; flip accordingly.
        if status[0] == 1:
            ip_dict_write(host, [0, 0])
        else:
            ip_dict_write(host, [1, 0])
        constructions = get_constructions()
        bot = constructions["bot"]
        for chat_id in constructions["update"]:
            bot.sendMessage(chat_id, text='{0} is online'.format(constructions[host]))
        print(msg)
    else:
        msg += "Host [{}] is unreachable.\n".format(host)
        LogFile.newLog(host, "Unreachable")
        Report.sendEmail("ip:" + host + " is still unreachable")
        msg += 'Report sent!\n'
        msg += '......................................\n'
        # State 3 marks "confirmed unreachable after retry".
        ip_dict_write(host, [3, 0])
        constructions = get_constructions()
        bot = constructions["bot"]
        for chat_id in constructions["update"]:
            bot.sendMessage(chat_id, text='{0} is offline'.format(constructions[host]))
        print(msg)
def main():
    """Run the test plan, then compare and graph Freddy vs. Algo results.

    NOTE(review): several paths are hard-coded to one machine/user
    (/home/ahmad/...); the '~' in test_plan_path is presumably expanded by
    perform_test -- confirm before reusing elsewhere.
    """
    test_plan_path = '~/Test_plan/test_plan.csv'
    # results_folder_path = '~/Results'

    home = str(Path.home())
    print(home)
    results_folder_path = home + '/Results'
    # Hard-coded result snapshots to compare against (TODO: parameterize).
    Freddy_results_path = '/home/ahmad/Results/20200204_194024_results.csv'
    Algo_results_path = '/home/ahmad/Results/hp_results.csv'
    perform_test(test_plan_path, results_folder_path)

    Report.compare(Freddy_results_path, Algo_results_path, results_folder_path)
    Report.make_graph(results_folder_path,
                      results_folder_path + '/Merged_results.csv')
Example #5
0
def generate_report(one_file_report=False):
    """Collect temp logs and build the HTML test report via `Report.py`.

    one_file_report: forwarded to Report.make_report as the one_file flag.
    """
    print("\n-- Generating a test report\n")
    # BUG FIX: quote the directories but leave the glob unquoted so the shell
    # expands it (a quoted '* ' was treated as a literal file name and never
    # matched anything).
    local.run('cp "' + master_base_path + '/templogs/"* "' + report_path + '/logs/"', warn_only=True)

    # Call format to remove '{{' and '}}'.
    path = os.path.expandvars(report_path.format())
    CmdArgs = collections.namedtuple("CmdArgs", ["one_file", "log_dir", "report_dir"])
    args = CmdArgs(one_file=one_file_report, log_dir="{0}/logs".format(path), report_dir=path)
    Report.make_report(args)

    print("\n-- Test report has been generated and is available here:")
    print('-- "{0}/report.html"'.format(path))
    print()
def doProtocol(prots, pn, fclaim=None, maxproc=2):
    """Verify all (optionally filtered) claims of protocol *pn*.

    prots: mapping of protocol name -> list of claim names (sorted in place).
    fclaim: optional claim-name prefix filter; None means all claims.
    maxproc: maximum number of processes, forwarded to filter/timeout/verify.
    """
    cl = prots[pn]
    cl.sort()
    cl.reverse()        # to start with secrecy (smaller scenarios than auth, hence improving tool.minTime initial behaviour)
    for c in cl:
        # 'is None' rather than '== None' (identity test for the sentinel).
        if fclaim is None or c.startswith(fclaim):
            Report.replog("** Claim: %s,%s ***" % (pn, c), False)
            pl = getProtocols(pn, c)
            for p in pl:
                # Apply filtering
                if TestConfig.filter(pn, c, p.toolname, maxproc):
                    # Get timeout value from config
                    timeout = TestConfig.getTimeout(pn, c, p.toolname, maxproc)
                    verify(p, c, maxproc=maxproc, timeout=timeout)
Example #7
0
File: EOD.py Project: isaku-dev/EOD
 def findFailReport(self):
     """Populate self.reportFailList from the filtered report list.

     Returns True when reading/parsing the list fails (legacy error
     signal); returns None on success.
     """
     try:
         reportList = self.filterReportList()
         for reportListItem in reportList:
             reportItem = Report()
             reportItem.setName(reportListItem[0])
             reportItem.setStatus(reportListItem[3])
             reportItem.setGeneratedTimeStamp(
                 self.getFormatedTime(reportListItem[4]))
             reportItem.setSortByTime(self.getDecimalTime(
                 reportListItem[4]))
             self.reportFailList.append(reportItem)
     except Exception:
         # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
         # still propagate.
         return True
Example #8
0
 def Init_Subset(self,
                 length=1,
                 user_subset=None,
                 show_superset=False,
                 show_subset=True):
     """Initialize self.hypo_subset, generating one when none is supplied.

     length: size passed to Generate.Gen_Subset when generating.
     user_subset: optional explicit subset; None or an empty list triggers
         generation. (None replaces the old mutable default [] -- a shared
         list that callers could accidentally mutate across calls.)
     show_superset / show_subset: print the corresponding set via Report.
     """
     if not user_subset:
         self.hypo_subset = Generate.Gen_Subset(self.hypo_superset, length)
     else:
         self.hypo_subset = user_subset
     if show_superset:
         Report.Print_Set(self.hypo_superset)
         print("----Superset----")
     if show_subset:
         Report.Print_Set(self.hypo_subset)
         print("----Subset----")
Example #9
0
File: EOD.py Project: isakSoft/EOD
 def findFailReport(self):
     """Collect the filtered reports into self.reportFailList.

     Returns True if anything goes wrong while building the list (legacy
     error flag); otherwise returns None.
     """
     try:
         reportList = self.filterReportList()
         for reportListItem in reportList:
             reportItem = Report()
             reportItem.setName(reportListItem[0])
             reportItem.setStatus(reportListItem[3])
             reportItem.setGeneratedTimeStamp(self.getFormatedTime(reportListItem[4]))
             reportItem.setSortByTime(self.getDecimalTime(reportListItem[4]))
             self.reportFailList.append(reportItem)
     except Exception:
         # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
         # still propagate.
         return True
Example #10
0
    def createFrameSection(self, frame):
        """Build a report section summarizing one rendered frame.

        frame: trace frame exposing .number, .events, .renderEvents,
            .swapEvent, .startTime and .duration. NOTE(review): the FPS row
            divides by frame.duration -- assumes it is non-zero; confirm
            upstream guarantees that.
        Returns the populated Report.Section.
        """
        frameSection = Report.Section("Frame #%d" % (frame.number + 1))
        frameSection.create(Report.LinkTarget, "frame%d" % (frame.number + 1))

        # Add a framebuffer snapshot if we have one
        colorBuffer = player.Instrumentation.getBufferFileName(frame.swapEvent)
        if colorBuffer:
            frameSection.create(Report.Image,
                                colorBuffer,
                                elementClass="screenshot")

        # Per-frame summary statistics.
        table = frameSection.create(Report.Table, ["Property", "Value"])
        table.addRow("API calls", len(frame.events))
        table.addRow("Render calls", len(frame.renderEvents))
        table.addRow("FPS", "%.02f" % (1.0 / frame.duration))

        # Add some frame-level graphs
        frameSection.add(
            self.createEventPlot("Event distribution",
                                 frame.events,
                                 sliceStart=frame.startTime,
                                 sliceLength=frame.duration,
                                 id=frame.number + 1))
        frameSection.add(
            self.createEventFrequencyPlot("Operation distribution",
                                          frame.events,
                                          id=frame.number + 1))

        return frameSection
Example #11
0
    def createEventFrequencyPlot(self, title, events, id=0):
        """Create a horizontal bar chart of per-event-name call frequencies.

        Saves the chart as '<title>NNN.png' under self.path and returns a
        Report.Section embedding the image.
        """
        # Count the number of each event
        eventFrequency = Collections.DefaultDict(lambda: 0)
        for event in events:
            eventFrequency[event.name] += 1

        # BUG FIX: dict.items() is a view on Python 3 and has no .sort();
        # sorted() produces the same descending-frequency order on both
        # Python 2 and 3.
        items = sorted(eventFrequency.items(), key=lambda f: -f[1])

        funcNames = [f[0] for f in items]
        funcFreq = [f[1] for f in items]

        # Create a bar charts and add a text describing the event to each bar
        pylab.figure()
        pylab.yticks([])
        pylab.ylim(len(funcNames), 0)
        rects = pylab.barh(range(len(funcNames)),
                           funcFreq,
                           color=self.primaryColor)

        for name, rect in zip(funcNames, rects):
            pylab.text(rect.get_x() + rect.get_width(),
                       rect.get_y() + rect.get_height(),
                       "  " + name,
                       fontsize=8)

        fn = os.path.join(self.path,
                          title.lower().replace(" ", "_") + "%03d.png" % id)
        pylab.savefig(fn)
        pylab.close()

        section = Report.Section(title)
        section.create(Report.Image, fn)
        return section
Example #12
0
    def createAuxiliaryStatisticsTable(self):
        """Aggregate per-event sensor data into a summary Report.Table.

        NOTE(review): the previous code added 1 per event for rasterizer
        pixels and hard-coded the average triangle size to 4.3; both now use
        the actual sensor values, matching the triangles_in/texel_uploads
        branches -- confirm against the instrumentation spec.
        """
        yNumTriangles = 0
        yTexelUploads = 0
        yRasterizerPixels = 0
        yAvgTriangleSize = 0.0

        for event in self.trace.events:
            if event.sensorData.get("rasterizer_pixels", 0):
                # Accumulate the measured pixel count (was '+= 1').
                yRasterizerPixels += event.sensorData["rasterizer_pixels"]
            if event.sensorData.get("average_triangle_size", 0):
                # Use the reported value (was a hard-coded 4.3 placeholder).
                yAvgTriangleSize = event.sensorData["average_triangle_size"]
            if event.sensorData.get("triangles_in", 0):
                yNumTriangles += event.sensorData["triangles_in"]
            if event.sensorData.get("texel_uploads", 0):
                yTexelUploads += event.sensorData["texel_uploads"]

        t = Report.Table(["Property", "Value"])
        t.addRow("Total triangles submitted", yNumTriangles)
        t.addRow("Number of triangles submitted per frame",
                 float(yNumTriangles) / len(self.frames))
        t.addRow("Total texel uploads", yTexelUploads)
        t.addRow("Number of uploads per frame",
                 float(yTexelUploads) / len(self.frames))
        t.addRow("Rasterizer pixels", yRasterizerPixels)
        t.addRow("Average triangle size", yAvgTriangleSize)

        return t
Example #13
0
 def Simulate(self, simulation_loop=1, state_value_report=True):
     """Run Monte-Carlo teaching episodes and update the state-value map.

     simulation_loop: number of episodes to run.
     state_value_report: when True, print the learned
         state_action_label_value_map via Report at the end.
     """
     for looptime in range(simulation_loop):
         # R accumulates the episode return; each step costs -1.
         R = 0
         is_end = False
         next_feature = False
         current_feature = -1
         current_label = -1
         self.Reset()
         while True:
             if is_end:
                 # Episode over: propagate the return through visited states.
                 Update.MonteCarlo_Update(R, self.state_list,
                                          self.state_action_label_value_map)
                 break
             else:
                 # Epsilon-greedy choice of the next feature to query.
                 next_feature = Select.MonteCarlo_Epsilon_Select(
                     self.feature_remaining, current_feature, current_label,
                     self.state_action_label_value_map)
                 Select.Erase_Feature(self.feature_remaining, next_feature)
                 # Shrink the hypothesis set with the observed feature.
                 self.hypo_remaining_set = Observe.Observe_Subset(
                     self.true_hypothesis, self.hypo_remaining_set,
                     next_feature)
                 Observe.Clear_Overlap(self.feature_remaining,
                                       self.hypo_remaining_set)
                 is_end = Observe.Check_End(self.hypo_remaining_set)
                 self.state_list.append(
                     (current_feature, next_feature, current_label))
                 R += -1
                 current_label = self.true_hypothesis[next_feature]
                 current_feature = next_feature
     if state_value_report:
         Report.Report_State_Value_Map(self.state_action_label_value_map)
Example #14
0
    def __init__(self, verbose=False):
        """Set up the buy/dealer/sell counts report and its columns table.

        verbose: when True, the report echoes each appended line.
        """
        column_specs = [
            ('orderid', 13, '%13s', 'orderid',
             'issuepriceid + sequencenumber'),
            ('ticker', 6, '%6s', 'ticker', 'ticker'),
            ('maturity', 10, '%10s', 'maturity', 'maturity'),
            ('n_prints', 10, '%10d', 'nprints',
             'number of prints (transactions)'),
            ('n_buy', 10, '%10d', 'n_buy', 'number of buy transactions'),
            ('n_dealer', 10, '%10d', 'n_dealer',
             'number of dealer transactions'),
            ('n_sell', 10, '%10d', 'n_sell', 'number of sell transactions'),
            ('q_buy', 10, '%10d', 'q_buy',
             'total quantity of buy transactions'),
            ('q_dealer', 10, '%10d', 'q_dealer',
             'total quantity of dealer transactions'),
            ('q_sell', 10, '%10d', 'q_sell',
             'total quantity of sell transactions'),
        ]
        self.ct = ColumnsTable.ColumnsTable(column_specs)
        self.report = Report.Report(also_print=verbose)
        self.report.append('Buy-Dealer-Sell Analysis: Counts by trade_type')
        self.report.append(' ')

        # One per-order accumulator for each count/quantity column.
        for counter_name in ('n_prints', 'n_buy', 'n_dealer', 'n_sell',
                             'q_buy', 'q_dealer', 'q_sell'):
            setattr(self, counter_name, collections.defaultdict(int))
Example #15
0
    def send_report(self, proxy, service_target, capability_target, time):
        '''
        Create a report on a given proxy: take a note about the proxy for
        the requested targets and wrap it in a Report.
        '''
        observed_note = self.take_note(proxy, service_target,
                                       capability_target)
        report = Report.Report(service_target, capability_target,
                               observed_note, time)
        return report
Example #16
0
    def createEventSection(self, event):
        """Build a report section describing one trace event: optional
        screenshot, code description, and a table of sensor readings."""
        func = self.library.functions[event.name]

        # Pick the section title based on the kind of call this event is.
        if func.isRenderCall:
            kind = "Render call"
        elif func.isFrameMarker:
            kind = "Frame swap"
        else:
            kind = "Event"
        section = Report.Section("%s %s (#%d)" % (kind, event.name, event.seq))

        # Add a framebuffer snapshot if we have one
        colorBuffer = player.Instrumentation.getBufferFileName(event)
        if func.isRenderCall and colorBuffer:
            section.create(Report.Image,
                           colorBuffer,
                           elementClass="screenshot")

        # Describe the event
        section.create(Report.Paragraph,
                       self.getEventDescription(event),
                       elementClass="code")

        # Insert sensor measurements (only known sensors with truthy values).
        statsTable = section.create(Report.Table, ["Sensor", "Value"])
        for sensorName in sorted(event.sensorData.keys()):
            value = event.sensorData[sensorName]
            if sensorName in self.trace.sensors and value:
                statsTable.addRow(self.trace.sensors[sensorName].description,
                                  value)

        return section
Example #17
0
    def O_Task(self):
        """Plot probability curves for successive knowledgeability tables,
        stopping once the identity matrix is reached, then show the plot."""
        # Get a new knowledgeability table
        knowledgeability = Task.NKnowledgeability_Task(self.hypo_table,
                                                       self.num_hypo,
                                                       self.num_feature,
                                                       self.num_label,
                                                       self.knowledge)
        # knowledgeability = [self.delta_g_h]
        for n in range(len(knowledgeability)):
            print(knowledgeability[n])
            # Deep copy -- presumably Probability_Task mutates the table it
            # is given; confirm before removing the copy.
            p, s = Task.Probability_Task(self.hypo_table, self.num_hypo,
                                         self.num_feature, self.num_label,
                                         copy.deepcopy(knowledgeability[n]),
                                         1000)
            print(p, s, sep="\n")
            Report.Plot_P(Task.Average_Hypo(p, self.num_hypo),
                          self.num_feature, n)

            # If it reaches the identity matrix, the loop ends.
            if numpy.array_equal(knowledgeability[n],
                                 numpy.eye(self.num_hypo)):
                break
        mtp.legend()
        mtp.show()
        return
Example #18
0
def generate_report(one_file_report=False):
    """Generate the HTML test report (delegates to `Report.py`)."""
    print('\n-- Generating a test report\n')

    # format() collapses the '{{'/'}}' escapes before env-var expansion.
    path = os.path.expandvars(report_path.format())
    field_names = ['one_file', 'log_dir', 'report_dir']
    CmdArgs = collections.namedtuple('CmdArgs', field_names)
    Report.make_report(
        CmdArgs(one_file_report, '{0}/logs'.format(path), path))

    print('\n-- Test report has been generated and is available here:')
    print('-- "{0}/report.html"'.format(path))
    print()
Example #19
0
def pl_coveragetest(model, beta_signal_values = [0.2*i for i in range(10)], n = 1000, write_report = True, **deltanll_options):
    """Toy-based coverage test of profile-likelihood intervals.

    For each true beta_signal, runs pl_intervals on n toys and counts how
    often each confidence-level interval brackets the truth.
    Returns {spid: {beta_signal: {cl: coverage, 'successrate': frac}}}.
    NOTE(review): the list default argument is shared across calls; safe
    only as long as callers never mutate it.
    """
    result = {}
    result_tables = {}
    for beta_signal in beta_signal_values:
        res = pl_intervals(model, input='toys:%g' % beta_signal, n = n, **deltanll_options)
        for spid in res:
            # The float keys of res[spid] are the confidence levels.
            cls = [k for k in res[spid].keys() if type(k)==float]
            if spid not in result: result[spid] = {}
            if spid not in result_tables:
                 # Lazily create one summary table per signal process.
                 result_tables[spid] = Report.table()
                 result_tables[spid].add_column('beta_signal', 'true beta_signal')
                 for cl in cls: result_tables[spid].add_column('coverage %g' % cl, 'Coverage for cl=%g' % cl)
                 result_tables[spid].add_column('fit success fraction')
            result[spid][beta_signal] = {}
            result_tables[spid].set_column('beta_signal', '%g' % beta_signal)
            icl = 0
            for cl in cls:
                # An interval covers when [low, high] brackets the true value.
                n_covered = len([1 for i in range(len(res[spid][cl])) if res[spid][cl][i][0] <= beta_signal and res[spid][cl][i][1] >= beta_signal])
                n_total = len(res[spid][cl])
                coverage = n_covered*1.0 / n_total
                result[spid][beta_signal][cl] = coverage
                # Fraction of the n toys for which the fit succeeded.
                successrate = (n_total*1.0 / n)
                result[spid][beta_signal]['successrate'] = successrate
                result_tables[spid].set_column('coverage %g' % cl, '%g' % coverage)
                result_tables[spid].set_column('fit success fraction', '%g' % successrate)
            result_tables[spid].add_row()
    if write_report:
        for spid in result_tables:
            config.report.new_section("deltanll interval coverage test for signal process '%s'" % spid)
            config.report.add_html(result_tables[spid].html())
    return result
Example #20
0
def reportchoice(request):
    """Render the report-selection page with human-readable report names."""
    available = Report.get_reports()
    # Replace '+', '_' and the URL-encoded comma ('%2C') with spaces
    # so the keys read naturally in the template.
    choices = {}
    for key in available:
        choices[key] = re.sub(r'\+|_|%2C', ' ', key)
    return render(request, 'reports/reportchoice.html', {'choices': choices})
Example #21
0
    def init(self):
        """Build the template-editor UI and seed it with a demo report.

        NOTE(review): 're' here resolves to a module providing Report() --
        not the stdlib regex module; confirm the import alias at file top.
        """
        report = re.Report()
        # Demo values: numeric ids, names, a year and an XSL-FO fragment
        # containing a $Variable1$ placeholder.
        report.init(
            123,
            42,
            "pepe",
            28,
            "juan",
            2019,
            fileDescriptor=
            "<fo:block ><fo:image ></fo:image><fo:block >Value of Variable: $Variable1$</fo:block></fo:block><fo:block ></fo:block>"
        )

        with flx.VBox():
            with flx.HBox():
                with flx.HBox():
                    # Template lifecycle actions.
                    self.new = flx.Button(text='Create New Template', flex=0)
                    self.save = flx.Button(text='Save Template', flex=1)
                    self.load = flx.Button(text='Load Template', flex=2)
                    self.download = flx.Button(text='Get_XML_File', flex=3)
                with flx.HBox():
                    # Container-insertion actions.
                    self.region = flx.Button(text='Add Region Container',
                                             flex=4)
                    self.text = flx.Button(text='Add Text Container', flex=5)
                    self.image = flx.Button(text='Add Image Container', flex=6)
        # Label that will later display the generated XML.
        self.XML = flx.Label(text="No XML Available Yet", flex=7)
Example #22
0
    def main(self, method, args):
        """Entry point: spawn one sender process per arg, run the report
        process to completion, then exit.

        method: stored on self for later use by the workers.
        args: iterable; each element is passed as the single argument of
            self.sendMsg in its own process.
        """
        signal.signal(signal.SIGINT, self.sig_handler)
        signal.signal(signal.SIGTERM, self.sig_handler)
        self.method = method

        self.r = CRedis.CRedis()
        self.r.clear()
        path = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'logs'
        if not os.path.exists(path):
            os.makedirs(path)

        self.r.set('ctime', time.time())
        self.r.set('is_report', 1)

        fd_list = []
        for arg in args:
            # BUG FIX: (arg) is just arg parenthesized; Process expects an
            # argument *tuple*, so a string/sequence arg would be unpacked
            # into multiple positional arguments. Use (arg,).
            p2 = multiprocessing.Process(target=self.sendMsg, args=(arg,))
            p2.start()
            fd_list.append(p2)

        # Test report
        report = Report.Report()
        p3 = multiprocessing.Process(target=report.report, args=())
        p3.start()
        p3.join()

        print("main exit.")
        sys.exit(0)
Example #23
0
def posteriors(model, histogram_specs, input = 'data', n = 3, signal_prior = 'flat', nuisance_prior = '', signal_processes = None, mcmc_iterations = 10000, **options):
    """Run MCMC posterior histograms for the given parameters and plot them.

    histogram_specs: {parameter_name: (nbins, xmin, xmax)}.
    input: data source spec ('data' or e.g. 'toys:...').
    n: number of events/toys per configuration.
    Returns {signal_process: {parameter: [plotdata, ...]}} or None when
    run_theta is disabled via options.
    NOTE(review): relies heavily on module-level helpers (write_cfg,
    run_theta, sql, plot, config.report, ...) defined elsewhere.
    """
    if signal_processes is None: signal_processes = [[sp] for sp in model.signal_processes]
    signal_prior = signal_prior_dict(signal_prior)
    nuisance_prior = nuisance_prior_distribution(model, nuisance_prior)
    # Top-level theta 'main' block: one producer, results into sqlite.
    main = {'n-events': n, 'model': '@model', 'producers': ('@posteriors',), 'output_database': sqlite_database(), 'log-report': False}
    posteriors = {'type': 'mcmc_posterior_histo', 'name': 'posteriors', 'parameters': [],
       'override-parameter-distribution': product_distribution("@signal_prior", "@nuisance_prior"),
       'smooth': options.get('smooth', True), 'iterations': mcmc_iterations }
    # One histogram configuration per requested parameter.
    for par in histogram_specs:
        posteriors['parameters'].append(par)
        nbins, xmin, xmax = histogram_specs[par]
        posteriors['histo_%s' % par] = {'range': [float(xmin), float(xmax)], 'nbins': nbins}
    toplevel_settings = {'signal_prior': signal_prior, 'posteriors': posteriors, 'main': main}
    options['load_root_plugins'] = False
    toplevel_settings.update(get_common_toplevel_settings(**options))
    cfg_names_to_run = []
    main['data_source'], toplevel_settings['model-distribution-signal'] = data_source_dict(model, input)
    cfg_names_to_run = []
    # Write one theta config per signal process combination.
    for sp in signal_processes:
        model_parameters = model.get_parameters(sp)
        toplevel_settings['nuisance_prior'] = nuisance_prior.get_cfg(model_parameters)
        name = write_cfg(model, sp, 'posteriors', input, additional_settings = toplevel_settings, **options)
        cfg_names_to_run.append(name)
    if 'run_theta' not in options or options['run_theta']:
        run_theta(cfg_names_to_run)
    else: return None

    cachedir = os.path.join(config.workdir, 'cache')
    plotsdir = os.path.join(config.workdir, 'plots')

    result = {}
    config.report.new_section('Posteriors %s' % input)
    result_table = Report.table()
    result_table.add_column('process', 'signal process')
    parameters = sorted([par for par in histogram_specs])
    for par in parameters:
        result_table.add_column('maximum posterior %s' % par)
    # Collect the per-process posterior histograms from the sqlite caches.
    for name in cfg_names_to_run:
        method, sp, dummy = name.split('-',2)
        config.report.add_html('<h2>For signal "%s"</h2>' % sp)
        result_table.set_column('process', sp)
        sqlfile = os.path.join(cachedir, '%s.db' % name)
        cols = ['posteriors__posterior_%s' % par for par in parameters]
        data = sql(sqlfile, 'select %s from products' % ', '.join(cols))
        data = [map(plotdata_from_histoColumn, row) for row in data]
        result[sp] = {}
        i = 0
        for par in parameters:
            result[sp][par] = [row[i] for row in data]
            for pd in result[sp][par]: pd.as_function = True
            plot(result[sp][par], par, 'posterior density', os.path.join(plotsdir, '%s-%s.png' % (name, par)))
            config.report.add_html('<p>%s:<br/><img src="plots/%s-%s.png" /></p>' % (par, name, par))
            i += 1
            # Report the median of the per-toy posterior maxima.
            maxima = sorted(map(argmax, result[sp][par]))
            result_table.set_column('maximum posterior %s' % par, '%.3g' % maxima[int(0.5 * len(maxima))])
        result_table.add_row()
    config.report.add_p('input: %s, n: %d, signal prior: %s, nuisance prior: %s' % (input, n, str(signal_prior), str(nuisance_prior)))
    config.report.add_html(result_table.html())
    return result
Example #24
0
    def eventoReporte(self, ventana):
        """Gather the vehicle counters from the UI and open the report window.

        ventana: parent window handed to Report.Reports.
        """
        cantidadMoto = self.CantidadMototaxi.cget("text")
        cantidadCamio = self.CantidadCamion.cget("text")
        # BUG FIX: the auto count previously re-read CantidadMototaxi
        # (copy-paste). TODO(review): confirm the widget attribute is
        # actually named CantidadAuto.
        cantidadAuto = self.CantidadAuto.cget("text")
        cantidadDeTiempo = self.CantidadTiempo

        objReporte = Report.Reports(ventana, cantidadMoto, cantidadCamio,
                                    cantidadAuto, cantidadDeTiempo)
Example #25
0
 def Probability_Map(self,
                     probability_map_report=True,
                     probability_map_plot=True,
                     cumulative=True):
     """Compute the distribution of trajectory lengths over the subset.

     Fills self.prob_map[k] with the probability that applying the learned
     policy to a hypothesis yields a trajectory of length k (optionally
     cumulative), then reports and/or plots it via Report.
     """
     self.prob_map = {length: 0 for length in range(self.num_feature + 1)}
     trajectories = Examine.Apply_Policy_To_All_Hypo(
         self.hypo_subset, self.num_feature,
         self.state_action_label_value_map)
     # Each hypothesis contributes equal probability mass.
     weight = 1 / len(self.hypo_subset)
     for trajectory in trajectories:
         self.prob_map[len(trajectory)] += weight
     if cumulative:
         # Convert the point masses into a running distribution.
         for k in range(self.num_feature):
             self.prob_map[k + 1] += self.prob_map[k]
     if probability_map_report:
         Report.Report_Prob_Table(self.prob_map)
     if probability_map_plot:
         Report.Plot_Prob_Table(self.prob_map)
Example #26
0
 def __init__(self, tag, verbose=True):
     """Create the columns table and a report whose first line is *tag*."""
     defs = column_defs('orderid', 'effectivedatetime', 'quantity',
                        'oasspread_buy', 'oasspread_dealer', 'oasspread_sell',
                        'restated_trade_type', 'retained_order')
     self.ct = ColumnsTable.ColumnsTable(defs)
     self.report = Report.Report(also_print=verbose)
     for line in (tag, ' '):
         self.report.append(line)
Example #27
0
def generate_report(one_file_report = False):
    """Create the HTML test report by delegating to `Report.py`."""
    print('\n-- Generating a test report\n')

    # `.format()` collapses the '{{'/'}}' escapes; then expand env vars.
    report_root = os.path.expandvars(report_path.format())
    CmdArgs = collections.namedtuple('CmdArgs',
                                     ['one_file', 'log_dir', 'report_dir'])
    Report.make_report(CmdArgs(
        one_file=one_file_report,
        log_dir='{0}/logs'.format(report_root),
        report_dir=report_root,
    ))

    print('\n-- Test report has been generated and is available here:')
    print('-- "{0}/report.html"'.format(report_root))
    print()
Example #28
0
def thread_function(input_address, output_address):
    """Worker: run the queueing simulation for one input file and report.

    Reads input_address, simulates up to max_number_of_tasks tasks (after a
    warm-up period excluded from the statistics), writes the report to
    output_address, closes the GUI window and terminates the interpreter.
    NOTE(review): the declared globals 'done'/'is_simulating' are never used
    in this body -- presumably assigned elsewhere; confirm.
    """
    global done
    global is_simulating
    max_number_of_tasks = 50_000_000  # maximum number of tasks
    number_of_warm_up_task = 5_000  # after this number of tasks we begin to collect the statistics

    input_data = get_data.get(input_address)  # getting data from an input file
    task_generator = get_initial_data_generator.get_task_generator(
        input_data)  # make a task generator
    system = System(input_data)  # make a main system
    AccuracyChecker.initial(system)

    simulation.simulate(number_of_warm_up_task, max_number_of_tasks,
                        task_generator, system, StatisticalData(input_data))
    Report.print_report(output_address)
    window.close()
    exit()
Example #29
0
    def main(self, method, args):
        """Connect to each target, spawn get/send workers per *method*, run
        the report process, then join the workers and exit.

        method: 'all', 'get' or 'send' -- which worker processes to start
            for each connection.
        args: iterable of argument tuples for Pysensor.Connect2txByIpaddr.
        """
        signal.signal(signal.SIGINT, self.sig_handler)
        signal.signal(signal.SIGTERM, self.sig_handler)

        self.r = CRedis.CRedis()
        self.r.clear()
        path = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'logs'
        if not os.path.exists(path):
            os.makedirs(path)

        self.r.set('ctime', time.time())
        self.r.set('is_report', 1)
        self.fds = []
        for arg in args:
            # Time each connection attempt and record its outcome.
            tname_Connect2txByIpaddr = 'Connect2txByIpaddr'
            ctime_Connect2txByIpaddr = time.time()
            res_Connect2txByIpaddr = Pysensor.Connect2txByIpaddr(*arg)
            self.record(tname_Connect2txByIpaddr, res_Connect2txByIpaddr[1],
                        ctime_Connect2txByIpaddr, time.time())
            # res[0] is the connection fd; remember which args produced it.
            self.fds.append(res_Connect2txByIpaddr[0])
            self.fd_args[res_Connect2txByIpaddr[0]] = arg

        fd_list = []
        for fd in self.fds:
            if method == 'all':
                p1 = multiprocessing.Process(target=self.getMsg, args=(fd, ))
                p2 = multiprocessing.Process(target=self.sendMsg, args=(fd, ))

                p1.start()
                p2.start()

                fd_list.append((p1, p2))

            if method == 'get':
                p1 = multiprocessing.Process(target=self.getMsg, args=(fd, ))
                p1.start()
                fd_list.append(p1)

            if method == 'send':
                p2 = multiprocessing.Process(target=self.sendMsg, args=(fd, ))
                p2.start()
                fd_list.append(p2)

        # Test report (runs to completion before the workers are joined).
        report = Report.Report()
        p3 = multiprocessing.Process(target=report.report, args=())
        p3.start()
        p3.join()

        for p in fd_list:
            if method == 'all':
                p[0].join()
                p[1].join()
            if method == 'get' or method == 'send':
                p.join()

        print("main exit.")
        sys.exit(0)
Example #30
0
    def createCallHistogramTable(self):
        """Return a table of trace calls ordered by descending call count."""
        table = Report.Table(["Call", "# of Calls"])

        histogram = TraceOperations.calculateCallHistogram(self.trace)
        # Sort (count, name) pairs so the most frequent calls come first.
        byCount = sorted(((count, name) for name, count in histogram.items()),
                         reverse=True)
        for count, name in byCount:
            table.addRow(name, count)

        return table
Example #31
0
 def report_create(self, service, capability, note, time):
     '''
     Test the creation of reports: every accessor must echo the constructor
     arguments, and csv_output() must serialize service/capability/note.
     '''
     created = Report.Report(service, capability, note, time)
     for actual, expected in (
             (created.get_service(), service),
             (created.get_capability(), capability),
             (created.get_note(), note),
             (created.get_time(), time),
             (created.csv_output(), f"{service},{capability},{note}")):
         self.assertEqual(actual, expected)
Example #32
0
def main():
    """
    `read_local_tables()` accepts a "sqlite3.connection" object generated 
    by `write_local_tables()` to minimize the number of SQLite connections.
    Otherwise, if `load.read_local_tables()` is called in isolation on 
    the interpreter, a new SQLite connection is made. This function returns 
    pandas.DataFrame objects for each of the four tables.
    """
    # Load the four raw tables from the local SQLite snapshot.
    # NOTE(review): database path is hard coded (WSL mount of C:\sqlite).
    Local_Connection = sqlite3.connect("/mnt/c/sqlite/099.db")
    imitmidx_sql = pd.read_sql("SELECT * FROM imitmidx_sql;", Local_Connection)
    iminvloc_sql = pd.read_sql("SELECT * FROM iminvloc_sql;", Local_Connection)
    bmprdstr_sql = pd.read_sql("SELECT * FROM bmprdstr_sql;", Local_Connection)
    sfdtlfil_sql = pd.read_sql("SELECT * FROM sfdtlfil_sql;", Local_Connection)
    # Dropping pesky "index" column of extra indices
    imitmidx_sql = imitmidx_sql.drop(columns=["index"])
    iminvloc_sql = iminvloc_sql.drop(columns=["index"])
    bmprdstr_sql = bmprdstr_sql.drop(columns=["index"])
    sfdtlfil_sql = sfdtlfil_sql.drop(columns=["index"])

    billOfMaterials, children = processing.main(imitmidx_sql, iminvloc_sql,
                                                bmprdstr_sql, sfdtlfil_sql)

    # Optional BOM report generation; currently disabled. Flip to True (or
    # wire up to sys.argv, as the name suggests was intended) to enable.
    sysargForGenReport = False
    if sysargForGenReport:
        bom_report = Report.BOMReport(billOfMaterials, imitmidx_sql,
                                      iminvloc_sql, sfdtlfil_sql, bmprdstr_sql)
        bom_report.output()

    # Starts the cost-reporting functionality
    inventory = Report.Inventory(bom_Dataframe=billOfMaterials,
                                 bom_Dict=children)
    # NOTE(review): `item` is assigned but never used — confirm whether the
    # attribute access has side effects before deleting.
    item = Report.Item.item
    # Bugfix: removed a leftover `breakpoint()` call that dropped every run
    # into the debugger; also deleted the dead commented-out gc experiment.
    return imitmidx_sql, iminvloc_sql, bmprdstr_sql, sfdtlfil_sql
Example #33
0
def pl_intervals(model, input = 'toys:0', n = 100, signal_prior = 'flat', nuisance_constraint = '', cls = [0.90], signal_processes = None, write_report = None, **options):
    """Run profile-likelihood interval calculation for each signal process.

    Writes one theta config per signal process, optionally runs theta, then
    reads the MLE value and (lower, upper) intervals per confidence level
    from the cached sqlite result files.

    Returns a dict: result[sp_id]['mle'] is a list of MLE values and
    result[sp_id][cl] a list of (lower, upper) tuples, or None if theta was
    not run (options['run_theta'] is False).

    NOTE(review): `cls = [0.90]` is a mutable default argument; it is never
    mutated here, so it is benign, but a tuple would be safer.
    """
    signal_processes = default_signal_processes(model, signal_processes)
    # By default only write a report when running on real data.
    if write_report is None: write_report = input == 'data'
    nuisance_constraint = nuisance_prior_distribution(model, nuisance_constraint)
    signal_prior_spec = signal_prior
    signal_prior = signal_prior_dist(signal_prior)
    model_signal_prior = model_signal_prior_dist(input)
    data_source_dict, model_dist_signal_dict = utils.data_source_dict(model, input)
    # One theta configuration file per signal process.
    cfg_names_to_run = []
    for sp in signal_processes:
        main = Run(n, data_source_dict, model_signal_prior)
        pl = PliProducer(Distribution.merge(signal_prior, nuisance_constraint), cls, **options)
        main.add_producer(pl)
        name = write_cfg2(main, model, sp, 'pl_intervals', input)
        cfg_names_to_run.append(name)
    run_theta_ = options.get('run_theta', True)
    if run_theta_: run_theta(cfg_names_to_run)
    else: return None

    result_table = Report.table()
    result_table.add_column('signal process')
    result_table.add_column('mle', 'mle')
    for cl in cls: result_table.add_column('cl%g' % cl, 'confidence level %g' % cl)
    cachedir = os.path.join(config.workdir, 'cache')
    # Column suffixes encode the confidence level, e.g. 0.90 -> '09000'.
    col_suffixes= ['%05d' % (cl*10000) for cl in cls]
    result = {}
    for i in range(len(cfg_names_to_run)):
        name = cfg_names_to_run[i]
        method, sp_id, dummy = name.split('-',2)
        result_table.set_column('signal process', sp_id)
        result[sp_id] = {'mle': []}
        for cl in cls: result[sp_id][cl] = []
        # NOTE(review): `sp` here is the leftover value from the earlier loop,
        # and `model_parameters` is never used — looks like dead code; verify.
        model_parameters = model.get_parameters(sp)
        sqlfile = os.path.join(cachedir, '%s.db' % name)
        colnames = []
        for cs in col_suffixes:
            colnames.append('pli__lower%s' % cs)
            colnames.append('pli__upper%s' % cs)
        data = sql(sqlfile, 'select pli__maxl, %s from products' % (', '.join(colnames)))
        if len(data)==0: raise RuntimeError, "no result (fit not coverged?)"
        # Only the first row's values are shown in the report table; all rows
        # are accumulated in `result`.
        first_row = True
        for row in data:
            result[sp_id]['mle'].append(row[0])
            if first_row: result_table.set_column('mle', '%g' % row[0])
            for icol in range(len(cls)):
                # Row layout: [maxl, lower_0, upper_0, lower_1, upper_1, ...]
                interval = (row[2*icol+1], row[2*icol+2])
                result[sp_id][cls[icol]].append(interval)
                if first_row:
                    result_table.set_column('cl%g' % cls[icol], '(%g, %g)' % interval)
            first_row = False
        result_table.add_row()
    if write_report:
        config.report.new_section("deltanll intervals")
        config.report.add_html(result_table.html())
    return result
Example #34
0
 def __init__(self, ticker, msg, verbose=True):
     """Build the skeleton of a missing-values report for one input csv file.

     ticker, msg: used only in the report title line.
     verbose: forwarded to Report.Report(also_print=...), so the report is
         echoed while it is built.
     """
     # Two-column layout: column name and its NaN count.
     self.ct = ColumnsTable.ColumnsTable([
         ('column', 22, '%22s', 'column', 'column in input csv file'),
         ('n_nans', 7, '%7d', 'n_NaNs',
          'number of NaN (missing) values in column in input csv file'),
     ])
     self.report = Report.Report(also_print=verbose, )
     self.report.append('Missing Values in Input File %s For Ticker %s' %
                        (msg, ticker))
     self.report.append(' ')
     # Rows added later are also collected here — presumably for later
     # inspection by callers; confirm against usage elsewhere.
     self.appended = []
Example #35
0
def main() -> None:
    """Show the main menu repeatedly until the user chooses 0 (Exit)."""
    # Menu choice -> handler; looked up lazily so only the chosen one runs.
    actions = {
        '1': menuRegister.menu,
        '2': Report.menu,
        '3': DataAnalysis.menu,
    }
    while True:
        print(''' Select the option:
        1 - Register
        2 - Report
        3 - Data Analysis
        0 - Exit ''')
        choice = input('option: ').strip()
        if choice == '0':
            break
        handler = actions.get(choice)
        if handler is not None:
            handler()
        else:
            print('\33[1;31mInvalid option\33[m')
    print('Thanks')
Example #36
0
 def __init__(self, folder_path, take_max=-1, get_classes_function=None):
     """Initialize a pairs-dataset reader rooted at *folder_path*.

     folder_path: directory containing the per-pair json files.
     take_max: item cap; -1 presumably means "take all" — confirm at call sites.
     get_classes_function: optional callable used to extract classes from items.
     """
     self.folder_path = folder_path
     # Dataset name derived from the folder name, e.g. "pairs_foo_new" -> "foo".
     self.dataset_name = os.path.basename(folder_path).replace(
         'pairs_', '').replace('_new', '')
     self.take_max = take_max
     self.number_of_files_in_folder = countJsonFiles(folder_path)
     # Errors encountered while reading pairs are logged to this report.
     errors_report_path = os.path.normcase(folder_path + '/pairs_errors')
     self.errors_report = Report(errors_report_path)
     self.get_classes_function = get_classes_function
     # Paths of the auxiliary index/metadata json files inside the folder.
     self.index_file_path = os.path.normcase(self.folder_path +
                                             '/classes_index.json')
     self.filters_indexes_file_path = os.path.normcase(
         self.folder_path + '/filters_indexes.json')
     self.classes_list_file_path = os.path.normcase(self.folder_path +
                                                    '/classes_list.json')
     self.numbers_from_index = None
     # Running count of items taken so far.
     self.took = 0
     self.return_names = False
     # Build the classes index the first time this folder is used.
     if not os.path.exists(self.index_file_path):
         self.dumpClassesIndex()
def verify(p,c,maxproc=2,timeout=None):
    """Run each model of protocol *p* for claim/config *c* and report the tests.

    p: protocol object exposing .models, .name, .toolname and .roleCount().
    c: claim/configuration forwarded to Test.Test and getAgents.
    maxproc: process bound used by the MaxProc model and scenario generation.
    timeout: forwarded to each Test.Test.
    """

    for m in p.models:
        t = None
        comment = "MaxProc%i" % maxproc
        if m == "Traces":
            t = Test.Test(p.name,c,p.toolname,m,timeout=timeout,comment=comment)
        elif m == "MaxProc":
            t = Test.Test(p.name,c,p.toolname,m,maxproc,timeout=timeout,comment=comment)
        elif m == "Scen":
            # Full scenario set, filled over all agent assignments.
            agents = getAgents(c)
            sl = Scenario.ScenarioSet(p.roleCount(),maxproc,fill=True,agentcount=agents).list
            t = Test.Test(p.name,c,p.toolname,m,sl,timeout=timeout,comment=comment)
        elif m == "RepScen":
            # Representative scenarios: single-process set reduced via cover().
            agents = getAgents(c)
            ss = Scenario.ScenarioSet(p.roleCount(),1,fill=True,agentcount=agents)
            ss.cover()
            sl = ss.list
            t = Test.Test(p.name,c,p.toolname,m,sl,timeout=timeout,comment=comment)
        # Unknown model names are silently skipped (t stays None).
        if t:
            Report.report(t)
Example #38
0
 def setDiag(self, sender):
     """Return the dialog object matching *sender*, or None for unknown labels.

     Constructors are wrapped in lambdas so only the selected dialog is built.
     """
     builders = {
         'CQC Check-out': lambda: Checkout.Checkout(),
         'CQC WIP Report': lambda: Report.Report(self.cs),
         'Product Manager': lambda: Manager.Manager(self.cs),
         'CQC on the Way': lambda: Shipment.Shipment(self.cs),
         'CQC Check-in': lambda: Receipt.Receipt(self.cs),
         ' CQC Transfer    ': lambda: Lookup.Lookup(self.cs),
     }
     builder = builders.get(sender)
     if builder is not None:
         return builder()
Example #39
0
def report_limit_band_plot(expected_limits, observed_limits, name, shortname, write_table = True):
    """Add a limits section (plots and, optionally, a table) to config.report.

    expected_limits / observed_limits: plot-like objects with .x, .y, .bands,
        .yerrors and .legend attributes; either may be None.
    name: human-readable section name; shortname: used in the plot file names.
    Returns the list of plots added, or None if both inputs are None.
    """
    plotsdir = os.path.join(config.workdir, 'plots')
    plots = []
    extra_legend_items = []
    if expected_limits is not None:
        expected_limits.legend = 'median expected limit'
        # bands[0] is the 2-sigma band, bands[1] the 1-sigma band.
        extra_legend_items.append((expected_limits.bands[0][2], '$\\pm 2\\sigma$ expected limit'))
        extra_legend_items.append((expected_limits.bands[1][2], '$\\pm 1\\sigma$ expected limit'))
        plots.append(expected_limits)
    if observed_limits is not None:
        observed_limits.legend = 'observed limit'
        plots.append(observed_limits)
    if len(plots) == 0: return
    config.report.new_section('Limits %s' % name)
    if write_table:
        result_table = Report.table()
        result_table.add_column('process', 'signal process')
        if expected_limits is not None:
            result_table.add_column('exp', 'expected limit')
            result_table.add_column('exp1', 'expected limit (central 1sigma)')
            result_table.add_column('exp2', 'expected limit (central 2sigma)')
        if observed_limits is not None:
            result_table.add_column('obs', 'observed limit')
        # x values come from whichever limits object is available.
        x_values = []
        if expected_limits is not None: x_values = expected_limits.x
        else: x_values = observed_limits.x
        for i in range(len(x_values)):
            result_table.set_column('process', '%g' % x_values[i])
            if expected_limits is not None:
                result_table.set_column('exp', '%.3g' % expected_limits.y[i])
                result_table.set_column('exp1', '%.3g--%.3g' % (expected_limits.bands[1][0][i], expected_limits.bands[1][1][i]))
                result_table.set_column('exp2', '%.3g--%.3g' % (expected_limits.bands[0][0][i], expected_limits.bands[0][1][i]))
            if observed_limits is not None:
                if observed_limits.yerrors is not None:
                    result_table.set_column('obs', '%.3g +- %.3g' % (observed_limits.y[i], observed_limits.yerrors[i]))
                else:
                    result_table.set_column('obs', '%.3g' % observed_limits.y[i])
            result_table.add_row()
        config.report.add_html(result_table.html())
    # One linear-scale and one log-scale plot, both embedded into the report.
    plot(plots, 'signal process', 'upper limit', os.path.join(plotsdir, 'limit_band_plot-%s.png' % shortname), extra_legend_items=extra_legend_items)
    plot(plots, 'signal process', 'upper limit', os.path.join(plotsdir, 'limit_band_plot-log-%s.png' % shortname), logy = True, extra_legend_items=extra_legend_items)
    config.report.add_html('<p><img src="plots/limit_band_plot-%s.png" /></p>' % shortname)
    config.report.add_html('<p><img src="plots/limit_band_plot-log-%s.png" /></p>' % shortname)
    return plots
Example #40
0
from COINPlus import COINPlus
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: number of seed nodes, contextual mode enabled.
seed_size = 50
iscontextual = True

# Run the COINPlus experiment once per graph, skipping finished runs.
for graph_file in ["nethept.txt", "subnethept.txt"]:
	if(graph_file == "subnethept.txt"):
		epochs = 5000
	elif(graph_file == "nethept.txt"):
		epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	pathway = join("../../Misc/node_results/sigmoid", experiment_name, "coinplus_results.json")

	# Experiment is already done
	if(isfile(pathway)):
		continue
	print(pathway)
	coinplus = COINPlus(seed_size, graph_file, epochs, iscontextual)
	coinplus()

	Report.report("coinplus", coinplus, experiment_name)
Example #41
0
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# NOTE(review): COIN is instantiated below but never imported in this
# snippet — presumably `from COIN import COIN` exists earlier in the file;
# verify before running this standalone.
seed_size = 50

# Run the COIN experiment for each (contextual, graph) combination.
for iscontextual in [True, False]:
	for graph_file in ["subnethept.txt", "nethept.txt"]:
		# Epoch budget depends on graph size and contextual mode.
		if((graph_file == "subnethept.txt") and (iscontextual)):
			epochs = 5000
		elif((graph_file == "nethept.txt") and (iscontextual)):
			epochs = 5000
		elif((graph_file == "subnethept.txt") and (not iscontextual)):
			epochs = 1250
		elif((graph_file == "nethept.txt") and (not iscontextual)):
			epochs = 2500
		experiment_name = "contextual_" if(iscontextual) else "noncontextual_"
		experiment_name += graph_file[:-4]

		pathway = join("../../Misc/results", experiment_name, "coin_results.json")

		# Experiment is already done
		if(isfile(pathway)):
			continue

		coin = COIN(seed_size, graph_file, epochs, iscontextual)
		coin()

		Report.report("coin", coin, experiment_name)
Example #42
0
def Main():
	rptObj = Report('c:\\overnitebld.log');

	# TODO: When debugging is complete, remove the following line of code to turn
	#	verbose mode off
	rptObj.verboseModeOn()

	# Get command line arguments, the first of which is this script's name
	arguments = sys.argv

	# Name of script + two parameters = 3 parameters (we just don't care about the first one)
	if len(arguments) < 3:
		misc_DisplayUsage(rptObj)
		raise Exception, 'Too few command line parameters'

	nodebugbld = false
	noreleasebld = false
	noboundsbld = false
	noautotest = false
	norefreshsrc = false
	nocreatedbs = false
	noinstallbldr = false
	nocreatedoc = false
	testrefresh = false
	testdoc = false
	testdb = false
	testdb = false
	testinst = false
	nodeloutput = false

	# Assume the first two arguments are the build root and output root, and ensure they
	#	end with \ (backslashes)
	strBldFldr = arguments[1]
	strBldFldr = re.sub('\\\\*$', '\\\\', strBldFldr, 1)
	strOutputFldr = arguments[2]
	strOutputFldr = re.sub('\\\\*$', '\\\\', strOutputFldr, 1)
	bldLevel = ''

	TODO: Add strFWDeliverables variable and retrieve off the command line.

	# Loop through and process the command line arguments
	for iArgs in range(3, len(arguments)):
		strArg = arguments[iArgs]

		if strArg[0] != '-':
			rptObj.echoIt('Options must be preceeded by a "-".')
			raise Exception, 'Option not preceeded by a "-"'

		cmd = string.lower(strArg[1])
		if cmd == 'b':
			bldLevel = long(strArg[2])
			if bldLevel < 0 or bldLevel > 8:
				rpt.echoIt('ERROR: Buildlevel must be an integer between 0 and 8.')
				raise Exception, 'Invalid Buildlevel--must be an integer between 0 and 8'
			rptObj.echoItVerbose('Build level set to ' + strArg[2])
		elif cmd == 'l':
			strVSLabel = strArg[2]
			rptObj.echoItVerbose('Applying label ' + strVSSLabel)
		# The command dVariableName results in the the code "VariableName = true" being executed
		elif cmd == 'd':
			strVar = strArg[2:len(strArg)]
			strVar = string.lower(strVar)
			try:
				exec strVar + '= true'
			except:
				raise 'Could not assign variable ' + strVar
			rptObj.echoItVerbose('Defining ' + strVar)
		elif cmd == 'o':
			strOutputFldrOverride = strArg[2:len(strArg)]
			rptObj.echoItVerbose('Overriding output directory ' + strVar)

		else:
			rptObj.echoIt('ERROR: Invalid argument, "' + strArg + '"')
			raise Exception, 'Invalid argument, "' + strArg + '"'

	if testrefresh:
		nodebugbld = true
		noreleasebld = true
		noboundsbld = true
		noautotest = true
		nodeloutput = true
		norefreshsrc = false
		nocreatedbs = true
		noinstallbldr = true
		nocreatedoc = true
	if testdoc:
		nodebugbld = true
		noreleasebld = true
		noboundsbld = true
		noautotest = true
		nodeloutput = true
		norefreshsrc = true
		nocreatedbs = true
		noinstallbldr = true
		nocreatedoc = false
	if testdb:
		nodebugbld = true
		noreleasebld = true
		noboundsbld = true
		noautotest = true
		nodeloutput = true
		norefreshsrc = true
		nocreatedbs = false
		noinstallbldr = true
		nocreatedoc = true
	if testinst:
		nodebugbld = true
		noreleasebld = true
		noboundsbld = true
		noautotest = true
		nodeloutput = true
		norefreshsrc = true
		nocreatedbs = true
		noinstallbldr = false
		nocreatedoc = true
	rptObj.echoItVerbose('Setting up build system')
	rptObj.echoItVerbose('Setting env vars.')

	os.environ['BUILD_LEVEL'] = str(bldLevel)
	#os.environ['FWROOT'] = strBldFldr
	# Set FWROOT and BUILD_ROOT to the source file directory (used w/mkall.bat)
	os.environ['FWROOT'] = 'c:\fwsrc'
	os.environ['BUILD_ROOT'] = 'c:\fwsrc'

	# Delete output directories
	if not nodeloutput:
		rptObj.echoItVerbose('Removing the output folders...')
		try:
			rptObj.echoItVerbose('Removing the output folder...')
			file_DeleteFolder(os.path.join(strBldFldr, 'output'))
			rptObj.echoItVerbose('Removing the obj folder...')
			file_DeleteFolder(os.path.join(strBldFldr, 'obj'))
			rptObj.echoItVerbose('Removing the overnite files...')
			file_DeleteFile(os.path.join(strOutputFldr, 'overnite.tst'))
			rptObj.echoItVerbose('Done clearing out source tree')
		except Exception, err:
			rptObj.reportFailure('Unable to recreate source tree, ' + str(err), 1)
			raise
Example #43
0
def ml_fit(model, input = 'data', signal_prior = 'flat', nuisance_constraint = 'shape:fix', signal_processes = None, n = 1, **options):
    """Run a maximum-likelihood fit per signal process via theta configs.

    Writes one config per signal process, optionally runs theta, then reads
    the fitted (value, error) pairs per parameter from the cached sqlite
    files and summarizes them into config.report.

    Returns result[sp_id][parameter] = list of (value, error) tuples, or
    None when options['run_theta'] is False.
    """
    if signal_processes is None: signal_processes = [[sp] for sp in model.signal_processes]
    beta_signal_value = 0.0
    nuisance_constraint = nuisance_prior_distribution(model, nuisance_constraint)
    # Keep the original spec string; `signal_prior` is converted in place.
    signal_prior_spec = signal_prior
    signal_prior = signal_prior_dict(signal_prior)
    main = {'n-events': n, 'model': '@model', 'producers': ('@mle',), 'output_database': sqlite_database(), 'log-report': False}
    mle = {'type': 'mle', 'name': 'mle', 'parameters': None,
       'override-parameter-distribution': product_distribution("@signal_prior", "@nuisance_constraint"),
       'minimizer': minimizer()}
    cfg_options = {'plugin_files': ('$THETA_DIR/lib/core-plugins.so','$THETA_DIR/lib/root.so')}
    # NOTE(review): signal_prior_dict is applied a second time here to an
    # already-converted value — verify this double application is intended.
    toplevel_settings = {'model-distribution-signal': delta_distribution(beta_signal = beta_signal_value), 'mle': mle, 'main': main, 'signal_prior':
        signal_prior_dict(signal_prior),  'options': cfg_options}
    main['data_source'], toplevel_settings['model-distribution-signal'] = data_source_dict(model, input, **options)
    cfg_names_to_run = []
    for sp in signal_processes:
        model_parameters = sorted(list(model.get_parameters(sp)))
        mle['parameters'] = model_parameters
        # Only constrain beta_signal when the process actually has it.
        if 'beta_signal' in model_parameters: mle['override-parameter-distribution'] = product_distribution("@signal_prior", "@nuisance_constraint")
        else: mle['override-parameter-distribution'] = "@nuisance_constraint"
        toplevel_settings['nuisance_constraint'] = nuisance_constraint.get_cfg(model_parameters)
        name = write_cfg(model, sp, 'ml_fit', input, additional_settings = toplevel_settings)
        cfg_names_to_run.append(name)
    if 'run_theta' not in options or options['run_theta']:
        run_theta(cfg_names_to_run)
    else: return None
    cachedir = os.path.join(config.workdir, 'cache')
    
    result = {}
    result_table = Report.table()
    result_table.add_column('process', 'signal process')
    
    nuisance_parameters = sorted(list(model.get_parameters('')))
    for p in nuisance_parameters:
        suffix = ''
        if nuisance_constraint.get_distribution(p)['width'] == 0.0: suffix = ' (fixed)'
        result_table.add_column(p, '%s%s' % (p, suffix))
    suffix = ''
    if signal_prior_spec.startswith('fix:'): suffix = ' (fixed)'
    result_table.add_column('beta_signal', 'beta_signal%s' % suffix)
    for i in range(len(cfg_names_to_run)):
        sp = signal_processes[i]
        name = cfg_names_to_run[i]
        method, sp_id, dummy = name.split('-',2)
        result[sp_id] = {}
        result_table.set_column('process', sp_id)
        model_parameters = model.get_parameters(sp)
        sqlfile = os.path.join(cachedir, '%s.db' % name)
        # Each parameter contributes two columns: fitted value and its error.
        cols = ['mle__%s, mle__%s_error' % (p, p) for p in model_parameters]
        data = sql(sqlfile, 'select %s from products' % ', '.join(cols))
        if len(data) == 0: raise RuntimeError, "no data in result file '%s'" % sqlfile
        # NOTE(review): `i` and `n` below shadow the outer loop index and the
        # function parameter; harmless here (both are re-bound before reuse)
        # but worth renaming.
        i = 0
        for p in model_parameters:
            result[sp_id][p] = [(row[2*i], row[2*i+1]) for row in data]
            i += 1
            sorted_res = sorted([res[0] for res in result[sp_id][p]])
            n = len(sorted_res)
            # With >= 10 toys, report the median plus the central 68% range.
            if n >= 10:
                result_table.set_column(p, '%.3g (%.3g, %.3g)' % (sorted_res[int(0.5*n)], sorted_res[int(0.16*n)], sorted_res[int(0.84*n)]))
            else: result_table.set_column(p, '%.3g' % sorted_res[int(0.5*n)])
        for p in nuisance_parameters + ['beta_signal']:
            if p in model_parameters: continue
            result_table.set_column(p, 'n/a')
        result_table.add_row()
    config.report.new_section("Maximum Likelihood fit on ensemble '%s'" % input)
    config.report.add_p('The table entries give the median (and, if n>=10 the central 68%) of the parameter values at the found maximum of the likelihood function.')
    #config.report.add_p('input: %s, n: %d, signal prior: %s, nuisance prior: %s' % (input, n, str(signal_prior), str(nuisance_constraint)))
    config.report.add_html(result_table.html())
    return result
Example #44
0
 def restore(self, diff_snapshot):
     """Fix every volume from *diff_snapshot* and collect the results in a Report."""
     result = Report()
     volume_map = diff_snapshot.convert_to_dict()['volumes']
     for volume_id in volume_map:
         fixed = self.fix(volume_id, volume_map[volume_id])
         result.addVolume(volume_id, fixed)
     return result
Example #45
0
def cls_limits(model, input = 'toys:0', n = 1000, cl = 0.95, ts = 'lr', signal_prior = 'flat', nuisance_prior = 'shape:fix', signal_processes = None, **options):
    if signal_processes is None: signal_processes = [[sp] for sp in model.signal_processes]
    # 1. make the belts:
    options['ts'] = ts
    options['signal_prior'] = signal_prior
    options['nuisance_prior'] = nuisance_prior
    belts = prepare_belts(model, **options)
    # 2. calculate the test statistic values for the ensemble 'input'
    signal_prior = signal_prior_dict(signal_prior)
    nuisance_prior = nuisance_prior_distribution(model, nuisance_prior)
    main = {'n-events': n, 'model': '@model', 'producers': ('@ts_producer',),
        'output_database': sqlite_database(), 'log-report': False}
    minimizer = {'type': 'root_minuit'}
    if ts == 'mle':
        ts_producer = {'type': 'mle', 'name': 'mle', 'minimizer': minimizer, 'parameter': 'beta_signal',
           'override-parameter-distribution': product_distribution("@signal_prior", "@nuisance_prior")}
        ts_colname = 'mle__beta_signal'
    elif ts == 'lr':
        ts_producer = {'type': 'deltanll_hypotest', 'name': 'lr', 'minimizer': minimizer,
        'signal-plus-background-distribution': product_distribution("@signal_prior", "@nuisance_prior"),
        'background-only-distribution': product_distribution(delta_distribution(beta_signal = 0.0), "@nuisance_prior")}
        ts_colname = 'lr__nll_diff'
    else: raise RuntimeError, 'unknown ts "%s"' % ts
    toplevel_settings = {'signal_prior': signal_prior, 'main': main, 'ts_producer': ts_producer}
    toplevel_settings.update(get_common_toplevel_settings(**options))
    main['data_source'], toplevel_settings['model-distribution-signal'] = data_source_dict(model, input, **options)
    cfg_names_to_run = []
    for sp in signal_processes:
        model_parameters = model.get_parameters(sp)
        toplevel_settings['nuisance_prior'] = nuisance_prior.get_cfg(model_parameters)
        name = write_cfg(model, sp, 'cls_limits', '', additional_settings = toplevel_settings, **options)
        cfg_names_to_run.append(name)
    if 'run_theta' not in options or options['run_theta']:
        run_theta(cfg_names_to_run)
    else: return None
    
    cachedir = os.path.join(config.workdir, 'cache')
    
    result = {}
    result_table = Report.table()
    result_table.add_column('process', 'signal process')
    if input=='data': header = '%f %% C.L. upper limit' % (cl * 100)
    else: header = '%f %% C.L. upper limit (median; central 68%%; central 95%%)' % (cl * 100)
    result_table.add_column('limit', header)
    for name in cfg_names_to_run:
        method, sp, dummy = name.split('-',2)
        result_table.set_column('process', sp)
        sqlfile = os.path.join(cachedir, '%s.db' % name)
        data = sql(sqlfile, 'select %s from products' % ts_colname)
        #TODO: make belt evaluation more efficient !!!
        #result[sp] = map(lambda ts: belts[sp].cls(ts, cl)[0], [row[0] for row in data])
        ts_values = sorted([row[0] for row in data])
        n = len(ts_values)
        if n == 0:
           result_table.set_column('%f quantile' % q, 'N/A')
           continue
        if input == 'data':
            limit, limit_uncertainty = belts[sp].cls(ts_values[n/2], cl)
            result_table.set_column('limit', '%.5g +- %.3g' % (limit, limit_uncertainty))
        else:
            limit_median, limit1low, limit1high, limit2low, limit2high = map(lambda ts: belts[sp].cls(ts, cl)[0], [ts_values[n / 2], ts_values[int(0.16 * n)],
                ts_values[int(0.84 * n)], ts_values[int(0.025 * n)], ts_values[int(0.975 * n)]])
            result_table.set_column('limit', '%.3g  (%.3g, %.3g; %.3g, %.3g)' % (limit_median, limit1low, limit1high, limit2low, limit2high))
        result_table.add_row()
    config.report.new_section("CLs limits on ensemble '%s'" % input)
    #config.report.add_p('input: %s, n: %d, signal prior: %s, nuisance prior: %s' % (input, n, str(signal_prior), str(nuisance_prior)))
    config.report.add_html(result_table.html())
    return result
Example #46
0
def ml_fit2(model, input = 'data', signal_prior = 'flat', nuisance_constraint = 'shape:fix', signal_processes = None, n = 1, **options):
    """Maximum-likelihood fit per signal process (newer config API than ml_fit).

    Builds Run/MleProducer configs, optionally runs theta, then collects per
    parameter (value, error) tuples, nll values and event ids from the cached
    sqlite files, and summarizes them into config.report.

    Returns result[sp_id] with per-parameter lists plus 'nll' and 'eventid'
    keys, or None when options['run_theta'] is False.
    """
    signal_processes = default_signal_processes(model, signal_processes)
    nuisance_constraint = nuisance_prior_distribution(model, nuisance_constraint)
    # Keep the original spec string; `signal_prior` is converted in place.
    signal_prior_spec = signal_prior
    signal_prior = signal_prior_dist(signal_prior)
    model_signal_prior = model_signal_prior_dist(input)
    data_source_dict, model_dist_signal_dict = utils.data_source_dict(model, input, **options)
    # One theta configuration per signal process.
    cfg_names_to_run = []
    for sp in signal_processes:
        main = Run(n, data_source_dict, model_signal_prior)
        mle = MleProducer(Distribution.merge(signal_prior, nuisance_constraint))
        main.add_producer(mle)
        name = write_cfg2(main, model, sp, 'ml_fit', input, **options)
        cfg_names_to_run.append(name)
    run_theta_ = options.get('run_theta', True)
    if run_theta_: run_theta(cfg_names_to_run)
    else: return None

    cachedir = os.path.join(config.workdir, 'cache')    
    result = {}
    result_table = Report.table()
    result_table.add_column('process', 'signal process')
    
    nuisance_parameters = sorted(list(model.get_parameters('')))
    for p in nuisance_parameters:
        suffix = ''
        if nuisance_constraint.get_distribution(p)['width'] == 0.0: suffix = ' (fixed)'
        result_table.add_column(p, '%s%s' % (p, suffix))
    suffix = ''
    if signal_prior_spec.startswith('fix:'): suffix = ' (fixed)'
    result_table.add_column('beta_signal', 'beta_signal%s' % suffix)
    for icfg in range(len(cfg_names_to_run)):
        sp = signal_processes[icfg]
        name = cfg_names_to_run[icfg]
        method, sp_id, dummy = name.split('-',2)
        result[sp_id] = {}
        result_table.set_column('process', sp_id)
        parameters = set(model.get_parameters(sp))
        sqlfile = os.path.join(cachedir, '%s.db' % name)
        # Two columns per parameter: fitted value and its error.
        cols = ['mle__%s, mle__%s_error' % (p, p) for p in parameters]
        data = sql(sqlfile, 'select %s from products' % ', '.join(cols))
        if len(data) == 0: raise RuntimeError, "no data in result file '%s'" % sqlfile
        # For toy ensembles, also record the true ("source") parameter values.
        if input != 'data':
            cols = ['source__%s' % p for p in parameters]
            source_par_values = sql(sqlfile, 'select %s from products' % ', '.join(cols))
        # NOTE(review): `n` below shadows the function parameter — harmless
        # here (the original n was consumed when building `main`) but fragile.
        i = 0
        for p in parameters:
            result[sp_id][p] = [(row[2*i], row[2*i+1]) for row in data]
            if input != 'data': result[sp_id]['source__%s' % p] = [row[i] for row in source_par_values]
            i += 1
            sorted_res = sorted([res[0] for res in result[sp_id][p]])
            n = len(sorted_res)
            # With >= 10 toys, report the median plus the central 68% range.
            if n >= 10:
                result_table.set_column(p, '%.3g (%.3g, %.3g)' % (sorted_res[int(0.5*n)], sorted_res[int(0.16*n)], sorted_res[int(0.84*n)]))
            else: result_table.set_column(p, '%.3g' % sorted_res[int(0.5*n)])
        nll_values = sql(sqlfile, 'select eventid, mle__nll from products')
        result[sp_id]['nll'] = [row[1] for row in nll_values]
        result[sp_id]['eventid'] = [row[0] for row in nll_values]
        for p in nuisance_parameters + ['beta_signal']:
            if p in parameters: continue
            result_table.set_column(p, 'n/a')
        result_table.add_row()
    config.report.new_section("Maximum Likelihood fit on ensemble '%s'" % input)
    config.report.add_p('The table entries give the median (and, if n>=10 the central 68%) of the parameter values at the found maximum of the likelihood function.')
    config.report.add_html(result_table.html())
    return result
Example #47
0
import os, sys
import SeqUtil, Report

# Ensure working directories for alignments and Bayesian runs exist.
if not os.path.exists('aligns'):
  os.mkdir('aligns')
if not os.path.exists('Bayes'):
  os.mkdir('Bayes')
#if not os.path.exists('ML'):
#  os.mkdir('ML')
  
# Command line: <out-name> <query>, used to form the bac-<out> file names.
# NOTE(review): argv values are interpolated into os.system() shell commands
# below — a command-injection risk if ever fed untrusted input.
out= sys.argv[1]
query = sys.argv[2]
SeqUtil.rename('Data/bac-'+out+'.fas')
# Align with prank only if the alignment does not already exist.
if not os.path.exists('aligns/bac-'+out+'.best.nex'):
  os.system('prank -d=Data/bac-'+out+' -o=aligns/bac-'+out+' -f=nexus -quiet')
  SeqUtil.bayesinNex('aligns/bac-'+out+'.best.nex')
#SeqUtil.splicealign('aligns/bac-'+out+'.best.nex','Bayes/bac-'+out+'-mod.nxs')
#models=SeqUtil.bestmod('Bayes/bac-'+out+'-mod.nxs')
models_ori=SeqUtil.bestmod('aligns/bac-'+out+'.best.nex')
# Generate the MrBayes input file only once.
if not os.path.exists('Bayes/bac-'+out+'-bayes.nxs'):
  SeqUtil.bayesfile('aligns/bac-'+out+'.best.nex',models_ori,'Bayes/bac-'+out+'-bayes.nxs')
#SeqUtil.bayesfile('Bayes/bac-'+out+'-mod.nxs',models,'Bayes/bac-'+out+'-bayes.nxs')
# Run MrBayes on the generated nexus file.
os.system('mb Bayes/bac-'+out+'-bayes.nxs')
#SeqUtil.pamlseqnex('Bayes/bac-'+out+'-mod.nxs','ML/bac-'+out)
#for mod in models.keys():
#    SeqUtil.pamlinput('ML/bac-'+out,'ML/bac-'+out+'.out','ML/bac-'+out+'.ctl',{models.keys()[mod].split('+')[0]:models[models.keys()[mod]][1]})
#    os.system('codeml ML/bac-'+out+'.ctl')
#    SeqUtil.extractMLtree('ML/bac-'+out+'.out')
Report.generateReport(out,query,models_ori,'bac')
Example #48
0
  def __init__(self, project, trace, title, path, reportFormat):
    """Set up a trace report generator.

    project: project object providing targets["code"].library.
    trace: trace to report on; must contain at least one event.
    title: report title passed to Report.Report.
    path: output directory (created if missing).
    reportFormat: compiler key, e.g. "html".

    Raises RuntimeError for an unsupported format or an empty trace.
    """
    self.project           = project
    self.trace             = trace
    self.path              = path
    self.reportFormat      = reportFormat
    self.library           = project.targets["code"].library
    self.constants         = Collections.DictProxy(self.library.constants)
    self.report            = Report.Report(title)
    self.frames            = []
    self.interestingFrames = []

    # Lazy loading of charting support
    global pylab, Charting
    import Charting
    from Charting import pylab

    try:
      self.compiler = Report.createCompiler(reportFormat, self.report)
    except KeyError:
      # Bugfix: the message previously interpolated the builtin `format`
      # instead of the requested reportFormat.
      raise RuntimeError("Unsupported format: %s" % reportFormat)
    
    if not trace.events:
      raise RuntimeError("Trace is empty.")

    # Make sure the output directory exists
    Report.makePath(self.path)
      
    # Report appearance
    self.primaryColor   = "orange"
    self.secondaryColor = "lightgrey"
    self.highlightColor = "red"  
    self.thumbnailSize  = (64, 64)
    
    # Chart appearance
    if pylab:
      pylab.subplots_adjust(hspace = .5)
      pylab.rc("text", fontsize = 7)
      pylab.rc("xtick", labelsize= 7)
      pylab.rc("ytick", labelsize= 7)
      pylab.rc("axes", labelsize= 7)
      pylab.rc("legend", fontsize = 9)

    # HTML report appearance
    if reportFormat == "html":
      self.compiler.setStyle("h1,h2,h3,h4,h5,h6", 
                             "clear: both;")
      self.compiler.setStyle("a", 
                             "color: %s;" % self.primaryColor,
                             "text-decoration: none;")
      self.compiler.setStyle("a:hover", 
                             "color: %s;" % self.highlightColor,
                             "text-decoration: underline;")
      self.compiler.setStyle("a img", 
                             "border: none;")
      self.compiler.setStyle("p.code", 
                             "background-color: #eee;",
                             "border: solid 1px #ddd;",
                             "width: 66%;",
                             "padding: 1em;",
                             "margin-left: 4em;")
      self.compiler.setStyle("p.checkpass", 
                             "float: right;",
                             "padding: 1em;",
                             "border: solid 1px black;",
                             "background-color: %s;" % self.primaryColor)
      self.compiler.setStyle("p.checkfail", 
                             "float: right;",
                             "padding: 1em;",
                             "border: solid 1px black;",
                             "background-color: %s;" % self.highlightColor)
      self.compiler.setStyle("div.toc li", 
                             "list-style-type: none")
      self.compiler.setStyle("div.toc a", 
                             "color: black;")
      self.compiler.setStyle("table", 
                             "border: solid thin #aaa;",
                             "margin: 1em;",
                             "border-collapse: collapse;",
                             "margin-left: 4em;")
      self.compiler.setStyle("table th", 
                             "text-align: left;",
                             "padding: .1em 1em .1em 1em;",
                             "background-color: %s;" % (self.primaryColor))
      self.compiler.setStyle("tr.odd", 
                             "background-color: #eee;")
      self.compiler.setStyle("table td", 
                             "padding: .1em 1em .1em 1em;")
      self.compiler.setStyle("img.screenshot", 
                             "float: right;")
Example #49
0
from OIM import OIM
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: contextual OIM runs on the NetHEPT graphs.
seed_size = 50
iscontextual = True

for graph_file in ["nethept.txt", "subnethept.txt"]:
	# Both graphs use the same epoch budget.  The original if/elif set
	# 5000 in each branch and would have left `epochs` unbound for any
	# other filename, so a single assignment is both simpler and safer.
	epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	pathway = join("../../Misc/node_results/sigmoid", experiment_name, "oim_results.json")

	# Skip graphs whose results file already exists (experiment is done).
	if isfile(pathway):
		continue
	print(pathway)
	oim = OIM(seed_size, graph_file, epochs, iscontextual)
	oim()

	Report.report("oim", oim, experiment_name)
from PureExploitation import PureExploitation
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: contextual PureExploitation on the NetHEPT graphs.
seed_size = 50
iscontextual = True

for graph_file in ["nethept.txt", "subnethept.txt"]:
	# Both graphs use the same epoch budget.  The original if/elif set
	# 5000 in each branch and would have left `epochs` unbound for any
	# other filename, so a single assignment is both simpler and safer.
	epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	pathway = join("../../Misc/node_results/sigmoid", experiment_name, "pureexploitation_results.json")

	# Skip graphs whose results file already exists (experiment is done).
	if isfile(pathway):
		continue
	print(pathway)
	pureexploitation = PureExploitation(seed_size, graph_file, epochs, iscontextual)
	pureexploitation()

	Report.report("pureexploitation", pureexploitation, experiment_name)
Example #51
0
# Phylogenetic pipeline: align sequences with PRANK, pick the best model,
# run MrBayes, and emit a report.  Usage: <script> <out-tag> <query>
import os, sys
import SeqUtil, Report

# Ensure the working directories for alignments and Bayesian runs exist.
if not os.path.exists('aligns'):
  os.mkdir('aligns')
if not os.path.exists('Bayes'):
  os.mkdir('Bayes')
#if not os.path.exists('ML'):
#  os.mkdir('ML')
  
# out: dataset tag used in file names; query: passed through to the report.
out= sys.argv[1]
query = sys.argv[2]
# Normalize sequence names in the input FASTA before aligning.
SeqUtil.rename('Data/arch-'+out+'.fas')
# Run the PRANK aligner only if its NEXUS output is not already present.
if not os.path.exists('aligns/arch-'+out+'.best.nex'):
  os.system('prank -d=Data/arch-'+out+' -o=aligns/arch-'+out+' -f=nexus -quiet')
  SeqUtil.bayesinNex('aligns/arch-'+out+'.best.nex')
#SeqUtil.splicealign('aligns/arch-'+out+'.best.nex','Bayes/arch-'+out+'-mod.nxs')
#models=SeqUtil.bestmod('Bayes/arch-'+out+'-mod.nxs')
# Select the best substitution model(s) from the alignment.
models_ori=SeqUtil.bestmod('aligns/arch-'+out+'.best.nex')
# Build the MrBayes input file if it does not exist yet, then run MrBayes.
if not os.path.exists('Bayes/arch-'+out+'-bayes.nxs'):
  SeqUtil.bayesfile('aligns/arch-'+out+'.best.nex',models_ori,'Bayes/arch-'+out+'-bayes.nxs')
#SeqUtil.bayesfile('Bayes/arch-'+out+'-mod.nxs',models,'Bayes/arch-'+out+'-bayes.nxs')
os.system('mb Bayes/arch-'+out+'-bayes.nxs')
#SeqUtil.pamlseqnex('Bayes/arch-'+out+'-mod.nxs','ML/arch-'+out)
#for mod in models.keys():
#    SeqUtil.pamlinput('ML/arch-'+out,'ML/arch-'+out+'.out','ML/arch-'+out+'.ctl',{models.keys()[mod].split('+')[0]:models[models.keys()[mod]][1]})
#    os.system('codeml ML/arch-'+out+'.ctl')
#    SeqUtil.extractMLtree('ML/arch-'+out+'.out')
# Summarize the run; 'arch' presumably selects the report flavour — TODO confirm.
Report.generateReport(out,query,models_ori,'arch')
Example #52
0
    print >>sys.stderr, Config.usage_help()
    sys.exit (1)
  # read, parse and sort input
  import LogParser
  print >>sys.stderr, '%s: sorting %u files...' % (sys.argv[0], len (files))
  sort_pool = LogParser.log_file_sort_pool (files)
  print >>sys.stderr, '%s: parsing %u sorted files...' % (sys.argv[0], len (sort_pool))
  lparser = LogParser.log_file_parse_pool (sort_pool)
  # collect statistics
  stats = Statistics.Statistics (int (Config.stat_year))
  import TopVisits, DailyVisits, GeoHour
  stats.gauges += [ TopVisits.TopVisits (stats),
                    DailyVisits.DailyVisits (stats),
                    GeoHour.GeoHour (stats) ]
  stats.walk_hits (lparser)
  print >>sys.stderr, '%s: generating report...' % sys.argv[0]
  stats.done()
  # generate report
  print "Hits:\t%s" % stats.hits
  print "Visits:\t%s" % stats.visits
  destdir = './logreport'
  if not os.path.isdir (destdir) or not os.access (destdir, os.X_OK):
    try:
      os.mkdir (destdir)
    except OSError, ex:
      die (5, "failed to create or access directory %s: %s" % (destdir, ex.strerror))
  statistics_html_content = stats.as_html (destdir)
  Report.generate (destdir, stats, statistics_html_content)

# Script entry point: run the log-report pipeline on the command-line arguments.
main (sys.argv)
Example #53
0
 def restore(self, diff_snapshot):
     """Rebuild a Report from the image entries of *diff_snapshot*.

     Every image id in the snapshot is repaired via ``self.fix`` and
     registered on a fresh Report, which is returned.
     """
     result = Report()
     image_map = diff_snapshot.convert_to_dict()['images']
     for image_id, image_data in image_map.items():
         result.addImage(image_id, self.fix(image_id, image_data))
     return result
from EpsilonGreedy import EpsilonGreedy
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: contextual EpsilonGreedy on the NetHEPT graphs.
seed_size = 50
iscontextual = True

for graph_file in ["nethept.txt", "subnethept.txt"]:
	# Both graphs use the same epoch budget.  The original if/elif set
	# 5000 in each branch and would have left `epochs` unbound for any
	# other filename, so a single assignment is both simpler and safer.
	epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	pathway = join("../../Misc/node_results/sigmoid", experiment_name, "epsilongreedy_results.json")

	# Skip graphs whose results file already exists (experiment is done).
	if isfile(pathway):
		continue
	print(pathway)
	epsilongreedy = EpsilonGreedy(seed_size, graph_file, epochs, iscontextual)
	epsilongreedy()

	Report.report("epsilongreedy", epsilongreedy, experiment_name)
Example #55
0
from Thompson import Thompson
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: contextual Thompson sampling on the NetHEPT graphs.
seed_size = 50
iscontextual = True

for graph_file in ["nethept.txt", "subnethept.txt"]:
	# Both graphs use the same epoch budget.  The original if/elif set
	# 5000 in each branch and would have left `epochs` unbound for any
	# other filename, so a single assignment is both simpler and safer.
	epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	pathway = join("../../Misc/node_results/sigmoid", experiment_name, "thompson_results.json")

	# Skip graphs whose results file already exists (experiment is done).
	if isfile(pathway):
		continue
	print(pathway)
	thompson = Thompson(seed_size, graph_file, epochs, iscontextual)
	thompson()

	Report.report("thompson", thompson, experiment_name)
Example #56
0
from HighDegree import HighDegree
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: contextual HighDegree baseline on the NetHEPT graphs.
seed_size = 50
iscontextual = True

for graph_file in ["subnethept.txt", "nethept.txt"]:
	# Both graphs use the same epoch budget.  The original if/elif set
	# 5000 in each branch and would have left `epochs` unbound for any
	# other filename, so a single assignment is both simpler and safer.
	epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	# NOTE: this baseline writes under Misc/results (not node_results).
	pathway = join("../../Misc/results/sigmoid", experiment_name, "highdegree_results.json")

	# Skip graphs whose results file already exists (experiment is done).
	if isfile(pathway):
		continue
	print(pathway)
	highdegree = HighDegree(seed_size, graph_file, epochs, iscontextual)
	highdegree()

	Report.report("highdegree", highdegree, experiment_name)
Example #57
0
from COINHD import COINHD
import sys
sys.path.append("..")
import Report
from os.path import isfile, join

# Experiment configuration: contextual COIN-HD on the NetHEPT graphs.
seed_size = 50
iscontextual = True

for graph_file in ["subnethept.txt", "nethept.txt"]:
	# Both graphs use the same epoch budget.  The original if/elif set
	# 5000 in each branch and would have left `epochs` unbound for any
	# other filename, so a single assignment is both simpler and safer.
	epochs = 5000
	experiment_name = "contextual_" + graph_file[:-4]

	pathway = join("../../Misc/results/sigmoid", experiment_name, "coinhd_results.json")

	# Skip graphs whose results file already exists (experiment is done).
	if isfile(pathway):
		continue
	print(pathway)
	coinhd = COINHD(seed_size, graph_file, epochs, iscontextual)
	coinhd()

	Report.report("coinhd", coinhd, experiment_name)
Example #58
0
File: EOD.py Project: isakSoft/EOD
 def extract(self):
     try:
         print self.export_file
         target = open(self.export_file, 'w')            
         target.truncate()
         target.write("Report name\t\t\t\t\t\t\t\t\t\t\t\tInitial time   Generate time \t   Duration(H:M:S)\tStatus\n\n")
         reportList = self.filteredFileContent()
         #get report generate starting date                        
         reportDate = reportList[-1]            
         del reportList[-1]
         ##            
         tempReport = {}
         for index in range(len(reportList)):
             if index % 2 is 0: #get the starting-ending points for each generated report
                 #[::-1] used to reverse list
                 current_item = reportList[index] #starting point                    
                 next_item = reportList[index+1] #ending point                    
                 tempReport = self.extractReportDetails(current_item, next_item)
                 #print tempReport
                 reportItem = Report()                                                            
                 reportItem.setName(tempReport['name'])                    
                 reportItem.setStatus(tempReport['status']) 
                 reportItem.setInitialTimeStamp(self.toTimeStamp(tempReport['initTime'], isDecimal=False))                    
                 reportItem.setGeneratedTimeStamp(self.toTimeStamp(tempReport['genTime'], isDecimal=False))                    
                 m, r = self.extactBeautify(len(reportItem.getName())) # m => multiplier, r => reminder(user to calculate whitespace between columns)                    
                 target.write(reportItem.getName())
                 target.write("\t" * m)
                 target.write("" * r) 
                 target.write(str(reportItem.getInitialTimeStamp()))
                 target.write("\t")
                 target.write(str(reportItem.getGeneratedTimeStamp()))
                 target.write("\t\t\t\t")                    
                 h, m, s =  self.timeDiff(reportDate, reportItem.getInitialTimeStamp(), reportItem.getGeneratedTimeStamp())
                 target.write('{}h:{}m:{}s'.format(h,m,s))
                 target.write("\t")
                 target.write(reportItem.getStatus())
                 timeInSec = int(h) * 3600 + int(m) * 60 + int(s)
                 if timeInSec > 1800: # more than 30 min
                     target.write("\t")
                     target.write("DELAYS(more than 30 min)")                        
                 
                 target.write("\n")
         target.close()
     except:
         None