def update_ui():
    """Refresh the audit widgets to reflect the currently selected report."""
    item = items[current]

    # Populate audit fields from any previously recorded audit values.
    if item.get('audit_radcat'):
        radcat_entry.set("RADCAT{}".format(item['audit_radcat']))
        fu_entry.set(item['audit_radcat3'] == "Yes")
        unscored_btn.state(['!disabled'])
    else:
        radcat_entry.set('')
        fu_entry.set(False)

    r = Report(text=item['report_text'])
    # logging.debug(r.text)
    # extractions = r.extractions()
    # item['radcat'] = int(extractions.get('radcat'))
    # item['radcat3'] = "Yes" if extractions.get('radcat3') else "No"

    # Progress summary: position, total, and how many are already audited.
    complete = [k for k in items if k.get('audit_radcat')]
    task_label_str.set("Report {} of {} ({} complete)\nStatus: {}".format(
        current + 1, len(items), len(complete), item['status']))

    # The text widget is read-only; enable it only while rewriting contents.
    report_text['state'] = 'normal'
    report_text.delete('1.0', 'end')
    report_text.insert('1.0', r.anonymized())
    report_text['state'] = 'disabled'
def run(self):
    """Run the simulation until end_sim_time and return the final Report.

    Each loop iteration simulates one time unit: up to n_core queued
    tasks execute actively, the rest (and all waiting tasks) idle, and
    finished tasks are reset for their next period.
    """
    # Load input configuration (processor, memory, task set).
    InputUtils.set_processor(self)
    InputUtils.set_memory(self)
    InputUtils.set_tasks(self)
    self.setup_tasks()
    # Run simulator...
    while self.time < self.end_sim_time:
        if self.verbose == System.V_DETAIL:
            print(f'\ntime = {self.time}')
            self.print_queue()
        # Pick as many tasks as there are cores to execute during
        # [time, time+1).
        exec_task_list = []
        if len(self.queue) < self.CPU.n_core:
            # Everything queued can run (fewer tasks than cores).
            for tup in self.queue:
                exec_task_list.append(tup[1])
            self.queue = []
            # Remaining cores run idle.
            # NOTE(review): the queue is cleared just above, so this
            # idles all n_core cores regardless of how many tasks were
            # dequeued — confirm this is intended accounting.
            for i in range(self.CPU.n_core - len(self.queue)):
                self.CPU.exec_idle(time=1)
        else:
            for i in range(self.CPU.n_core):
                exec_task_list.append(self.pop_queue())
        # Execute the active tasks for one time unit.
        for exec_task in exec_task_list:
            exec_task.exec_active(system=self, time=1)
        # Remaining queued tasks idle: account power draw, advance 1s,
        # and recompute priorities.
        for i in range(len(self.queue)):
            task = self.queue[i][1]
            task.exec_idle(time=1, update_deadline=True)
            self.queue[i] = (task.calc_priority(), task)
        heapq.heapify(self.queue)  # priorities changed; restore heap order
        for tup in self.wait_period_queue:
            tup[1].exec_idle(time=1, update_deadline=False)
        self.add_utilization()
        # If an executed task finished its period, reset it and move it
        # to the wait queue; otherwise requeue it as ready.
        for exec_task in exec_task_list:
            if exec_task.det_remain == 0:
                exec_task.period_start += exec_task.period
                exec_task.det_remain = exec_task.det
                exec_task.deadline = exec_task.period
                self.push_wait_period_queue(exec_task)
            else:
                self.push_queue(exec_task)
        self.check_queued_tasks()
        self.time += 1
        self.check_wait_period_queue()
    report = Report(self)
    report.print_console()
    return report
def make_report(title, ordered_cities):
    """Return a Report tabulating median price and trade counts per city."""
    # Column spec: (name, width, format, multi-line header, legend text).
    table = ColumnsTable((
        ('city', 30, '%30s', ('', '', '', '', '', 'City'), 'city name'),
        ('median_price', 7, '%7.0f', ('', '', '', '', 'median', 'price'),
         'median price in city'),
        ('median_price_index', 7, '%7.2f',
         ('median', 'price', '/', 'overall', 'median', 'price'),
         'median price as fraction of overall median price'),
        ('n_trades', 7, '%7.0f', ('', '', '', '', 'number', 'trades'),
         'number of trades across all months'),
        ('n_trades_index', 7, '%7.2f',
         ('number', 'trades', '/ ', 'overall', 'median', 'trades'),
         'median number trades as fraction of overall median number of trades'),
    ))
    # One detail row per city, drawn from the module-level statistics.
    for name in ordered_cities:
        table.append_detail(
            city=name,
            median_price=median_prices[name],
            median_price_index=median_prices_indices[name],
            n_trades=n_trades[name],
            n_trades_index=n_trades_indices[name],
        )
    table.append_legend(40)

    result = Report()
    result.append(title)
    result.append(' ')
    for line in table.iterlines():
        result.append(line)
    return result
def reportBasicInfo(self, printOnScreen=True):
    """Build a 'Basic' report with type/min/max/mean/null count per feature.

    The report is appended to self.reports; when printOnScreen is True it
    is also displayed immediately.

    Args:
        printOnScreen: show the report via showReport() as well as storing it.
    """
    featuresType = []
    featuresMin = []
    featuresMax = []
    featuresMean = []
    for feature in self.features:
        column = self.dataFrame[feature]
        # Hoisted: the first value's type name was previously computed
        # twice per column.
        # NOTE(review): assumes a non-empty frame — .iloc[0] raises on an
        # empty column; confirm callers guarantee data is present.
        type_name = type(column.iloc[0]).__name__
        featuresType.append(type_name)
        featuresMin.append(column.min())
        featuresMax.append(column.max())
        # Mean is meaningless for string columns; record a placeholder.
        if type_name != 'str':
            featuresMean.append(column.mean())
        else:
            featuresMean.append('none')
    name = 'BasicInfo' + str(self.numberOfReports('Basic') + 1)
    report = Report(
        name,
        cols=zip(self.features, featuresType, featuresMin, featuresMax,
                 featuresMean, self.n_nulls),
        headers=["Feature", "Type", "Min", "Max", "Mean", "Num Nulls"],
        typeReport='Basic')
    if printOnScreen:
        report.showReport()
    self.reports.append(report)
def __init__(self):
    """Build the PyLoad main frame: project, report DB, notebook, menus.

    Initialization order matters: the project and report must exist
    before the NoteBook is constructed, since it receives both.
    """
    wx.Frame.__init__(self, None, -1, "PyLoad", size=(800, 600))
    self.InitProject()
    self.project = Project()
    # SQLite-backed report of the last run.
    self.reportPath = 'reports/last-report.db'
    self.report = Report(self.reportPath)
    # set self.report to None if you don't want to generate report
    #self.report = None
    self.path = None  # path of the currently loaded project file, if any
    self.nb = NoteBook(self, -1, self.project, self.report)
    self.InitIcons()
    self.UseMenuBar()
    self.UseToolBar()
    self.Bind(wx.EVT_CLOSE, self.OnClose)
    self.Bind(EVT_PLAY_STOPPED, self.OnPlayStopped)
    register_changed_callback(self.SetChanged)
    #TODO: put in tabs' constructors
    self.nb.recordTab.tree.project = self.project
    self.nb.editTab.specialsPanel.project = self.project
    self.proxy = None  # no HTTP proxy until configured
def report(self): r = Report() for bus in self.bus_list: field_ops = bus.get_field_ops() for op in field_ops: op.report(r) print r
def report_for_file(self, file_name):
    """Build and store a term-weight report for one input file.

    Extracts Russian words from the file, records tf values for every
    term, then computes rw statistics for each (method, window) pair.
    """
    print 'report for: ' + file_name
    windows = self._windows
    methods = self._methods
    report = Report(file_name, windows, methods)
    raw_txt = Reader.readFromFile(file_name)
    #print raw_txt
    words = Reader.extractWords(raw_txt, "russian")
    keywords = Reader.meter(words)
    self._keywords = keywords
    self._terms = words
    # initialize the report with each term and its tf value
    for term in self._terms:
        report.add_term_tf(term, keywords[term])
    for window in windows:
        for method in methods:
            print method, window
            (array, graph) = self.get_rw_for(method, window)
            # TODO: make graph a property; rethink this logic
            report._graph = graph
            for v in array:
                term = v.term_value
                report.add_term_rw_stats(term, method, window, v.term_weight_rw)
    self._reports[file_name] = report
def PrintReport(self):
    """Create a world report covering every entity at the current sim time."""
    # Flatten all entity categories into a single list.
    entities = (list(self.worldData.plants)
                + list(self.worldData.grazers)
                + list(self.worldData.predators)
                + list(self.worldData.obstacles))
    Report().create(entities, self.simTime)
def main():
    """Rebuild the portfolio database from CSV inputs and print a report."""
    # Delete portfolio.db if it exists. os.remove raises FileNotFoundError
    # when the file is missing, so handle that case instead of crashing on
    # a fresh checkout.
    try:
        os.remove("portfolio.db")
        print("portfolio.db removed successfully")
    except FileNotFoundError:
        pass

    # Input data paths.
    stock_filename = "data_stocks.csv"
    bond_filename = "data_bonds.csv"

    # Load stock and bond data.
    dataReader = DataReader(stock_filename, bond_filename)
    stockData = dataReader.getStockData()
    bondData = dataReader.getBondData()

    # Build the investor's portfolio.
    investor = Investor("Bob", "Smith", "123 Fake St, Denver, CO 80221",
                        "303.777.1234")
    portfolio = Portfolio(investor)
    portfolio.addStocks(stockData)
    portfolio.addBonds(bondData)

    # Produce and print the report.
    report = Report(portfolio)
    report.print()
def __init__(self, pr: int, branch: str, date: str, e_date: float,
             feature_variables: int, meta_variables: int, passed: bool,
             pr_id: int, num_samples: int, sha: str, time_elapsed: str,
             user: str, email: str, status: str, report: str = None):
    """Record the metadata of one CI/test run.

    Arguments map one-to-one onto attributes of the same name; `report`
    is the raw report payload (may be None) and is wrapped in Report.
    """
    self.pr = pr                    # pull-request number
    self.branch = branch            # git branch name
    self.date = date                # human-readable date string
    self.e_date = e_date            # presumably epoch timestamp — confirm
    self.feature_variables = feature_variables
    self.meta_variables = meta_variables
    self.passed = passed            # overall pass/fail flag
    self.pr_id = pr_id
    self.num_samples = num_samples
    self.sha = sha                  # commit SHA the run was built from
    self.time_elapsed = time_elapsed
    self.user = user
    self.email = email
    self.status = status
    # NOTE(review): Report is constructed even when report is None —
    # presumably Report tolerates that; confirm.
    self.report = Report(report)
def doPoll(self):
    """Poll the API for the last week's reports and update local state.

    New reports and fights are recorded; reports older than 14 days are
    evicted. Dirty reports get a freshly formatted chat message.
    """
    oneDay = 60 * 60 * 24
    # API timestamps are in milliseconds.
    startTime = (time.time() - 7 * oneDay) * 1000
    reports = self.api.getReports(startTime).json()
    if not reports:
        return
    for report in reports:
        report_id = report['id']  # renamed: 'id' shadowed the builtin
        fights = self.api.getFights(report_id)
        if report_id not in self.data.reports:
            self.data.reports[report_id] = Report(
                report_id, report['title'], report['owner'],
                int(report['start']))
            print("New log!")
        if len(self.data.reports[report_id].fights) < len(fights):
            print("New fight!")
            self.data.reports[report_id].dirty = True
            for fight in fights:
                fightId = fight['id']
                if fightId not in self.data.reports[report_id].fights:
                    newFight = Fight(fightId, fight['name'], report_id)
                    self.data.reports[report_id].addFight(newFight)
    # BUG FIX: iterate over a snapshot — popping from the dict while
    # iterating .items() raises RuntimeError in Python 3.
    for report_id, report in list(self.data.reports.items()):
        if report.isDirty():
            self.data.reports[report_id].message = report.getFormattedChatMessage()
        if report.startTime < (time.time() - 14 * oneDay) * 1000:
            self.data.reports.pop(report_id, None)
def make_report(n_best, n_worst):
    """Assemble the report (header + detail lines) and emit the plot."""
    r = Report()
    make_header(r)
    detail_table, extra = make_details(data, control.test_months,
                                       n_best, n_worst)
    for detail_line in detail_table.iterlines():
        r.append(detail_line)
    # Plot generation is a side effect; it is not part of the report body.
    make_plt(data, extra, n_best, n_worst)
    return r
def lint(self): wire_to_field_op = {} # maps wires to the field_op that output it specified_outputs = {} # maps wires to buses field_ops = [] # check that each wire is specified no more than once for bus in self.bus_list: for field_op in bus.get_field_ops(): field_ops.append(field_op) output_wires = field_op.output_wires() # print "field_op %s" % field_op # print " %s" % output_wires for output in output_wires: if (output in specified_outputs): print "LINT: bus %s re-specifies wire %s, already set by bus %s" % ( bus, output, specified_outputs[output]) else: specified_outputs[output] = bus wire_to_field_op[output] = field_op # check that each wire is specified at least once for idx in range(self.total_wire_count): wire = Wire(idx) if (wire not in specified_outputs): print "LINT: no bus specifies output %s" % wire # check that each field_op computes a value that eventually reaches # an output. useful_field_ops = set( filter(lambda f: isinstance(f, FieldOutput), field_ops)) done_field_ops = set() while (len(useful_field_ops) > 0): #print "Considering %s field_ops" % len(useful_field_ops) required_wires = set() for field_op in useful_field_ops: #print "inputs of %s are %s" % (field_op, field_op.input_wires()) required_wires.update(field_op.input_wires()) done_field_ops.update(useful_field_ops) #print "Considering %s required_wires" % len(required_wires) useful_field_ops = set() for wire in required_wires: print wire field_op = wire_to_field_op[wire] if (field_op not in done_field_ops): useful_field_ops.add(field_op) all_field_ops = set(field_ops) unused_field_ops = all_field_ops.difference(done_field_ops) r = Report() for field_op in unused_field_ops: field_op.report(r) if (len(r) > 0): print "LINT: %d unused field ops; cost:\n%s" % ( len(unused_field_ops), r) # print "LINT: %s" % unused_field_ops print "(info) Linted %s field ops from %s buses" % (len(field_ops), len(self.bus_list))
def __init__(self, api_aid=None, api_sec=None, api_org=None, api_url=None, api_token=None, api_token_expires=None): """ """ # logger self.log_level = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL } self.formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(funcName)s:%(lineno)d)' ) self.tcl = tc_logger() # debugging self._memory_monitor = True # credentials self._api_aid = api_aid self._api_sec = api_sec self._api_token = api_token self._api_token_expires = api_token_expires # user defined values self._api_org = api_org self._api_url = api_url self._api_result_limit = 200 # default values self._activity_log = False self._api_request_timeout = 30 self._api_retries = 5 # maximum of 5 minute window self._api_sleep = 59 # seconds self._bulk_on_demand = False self._enable_report = False self._indicators_regex = indicators_regex self._proxies = {'https': None} self._retype = type(re.compile('')) # config items self._report = [] self._verify_ssl = False # initialize request session handle self._session = Session() # instantiate report object self.report = Report() # save custom types for later self._indicator_parser = IndicatorObjectParser(self)
def __init__(self, header_lines, column_defs, print_as_spaces, verbose=True):
    """Create the report, emit its header, and prepare the column table."""
    self._report = Report()
    self._header(header_lines)
    self._print_as_spaces = print_as_spaces
    self._ct = ColumnsTable(column_defs, verbose)
def _make_report(self, counters):
    """Return a Report summarizing retained-record counters per input path.

    Args:
        counters: mapping of path -> {tag: count}.
    """
    r = Report()
    r.append('Records retained while reducing input file')
    # .items() works on both Python 2 and 3; iteritems() is Python-2-only
    # and this file already contains Python-3 code.
    for path, counter in counters.items():
        r.append(' ')
        r.append('path %s' % path)
        for tag, value in counter.items():
            r.append('%30s: %d' % (tag, value))
    return r
def __init__(self):
    """Create the backing Report and initialise all cached fields unset."""
    self.__report = Report()
    self.__csvFile = None
    # Dates come straight from the report at construction time.
    self.__arrayOfDates = self.__report.getDates()
    # Aggregates and status are computed later; start as None.
    self.__avgTemperature = None
    self.__avgHumidity = None
    self.__status = None
    self.__message = None
def reportInfoRelevancies(self, printOnScreen=True):
    """Append a 'Relevancies' report pairing features with their importances."""
    report_name = ('RelevanciesInfo'
                   + str(self.numberOfReports('Relevancies') + 1))
    relevancies = Report(
        report_name,
        cols=zip(self.features, self.regressor.feature_importances_),
        headers=["Feature", "Relevancy"],
        typeReport='Relevancies')
    if printOnScreen:
        relevancies.showReport()
    self.reports.append(relevancies)
def calculateOnFlowForFilters(pairs, filters, tasks, report_file_name, threads,
                              examples_limit=20, overwrite=True, log_level=2):
    """Compute on-flow metrics for each (filter, task) pair and persist them.

    Fixes relative to the previous version:
      * every use of the tqdm progress bar is guarded — it is only created
        when ``log_level >= 1``, so the previously unguarded calls raised
        NameError when log_level == 0;
      * ``task['preprocessing']`` is materialized into a list: the lazy
        ``map`` object was exhausted after the first batch, leaving all
        later batches reduced over nothing;
      * ``== None`` replaced with ``is None``.
    """
    pairs.dumpFiltersIndexes(filters, overwrite=overwrite)
    report = Report(report_file_name, overwrite=overwrite)
    result = report.get()
    pairs.setReturnNames(True)
    # Keep only filters that actually select at least one pair.
    compiled_filters = list(filter(lambda f: len(pairs.filterBy(f)) > 0,
                                   compileFilters(filters)))
    progress_bar = None
    if log_level >= 1:
        base_progress_bar_desc = 'Doing tasks'
        progress_bar = tqdm(total=len(compiled_filters) * len(tasks),
                            desc=base_progress_bar_desc)
    for f in compiled_filters:
        pairs.filterBy(f)
        filter_as_string = json.dumps(f)
        if filter_as_string not in result:
            result[filter_as_string] = {}
        for task in tasks:
            if progress_bar is not None:
                progress_bar.set_description(
                    base_progress_bar_desc + ' ("' + task['name']
                    + '" for filter "' + filter_as_string + '")')
            # Skip already-computed work unless the task forces a redo.
            if (task['name'] in result[filter_as_string]) and not ('overwrite' in task):
                if progress_bar is not None:
                    progress_bar.set_description(base_progress_bar_desc)
                    progress_bar.update(1)
                continue
            pairs.shuffle()
            initial_metric_values = getPairsMetricsWithPairs(
                iter(pairs), task['initial_metric'],
                threads=threads, log=(log_level > 1))
            initial_metric_values_batches = task['batcher'](
                initial_metric_values, batch_size=task['batch_size'])
            if 'values_to_load' in task:
                can_not_load_values_names = []
                fake_globals = globals()
                for v in task['values_to_load']:
                    load_result = loadOnFlowMetric(v, result,
                                                   report_file_name,
                                                   filter_as_string)
                    if load_result is None:
                        can_not_load_values_names.append(v)
                    else:
                        fake_globals[v] = load_result
                if can_not_load_values_names:
                    if log_level > 1:
                        print('\n\nCan not load values',
                              ', '.join(can_not_load_values_names),
                              'from filter', filter_as_string, '\n\n')
                    if progress_bar is not None:
                        progress_bar.update(1)
                    continue
                # Rebind the preprocessing functions over fake_globals so
                # they can see the loaded values. Materialize the list —
                # a lazy map would be consumed by the first batch only.
                task['preprocessing'] = [
                    FunctionType(fn.__code__, fake_globals)
                    for fn in task['preprocessing']
                ]
            preprocessed_batches = map(
                lambda batch: (reduce(lambda value, fn: fn(value),
                                      task['preprocessing'], batch[0]),
                               batch[1]),
                initial_metric_values_batches)
            metrics_on_flow = calculateMetricsOnFlow(
                preprocessed_batches, copy.deepcopy(task['on_flow_metrics']),
                examples_limit, log=(log_level > 1))
            result[filter_as_string][task['name']] = metrics_on_flow
            report.write(filter_as_string, result[filter_as_string],
                         dump_now=False)
            if progress_bar is not None:
                progress_bar.set_description(base_progress_bar_desc)
                progress_bar.update(1)
    report.dump()
    if progress_bar is not None:
        progress_bar.close()
    pairs.setReturnNames(False)
def main():
    """Load all-stocks JSON data and print a report over it."""
    stock_data = DataReader("allStocks.json").getData()
    Report(stock_data).print()
def f():
    """Run pylint over pyfile, showing progress in the window's status area."""
    try:
        common.setmessage(win, 'Pylint syntax checking...')
        try:
            lint.Run([pyfile], Report(win.pylintsyntaxcheckwindow.list))
        # Bare except is deliberate: pylint's Run exits via SystemExit,
        # which `except Exception` would not catch; error.track() records it.
        except:
            error.track()
    finally:
        # Always clear the status message, even if linting blew up.
        common.setmessage(win, '')
        common.note(tr('Pylint syntax checking finished!'))
def create_report(self, template='default', save_loc='default'):
    """Create a formatted report with relevant information and save it.

    Args:
        template: name of the report template to render with.
        save_loc: destination identifier passed to save_report.

    Returns:
        The log entry returned by save_report.
    """
    # create Report object with relevant company, portfolio, and news data
    rep = Report()
    # populate report in the format of a chosen template
    rep.build_report(template)
    # BUG FIX: 'this' is not defined in Python — the instance is 'self';
    # the original line raised NameError at runtime.
    self.report = rep.get_report()
    # save report in specified location
    save_log = rep.save_report(save_loc)
    # return the log of the saved report
    return save_log
def main():
    """Wire up the loggers and reporter, then begin recording."""
    api_log = APILog()
    sql_log = SQLog(filename='main_gs.db')
    reporter = Report(
        sqlogger=sql_log,
        apilogger=api_log,
    )
    Record(reporter=reporter).start_record()
def make_report(data, cities, sorted_by_tag):
    'return a Report'
    result = Report()
    # Header block, then one blank spacer line before the table.
    for header_line in (
        'Price Statistics by City',
        'Sorted by %s' % sorted_by_tag,
        'Transactions from %s to %s' % (data.date.min(), data.date.max()),
        ' ',
    ):
        result.append(header_line)
    for table_line in make_column_table(cities, data).iterlines():
        result.append(table_line)
    return result
def test_record_result():
    """record_result keeps real results in insertion order and drops None."""
    failing = TestResult(False, 'failed', 'failed detail')
    passing = TestResult(True, 'testing', 'testing detail')
    report = Report()
    for outcome in (failing, passing, None):
        report.record_result(outcome)
    # None must be ignored; the two real results keep insertion order.
    assert len(report.results) == 2
    assert report.results[0] is failing
    assert report.results[1] is passing
    report.log()
def main():
    """Fetch one day of chart data and dump it to a CSV file."""
    analyze = Analyze()
    chart_data = analyze.market.update_chart_data(start=DAY, period=HOUR / 12)
    csv_columns = [
        'date', 'volume', 'open', 'high', 'low', 'close',
        'quoteVolume', 'weightedAverage',
    ]
    Report().write_csv(data=chart_data, header=csv_columns,
                       file_name='somefile')
    print("Done!")
def main():
    """Build the data sources, reporter, and recorder, then start them."""
    module_data = ModuleData()
    gps_data = GPSData()
    api_logger = APILog()
    sql_logger = SQLog(filename='test.db')
    reporter = Report(sql_logger=sql_logger,
                      api_logger=api_logger,
                      gps_data=gps_data,
                      module_data=module_data)
    recorder = Record(reporter=reporter)
    # Flush anything left over from a previous run before recording starts.
    reporter.report_all_unsent_data()
    recorder.start_record()
    reporter.start_status_updates()
def make_report(summary):
    """Render one formatted line per numeric feature in the summary frame."""
    header_fmt = '%40s %8s %8s %8s %8s %8s %8s %8s'
    detail_fmt = '%40s %8.0f %8.0f %8.0f %8.0f %8d %8d %8.0f'
    r = Report()
    r.append(header_fmt % ('numeric feature', 'min', 'median', 'mean',
                           'max', 'distinct', 'NaN', 'std'))
    # NOTE(review): assumes `summary` has pandas-style iterrows() with the
    # columns referenced below — confirm against the caller.
    for feature_name, row in summary.iterrows():
        r.append(detail_fmt % (feature_name, row['min'], row['50%'],
                               row['mean'], row['max'],
                               row['number_distinct'], row['number_nan'],
                               row['std']))
    return r
def test_report_structure(self):
    """Build a Report from page and photo sub-reports and dump it as JSON."""
    sample_list = ["pippo", 5, "PAOLO"]
    ford = {"brand": "Ford", "model": "Mustang", "year": 1964}
    ferrari = {"brand": "Ferrari", "model": "408", "year": 1997}
    pagine = ReportPagine(sample_list, ford, ferrari)

    armani = {"brand": "Armani", "model": "jeans", "year": 2000}
    north_face = {"brand": "The north face", "model": "Jacket", "year": 2010}
    foto = ReportFoto(armani, north_face)

    print(Report("sito", pagine, foto).toJSON())
def __init__(self, k, validation_month, ensemble_weighting,
             column_definitions, test):
    """Create the report header and the columns table for detail lines."""
    self._column_definitions = column_definitions
    self._test = test
    self._report = Report()
    self._header(k, validation_month, ensemble_weighting)
    # Columns shown for each ensemble detail row.
    detail_columns = self._column_definitions.defs_for_columns(
        'description',
        'mae_validation',
        'mae_query',
        'mare_validation',
        'mare_query',
    )
    self._ct = ColumnsTable(columns=detail_columns, verbose=True)