def __iter__(self):
    t1 = now()
    report('starting query: ')
    q = self.query
    t2 = now()
    report('finished query ({0}s)'.format(t2 - t1))
    return q
def __init__(self, path=None, settings=None, items=[], fill=None,
             force=False, wrap_exit=True, **kargs):
    """ fill+path determine self.query """
    if wrap_exit:
        self.__call__ = wrap_kbi(self.__call__)
    settings._engine.get_server()  # HACK: ensure we're initialized
    self.record = defaultdict(lambda: 0)
    if self.requires_path:
        if not path or not ope(path):
            raise FileDoesntExist(str(path))
    self.path = path and abspath(path)
    self.conf = settings
    self.force = force
    if fill:
        if path is not None:
            raise SystemExit("if you use --fill you can't "
                             "use a path (and vice versa)")
        self.fill = fill
    if items:
        report('instantiated {0} with size {1} item-list'.format(
            self, len(items)))
        self.__iter__ = lambda himself: ([i.id, i] for i in items)
    self.record_invocation()
def sgEmail(self):
    log.sgLog('Sending test results\n')
    testDate = report().testDate()
    filename = report().fileName()
    msg = MIMEMultipart()
    html = open(filename, 'rb').read()
    part1 = MIMEText(html, 'html', 'utf-8')
    msg.attach(part1)
    msg['to'] = 'email address'
    msg['from'] = 'email address'
    msg['subject'] = 'Front-end automated test report ' + testDate
    try:
        server = smtplib.SMTP()
        server.connect('smtp.gmail.com:587')
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login('username', 'password')
        server.sendmail(msg['from'], msg['to'], msg.as_string())
        server.quit()
        print 'Send succeeded'
    except Exception, e:
        print str(e)
def report_worker(sid):
    try:
        job = get_job(sid)
        log.info("============= STARTING WORKER ==============")
        log.debug(job)
        from ast import literal_eval
        job['series'] = literal_eval(job['series'])  # From string
        # Expand paths to full location on filesystem
        output_filename = os.path.join(
            app.config['UPLOAD_FOLDER'],
            next(tempfile._get_candidate_names()) + '.pdf')
        # Make list of input datafiles
        input_datafiles = [
            os.path.join(app.config['UPLOAD_FOLDER'], f['temporary_name'])
            for f in get_files(sid)
        ]
        report.report(input_datafiles, output_filename,
                      **{**job, 'pdf': True, 'htm': False})
        log.info("============= WORKER FINISHED ==============")
        # Update finished job
        upd_job(sid, 'generated_pdf', output_filename)
        upd_job(sid, 'status', 'done')
    except Exception as e:
        log.error("Exception occurred in worker thread")
        log.error(sys.exc_info()[0])
        upd_job(sid, 'status', 'error')
        upd_job(sid, 'generated_pdf', None)
        raise e
def send(trans):
    """Sends an email to the list."""
    me = '*****@*****.**'
    you = '*****@*****.**'
    today = datetime.date.today()
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = 'Magic Cones Report {0}'.format(today)
    msg['From'] = me
    msg['To'] = you
    # create content
    text = TEXT.format(report=report(trans), rules=rules())
    html = HTML.format(report=report(trans, html=True), rules=rules(html=True))
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    msg.attach(part1)
    msg.attach(part2)
    # Send the message
    pw = passwd(me)
    s = smtplib.SMTP('smtp.gmail.com', 587)
    #s.ehlo()
    s.starttls()
    #s.ehlo()
    s.login(me, pw)
    s.sendmail(me, you, msg.as_string())
    s.quit()
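# Usage sketch (not from the original source; hedged). `trans` is assumed to be
# whatever transaction record report() and the TEXT/HTML templates expect:
#
#     send(trans)  # builds the multipart/alternative mail and sends it via Gmail SMTP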
def callback(self, item=None, **kargs):
    report(item._id)
    if item._id not in self.collisions['fname']:
        self.seek_fname_collision(item)
    if item._id not in self.collisions['md5']:
        self.seek_md5_collision(item)
def run(d, dname, full):
    partitionTestResult = TestResult()
    if full:
        partitionTestResult.set_total_points(4)
    else:
        partitionTestResult.set_total_points(1)
    partitionScore = 0
    print("Validating that {} has a separate partition...".format(d))
    try:
        # Input:
        # >>> mount | grep `d`
        # Expected output:
        # >>> tmpfs on `d` type tmpfs (rw,nosuid,nodev,noexec,relatime)
        fsTest1 = subprocess.Popen(('mount'), stdout=subprocess.PIPE)
        try:
            fsTest1Output = subprocess.check_output(('grep', d), stdin=fsTest1.stdout)
            partitionScore += 1
            print("......Passed!")
            if full:
                partitionScore += output_verification(fsTest1Output, d, dname)
            print(partitionScore)
        except subprocess.CalledProcessError as e:
            report.report("(X)...{} does not exist in a separate partition.".format(d))
            mit(d, dname)
    except OSError:
        # fixed: message had no placeholder and `fs` was undefined here
        report.report("(!)...Tools do not support the use of the mount command.")
    partitionTestResult.set_points(partitionScore)
    return partitionTestResult
def delete_file(self, key=None, item=None):
    report('deleting file')
    if key and item:
        self.record['errors'] += 1
        self.record['error'] = 'cannot pass both key and item to delete_file.'
        return
    if not (key or item):
        self.record['errors'] += 1
        self.record['error'] = 'need either key or item'
        return
    if not key:
        if item is None:
            self.record['errors'] += 1
            self.record['error'] = 'item is none'
            return
        key = item and item.id
        if not key:
            self.record['errors'] += 1
            self.record['error'] = 'item is none'
            return  # bail out instead of operating on an empty key
    report('deleting file', key)
    if not ope(key):
        self.record['errors'] += 1
        self.record['error'] = 'file does not exist.'
        return  # bail out instead of calling os.remove on a missing path
    os.remove(key)  # TODO: use unipath
    self.record['files_deleted'] += 1
    self.delete_record(key)
def update_db(db, _id, dct, schema=None):
    """ stupid.. have to delete and restore instead of update? """
    if not schema:
        report('SCHEMA NOT PROVIDED!!!!!!!')
        report('updating db', [db, _id, dct])
        doc = db[_id]
        report('before', doc.items())
        for x in dct:
            doc[x] = dct[x]
        # TODO: use db.update(doc) ?
        #db[doc.id] = doc
        report('after', doc)
        report('updated "{id}" with new values for keys'.format(id=_id),
               dct.keys())
    else:
        doc = schema.load(db, _id)
        for x in dct:
            val = dct[x]
            fieldtype = getattr(schema, x).__class__
            if fieldtype == ListField:
                val = demjson.decode(val)
            elif fieldtype == StringField:
                pass
            else:
                raise Exception('NIY: ' + str(fieldtype))
            setattr(doc, x, val)
        doc.store(db)
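# Usage sketch (not from the original source; hedged). Assumes a couchdb-style
# mapping `db`; the document id and values are illustrative only:
#
#     db = get_or_create('ixle')
#     update_db(db, 'some-doc-id', {'fname': 'renamed.txt'})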
def mapData(self):
    # working with: http://sigma.nkp.cz/F/?func=file&file_name=find-b&local_base=ADR
    report("INFO: SiglaMapper.mapData")
    siglaURI = False
    sigla = self.doc.getXPath("//varfield[@id='040']/subfield[@label='a']")
    if not sigla == []:
        sigla = sigla[0]
        # Check whether the sigla is already cached.
        if sigla in cache.siglaCache.keys():
            siglaURI = cache.siglaCache[sigla]
        else:
            doc = self.searchAlephBase("http://sigma.nkp.cz", "ADR", "SIG", sigla)
            if doc:
                xpath = "present/record/doc_number"
                docNum = doc.getXPath(xpath)
                if not docNum == []:
                    docNum = docNum[0].lstrip("0")
                    siglaURI = "http://sigma.nkp.cz/X?op=doc-num&base=ADR&doc-num=" + docNum
                    cache.siglaCache[sigla] = siglaURI
    if siglaURI:
        return [(self.representationURI,
                 rdflibWrapper.namespaces["dc"]["creator"],
                 rdflib.URIRef(siglaURI))]
    else:
        return False
def delete_all(self, really=False):
    if not really:
        report('seriously? well ok, but pass `really=True`')
    else:
        for x in self:
            report('deleting: ' + str(x))
            del self[x]
def partition(d, dname):
    partitionTestResult = TestResult()
    partitionTestResult.set_total_points(1)
    partitionScore = 0
    print("Validating that {} has a separate partition...".format(d))
    try:
        fsTest1 = subprocess.Popen(('mount'), stdout=subprocess.PIPE)
        try:
            fsTest1Output = subprocess.check_output(('grep', d), stdin=fsTest1.stdout)
            partitionScore += 1
            print("......Passed!")
        except subprocess.CalledProcessError as e:
            report.report(
                "(X)...{} does not exist in a separate partition.".format(d))
            report.mitigation(
                "   Mitigation: run systemctl unmask {}.mount".format(dname))
            report.mitigation(
                "               systemctl enable {}.mount".format(dname))
            print("......Failed!")
    except OSError:
        # fixed: message had no placeholder and `fs` was undefined here
        report.report(
            "(!)...Tools do not support the use of the mount command.")
    partitionTestResult.set_points(partitionScore)
    return partitionTestResult
def report_test():
    os.makedirs(TEST_PATH, exist_ok=True)
    print_memory('pre')
    for i, line in enumerate(DATAFILES):
        objgraph.show_growth(limit=3)
        input_files, location = line[0], line[1]
        report_name = TEST_PATH + '/test_' + str(i) + '.pdf'
        log.info(input_files)
        log.info(location)
        log.info(report_name)
        try:
            report.report(input_files, report_name, location=location)
            log.info("Report saved to {}".format(report_name))
        except Exception as e:
            log.exception("Exception occurred when reporting (test failed):")
        print_memory(i)
    plot_object_counts()
def main():
    # Generate instances
    # instances = instance_helper.generate_instances(
    #     config.n, config.m, config.q, config.num_instances, loc=config.loc)
    # instance_helper.convert_instances_pickle_to_mat(
    #     loc=config.loc, num_instances=config.num_instances)

    # Optimization: load instances
    instances = instance_helper.load_instances(
        loc=config.loc, num_instances=config.num_instances)
    # calculate honest scores, if the honest file does not exist
    honestScore(instances)
    print(f'loaded {len(instances)} instances')
    alg_greedy.run_MMM(instances, loc=config.loc)  # MMM
    alg_cyclic.loadCGS(instances)  # load CGS
    alg_cyclicgreedy.run_MMM_CGS(
        instances, loc=config.loc,
        cycloc=f'{config.loc}/CGS_assignments')  # MMM-CGS
    # ILP usually takes a long time for large-scale optimization
    # opt_ilp.run_ilp(instances, loc=config.loc)
    # report with ILP:
    # report.report(loc=config.loc, algs=['random', 'MMM', 'CGS', 'MMMCGS', 'ilp'])
    # report without ILP
    report.report(loc=config.loc, algs=['random', 'MMM', 'CGS', 'MMMCGS'])
def main():
    while True:
        print_menu()
        choice = int(input("Enter your choice [1-6]: "))
        if choice == 1:
            clear()
            add()
        elif choice == 2:
            clear()
            list_all()
        elif choice == 3:
            clear()
            report()
        elif choice == 4:
            clear()
            update()
        elif choice == 5:
            clear()
            delete()
        elif choice == 6:
            print("Exit")
            conn.close()
            exit(0)  # exit with success status on a normal quit
            break
        else:
            clear()
            print("Wrong option selected.")
def record_collision(self, reason, item_ids, item=None):
    self.collisions[reason] += item_ids
    item_ids = sorted(item_ids)
    event = Event(reason=reason, item_ids=item_ids,
                  details=dict(md5=item.md5))
    event.store(self.events_db)
    report(' - by {0}: found {1} events'.format(reason, len(item_ids)))
def getParsedDoc(self, url):
    """Return a parsed XML document for the given URL or urllib2.Request."""
    report("INFO: Mapper.getParsedDoc on URL %s" % (url))
    result = urllib2.urlopen(url)
    doc = result.read()
    result.close()
    doc = Record(libxml2.parseDoc(doc))
    return doc
def validateURI(self, uri):
    """Check whether the given URI is reachable."""
    report("INFO: validating URI %s" % (uri))
    try:
        # Validation on marccodes.heroku.com doesn't work without the Accept HTTP header
        urllib2.urlopen(urllib2.Request(uri, None,
                                        {"Accept": "application/rdf+xml"}))
        return True
    except urllib2.HTTPError:
        return False
def show(project):
    build(project)
    proot = opj(src_root, project)
    report("serving " + proot)

    def f():
        webbrowser.open(URL)
    threading.Thread(target=f).start()
    local("cd {0} && poole --serve".format(proot))
def run_and_collect(self, cmd):
    """ for gathering the output from file(1) and md5(1) etc """
    cmd = cmd.replace('`', '\`')
    try:
        return os.popen(cmd).read().strip()
    except IOError, e:
        report("IOError: " + str(e))
        return None
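# Usage sketch (not from the original source; hedged). A typical call gathering
# file(1) output for an item; the attribute names are illustrative:
#
#     ftype = self.run_and_collect('file "{0}"'.format(item.abspath))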
def get_or_create(DB_NAME):
    from ixle.settings import Settings
    server = Settings().server
    if DB_NAME not in server:
        report("creating database {0} on {1} ".format(DB_NAME, server))
        server.create(DB_NAME)
    db = server[DB_NAME]
    return db
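# Usage sketch (not from the original source; hedged): open (or create) a
# database on the configured server; the database name is illustrative:
#
#     events_db = get_or_create('events')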
def call_agent_on_dir(agent_nick, dirname):
    kls = get_agent_by_name(agent_nick)
    agent_obj = kls(path=dirname, settings=conf())
    result = agent_obj()
    if result is None:
        report('WARNING: got None-result from agent, '
               'should have been self.record.')
        result = {}
    return agent_obj, result
def reportProjectData(confFile, globalConfFile):
    # java -Xmx2024m -cp "C:\Program Files\Weka-3-7\weka.jar" weka.core.Instances 13_Appended.arff
    vers, gitPath, bugsPath, workingDir = utilsConf.configure(confFile)
    versPath, dbadd = Mkdirs(workingDir, vers)
    LocalGitPath = os.path.join(workingDir, "repo")
    reportCsv = os.path.join(workingDir, "report.csv")
    lastVer = os.path.join(dbadd, vers[-1] + ".db")
    testsDB = os.path.join(workingDir, "testsBugsMethods.db")
    report.report(reportCsv, LocalGitPath, lastVer, testsDB)
def collapse(self):
    assert self.unipath.isdir()
    parent = self.unipath.parent
    for path in self.unipath.listdir():
        subitem = Item.objects.get(path=path)
        fname = subitem.fname
        new_path = opj(parent, fname)
        report("moving: ", path, new_path)
        subitem._move(new_path)
def seek_md5_collision(self, item):
    if not item.md5:
        report(' - md5 not set, calling subagent')
        self.md5er.callback(item)
    reason = 'md5'
    results = self.find_matches(item, 'md5')
    if not len(results):
        return
    item_ids = [row.value['_id'] for row in results] + [item._id]
    self.record_collision(reason, item_ids, item)
def item2text(item):
    report('reading file')
    if ope(item.abspath):
        if item.fext in 'txt'.split():
            # NB: the handle must not be returned from inside a `with` block,
            # or it would already be closed for the caller
            return open(item.abspath, 'r')
        elif item.fext == 'pdf':
            import StringIO
            body = StringIO.StringIO(getPDFContent(item.abspath))
            return body
def insertSKOSRelations(self, terms, predicate):
    if not terms == []:
        for term in terms:
            try:
                termTranslated = self.pshTranslateDict[term]
                self.results.append(
                    (self.resourceURI,
                     rdflibWrapper.namespaces["skos"][predicate],
                     rdflib.URIRef(termTranslated)))
            except KeyError:
                report("[ERROR] term %s doesn't have a translation." % (term))
def finish_displayhook(self):
    """Finish up all displayhook activities."""
    try:
        super(SmashDisplayHook, self).finish_displayhook()
    except AttributeError, e:
        # occasionally throws
        #   IOStream instance has no attribute 'flush'
        # I think this is a race condition on embedded shells
        report(str(e))
def main(self):
    abspath = self['_']
    if abspath and ope(abspath):
        return '<font style="color:green;">exists ✓</font>'
    msg = '(this file is not available. is the drive mounted?)'
    report(msg.replace('this file', '"{0}"'.format(abspath)))
    return ''.join([
        '<small><font style="color:red;margin-left:15px;">',
        '<img src=/static/img/icons/red-x.png width=15px>' + msg,
        '</font></small>'])
def record_invocation(self):
    from ixle.schema import Event
    report("writing event for my birthday")
    e = Event(
        #reason="birthday::" + self.__class__.__name__
        reason="birthday",
        details=dict(
            agent=self.__class__.__name__,
            path=self.path))
    e.save()
def __call__(self):
    report('moving record', self)
    old_doc = Item.load(self.db, self.old_key)
    new_doc = Item.load(self.db, self.new_key) or Item()
    for attr_name in old_doc._fields:
        setattr(new_doc, attr_name, getattr(old_doc, attr_name))
    new_doc._id = self.new_key
    new_doc.fname = new_doc.unipath.name
    report('saving record', new_doc)
    new_doc.store(self.db)
    return new_doc
def mapExtractedValues(self, predicate, valueDict, extractedValues):
    report("INFO: Fixfield008Mapper.mapExtractedValues")
    for extractedValue in extractedValues:
        try:
            self.results.append((
                self.representationURI,
                predicate,
                valueDict[extractedValue]))
        except KeyError:
            report("[ERROR] Fixfield008Mapper.mapExtractedValues "
                   "KeyError for key %s" % (extractedValue))
def is_blacklisted(self, key):
    from ixle.schema import DSetting
    blacklist = getattr(self, '_blacklist_cache', None)
    if blacklist is None:
        report('no cache found.. recomputing blacklist setting')
        # ?TODO: move this type of thing to DSetting.initialize()
        blacklist_setting, created = DSetting.objects.get_or_create(
            name='file_name_blacklist',
            defaults=dict(value='[]'))
        self._blacklist_cache = blacklist_setting.decode()
        return self.is_blacklisted(key)
    return ops(key)[-1] in blacklist
def mount_dir(self):
    from ixle import settings
    try:
        mounts_dir = settings.Settings()['ixle']['mount_dir']
    except KeyError:
        raise Exception('settings[ixle][mount_dir] is not defined')
    mountpoint = opj(expanduser(mounts_dir), self.nickname)
    mountpoint = unipath.FSPath(mountpoint)
    if not mountpoint.exists():
        mountpoint.mkdir(parents=True)
        report("mountpoint did not exist. created it: " + mountpoint)
    return mountpoint
def query(self):
    report.console.draw_line()
    if self._query_override() is not None:
        q = self._query_override()
    elif self.path:
        q = self._query_from_path()
    elif self.fill:
        q = self._query_from_fill()
    else:
        q = Item.objects.all()
        report("chose query: (everything)")
    return q
def delete_dir(self, path):
    ctx = self.get_ctx()
    _from = ctx['_from']
    assert ctx['num_fs_files'] == 0, 'can only remove empty dirs currently'
    if _from in 'both db'.split():
        for item in ctx['items']:
            report("removing from db: ", item)
            item.delete()
    if _from in 'both fs'.split():
        import os
        report('removing (empty) dir', self['_'])
        os.rmdir(self['_'])
def main():
    parser = argparse.ArgumentParser(description='Machine Learning')
    parser.add_argument('-s', '--seed', type=int, default=2020, metavar='S',
                        help='random seed (default: 2020)')
    parser.add_argument('--test', action='store_true', default=False,
                        help='training or testing mode')
    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%H:%M:%S.')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    y_true = dict()
    y_pred = dict()
    names = ["DT_Entropy", "DT_Gini", "RF_Entropy", "RF_Gini"]
    classifiers = [
        DecisionTreeClassifier(random_state=args.seed, criterion='entropy'),
        DecisionTreeClassifier(random_state=args.seed, criterion='gini'),
        RandomForestClassifier(random_state=args.seed, n_estimators=10,
                               n_jobs=-1, criterion='entropy'),
        RandomForestClassifier(random_state=args.seed, n_estimators=10,
                               n_jobs=-1, criterion='gini'),
    ]
    hyperparam = [
        {'splitter': ['best', 'random'], 'max_features': ['sqrt', 'log2', None]},
        {'splitter': ['best', 'random'], 'max_features': ['sqrt', 'log2', None]},
        {},
        {},
    ]
    colors = ['blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray']

    X_train, y_train, X_test, y_test = prepare_data(args.seed)
    logger.info(str(X_train.shape) + ' ' + str(X_test.shape))

    for name, est, hyper in zip(names, classifiers, hyperparam):
        logger.info(name + '...')
        if not args.test:
            clf = GridSearchCV(est, hyper, cv=5, n_jobs=-1)
            clf.fit(X_train, y_train)
            y_true[name], y_pred[name] = y_test, clf.predict(X_test)
            logger.info('____Accuracy: %0.4f'
                        % metrics.accuracy_score(y_true[name], y_pred[name]))
            print(clf.best_estimator_)
            pickle.dump(clf, open('model/kdd99_' + str(name) + '.sav', 'wb'))
        else:
            clf = pickle.load(open('model/kdd99_' + str(name) + '.sav', 'rb'))
            y_true[name], y_pred[name] = y_test, clf.predict(X_test)

    report('kdd99', names, y_true, y_pred)
    draw_roc('kdd99', names, colors, y_true, y_pred)
def search(filename, initial_i, initial_j, goal_i, goal_j):
    '''
    <filename> is the file that contains the environment; the remaining
    arguments are the initial and goal coordinates.

    Returns a list of tuples, each a pair of integers. The first tuple is
    the (i, j) coordinate of the start state and the last tuple is the goal
    coordinate. The tuples in between are the coordinates, in order, of an
    optimal path from the start state to the goal.
    '''
    # If the path was not already computed, compute it
    retList = remember.getPath((initial_i, initial_j), (goal_i, goal_j))
    if not retList:
        # Define the start and goal nodes
        startState = world.State(initial_i, initial_j)
        goalState = world.State(goal_i, goal_j)
        startNode = astar.Node(startState, config.heuristicWeight)
        goalNode = astar.Node(goalState, config.heuristicWeight)
        # NB: "heuristicFucntion" matches the (misspelled) attribute name used by astar
        startNode.heuristicFucntion = goalNode.heuristicFucntion = \
            config.heuristicFunction(goalNode)

        # Get environment from file
        environment = readMaze(filename)

        # Perform A* search
        foundGoal = astar.aStar(startNode, goalNode, environment,
                                config.heuristicWeight)

        # Construct the path by walking back from the goal
        retList = []
        while foundGoal:
            fromAction = foundGoal.fromAction
            i = foundGoal.state.i
            j = foundGoal.state.j
            retList.append((i, j))
            if fromAction:
                foundGoal = fromAction.fromNode
            else:
                foundGoal = None
        retList.reverse()

        # Save the path
        report.report(filename, retList)
        remember.sendPath(retList)
    return retList
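# Usage sketch (not from the original source; hedged). The maze file and
# coordinates are illustrative:
#
#     path = search('maze.txt', 0, 0, 5, 7)
#     assert path[0] == (0, 0) and path[-1] == (5, 7)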
def main(): logger.info(f"SCRAPER,JOB,INITIATED,{DATE},") init() tickers = _connector.get_equity_tickers(N_USD) checkpoint = len(tickers) / BATCH_SIZE checkpoint = int(checkpoint / 4) faults_summary = { "options" : {}, "analysis" : {}, "keystats" : {}, "ohlc" : {} } db_flags, db_stats = [], [] ############################################################################################### for batch_id, batch in enumerate(range(BATCH_SIZE, len(tickers) + BATCH_SIZE, BATCH_SIZE)): ticker_batch = tickers[batch - BATCH_SIZE : batch] results = batch_main(batch_id, ticker_batch) b_fault_summary, b_db_flag, b_db_stats = results for key in b_fault_summary: for ticker in b_fault_summary[key]: faults_summary[key][ticker] = b_fault_summary[key][ticker] db_flags.append(b_db_flag) db_stats.append(b_db_stats) success, failure = get_job_success_rates(tickers[ : BATCH_SIZE * (1 + batch_id)]) send_metrics(success, failure) # if batch_id % checkpoint == 0 and batch_id != 0: # report("Partial", success, failure, faults_summary, db_flags, db_stats) ############################################################################################### success, failure = get_job_success_rates(tickers) report("Full", success, failure, faults_summary, db_flags, db_stats) store() logger.info(f"SCRAPER,JOB,TERMINATED,{DATE},")
def main():
    aReport = report.report()
    aReport.execute(reportName="0VerView", table="0VerView",
                    excelFile="./0VerView.xlsx")
    aReport.execute(reportName="2019v1Member", table="2019v1Member",
                    excelFile="./2019v1Member.xlsx")
    aReport.execute(reportName="2019v1MemberClass", table="2019v1MemberClass",
                    excelFile="./2019v1MemberClass.xlsx")
    aReport.execute(reportName="2019v1MemberClassCount",
                    table="2019v1MemberClassCount",
                    excelFile="./2019v1MemberClassCount.xlsx")
    aReport.execute(reportName="2019v1RegisteredWaitingPayment",
                    table="2019v1RegisteredWaitingPayment",
                    excelFile="./2019v1RegisteredWaitingPayment.xlsx")
    aReport.execute(reportName="2019v1UserClassSelection",
                    table="2019v1UserClassSelection",
                    excelFile="./2019v1UserClassSelection.xlsx")
    aReport.execute(reportName="2019v1UserClassCount",
                    table="2019v1UserClassCount",
                    excelFile="./2019v1UserClassCount.xlsx")
    aReport.execute(reportName="2019v1AllClassSelection",
                    table="2019v1AllClassSelection",
                    excelFile="./2019v1AllClassSelection.xlsx")
    aReport.execute(reportName="v1Donations", table="v1Donations",
                    excelFile="./v1Donations.xlsx")
def run():
    logger = logging.getLogger(__name__)
    messages = []
    alarm = 0
    terminating = 0
    for x in range(0, 8):
        running_time = datetime.datetime.now(
            tz=pytz.timezone('Australia/Sydney')).strftime('%Y-%m-%d %H:%M')
        now = time.time() - 36000
        record = worker_config.get_record(str(x))
        if record != 0:  # fixed: `is not 0` compared identity, not value
            last_update_time_diff = int(now) - record.lastUpdateTime
            last_index = record.lastIndex
            r_object = report(x, last_update_time_diff, last_index,
                              record.isActive, True)
            if record.isActive:
                if last_update_time_diff > 125:
                    r_object.is_healthy = False
                    alarm = 1
                if last_update_time_diff > 300:
                    terminating = 1
                    r_object.terminating = True
                    logger.info("Terminating work:{}".format(x))
                    port = cluster.delete_tasks(str(x))
                    r_object.port = port
            logging.info("{}\t{}\t{}\t{}\t{}\t{}".format(
                x, last_update_time_diff, last_index, r_object.is_active,
                r_object.is_healthy, r_object.terminating))
            messages.append(r_object)
    # if(alarm==1):
    email_handler.process_item(messages, running_time, alarm, terminating)
def param_init(log_filename):
    global bio_ner_train_data, bio_ner_test_data, bio_ner_model, bio_ner_report
    bio_ner_train_data = data(
        RAW_DATA_PATH, PROCESSED_DATA_PATH, WORD_VEC_FILE, CHAR_VEC_FILE,
        POS_VEC_FILE, ADD_FEAT_VEC_FILE, CLASS_VEC_FILE,
        ADD_FEAT_FILE_SUBSTRING, CHAR_ONE_HOT_FILE_SUBSTRING,
        WORD_CONTEXT_LENGTH, NB_UNIQUE_CHARS, CHAR_VECTOR_SIZE,
        WORD_VECTOR_SIZE, NB_CLASSES, MAX_WORD_LENGTH, ADD_FEAT_VEC_SIZE,
        LIMITED_ADD_FEAT_VEC_SIZE, POS_TAG_VECTOR_SIZE)
    bio_ner_test_data = data(
        RAW_DATA_PATH, PROCESSED_DATA_PATH, TEST_word_vec_file_substring,
        CHAR_VEC_FILE, POS_VEC_FILE, ADD_FEAT_VEC_FILE,
        TEST_entity_vec_file_substring, TEST_add_feat_vec_file_substring,
        TEST_char_vec_file_substring, WORD_CONTEXT_LENGTH, NB_UNIQUE_CHARS,
        CHAR_VECTOR_SIZE, WORD_VECTOR_SIZE, NB_CLASSES, MAX_WORD_LENGTH,
        ADD_FEAT_VEC_SIZE, LIMITED_ADD_FEAT_VEC_SIZE, POS_TAG_VECTOR_SIZE)
    bio_ner_model = model(
        WORD_VECTOR_SIZE, WORD_CONTEXT_LENGTH, NB_UNIQUE_CHARS,
        CHAR_VECTOR_SIZE, MAX_WORD_LENGTH, EMBEDDING_OP_DIM, MAX_FEATURES,
        NB_CLASSES, CHAR_FEATURE_OUTPUT, HIDDEN_SIZE, ADD_FEAT_VEC_SIZE,
        POS_TAG_VECTOR_SIZE)
    bio_ner_report = report(REPORT_PATH, log_filename)
def testeAssembly(jar, testDir, nasmDir, hackDir, gui, verbose):
    pwd = os.path.dirname(os.path.abspath(__file__))  # global path
    # fixed: the abspath results were discarded and `nasm`/`hack` were undefined
    nasm = os.path.abspath(nasmDir)
    hack = os.path.abspath(hackDir)
    print("==== Assembling Files ==========================")
    clearbin(hack)
    errAssembler, logAssembler = assembler(jar, nasm, hack, True)
    if errAssembler == 0:
        print("==== Simulating ================================")
        clearTestDir(testDir)
        if simulateFromTestDir(testDir, hackDir, gui, verbose) < 0:
            sys.exit(1)
        # test assembled files
        print("==== Testing ===================================")
        error, log = compareFromTestDir(testDir)
        if error < -1:
            sys.exit(-1)  # report error
        print("==== Reporting results =========================")
        r = report(log, 'H')
        error = r.assemblyTeste(log)
        r.send()
def run(self):
    if not len(self.queue):
        return 1
    filenames = [q.filename for q in self.queue]
    results = report(filenames)
    for item in self.queue:
        logging.info('\n Optimizing: ' + str(item))
        item.createBackup()
        try:
            for tool in self.getOptimizer(item).tools:
                logging.info('  Running ' + tool.__name__)
                tool(item).execute()
        except Exception:
            self.failure = 1
            logging.error('  Error while compressing \'' + str(item) + '\'')
        else:
            item.removeBackup()
        results.reportItem(item)
    results.reportTotals(filenames)
    return self.failure
def Reports2020():
    #aReport.execute(table="2020Membership")
    #aReport.execute(table="2020MembershipFromChina")

    # datetime object containing current date and time
    now = datetime.now()
    print("now =", now)
    dt_string = now.strftime("%m%d%H%M")
    print("date and time =", dt_string)

    aReport = report.report()
    #if not os.path.isdir("reports"):
    #    os.mkdir("reports")
    excel_filename = "2020全体会员." + dt_string + ".xlsx"
    aReport.execute(table="2020AllMembers", excelFile=excel_filename)
    aReport.execute(table="vwUserInfo", excelFile="./UserInfo.xlsx")
    #aReport.execute(table="2020CMemberClass")
    tables = [
        "2020CMemberClass", "2020CM春季-楷书班-九成宫", "2020CM春季-小楷班-灵飞经",
        "2020CM春季-行书班-赵孟頫", "2020CM春季-隶书班", "2020CM春季-隶书班-曹全碑",
        "2020CM春季-行草班-米芾", "2020CM春季-楷书班-赵孟頫", "2020CM春季-硬笔班",
        "2020CM春季-小楷班", "2020CM春季-篆刻班", "2020CM春季-篆书班-石鼓文",
        "2020CM春季-楷书班-多宝塔"
    ]
    excel_filename = "2020春季各班选课." + dt_string + ".xlsx"
    aReport.executeMulti(excelFile=excel_filename, tables=tables)
def Reports2020Fall():
    #aReport.execute(table="2020Membership")
    #aReport.execute(table="2020MembershipFromChina")

    # datetime object containing current date and time
    now = datetime.now()
    print("now =", now)
    dt_string = now.strftime("%m%d%H%M")
    print("date and time =", dt_string)

    aReport = report.report()
    #if not os.path.isdir("reports"):
    #    os.mkdir("reports")
    excel_filename = "2020全体会员." + dt_string + ".xlsx"
    aReport.execute(table="2020AllMembers", excelFile=excel_filename)
    aReport.execute(table="vwUserInfo", excelFile="./UserInfo.xlsx")
    #aReport.execute(table="2020FMemberClass",
    #                excelFile="./2020FMemberClass." + dt_string + ".xlsx")
    tables = [
        "2020FMemberClass", "2020FM秋季-楷书班", "2020FM秋季-隶书班",
        "2020FM秋季-楷书班-赵孟頫", "2020FM秋季-集王圣教序", "2020FM秋季-兰亭集序",
        "2020FM秋季-行草班-文徵明", "2020FM秋季-行草班-文徵明(旁听)",
        "2020FM秋季-小楷班", "2020FM秋季-篆书班", "2020FM秋季-篆刻班",
        "2020FM秋季-硬笔班", "2020FM秋季-软硬班-跋保母", "2020FM秋季-章草兴趣班"
    ]
    excel_filename = "2020秋季各班选课." + dt_string + ".xlsx"
    aReport.executeMulti(excelFile=excel_filename, tables=tables)
def run():
    logger = logging.getLogger(__name__)
    messages = []
    alarm = 0
    terminating = 0
    print("WorkerID\tLastRun\tPMIDs\tActive\tHealthy\tTerminating")
    for x in range(0, 8):
        running_time = datetime.datetime.now(
            tz=pytz.timezone('Australia/Sydney')).strftime('%Y-%m-%d %H:%M')
        now = time.time() - 36000
        record = worker_config.get_record(str(x))
        if record != 0:  # fixed: `is not 0` compared identity, not value
            last_update_time_diff = int(now) - record.lastUpdateTime
            last_index = record.lastIndex
            r_object = report(x, last_update_time_diff, last_index,
                              record.isActive, True)
            if record.isActive:
                if last_update_time_diff > 125:
                    r_object.is_healthy = False
                    alarm = 1
                if last_update_time_diff > 300:
                    terminating = 1
                    r_object.terminating = True
                    logger.info("Terminating work:{}".format(x))
                    # port = cluster.delete_tasks(str(x))
                    port = ""
                    r_object.port = port
            print("{}\t{}\t{}\t{}\t{}\t{}".format(
                x, last_update_time_diff, last_index, r_object.is_active,
                r_object.is_healthy, r_object.terminating))
def Reports2021Spring():
    #aReport.execute(table="2020Membership")
    #aReport.execute(table="2020MembershipFromChina")

    # datetime object containing current date and time
    now = datetime.now()
    print("now =", now)
    dt_string = now.strftime("%m%d%H%M")
    print("date and time =", dt_string)

    aReport = report.report()
    #if not os.path.isdir("reports"):
    #    os.mkdir("reports")
    excel_filename = "2021全体会员." + dt_string + ".xlsx"
    aReport.execute(table="2021AllMembers", excelFile=excel_filename)
    aReport.execute(table="vwUserInfo", excelFile="./UserInfo.xlsx")
    #return
    tables = [
        "2021CMemberClass", "2021CM春季-楷书班-九成宫", "2021CM春季-隶书班",
        "2021CM春季-楷书班-赵孟頫", "2021CM春季-行草班-二王尺牍",
        "2021CM春季-行草班-二王尺牍(旁听)", "2021CM春季-小楷班",
        "2021CM春季-诗词班", "2021CM春季-篆刻班", "2021CM春季-硬笔班",
        "2021CM春季-软硬班-跋保母", "2021CM春季-兰亭互助班",
        "2021CM春季-多宝塔互助班", "2021CM春季-书法与艺术研讨班"
    ]
    excel_filename = "2021春季各班选课." + dt_string + ".xlsx"
    aReport.executeMulti(excelFile=excel_filename, tables=tables)
def controlHeating():
    thermostat1 = Thermostat()
    if report.report() > 0:
        # someone at home -> set manual temperature
        thermostat1.set_mode(auto_mode=0)  # manual mode
        manualTemp = thermostat1.getManualTemp()
        thermostat1.temperature(manualTemp)
    else:
        # nobody at home -> auto mode (was predicted and programmed in the morning)
        thermostat1.set_mode(auto_mode=1)  # auto mode on
def main():
    parsed = parser()
    args = parsed.parse_args()
    user = args.user
    if args.ip:
        ipLst = getIP()
    else:
        try:
            discover = mdnsDiscover()
            ipLst = discover.getIPAddr()
        except Exception:
            choice = input('No BeagleBoards were found. \n'
                           'Would you like to enter an IP address manually? (yes or no) ')
            choice = strtobool(choice)
            if choice:
                ipLst = getIP()
            else:
                quit()
    reports = []
    for i in range(len(ipLst)):
        ip = ipLst[i]
        com = SSHComs(ip, user)
        re = report(ipLst[i])
        reports.append(re)
        print('Scanning device', (i + 1), 'of', len(ipLst), '\n')
        if com.defPass:
            reports[i].isPassDef()
        if com.passEnabled:
            reports[i].isPassEnabled()
        software = com.In('dpkg --list')
        services = [
            'nginx', 'cloud9.socket', 'nodered.socket', 'bonescript.socket',
            'bonescript-autorun', 'hostapd', 'bb-bbai-tether'
        ]
        enabledServices = []
        for serv in services:
            service = com.In(f'systemctl is-enabled {serv}')
            if service.find('enabled') > -1:
                enabledServices.append(serv)
        reports[i].disServices(enabledServices)
        netstatData = com.In('netstat -lntu')
        listening = netstatParse(netstatData)
        if len(listening) > 0:
            reports[i].listeningPorts(listening)
        if args.cve:
            vulnSoft = checkForVulnSoft(software)
            reports[i].vulnSoft(vulnSoft)
        com.endSession()
    writeReport(reports)
def main():
    args = parser.parse_args()
    bounds, ghosts, pacman, goal = mapPositions(args.layout)
    print('Barriers:', bounds)
    print('Ghosts:', ghosts)
    print('Pacman:', pacman)
    print('Goal:', goal)
    print()

    # Problem and algorithms
    problem = PacmanProblem(obstacles=bounds | ghosts, initial=pacman, goal=goal)
    gfsProblem = greedy_best_first_search(problem)
    astarProblem = astar_search(problem)
    bfsProblem = breadth_first_graph_search(problem)
    dfsProblem = depth_first_graph_search(problem)

    print('Greedy Best First Search:')
    print('Path:', gfsProblem.path())
    print('Goal:', gfsProblem)
    print('A* Search:')
    print('Path:', astarProblem.path())
    print('Goal:', astarProblem)
    print('Breadth-First Search:')
    print('Path:', bfsProblem.path())
    print('Goal:', bfsProblem)  # fixed: printed dfsProblem for the BFS result
    print('Depth-First Search:')
    print('Path:', dfsProblem.path())
    print('Goal:', dfsProblem)
    print()

    print('Generating outputs...')
    generateOutput(gfsProblem.path(), args.layout, 'gfs')
    generateOutput(astarProblem.path(), args.layout, 'astar')
    generateOutput(bfsProblem.path(), args.layout, 'bfs')  # fixed: passed dfsProblem for the BFS output
    generateOutput(dfsProblem.path(), args.layout, 'dfs')
    print()

    print('Performance:')
    report([
        greedy_best_first_search, astar_search,
        breadth_first_graph_search, depth_first_graph_search
    ], [problem])
def synthesis(prepare_res, analysis_res, params):
    from math import sqrt
    separator, filename, orig_filename, labels, dw = prepare_res
    labels = [n for n in labels if n not in options.discard]
    if filename != orig_filename:
        os.unlink(filename)
    # aggregate typing and statistics
    res = {}
    res['num_broken_lines'] = 0
    res['num_lines'] = 0
    res['lines_per_slice'] = []
    for sliceno, tmp in enumerate(analysis_res):
        res['num_broken_lines'] += tmp['num_broken_lines']
        res['num_lines'] += tmp['num_lines']
        res['lines_per_slice'].append(tmp['num_lines'])
        dw.set_lines(sliceno, tmp['num_lines'])
    blob.save(res, 'import')
    # write report
    r = report.report()
    if not res['num_lines']:
        r.println('No lines read - empty file!')
        r.close()
        return
    r.println('Number of rows read\n')
    r.println(' slice        lines')
    for sliceno, nlines in enumerate(res['lines_per_slice']):
        if res['num_lines']:
            r.println(' %2d %9d (%6.2f%%)' % (
                sliceno, nlines, 100 * nlines / res['num_lines']))
        else:
            # fixed: format string had two placeholders but three arguments,
            # one of which divided by zero
            r.println(' %2d %9d' % (sliceno, nlines))
    r.println(' total %9d' % (res['num_lines'],))
    stdev = sqrt(
        sum((x - res['num_lines'] / params.slices) ** 2
            for x in res['lines_per_slice']) / params.slices)
    r.println('\n hash stdev %9d (%6.2f%%)' % (
        stdev, round(100 * stdev / res['num_lines'])))
    r.line()
    r.println('Number of columns %9d' % (len(labels),))
    r.close()
    if res['num_broken_lines'] and not options.allow_bad:
        raise Exception('%d bad lines without options.allow_bad' % (
            res['num_broken_lines'],))
def format_report(self):
    r = report(self._start_date, self._end_date)
    try:
        r.stock_selection_analysis(self._stock_pool_info)
    except Exception as e:
        print(e)
    try:
        r.trade_analysis(self._trade_info, self._buy_info, self._sell_info)
    except Exception as e:
        print(e)
    try:
        r.fund_analysis(self._starting_fund, self._fund_record)
    except Exception as e:
        print(e)
def export(self):
    if self.sample is None:
        QMessageBox.warning(self, "Error", "No open sample")
        return
    try:
        dlg = pdfDialog()
        if dlg.exec_() == QDialog.Accepted:
            name, subs, layers = dlg.getValues()
            path = QFileDialog.getSaveFileName(self, "Save file", "", '(*.pdf)')[0]
            if path:
                report(path=path, name=name, samp=self.sample,
                       substrate=subs, layers=layers)
    except PermissionError as e:
        logger.exception("Can't export PDF : File is probably open")
        QMessageBox.critical(self, "Error",
                             "Can't export PDF : File is probably open")
    except Exception as e:
        logger.exception("Can't export PDF")
        QMessageBox.critical(self, "Error", "Can't export PDF")
def submit(self):
    text_buf = self.error_submit_text.get_buffer()
    content = text_buf.get_text(text_buf.get_start_iter(),
                                text_buf.get_end_iter())
    signature = report.createAlertSignature(
        "selinux-policy", "setroubleshoot", self.alert.get_hash(),
        self.summary, content, package=self.alert.get_policy_rpm())
    try:
        rc = report.report(signature,
                           report.io.GTKIO.GTKIO(self.parent.accounts))
    except ProtocolError, e:
        FailDialog(e)
def Reports2019():
    aReport = report.report()
    aReport.execute(reportName="2019v1Member", table="2019v1Member",
                    excelFile="./2019v1Member.xlsx")
    aReport.execute(reportName="2019v1RegisteredWaitingPayment",
                    table="2019v1RegisteredWaitingPayment",
                    excelFile="./2019v1RegisteredWaitingPayment.xlsx")
    #aReport.execute(reportName="2019v1UserClassSelection", table="2019v1UserClassSelection", excelFile="./2019v1UserClassSelection.xlsx")
    #aReport.execute(reportName="2019v1MemberClass", table="2019v1MemberClass", excelFile="./2019v1MemberClass.xlsx")
    #aReport.execute(reportName="2019v1MemberClassCount", table="2019v1MemberClassCount", excelFile="./2019v1MemberClassCount.xlsx")
    aReport.execute(reportName="2019v1AllClassSelection",
                    table="2019v1AllClassSelection",
                    excelFile="./2019v1AllClassSelection.xlsx")
def run(fs):
    mountingTestResult = TestResult()
    mountingTestResult.set_total_points(1)
    passedTest = True
    print("Validating that {} support is disabled...".format(fs))
    # In order to run the tests, a try/except block is set up to ensure the
    # needed commands are available on the system.
    try:
        # Input:
        # >>> modprobe -n -v `fs`
        # Expected output:
        # >>> install /bin/true
        fsTest1 = subprocess.check_output(('modprobe', '-n', '-v', fs))
        # check_output returns bytes on Python 3, so decode before the substring test
        if "install /bin/true" not in fsTest1.decode():
            report.report("(X)...Support for mounting {} is not disabled.".format(fs))
            passedTest = False
        # Input:
        # >>> lsmod | grep `fs`
        # Expected output:
        # <NONE>
        fsTest2 = subprocess.Popen(('lsmod'), stdout=subprocess.PIPE)
        # With grep piping, a try/except block is needed to guarantee that if
        # grep returns no results, the process will not fail.
        try:
            fsTest2Output = subprocess.check_output(('grep', fs), stdin=fsTest2.stdout)
            passedTest = False
            print("(X) ... A module exists in /proc/modules for {}.".format(fs))
        except subprocess.CalledProcessError as e:
            if str(e) != "Command '('grep', '{}')' returned non-zero exit status 1".format(fs):
                passedTest = False
    except OSError as e:
        # Catch if any of our commands fail
        report.error("(!)...Tools do not support running a scan for {}\n".format(fs))
        mountingTestResult.set_error(True)
        mountingTestResult.set_error_status(" {}".format(e))
        return mountingTestResult
    # If passedTest has been cleared by any of the checks, the test fails
    if passedTest:
        report.report("......Passed!")
        mountingTestResult.set_points(1)
    else:
        report.mitigation("   Mitigation: run install {} /bin/true".format(fs))
        report.report("......Failed!")
    # Send up the result
    return mountingTestResult
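# Usage sketch (not from the original source; hedged): check that cramfs
# mounting support is disabled and collect the scored result:
#
#     mountingTestResult = run('cramfs')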
def report(cmdline):
    survey = model.survey.Survey.load(cmdline['project'])
    import report
    if cmdline['format'] == 'short':
        small = 1
    else:
        small = 0
    if cmdline['all_filters']:
        return report.stats(survey, cmdline['filter'], cmdline['output'],
                            cmdline['papersize'], small, cmdline['suppress'])
    else:
        return report.report(survey, cmdline['filter'], cmdline['output'],
                             cmdline['papersize'], small, cmdline['suppress'])