def _decode(self, shelf):
    root = Registry("root")
    parser = Parser(root)
    parser.parseFile(shelf.name)
    shelf['inventory'] = root
    shelf._frozen = True
    return

def getFileDiff(self, filename, revisionOld, revisionNew):
    """
    @revisionOld: string
    @revisionNew: string
    """
    revisionNew = Revision("number", revisionNew).getObject()
    revisionOld = Revision("number", revisionOld).getObject()
    tmpPath = "."
    recurseFlag = False
    myDiff = self.client.diff(
        tmpPath,
        filename,
        revision1=revisionOld,
        url_or_path2=filename,
        revision2=revisionNew,
        recurse=recurseFlag,
        ignore_ancestry=False,
        diff_deleted=True,
        ignore_content_type=False,
        #header_encoding=self.getEncoding(),
    )
    parser = Parser(myDiff)
    diffEntities = parser.parseDiff()
    return diffEntities

def __init__(self, rule, parent):
    Parser.__init__(self, rule, parent)
    self.parsers = []   # parsers constructed for each of rule.items
    self.active = None  # main active parser (top of self.parsers)
    self.pos = 0        # position in self.rule.items
    self.tokens = []    # all tokens we have ever seen
    self.backlog = []   # [(pos, #tokens consumed, donetext)]

def getGlobalParameters(self):
    Parser.getGlobalParameters(self)
    for i in range(0, self.writer.parsedModel.numGlobalParameters):
        if (len(self.writer.parsedModel.parameterId) - self.comp) < 9:
            self.writer.parsedModel.parameterId.append("parameter0" + repr(i + 1))
        else:
            self.writer.parsedModel.parameterId.append("parameter" + repr(i + 1))

def getSpecies(self):
    Parser.getSpecies(self)
    for k in range(0, len(self.listOfSpecies)):
        if (len(self.writer.parsedModel.speciesId) - self.comp) < 9:
            self.writer.parsedModel.speciesId.append("species0" + repr(k + 1))
        else:
            self.writer.parsedModel.speciesId.append("species" + repr(k + 1))

def file_test(request):
    # Handle file upload
    if request.method == 'POST':
        form = AlgorithmRunForm(request.POST, request.FILES)
        if form.is_valid():
            out_file = File(open("./pgss15compbio/media/out_file.txt", "w+"))
            p = Parser()
            model = p.get_model(request.FILES['input_file'], out_file)
            model.iterate()
            new_algorithm_run = AlgorithmRun(input_file=request.FILES['input_file'],
                                             output_file=out_file)
            new_algorithm_run.save()
            # Redirect to the document list after POST
            h = HttpResponseRedirect("../../media/out_file.txt")
            return h
    else:
        form = AlgorithmRunForm()  # An empty, unbound form

    # Load documents for the list page
    algorithm_runs = AlgorithmRun.objects.all()

    # Render list page with the documents and the form
    return render_to_response(
        'skeletonpages/file_test.html',
        {'algorithm_runs': algorithm_runs, 'form': form},
        context_instance=RequestContext(request)
    )

def test_premier_dependency(self):
    """ Test that premier.c is a dependency for premier and is a file."""
    parser = Parser()
    parser.parse_makefile(self.makefile)
    task = parser.get_task('premier')
    self.assertEquals('premier.c', task.dependencies[0].target)
    self.assertTrue(task.dependencies[0].is_file_dependency())

def parse_file(file, no_confirm):
    # suppose type
    print CYAN + file + WHITE
    redirect_null()
    supposed_type = Parser.suppose_type(file)
    redirect_standard()
    if not supposed_type:
        print YELLOW + 'Detection failed\n' + WHITE
        if no_confirm:
            return None
    else:
        for i in Parser.parser_list:
            if i[0] == supposed_type:
                print GREEN + 'detected %s\n' % i[1] + WHITE
                break
    # confirm supposed type
    if not no_confirm:
        supposed_type = confirm_type(supposed_type)
    # re-ask while problems
    while not no_confirm and not supposed_type:
        print YELLOW + 'Invalid choice' + WHITE
        supposed_type = confirm_type(supposed_type)
    if not supposed_type:
        print RED + 'Critical no equipment parser found' + WHITE
        return None
    redirect_null()
    fws = Parser.parser(file, supposed_type, None)
    redirect_standard()
    return fws

def test_parser_can_identify_multiple_ctes(self):
    parser = Parser()
    output = parser.parse("with test_cte as ( SELECT 1 ) , test_cte2 as ( SELECT 3 ) Select 2")
    self.assertEqual(len(output), 3)
    self.assertEqual(output[0].query, "SELECT 1".lower())
    self.assertEqual(output[1].query, "SELECT 3".lower())
    self.assertEqual(output[2].query, "SELECT 2".lower())

def test_parser_can_handle_minimal_whitespace(self):
    parser = Parser()
    output = parser.parse("with test_cte as (SELECT 1),test_cte2 as (SELECT 3 ) Select 2")
    self.assertEqual(len(output), 3)
    self.assertEqual(output[0].query, "SELECT 1".lower())
    self.assertEqual(output[1].query, "SELECT 3".lower())
    self.assertEqual(output[2].query, "SELECT 2".lower())

def test_dependency_finder(self):
    parser = Parser()
    firstQuery = Query("testQuery1", "select * from testQuery2", [])
    secondQuery = Query("testQuery2", "select * from testQuery1", [])
    output = parser._update_dependencies([firstQuery, secondQuery])
    self.assertTrue("testQuery1" in output[1].dependencies)
    self.assertTrue("testQuery2" in output[0].dependencies)

def __init__(self, *args, **kwds):  # blast, allHits=False, evalue=1e-10, taxFilter=None, taxFilterType=0
    """Initialise a BLAST XML parser."""
    # Initialize Parser super class
    Parser.__init__(self, *args, **kwds)
    # Build iterator to loop over the XML
    self.context = iter(ET.iterparse(self.infile, events=("start", "end")))
    # Boolean to allow hits to be returned
    self.runHSP = True
    # Save the current contig, or queryID
    self.queryID = None
    # The number of Hits that have been processed
    self.numHits = 0
    # File came from RAPSearch2?
    self.rapsearch = kwds.get("rapsearch", False)
    # Start initial parsing
    event, root = self.context.next()
    if root.tag not in ["BlastOutput", "Output"]:
        raise RuntimeError("This is not a valid BLAST XML file or RAPSearch2 XML file")
    elif root.tag == "Output":
        self.rapsearch = True
    # Start looping over data until we get to the first iteration
    for event, elem in self.context:
        if event == "start" and elem.tag == "Iteration":
            break

def test_all_targets(self):
    """ Test that list.txt is the default target. """
    parser = Parser()
    parser.parse_makefile(self.makefile)
    task = parser.get_task('')
    self.assertTrue(task.target == 'list.txt')
    self.assertEquals(len(task.dependencies), 20)

def on_error_conf(self, item):
    """Launch parser and generate an anonymous configuration file with parsed token"""
    filename = None
    dialog = gtk.FileChooserDialog('Save anonymous configuration file', None,
                                   gtk.FILE_CHOOSER_ACTION_SAVE,
                                   (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                    gtk.STOCK_SAVE, gtk.RESPONSE_OK))
    dialog.set_default_response(gtk.RESPONSE_OK)
    response = dialog.run()
    if response == gtk.RESPONSE_OK:
        filename = dialog.get_filename()
    dialog.destroy()
    if not filename:
        return
    try:
        Parser.generate_debug_conf(filename, self.node.object.name, self.node.object.type)
    except Exception as e:
        Gtk_DialogBox(e.message)
    except:
        Gtk_DialogBox("An error occurred.")

def __init__(self, pose3d, laser, motors):
    self.pose3d = pose3d
    self.laser = laser
    self.motors = motors

    # Car direction
    self.carx = 0.0
    self.cary = 0.0

    # Obstacles direction
    self.obsx = 0.0
    self.obsy = 0.0

    # Average direction
    self.avgx = 0.0
    self.avgy = 0.0

    # Current target
    self.targetx = 0.0
    self.targety = 0.0
    self.targetid = "NaN"

    self.stop_event = threading.Event()
    self.kill_event = threading.Event()
    self.lock = threading.Lock()
    threading.Thread.__init__(self, args=self.kill_event)

    # Init targets
    parser = Parser('targets.json')
    self.targets = parser.getTargets()

def record(self, article):
    self.sendMsg(article + Enter)
    self.recvMsg()
    self.sendMsg('Q')
    message = self.recvMsg()
    if self.debug == 1:
        writer = open('case.txt', 'w')
        writer.write(message.encode('utf-8', 'ignore'))
        writer.close()
    url = Parser().getWebUrl(message)
    title = Parser().getTitle(message)
    os.system('clear')
    writer = open('result.txt', 'a')
    writer.write(url.encode('ascii', 'ignore') + '\n')
    writer.write(title.encode('utf-8', 'ignore') + '\n')
    writer.write('================================\n')
    writer.close()
    self.sendMsg(Enter)
    self.recvMsg()
    # Mark it read
    self.sendMsg(RightArrow)
    self.recvMsg()
    self.sendMsg(LeftArrow)
    self.recvMsg()

def wordProcess(lst):
    parser = Parser()
    termString = parser.clean(lst)
    termLst = parser.tokenize(termString)
    termLst = parser.removeStopWords(termLst)
    termLst = util.removeDuplicates(termLst)
    return termLst

def getBranchUrl(self, arg):
    BR = None
    b = Parser().read("branch")
    b = b.strip()
    a = Parser().read("rom_abrv")
    a = a.strip()
    if b == "Default":
        Dialogs().CDial(gtk.MESSAGE_ERROR, "No branch chosen",
                        "Please select a branch so I know which device list to pull.\n\nThanks!")
        return
    if a == "CM":
        from projects.CyanogenMod import CyanogenMod as CM
        BR = CM().getBranch(arg)
    elif a == "CNA":
        from projects.CodenameAndroid import CodenameAndroid as CNA
        BR = CNA().getBranch(arg)
    elif a == "AOSP":
        from projects.AOSP import AOSP
        BR = AOSP().getBranch(arg)
    elif a == "AOKP":
        from projects.AOKP import AOKP
        BR = AOKP().getBranch(arg)
    else:
        pass
    return BR

def __init__(self, sensor):
    self.sensor = sensor
    self.imageRight = None
    self.imageLeft = None
    self.lock = threading.Lock()

    # Car direction
    self.carx = 0.0
    self.cary = 0.0

    # Obstacles direction
    self.obsx = 0.0
    self.obsy = 0.0

    # Average direction
    self.avgx = 0.0
    self.avgy = 0.0

    # Current target
    self.targetx = 0.0
    self.targety = 0.0

    # Init targets
    parser = Parser('targets.json')
    self.targets = parser.getTargets()

def test_premier_valid_command(self):
    """ Test that the command in the premier target is valid."""
    parser = Parser()
    parser.parse_makefile(self.makefile)
    task = parser.get_task('premier')
    self.assertEquals('gcc premier.c -o premier -lm', task.command)

def main():
    # output file is the file where output will be written to
    filename = sys.argv[1].split('.')[0]
    outputfile = open(filename + ".hack", "a")
    # input file is the file where input will come from
    inputfile = Parser(sys.argv[1])
    lines = inputfile.commandLines()
    for line in lines:
        if ParserComd(line).commandType() == 'A_command':
            symbol_line = ParserComd(line).symbol()
            symbol_a = SymbolTable()
            symbol_a.addEntry(symbol_line)
            f = symbol_a.GetAddress(symbol_line)
            outputfile.write(f)
            outputfile.write('\n')
        elif (ParserComd(line).commandType() == 'C_command_a'
              or ParserComd(line).commandType() == 'C_command_b'):
            dest_line = ParserComd(line).dest()
            comp_line = ParserComd(line).comp()
            jump_line = ParserComd(line).jump()
            cbinary = Code(dest_line, comp_line, jump_line).cinstruction()
            outputfile.write(cbinary)
            outputfile.write('\n')
        elif ParserComd(line).commandType() == 'L_command':
            outputfile.write('This line is going to delete\n')
    outputfile.close()

def validate(cls, variant_string):
    # get the variant breakdown from the parser
    parser = Parser()
    variant = parser.parse('', variant_string)  # for validation we can leave the ID (the 1st param) blank

    # If parsing failed - i.e. no values were properly indexed
    if variant is None:
        return False

    # Genomic and cDNA - possibilities: c.112g>t, g.12345678, 12345678,
    # NC_000017.10:12345678 or Chr.17:12345678
    if (variant_string[0].lower() == 'g' or variant_string[0].lower() == 'c'
            or variant_string[0].lower() == 'n' or variant_string[0].isdigit()):
        return Validate_Genomic_cDNA.validate(variant)
    # mRNA
    elif variant_string[0].lower() == 'm':
        return Validate_mRNA.validate(variant)
    # Protein
    elif variant_string[0].lower() == 'p':
        return Validate_Protein.validate(variant)
    # if nothing else, then fail
    else:
        return False

def __init__(self, domain, problem):
    p = Parser(domain, problem)
    domainKB = p.parseActionsAndPropositions()
    self.actions = domainKB[0]
    self.propositions = domainKB[1]
    prob = p.pasreProblem()
    self.initialState = prob[0]
    self.goal = prob[1]

def get_fields(thefile):
    """Parse a SOS GML file and extract the field data."""
    doc = Parser(file=thefile, namespace="http://www.opengis.net/swe/1.0.1")
    om_result = doc.tag('member/Observation/result', doc.get_ns('om'))
    fields = doc.elem_tags(om_result, 'DataArray/elementType/DataRecord/field')
    return extract_field_data(doc, fields)

def parse_mpl(file):
    p = Parser()
    try:
        p.parsefile(file)
        return 0
    except SyntaxError as e:
        errmes(file, e.line, e.col, str(e))
        return 1

def __init__(self, downloader=None):
    '''Initialize the character parser.'''
    log.debug("Creating character parser with downloader " + str(downloader))
    Parser.__init__(self, downloader=downloader)
    self._gp = GuildParser(downloader=self._downloader)
    self._ip = ItemParser(downloader=self._downloader)
    self._ap = AchievementParser(downloader=self._downloader)
    Base.metadata.create_all(Database.engine)

def feedConfig(self, fn):
    fd = open(fn, 'r')
    parser = Parser(fd)
    self.config = parser.parse()
    self.runner = JobRunner(self.config)
    # config file info
    self.fn = fn
    self.ftime = datetime.datetime.fromtimestamp(os.path.getmtime(self.fn))

def __init__(self, rule, parent):
    Parser.__init__(self, rule, parent)
    self.parsers = []       # parsers constructed for each of rule.items
    self.active = None      # main active parser (top of self.parsers)
    self.pos = 0            # position in self.rule.items
    self.or_parser = None   # in use if we split into an OrParser
    self.fakePrefix = ''    # text to prepend to the next parse()'s output
    self.justSplit = False  # True for left branch immediately after a split

def test_lists_are_valid_target(self):
    """ Test that list[1..20].txt are valid targets."""
    parser = Parser()
    parser.parse_makefile(self.makefile)
    for i in range(1, 21):
        task = parser.get_task('list{}.txt'.format(i))
        self.assertTrue(task.dependencies[0].target == 'premier')
        self.assertTrue(task.command.startswith('./premier'))

def main(semester, year):
    # Hardcoded values
    url_base = "http://ssbp.mycampus.ca"
    url_action = "/prod/bwckschd.p_get_crse_unsec"

    # Set metaData (used by database storer)
    metaData = {'term': semester, 'year': year}

    # Everything is in a catch-all try block
    try:
        # Instantiate objects
        courseData = []
        pageParser = Parser(courseData)

        # === PHASE 1: PARSING ===
        # Set list of faculties to parse
        facsToUse = acros.faculties
        #facsToUse = ["ENGR"]

        # Parse for each faculty
        facSoFar = 0
        for faculty in facsToUse:
            facSoFar += 1
            # Print out which faculty is being parsed
            outta = str(facSoFar) + "/" + str(len(facsToUse))
            logging.info("Parsing faculty: " + outta + " " + faculty + ": " + acros.faculties[faculty])

            # Instantiate page loader and get page
            #pageLoader = PageLoader(url_base,url_action,semester,year,faculty)
            #page = pageLoader.get_page(pageLoader.gen_url_and_data())
            with open('last_source.html', 'r') as f:
                page = ''.join(f.readlines())
            bs = BowlShit(page)
            """
            with open('last_source.html','w') as f:
                f.write(page)
            """

            # Parse page
            pageParser.parse_course_info(page)

        with open('last_object_dump.txt', 'w') as f:
            util.log_anything_prettily(lambda t: f.write(t + "\n"), courseData)
            #pickle.dump(courseData,f)

        # === PHASE 2: STORING ===
        frf = FRFStore()
        frf.set_course_data(courseData, metaData)
        frf.setup_default_connection()
        frf.insert_data_as_offerings()
    except SomethingWentWrong as e:
        logging.exception("Something terrible(?) happened!!")

def run(self):
    repo = Utils().which("repo")
    if repo == None:
        Utils().CDial(gtk.MESSAGE_INFO, "Repo is not installed",
                      "You need to install repo to continue.")
        main_cmc_cmd()
        return
    r = Parser().read("repo_path")
    url = Utils().getBranchUrl("init")
    b = Parser().read("branch")
    j = Parser().read("sync_jobs")
    if not os.path.exists(r):
        os.mkdir(r)
    Globals.TERM.feed_child("cd %s\n" % r)
    if not os.path.exists("%s/.repo" % r):
        Globals.TERM.feed_child("repo init -u %s -b %s\n" % (url, b))
        Globals.TERM.feed_child("y\n")
        Utils().CDial(gtk.MESSAGE_INFO, "Running repo init!",
                      "You needed to init the repo, doing that now.")
    Globals.TERM.feed_child("repo sync -j%s\n" % j)
    Globals.TERM.feed_child("echo \"Complete!\"\n")

def test(file):
    '''
    Distributed detection test.
    This function takes a configuration file as parameter and:
    - clears the graph topology
    - detects the file type (Cisco Asa, Juniper, ...)
    - constructs the firewall data structure
    - constructs the graph topology
    - performs the distributed anomaly detection
    - returns the error list
    '''
    res = ''
    NetworkGraph().clear()
    fw = Parser.parser(file, Parser.suppose_type(file), None)
    fw.build_bdd()
    NetworkGraph().network_graph(fw)
    error_list = DistributedDetection.DistributedDetection(False).distributed_detection()
    for k, v in error_list:
        if len(v) > 0:
            res += "\n".join(v)
    return res

def process_sample(file_path):
    print file_path
    config = Config()
    load_sucess = config.load_config()
    if load_sucess:
        dis = Dissector()
        parser = Parser()
        extrator = Extractor()
        operator = Operator(config)
        r_generator = Report_Generator()
        sample = Sample(file_path)
        rlt = dis.extract_file(sample, config.get_output_dir())
        bin_time_list = list()
        if rlt:
            parser.parse(sample)
            extrator.extract(sample)
            # config.print_info()
            operator.operate(sample, config)
            r_generator.write_report(sample)
        return sample

def main():
    print("Please, enter the mathematical expression containing floating point numbers, "
          "operations +, -, *, / or ():")
    expr = input("> ")
    parser = Parser(expr)
    try:
        expr = parser.parse()
        print(expr.calculate())
    except ParserError as pe:
        print(f"Parser error: {pe}")
    except ZeroDivisionError as zde:
        print(f"Calculation error: {zde}")
    except ProgrammerError as pre:
        print(f"Programmer error: {pre}")
    except:  # pylint: disable=bare-except
        print("Unknown error")
    input("Press <Enter> key to exit...")

def test(file):
    '''
    Internal detection test.
    This function takes a configuration file as parameter and:
    - detects the file type (Cisco Asa, Juniper, ...)
    - constructs the firewall data structure
    - performs the internal detection
    - returns the error list
    '''
    res = ''
    type = Parser.suppose_type(file)
    if type is None:
        type = "Parser.JuniperNetscreen.JuniperNetscreenYacc"
    firewalls = Parser.parser(file, type, None)
    for fw in firewalls:
        fw.build_bdd()
        error_list = InternalDetection.InternalDetection(Node.Node(fw), True).detect_anomaly()
        for elem in error_list:
            for error in elem:
                res += error
    return res

def openFile(self):
    """opens Open File dialogue"""
    tr = QtCore.QCoreApplication.translate
    fileName = QtWidgets.QFileDialog.getOpenFileName(
        self,
        tr("OpenDialog:", "Berechnung öffnen"),
        "",
        tr("OpenDialog:", "Bauphysikberechnung (*.baup);;Alle Dateien (*)"))
    if all(fileName):
        file = QtCore.QFile(fileName[0])
        if not file.open(QtCore.QIODevice.ReadOnly):
            QtWidgets.QMessageBox.Information(
                self,
                tr("OpenDialog:", "Datei kann nicht geöffnet werden"),
                file.errorString())
            return
        parser = Parser(fileName[0])
        tabdata = parser.parse()
        self.addNewTab(tabdata.mode, tabdata)
        self.tabWidget.currentWidget().data.currentFileLocation = fileName[0]

def _number_of_tokens(self):
    token_num_dict = {}
    parser = Parser(self.corpus)
    token_list = parser.clean_token_list
    uni_grams_list = parser.uni_grams
    bio_grams_list = parser.bio_grams
    tri_grams_list = parser.tri_grams
    for i in range(parser.token_list_length):
        token_num_dict[self.doc_id_dict[token_list[i][0]]] = (
            len(uni_grams_list[i]), len(bio_grams_list[i]), len(tri_grams_list[i]))
    return token_num_dict

def parse():
    """Check the consistency of the uploaded file."""
    if 'file_uuid' not in session.keys():
        return redirect(url_for('main'))
    parser = Parser()
    uploader = Uploader()
    f = uploader.open_file(session['file_uuid'])
    parser.parse_content(f)
    if len(parser.errors) == 0:
        target = 'metafields'
    else:
        session.pop('errors', None)
        session['errors'] = parser.errors
        flash('ERROR: We found some problems when parsing your file. '
              '<a href="#" data-toggle="modal" data-target="#parsingModal">Click here</a> '
              'to get a detailed review.')
        target = 'main'
    return redirect(url_for(target))

def getBranchUrl(self, arg):
    BR = None
    b = Parser().read("branch")
    b = b.strip()
    a = Parser().read("rom_abrv")
    a = a.strip()
    if b == "Default":
        Utils().CDial(
            gtk.MESSAGE_ERROR, "No branch chosen",
            "Please select a branch so I know which device list to pull.\n\nThanks!")
        return
    if a == "CM":
        from projects.CyanogenMod import CyanogenMod as CM
        BR = CM().getBranch(arg)
    elif a == "CNA":
        from projects.CodenameAndroid import CodenameAndroid as CNA
        BR = CNA().getBranch(arg)
    elif a == "GR":
        from projects.GeekRom import GeekRom as GR
        BR = GR().getBranch(arg)
    elif a == "AOSP":
        from projects.AOSP import AOSP
        BR = AOSP().getBranch(arg)
    elif a == "AOKP":
        from projects.AOKP import AOKP
        BR = AOKP().getBranch(arg)
    else:
        pass
    return BR

def main():
    parse_args()
    print("debug:", DEBUG)
    print("file to process:", fname)
    parser = Parser()
    parser.load(fname)
    parser.run()

def record(self, article):
    self.sendMsg(article + Enter)
    self.recvMsg()
    self.sendMsg('Q')
    message = self.recvMsg()
    if self.debug == 1:
        writer = open('case.txt', 'w')
        writer.write(message.encode('utf-8', 'ignore'))
        writer.close()
    url = Parser().getWebUrl(message)
    title = Parser().getTitle(message)
    os.system('clear')
    writer = open('result.txt', 'a')
    writer.write(url.encode('ascii', 'ignore') + '\n')
    writer.write(title.encode('utf-8', 'ignore') + '\n')
    writer.write('================================\n')
    writer.close()
    self.sendMsg(Enter)
    self.recvMsg()
    # Mark it read
    self.sendMsg(RightArrow)
    self.recvMsg()
    self.sendMsg(LeftArrow)
    self.recvMsg()

def main():
    asm_file = raw_input('enter file name ')  # get file
    myfile = open(asm_file, 'r')  # open asm file
    asm_instr = myfile.readlines()  # returns list of strings, each one a line
    # open new file with hack ext and same name as asm, to store binary instructions in
    hack_instr = open(asm_file[0:len(asm_file) - 3] + 'hack', 'w')
    parse = Parser()  # instantiate new parser object
    symbols = SymbolTable()  # instantiate new SymbolTable object
    code = Code()  # instantiate new Code object; has binary equivalents for ASM commands

    # actually add lines to parser
    for line in asm_instr:  # for each asm line
        parse.read_instr(line)  # build parsed list

    # build SymbolTable - first run through
    for line in parse.lines:  # for each parsed line
        if '@' in line:  # if it is an A instruction
            symbols = build_sym(line[1:len(line)], symbols)  # send symbol and table

    # conversion run through
    for line in parse.lines:  # for each parsed line
        if '@' in line:  # is A instruction
            line = line[1:len(line)]  # remove @
            out_line = is_a(line, symbols)  # translated A instruction
        else:  # is C instruction
            out_line = is_c(line, code)
        hack_instr.write(out_line + '\n')  # store the translated instruction in the hack file

    return hack_instr  # return translated hack file

def translate_file(input_file, input_file_name, output_file, write_boot):
    """
    translates the given input vm file to the given output asm file
    :param input_file: the input vm file
    :param input_file_name: the name of the input file
    :param output_file: the output asm file
    :param write_boot: should the function write the booting lines
        at the beginning of the translation
    """
    # split the path into its directories and the file name
    file_name_dirs = input_file_name.split(os.path.sep)
    # get the file name only
    file_name = file_name_dirs[FILE_NAME_POSITION][:-len(VM_SUFFIX) - 1]
    file_parser = Parser(file_name)
    file_translator = Translator(file_parser)
    # if needed: put the booting lines at the start of the file
    if write_boot:
        output_file.write(file_translator.translate_booting())
    # the input file translation
    for line in input_file:
        file_parser.set_command(line)  # set the parser to the current line
        file_parser.parse()
        asm_command = file_translator.translate()
        output_file.write(asm_command)  # write the asm code to the output file

def problem3(filename):
    parser = Parser(filename)

    # associate one band to all colleagues who like it
    # {'The Doors': ['Alice', 'Bob'], 'U2': ['Bob'], ...}
    bands_colleagues = {}
    # set of colleagues who are not satisfied yet
    unhappy_colleagues = set()

    while True:
        try:
            current_line = parser.next()
        except StopIteration:
            break
        name = current_line[0]
        bands = current_line[1]
        for band in bands:
            if band in bands_colleagues:
                bands_colleagues[band].add(name)
            else:
                bands_colleagues[band] = set()
                bands_colleagues[band].add(name)
            unhappy_colleagues.add(name)

    # While there are still unhappy colleagues:
    #   find the band that is most liked by the remaining unhappy colleagues
    #   add this band to the result
    #   remove the colleagues who like this band from the unhappy list
    result = []
    while unhappy_colleagues:
        band = find_closer_band(bands_colleagues, unhappy_colleagues)
        result.append(band)
        for colleague in bands_colleagues[band]:
            if colleague in unhappy_colleagues:
                unhappy_colleagues.remove(colleague)

    for band in result:
        print band

def __init__(self, param):
    """ generated source for method __init__ """
    Parser.__init__(self)
    self.param = param
    self.differ = Differential(self.param.Seed)
    self.predict = []
    self.interval = None

    # Kalman Filter params
    self.P = 100      # estimation error covariance (over all time instances)
    self.Q = 1000     # process noise, synthetic data
    self.R = 1000000  # measurement noise, optimal for alpha = 1, synthetic data
    self.K = 0        # Kalman gain

    # PID control params - default
    self.Cp = 0.9  # proportional gain, to keep output proportional to current error
    self.Ci = 0.1  # integral gain, to eliminate offset
    self.Cd = 0.0  # derivative gain, to ensure stability - prevent large error in future

    # fixed internally
    self.theta = 1      # magnitude of changes
    self.xi = 0.2       # gamma (10%)
    self.minIntvl = 1   # make sure the interval is greater than 1
    self.windowPID = 5  # I (integration) window
    self.ratioM = 0.2   # sampling rate
    # self.isSampling = False

def save_data():
    parser = Parser('https://tap.az/all', base='https://tap.az')
    product_name = '//div[contains(@class, "endless-products")]/div[@class="products-i"]/a[@class="products-link"]/div[@class="products-name"]/text()'
    product_price = '//div[contains(@class, "endless-products")]/div[@class="products-i"]/a[@class="products-link"]/div[@class="products-top"]/div[@class="products-price-container"]/div[@class="products-price"]/span[@class="price-val"]/text()'
    currencies = '//div[contains(@class, "endless-products")]/div[@class="products-i"]/a[@class="products-link"]/div[@class="products-top"]/div[@class="products-price-container"]/div[@class="products-price"]/span[@class="price-cur"]/text()'
    product_code = '//div[contains(@class, "endless-products")]/div[@class="products-i"]/div[@class="products-bookmarking"]/a/@href'

    product_names = parser.get_data(product_name)
    prices = parser.get_data(product_price)
    valyuta = parser.get_data(currencies)
    codes = parser.get_data(product_code)

    for a in range(0, len(product_names)):
        product = session.query(Product).filter(
            Product.code.in_([codes[a].replace('bookmark', '')])).all()
        if not product:
            new_product = Product()
            new_product.name = product_names[a]
            new_product.price = prices[a].replace(' ', '')
            new_product.valyuta = valyuta[a]
            new_product.code = codes[a].replace('bookmark', '')
            session.add(new_product)
            session.commit()
            print(product_names[a], prices[a].replace(' ', ''), valyuta[a],
                  codes[a].replace('bookmark', ''))
        else:
            today = datetime.datetime.now()
            print(product_names[a], ' exists in the database.',
                  today.strftime("%Y-%m-%d %H:%M:%S"))

def __init__(self, input_path, output_path):
    self.input_path = input_path
    self.output_path = output_path
    self.category_num = -1
    self.num_of_docs_per_category = {}
    self.category_to_terms_map = {}  # terms in each category
    self.top_ten_terms_per_category = {}
    # In how many docs has each term appeared (per category)
    self.term_appearances_per_category = {}
    self.parser = Parser()
    self.categories = {
        'C01': 'Bacterial Infections and Mycoses',
        'C02': 'Virus Diseases',
        'C03': 'Parasitic Diseases',
        'C04': 'Neoplasms',
        'C05': 'Musculoskeletal Diseases',
        'C06': 'Digestive System Diseases',
        'C07': 'Stomatognathic Diseases',
        'C08': 'Respiratory Tract Diseases',
        'C09': 'Otorhinolaryngologic Diseases',
        'C10': 'Nervous System Diseases',
        'C11': 'Eye Diseases',
        'C12': 'Urologic and Male Genital Diseases',
        'C13': 'Female Genital Diseases and Pregnancy Complications',
        'C14': 'Cardiovascular Diseases',
        'C15': 'Hemic and Lymphatic Diseases',
        'C16': 'Neonatal Diseases and Abnormalities',
        'C17': 'Skin and Connective Tissue Diseases',
        'C18': 'Nutritional and Metabolic Diseases',
        'C19': 'Endocrine Diseases',
        'C20': 'Immunologic Diseases',
        'C21': 'Disorders of Environmental Origin',
        'C22': 'Animal Diseases',
        'C23': 'Pathological Conditions, Signs and Symptoms'
    }
    for category, name in self.categories.items():
        self.top_ten_terms_per_category[category] = []
        self.category_to_terms_map[category] = {}
        self.term_appearances_per_category[category] = {}

def __init__(self):
    if len(sys.argv) != 3:
        print("\033[91m[INPUT ERROR]: Invalid number of input arguments.\033[0m")
        self.printHelp()
    inFile = sys.argv[1]
    outFileRel = sys.argv[2]
    currentWorkingDir = os.getcwd()
    outFileAbs = currentWorkingDir + "/" + outFileRel
    print(inFile)
    print(outFileAbs)

    parser = Parser(inFile)
    components = parser.getComponents()
    nodesNames = parser.getNodeNames()
    nonlinearModels = parser.get_nonlinear_models()
    opampModels = parser.get_opamp_models()

    baseMatrices = BaseMatrixGenerator(components, nodesNames)
    matrixDict = baseMatrices.getMatrixDict()
    matrixDict.addToDict("nodeNames", nodesNames)
    matrixDict.addToDict("potentiometerNames", [])  # TODO: Implement pot names
    matrixDict.addToDict("nonlinearModels", nonlinearModels)
    matrixDict.addToDict("opampModels", opampModels)
    matrixDict.printMatrixToFile(outFileAbs)
    print("Json data written to " + outFileAbs + ".")

def analyseModelStructure(self):
    Parser.analyseModelStructure(self)
    for i in range(0, len(self.listOfReactions)):
        for n in range(0, self.numLocalParameters[i]):
            self.parameterId.append(
                self.listOfReactions[i].getKineticLaw().getParameter(n).getId())
            if (len(self.writer.parsedModel.parameterId) - self.comp) < 10:
                self.writer.parsedModel.parameterId.append(
                    "parameter0" + repr(len(self.parameterId) - self.comp))
            else:
                self.writer.parsedModel.parameterId.append(
                    "parameter" + repr(len(self.parameterId) - self.comp))
            name = self.listOfReactions[i].getKineticLaw().getParameter(n).getId()
            new_name = 'parameter' + repr(len(self.parameterId) - self.comp)
            node = self.sbmlModel.getReaction(i).getKineticLaw().getMath()
            new_node = self.rename(node, name, new_name)
            self.writer.parsedModel.kineticLaw[i] = formulaToString(new_node)

def menu_parse_exams():
    file_name = input('Введите имя xls/xlsx файла: ')
    organization = input('Введите название организации: ')
    header_row = int(input('Номер строки заголовков (по умолчанию 0): '))
    with Parser(file_name) as parser:
        data = parser.parse_exams_from_excel(header_row)
    json_data = {organization: data}
    with open('exams_data.json', 'w') as outfile:
        json.dump(json_data, outfile, indent=4, sort_keys=True)
    print('Расписание экзаменов сохранено в exams_data.json')

class VMTranslator:
    def __init__(self, path):
        self.path = path.replace('\\', '/')
        try:
            self.filename = self.path[len(self.path) - self.path[::-1].index('/'):-3]
        except ValueError:
            self.filename = self.path[:-3]
        self.cw = CodeWriter(self.filename)
        self.parser = Parser(path)

    def translate(self):
        self.parser.advance()
        for token in self.parser.tokens:
            if token.getToken() in ['push', 'pop']:
                self.cw.writePushPop(token.getToken(), token.getArg(0), token.getArg(1))
            elif token.getToken() in self.parser.ARITHMETIC:
                self.cw.writeArithmetic(token.getToken())
        with open(self.path[:-3] + '.asm', 'w') as f:
            f.write(self.cw.out)
        print("Done.")

class Interpreter:
    def __init__(self):
        pass


p = Parser("test3.txt")
print("Program Started")
prog = p.parse()
prog.execute()
try:
    print("Ended")
except:
    print("Crashed for some reason")

def __init__(self, option):
    size = np.random.random_integers(4, 80)
    list_of_nodes = []
    for i in range(0, size):
        distance = np.random.random_sample()
        letter = str(chr(97 + i % 22))
        sign = {letter: distance}
        list_of_nodes.append(sign)
    list_of_nodes = str(RandomTree.split_list(list_of_nodes)).replace(
        "{", "").replace("}", "").replace("[", "(").replace("]", ")")
    list_of_nodes = RandomTree.add_lengths(self, list_of_nodes) + ";"
    list_of_nodes = list_of_nodes.replace(" ", "")
    p = Parser(list_of_nodes, option)

def __init__(self, content):
    self.parser = Parser(content)
    self.parser.main()
    # the generated syntax tree
    self.tree = self.parser.tree
    # manager for the assembly file to be generated
    self.ass_file_handler = AssemblerFileHandler()
    # symbol table
    self.symbol_table = {}
    # sentence types
    self.sentence_type = [
        'Sentence', 'Include', 'FunctionStatement', 'Statement', 'FunctionCall',
        'Assignment', 'Control', 'Expression', 'Return'
    ]
    # operator stack used when evaluating expressions
    self.operator_stack = []
    # operand stack used when evaluating expressions
    self.operand_stack = []
    # number of labels declared so far
    self.label_cnt = 0
    # labels used for if/else
    self.labels_ifelse = {}

def work_parser(message):
    bot.send_message(message.from_user.id,
                     'Данные парсятся, это может занять некоторое время....')
    p = Parser(url='https://www.work.ua/jobs-', page='/?page=',
               message=message.text, chat_id=message.from_user.id)
    file = open(str(message.from_user.id) + '_-_' + str(message.text) + '.csv', 'rb')
    bot.send_document(message.from_user.id, file)
    delete_file(str(message.from_user.id) + '_-_' + str(message.text) + '.csv')
    msg = bot.send_message(message.from_user.id,
                           'Готово! Для дальнейшей работы нажмите "/start"')

def testtable2_ll2(self):
    P = Parser()
    G = Grammar()
    G.readGrammar('gram2.txt')
    P.setGrammar(G)
    #print(P.parsetable)
    self.assertEqual(
        P.parsetable, {
            '0': {
                'S': set([1]),
                'A': set()
            },
            '1': {
                'S': set([2, 3]),
                'A': set([4])
            },
            '$': {
                'S': set([2]),
                'A': set()
            }
        })
    self.assertFalse(P.preParse())

def testParseAgriculture(self):
    url = "http://itunes.apple.com/WebObjects/DZR.woa/wa/viewPodcast?cc=us&id=387961518"
    text = self.o.open(url).read()
    file('Food and Sustainable Agriculture.html', 'w').write(text)
    parsed_html = Parser(url, "text/HTML", text)
    self.assertEqual(parsed_html.Redirect, '')
    self.assertEqual(parsed_html.Title, 'Food and Sustainable Agriculture')
    self.assertEqual(len(parsed_html.mediaItems), 7)
    # FIXME: The following should be made into proper tests
    for line in parsed_html.mediaItems:
        logging.debug(line)

def openSection(self, params={}):
    get = params.get
    url = urllib.unquote_plus(get("url"))
    if 'True' == get("contentReady"):
        videos = self.__settings__.getSetting("lastContent")
        if 0 == len(Parser().sections(videos)):
            videos = self.fetchData(url)
    else:
        videos = self.fetchData(url)
    originalId = Parser().originalId(videos)
    if originalId:
        Gui().drawItem(self.localize('< Search >'), 'openSearch', originalId,
                       self.ROOT + '/resources/media/icons/search.png')
    else:
        Gui().drawItem(self.localize('< Search >'), 'openSearch',
                       re.search("(\d+)$", url).group(1),
                       self.ROOT + '/resources/media/icons/search.png')
    for section in Parser().sections(videos):
        contextMenu = [(
            self.localize('Search Like That'),
            'XBMC.Container.Update(%s)' % ('%s?action=%s&url=%s&like=%s' % (
                sys.argv[0], 'openSearch',
                re.search("(\d+)$", url).group(1),
                urllib.quote_plus(section['title']))))]
        xbmc.log('%s: url %s' % (sys.argv[0], section['link']), xbmc.LOGNOTICE)
        description = self.openPage2(section['link'])
        Gui().drawItem(section['title'], 'openPage', section['link'], section['image'],
                       contextMenu=contextMenu, fanart=True, description=description)
    Gui().drawPaging(videos, 'openSection')
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    Gui().lockView('info_big')

def do_eval(vm, argv):
    """ Eval builtin - touches the $@ variable """
    # Doing an eval basically is a whole other
    # parse-compile-run operation...
    from Lexer import Lexer
    from Parser import Parser
    from VM import VM

    error_msg = ''
    v = None
    try:
        l = Lexer(str(argv[0]))
        p = Parser(l)
        ast = p.parse()
        v = VM()
        # merge in existing scope, stack, subs, etc
        v.current_scope = vm.current_scope
        v.pgm_stack_frames = vm.pgm_stack_frames
        ast.emit(v)
        v.run()
        # merge the scopes back out again
        vm.current_scope = v.current_scope
        vm.pgm_stack_frames = v.pgm_stack_frames
    except Exception as e:
        error_msg = str(e)

    # push the return value back to the main program
    if v != None and len(v.stack) > 0:
        vm.stack.push(v.stack.pop())
    else:
        vm.stack.push(Value(None))

    # set $@ result
    vm.set_variable('@', Value(error_msg), 'scalar')

def postData():
    try:
        name = request.json["name"]
        email = request.json["email"]
        phone = request.json["mob_no"]
    except Exception as e:
        Log.log("Error" + str(e))
        return json.dumps(Details.Detail().__dict__)
    ans = Parser.parse(name, email, phone)
    Log.log("ans = ", ans)
    if '_id' in ans:
        ans.pop('_id', None)
    return json.dumps(ans)