def run(self):
    """Import an nmap XML file into the database.

    Parses ``self.filename`` with ``Parser`` and imports the results in four
    passes (hosts; OS/service/port objects; script objects; field updates),
    updating the progress widget along the way.  Emits ``done`` when finished
    and ``schedule`` with the parser object, because the scheduler needs the
    qprocess/parser back when the import completes.
    """
    try:
        self.importProgressWidget.show()
        session = self.db.session()
        self.tsLog("Parsing nmap xml file: " + self.filename)
        startTime = time()

        try:
            parser = Parser(self.filename)
        except Exception:
            # A parse failure means nothing can be imported; bail out early.
            self.tsLog('Giving up on import due to previous errors.')
            self.tsLog("Unexpected error: {0}".format(sys.exc_info()[0]))
            self.done.emit()
            return

        # Ensure that while this thread is running, no one else can write to
        # the DB.  The try/finally guarantees the semaphore is released even
        # if any of the import passes raise (the original leaked it on error,
        # deadlocking subsequent DB writers).
        self.db.dbsemaphore.acquire()
        try:
            s = parser.getSession()  # nmap session info
            if s:
                n = nmapSessionObj(self.filename, s.startTime, s.finish_time,
                                   s.nmapVersion, s.scanArgs, s.totalHosts,
                                   s.upHosts, s.downHosts)
                session.add(n)

            hostCount = len(parser.getAllHosts())
            if hostCount == 0:  # to fix a division by zero if we ran nmap on one host
                hostCount = 1
            totalprogress = 0
            self.importProgressWidget.setProgress(int(totalprogress))
            self.importProgressWidget.show()

            createProgress = 0
            createOsNodesProgress = 0
            createPortsProgress = 0

            # Pass 1: create all the hosts that need to be created.
            for h in parser.getAllHosts():
                db_host = session.query(hostObj).filter_by(ip=h.ip).first()
                if not db_host:  # if host doesn't exist in DB, create it first
                    hid = hostObj(osMatch='', osAccuracy='', ip=h.ip,
                                  ipv4=h.ipv4, ipv6=h.ipv6, macaddr=h.macaddr,
                                  status=h.status, hostname=h.hostname,
                                  vendor=h.vendor, uptime=h.uptime,
                                  lastboot=h.lastboot, distance=h.distance,
                                  state=h.state, count=h.count)
                    self.tsLog("Adding db_host")
                    session.add(hid)
                    t_note = note(h.ip, 'Added by nmap')
                    session.add(t_note)
                else:
                    self.tsLog("Found db_host already in db")
                createProgress = createProgress + ((100.0 / hostCount) / 5)
                totalprogress = totalprogress + createProgress
                self.importProgressWidget.setProgress(int(totalprogress))
                self.importProgressWidget.show()
            session.commit()

            # Pass 2: create all OS, service and port objects that need to be
            # created.
            for h in parser.getAllHosts():
                self.tsLog("Processing h {ip}".format(ip=h.ip))
                db_host = session.query(hostObj).filter_by(ip=h.ip).first()
                if db_host:
                    self.tsLog("Found db_host during os/ports/service processing")
                else:
                    # Fixed: was self.log(...), which does not exist on this
                    # object and would raise AttributeError on this path.
                    self.tsLog("Did not find db_host during os/ports/service processing")

                os_nodes = h.getOs()  # parse and store all the OS nodes
                self.tsLog(" 'os_nodes' to process: {os_nodes}".format(os_nodes=str(len(os_nodes))))
                for os in os_nodes:
                    self.tsLog(" Processing os obj {os}".format(os=str(os.name)))
                    db_os = (session.query(osObj)
                             .filter_by(hostId=db_host.id)
                             .filter_by(name=os.name)
                             .filter_by(family=os.family)
                             .filter_by(generation=os.generation)
                             .filter_by(osType=os.osType)
                             .filter_by(vendor=os.vendor).first())
                    if not db_os:
                        t_osObj = osObj(os.name, os.family, os.generation,
                                        os.osType, os.vendor, os.accuracy,
                                        db_host.id)
                        session.add(t_osObj)

                createOsNodesProgress = createOsNodesProgress + ((100.0 / hostCount) / 5)
                totalprogress = totalprogress + createOsNodesProgress
                self.importProgressWidget.setProgress(int(totalprogress))
                self.importProgressWidget.show()
                session.commit()

                all_ports = h.all_ports()
                self.tsLog(" 'ports' to process: {all_ports}".format(all_ports=str(len(all_ports))))
                for p in all_ports:  # parse the ports
                    self.tsLog(" Processing port obj {port}".format(port=str(p.portId)))
                    s = p.getService()
                    if s is not None:
                        # check if service already exists to avoid adding duplicates
                        db_service = session.query(serviceObj).filter_by(name=s.name).first()
                        if not db_service:
                            db_service = serviceObj(s.name, s.product, s.version,
                                                    s.extrainfo, s.fingerprint)
                            session.add(db_service)
                    else:  # else, there is no service info to parse
                        db_service = None

                    # fetch the port
                    db_port = (session.query(portObj)
                               .filter_by(hostId=db_host.id)
                               .filter_by(portId=p.portId)
                               .filter_by(protocol=p.protocol).first())
                    if not db_port:
                        if db_service:
                            db_port = portObj(p.portId, p.protocol, p.state,
                                              db_host.id, db_service.id)
                        else:
                            db_port = portObj(p.portId, p.protocol, p.state,
                                              db_host.id, '')
                        session.add(db_port)

                createPortsProgress = createPortsProgress + ((100.0 / hostCount) / 5)
                totalprogress = totalprogress + createPortsProgress
                # Fixed: this call was missing the int() cast that every other
                # setProgress() call in this method applies.
                self.importProgressWidget.setProgress(int(totalprogress))
                self.importProgressWidget.show()
            session.commit()

            # Pass 3: create all script objects that need to be created.
            for h in parser.getAllHosts():
                db_host = session.query(hostObj).filter_by(ip=h.ip).first()
                for p in h.all_ports():
                    for scr in p.getScripts():
                        self.tsLog(" Processing script obj {scr}".format(scr=str(scr)))
                        print(" Processing script obj {scr}".format(scr=str(scr)))
                        db_port = (session.query(portObj)
                                   .filter_by(hostId=db_host.id)
                                   .filter_by(portId=p.portId)
                                   .filter_by(protocol=p.protocol).first())
                        db_script = (session.query(l1ScriptObj)
                                     .filter_by(scriptId=scr.scriptId)
                                     .filter_by(portId=db_port.id).first())
                        if not db_script:  # if this script object doesn't exist, create it
                            t_l1ScriptObj = l1ScriptObj(scr.scriptId, scr.output,
                                                        db_port.id, db_host.id)
                            self.tsLog(" Adding l1ScriptObj obj {script}".format(script=scr.scriptId))
                            session.add(t_l1ScriptObj)
                for hs in h.getHostScripts():
                    db_script = (session.query(l1ScriptObj)
                                 .filter_by(scriptId=hs.scriptId)
                                 .filter_by(hostId=db_host.id).first())
                    if not db_script:
                        t_l1ScriptObj = l1ScriptObj(hs.scriptId, hs.output,
                                                    None, db_host.id)
                        session.add(t_l1ScriptObj)
            session.commit()

            # Pass 4: update everything with the values from this scan.
            for h in parser.getAllHosts():
                db_host = session.query(hostObj).filter_by(ip=h.ip).first()
                # Only fill fields that are currently empty, so information
                # gathered by a previous import is never erased.
                if db_host.ipv4 == '' and not h.ipv4 == '':
                    db_host.ipv4 = h.ipv4
                if db_host.ipv6 == '' and not h.ipv6 == '':
                    db_host.ipv6 = h.ipv6
                if db_host.macaddr == '' and not h.macaddr == '':
                    db_host.macaddr = h.macaddr
                if not h.status == '':
                    db_host.status = h.status
                if db_host.hostname == '' and not h.hostname == '':
                    db_host.hostname = h.hostname
                if db_host.vendor == '' and not h.vendor == '':
                    db_host.vendor = h.vendor
                if db_host.uptime == '' and not h.uptime == '':
                    db_host.uptime = h.uptime
                if db_host.lastboot == '' and not h.lastboot == '':
                    db_host.lastboot = h.lastboot
                if db_host.distance == '' and not h.distance == '':
                    db_host.distance = h.distance
                if db_host.state == '' and not h.state == '':
                    db_host.state = h.state
                if db_host.count == '' and not h.count == '':
                    db_host.count = h.count
                session.add(db_host)

                tmp_name = ''
                tmp_accuracy = '0'  # TODO: check if better to convert to int for comparison
                os_nodes = h.getOs()
                for os in os_nodes:
                    db_os = (session.query(osObj)
                             .filter_by(hostId=db_host.id)
                             .filter_by(name=os.name)
                             .filter_by(family=os.family)
                             .filter_by(generation=os.generation)
                             .filter_by(osType=os.osType)
                             .filter_by(vendor=os.vendor).first())
                    db_os.osAccuracy = os.accuracy  # update the accuracy
                    if not os.name == '':
                        # get the most accurate OS match/accuracy to store it
                        # in the host table for easier access
                        if os.accuracy > tmp_accuracy:
                            tmp_name = os.name
                            tmp_accuracy = os.accuracy

                if os_nodes:  # if there was operating system info to parse
                    if not tmp_name == '' and not tmp_accuracy == '0':
                        # update the current host with the most accurate OS match
                        db_host.osMatch = tmp_name
                        db_host.osAccuracy = tmp_accuracy
                session.add(db_host)

                for scr in h.getHostScripts():
                    print("-----------------------Host SCR: {0}".format(scr.scriptId))
                    db_host = session.query(hostObj).filter_by(ip=h.ip).first()
                    scrProcessorResults = scr.scriptSelector(db_host)
                    for scrProcessorResult in scrProcessorResults:
                        session.add(scrProcessorResult)

                for scr in h.getScripts():
                    print("-----------------------SCR: {0}".format(scr.scriptId))
                    db_host = session.query(hostObj).filter_by(ip=h.ip).first()
                    scrProcessorResults = scr.scriptSelector(db_host)
                    for scrProcessorResult in scrProcessorResults:
                        session.add(scrProcessorResult)

                for p in h.all_ports():
                    s = p.getService()
                    if s is not None:
                        db_service = session.query(serviceObj).filter_by(name=s.name).first()
                    else:
                        db_service = None
                    # fetch the port
                    db_port = (session.query(portObj)
                               .filter_by(hostId=db_host.id)
                               .filter_by(portId=p.portId)
                               .filter_by(protocol=p.protocol).first())
                    if db_port:
                        if db_port.state != p.state:
                            db_port.state = p.state
                            session.add(db_port)
                        # if there is some new service information, update it
                        if db_service is not None and db_port.serviceId != db_service.id:
                            db_port.serviceId = db_service.id
                            session.add(db_port)
                        # store the script results (note that existing script
                        # outputs are also kept)
                        for scr in p.getScripts():
                            db_script = (session.query(l1ScriptObj)
                                         .filter_by(scriptId=scr.scriptId)
                                         .filter_by(portId=db_port.id).first())
                            if not scr.output == '' and scr.output is not None:
                                db_script.output = scr.output
                                session.add(db_script)

            totalprogress = 100
            self.importProgressWidget.setProgress(int(totalprogress))
            self.importProgressWidget.show()
            session.commit()
        finally:
            self.db.dbsemaphore.release()  # we are done with the DB

        self.tsLog('Finished in ' + str(time() - startTime) + ' seconds.')
        self.done.emit()
        self.importProgressWidget.hide()
        # call the scheduler (if there is no terminal output it means we
        # imported nmap)
        self.schedule.emit(parser, self.output == '')
    except Exception as e:
        self.tsLog('Something went wrong when parsing the nmap file..')
        self.tsLog("Unexpected error: {0}".format(sys.exc_info()[0]))
        self.tsLog(e)
        # Fixed: the original emitted done AFTER `raise`, making it
        # unreachable; signal completion before propagating.
        self.done.emit()
        raise
def run(self):
    """Import an nmap XML file into the database (legacy variant).

    NOTE(review): this looks like an older revision of the importer above.
    The model constructors (nmap_host, nmap_os, nmap_service, nmap_port,
    nmap_script, note) are called with their results mostly discarded, so
    they presumably register themselves with the session — confirm against
    the model definitions.  ``session`` and ``logger`` are module-level
    names not visible in this chunk.
    """
    # it is necessary to get the qprocess because we need to send it back to
    # the scheduler when we're done importing
    try:
        logging.info("[+] Parsing nmap xml file: " + self.filename)
        starttime = time.time()
        try:
            parser = Parser(self.filename)
        except Exception as err:
            # Giving up on import due to previous errors.
            logger.exception('[!] Error - {!r}'.format(err))
            self.done.emit()
            return
        # ensure that while this thread is running, no one else can write to the DB
        self.db.dbsemaphore.acquire()
        s = parser.get_session()  # nmap session info
        if s:
            nmap_session(self.filename, s.start_time, s.finish_time,
                         s.nmap_version, s.scan_args, s.total_hosts,
                         s.up_hosts, s.down_hosts)
        hostCount = len(parser.all_hosts())
        if hostCount == 0:  # to fix a division by zero if we ran nmap on one host
            hostCount = 1
        progress = 100.0 / hostCount  # progress contribution of one host
        totalprogress = 0
        self.tick.emit(int(totalprogress))
        # Pass 1: create all the hosts that need to be created
        for h in parser.all_hosts():
            db_host = nmap_host.query.filter_by(ip=h.ip).first()
            if not db_host:  # if host doesn't exist in DB, create it first
                hid = nmap_host('', '', h.ip, h.ipv4, h.ipv6, h.macaddr,
                                h.status, h.hostname, h.vendor, h.uptime,
                                h.lastboot, h.distance, h.state, h.count)
                note(hid, '')
        session.commit()
        # Pass 2: create all OS, service and port objects that need to be created
        for h in parser.all_hosts():
            db_host = nmap_host.query.filter_by(ip=h.ip).first()  # fetch the host
            os_nodes = h.get_OS()  # parse and store all the OS nodes
            for os in os_nodes:
                db_os = nmap_os.query.filter_by(
                    host_id=db_host.id).filter_by(name=os.name).filter_by(
                        family=os.family).filter_by(
                            generation=os.generation).filter_by(
                                os_type=os.os_type).filter_by(
                                    vendor=os.vendor).first()
                if not db_os:
                    nmap_os(os.name, os.family, os.generation, os.os_type,
                            os.vendor, os.accuracy, db_host)
            for p in h.all_ports():  # parse the ports
                s = p.get_service()
                if not (s is None):
                    # check if service already exists to avoid adding duplicates
                    db_service = nmap_service.query.filter_by(
                        name=s.name).filter_by(product=s.product).filter_by(
                            version=s.version).filter_by(
                                extrainfo=s.extrainfo).filter_by(
                                    fingerprint=s.fingerprint).first()
                    if not db_service:
                        db_service = nmap_service(s.name, s.product, s.version,
                                                  s.extrainfo, s.fingerprint)
                else:  # else, there is no service info to parse
                    db_service = None
                # fetch the port
                db_port = nmap_port.query.filter_by(
                    host_id=db_host.id).filter_by(port_id=p.portId).filter_by(
                        protocol=p.protocol).first()
                if not db_port:
                    db_port = nmap_port(p.portId, p.protocol, p.state,
                                        db_host, db_service)
            session.commit()
            totalprogress += progress
            self.tick.emit(int(totalprogress))
        # Pass 3: create all script objects that need to be created
        for h in parser.all_hosts():
            db_host = nmap_host.query.filter_by(ip=h.ip).first()
            for p in h.all_ports():
                for scr in p.get_scripts():
                    db_port = nmap_port.query.filter_by(
                        host_id=db_host.id).filter_by(
                            port_id=p.portId).filter_by(
                                protocol=p.protocol).first()
                    db_script = nmap_script.query.filter_by(
                        script_id=scr.scriptId).filter_by(
                            port_id=db_port.id).first()
                    if not db_script:
                        # if this script object doesn't exist, create it
                        nmap_script(scr.scriptId, scr.output, db_port, db_host)
            for hs in h.get_hostscripts():
                db_script = nmap_script.query.filter_by(
                    script_id=hs.scriptId).filter_by(
                        host_id=db_host.id).first()
                if not db_script:
                    nmap_script(hs.scriptId, hs.output, None, db_host)
        session.commit()
        # Pass 4: update everything
        for h in parser.all_hosts():
            # get host from DB (if any with the same IP address)
            db_host = nmap_host.query.filter_by(ip=h.ip).first()
            # only fill fields that are still empty so previously imported
            # data is never overwritten
            if db_host.ipv4 == '' and not h.ipv4 == '':
                db_host.ipv4 = h.ipv4
            if db_host.ipv6 == '' and not h.ipv6 == '':
                db_host.ipv6 = h.ipv6
            if db_host.macaddr == '' and not h.macaddr == '':
                db_host.macaddr = h.macaddr
            if not h.status == '':
                db_host.status = h.status
            if db_host.hostname == '' and not h.hostname == '':
                db_host.hostname = h.hostname
            if db_host.vendor == '' and not h.vendor == '':
                db_host.vendor = h.vendor
            if db_host.uptime == '' and not h.uptime == '':
                db_host.uptime = h.uptime
            if db_host.lastboot == '' and not h.lastboot == '':
                db_host.lastboot = h.lastboot
            if db_host.distance == '' and not h.distance == '':
                db_host.distance = h.distance
            if db_host.state == '' and not h.state == '':
                db_host.state = h.state
            if db_host.count == '' and not h.count == '':
                db_host.count = h.count
            tmp_name = ''
            tmp_accuracy = '0'  # TODO: check if better to convert to int for comparison
            os_nodes = h.get_OS()
            for os in os_nodes:
                db_os = nmap_os.query.filter_by(
                    host_id=db_host.id).filter_by(name=os.name).filter_by(
                        family=os.family).filter_by(
                            generation=os.generation).filter_by(
                                os_type=os.os_type).filter_by(
                                    vendor=os.vendor).first()
                db_os.os_accuracy = os.accuracy  # update the accuracy
                if not os.name == '':
                    # get the most accurate OS match/accuracy to store it in
                    # the host table for easier access
                    if os.accuracy > tmp_accuracy:
                        tmp_name = os.name
                        tmp_accuracy = os.accuracy
            if os_nodes:  # if there was operating system info to parse
                if not tmp_name == '' and not tmp_accuracy == '0':
                    # update the current host with the most accurate OS match
                    db_host.os_match = tmp_name
                    db_host.os_accuracy = tmp_accuracy
            for p in h.all_ports():
                s = p.get_service()
                if not (s is None):
                    # fetch the service for this port
                    db_service = nmap_service.query.filter_by(
                        name=s.name).filter_by(product=s.product).filter_by(
                            version=s.version).filter_by(
                                extrainfo=s.extrainfo).filter_by(
                                    fingerprint=s.fingerprint).first()
                else:
                    db_service = None
                # fetch the port
                db_port = nmap_port.query.filter_by(
                    host_id=db_host.id).filter_by(port_id=p.portId).filter_by(
                        protocol=p.protocol).first()
                db_port.state = p.state
                if not (db_service is None):
                    # if there is some new service information, update it
                    db_port.service_id = db_service.id
                for scr in p.get_scripts():
                    # store the script results (note that existing script
                    # outputs are also kept)
                    db_script = nmap_script.query.filter_by(
                        script_id=scr.scriptId).filter_by(
                            port_id=db_port.id).first()
                    if not scr.output == '':
                        db_script.output = scr.output
            totalprogress += progress
            self.tick.emit(int(totalprogress))
        session.commit()
        self.db.dbsemaphore.release()  # we are done with the DB
        logger.info('\t[+] Finished in ' + str(time.time() - starttime) + ' seconds.')
        self.done.emit()
        # call the scheduler (if there is no terminal output it means we imported nmap)
        self.schedule.emit(parser, self.output == '')
    except Exception as err:
        logger.exception('[!] Error - {!r}'.format(err))
        self.done.emit()
def main():
    """Entry point for the SAT-based planner.

    Parses and grounds a PDDL domain/problem pair, then for each horizon in
    ``args.horizons`` builds a CNF encoding, runs the configured SAT solver,
    and extracts/validates a plan when the formula is satisfiable.  Always
    terminates via ``sys.exit()``.
    """
    start_time = time.time()

    def _remove_file(path):
        # Best-effort removal of a temporary file.  Replaces the original
        # os.system("rm " + path) calls, which were non-portable and unsafe
        # if the path contained shell metacharacters; also tolerates None
        # for files that were never created.
        if path:
            try:
                os.remove(path)
            except OSError:
                pass

    # Make sure the assignment path doesn't contain any spaces
    planner_path = os.path.abspath(__file__)
    assert ' ' not in planner_path, "Move your assignment from {} to another path that doesn't contain any spaces.".format(
        planner_path)

    print("Starting SAT-based planner...")
    print("Checking for plugins...")
    try:
        encoding_wrapper = EncodingWrapper()
        encoding_wrapper.read_encoding_list()
        solver_wrapper = SolverWrapper()
        solver_wrapper.read_solver_list()
    except (EncodingException, SolvingException) as e:
        print(e.message)
        sys.exit(1)
    print("Encodings registered: {}".format(len(encoding_wrapper.valid_encodings)))
    print("Solvers registered: {}".format(len(solver_wrapper.valid_solvers)))

    args = parse_cmd_line_args(encoding_wrapper.valid_encodings,
                               encoding_wrapper.default_encoding,
                               solver_wrapper.valid_solvers,
                               solver_wrapper.default_solver)
    if args is None:
        sys.exit(1)
    arg_processing_time = time.time()
    print("Command line arg processing time: {}".format(arg_processing_time - start_time))

    # Ensure that the tmp_dir exists
    try:
        os.makedirs(tmp_path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            print("Error: could not create temporary directory: {}".format(tmp_path))
            sys.exit(1)

    # Parse the input PDDL.
    # Pre-initialise the temp-file names so the cleanup in `finally` cannot
    # raise NameError when parsing fails before they are assigned (a latent
    # bug in the original code that masked the real error).
    pre_file_name = None
    ground_file_name = None
    try:
        parser = Parser(args.domain_file_name, args.problem_file)
        print("Parsing the PDDL domain...")
        parser.parse_domain()
        print("Parsing the PDDL problem...")
        parser.parse_problem()
        print("Simplifying the problem representation...")
        problem = parser.problem
        problem.simplify()
        problem.assign_cond_codes()
        end_parsing_time = time.time()
        print("Parsing time: {}".format(end_parsing_time - arg_processing_time))

        print("Grounding the problem...")
        pre_file_name = os.path.join(tmp_path, args.exp_name + PRE_SUFFIX)
        ground_file_name = os.path.join(tmp_path, args.exp_name + GROUND_SUFFIX)
        grounder = Grounder(problem, pre_file_name, ground_file_name)
        grounder.ground()
        end_grounding_time = time.time()
        print("Grounding time: {}".format(end_grounding_time - end_parsing_time))

        print("Simplifying the ground encoding...")
        problem.compute_static_preds()
        problem.link_groundings()
        problem.make_flat_preconditions()
        problem.make_flat_effects()
        problem.get_encode_conds()
        problem.make_cond_and_cond_eff_lists()
        problem.link_conditions_to_actions()
        problem.make_strips_conditions()
        problem.compute_conflict_mutex()
        end_linking_time = time.time()
        print("Simplify time: {}".format(end_linking_time - end_grounding_time))

        object_invariants = []
        if args.plangraph:
            print("Generating Plangraph invariants...")
            plangraph_preprocessor = PlangraphPreprocessor(problem)
            object_invariants = plangraph_preprocessor.run()
            if object_invariants is False:
                raise PreprocessingError('Cannot preprocess plangraph.')
        end_plangraph_time = time.time()
        if args.plangraph:
            print("Plangraph invariants time:", (end_plangraph_time - end_linking_time))

        strips_problem = problem.make_strips_problem()
    except (ParsingException, PreprocessingException, ProblemException) as e:
        print(e)
        sys.exit(1)
    finally:
        if args.remove_tmp:
            _remove_file(pre_file_name)
            _remove_file(ground_file_name)

    print("Planning...\n")
    try:
        for horizon in args.horizons:
            print("Step:", horizon)
            print("-------------------------------------------------")
            step_start_time = time.time()
            # Guarded so the error path below can clean up safely even when
            # the failure happens before the CNF file name is computed.
            cnf_file_name = None
            try:
                print("Generating base encoding:", args.encoding, "...")
                encoding_wrapper.instantiate_encoding(args.encoding, strips_problem)
                encoding = encoding_wrapper.encoding
                encoding.encode(horizon, args.exec_semantics, args.plangraph_constraints)
                end_encoding_base_time = time.time()
                print("Encoding generation time:", (end_encoding_base_time - step_start_time))

                print("Writing CNF file...")
                cnf_file_name = os.path.join(tmp_path, args.exp_name + "_" + str(horizon) + ".cnf")
                encoding.write_cnf(cnf_file_name)
                end_writing_cnf_time = time.time()
                print("Writing time:", (end_writing_cnf_time - end_encoding_base_time))
            except Exception:
                print("Exception while generating the CNF!\n")
                print(traceback.format_exc())
                _remove_file(cnf_file_name)
                sys.exit(0)

            if args.debug_cnf:
                print("Writing debug CNF...")
                encoding.write_debug_cnf(cnf_file_name + "_dbg")
            end_writing_dbg_cnf_time = time.time()
            if args.debug_cnf:
                print(("Writing time:", (end_writing_dbg_cnf_time - end_writing_cnf_time)))

            try:
                print("Solving...")
                solver_wrapper.instantiate_solver(args.solver, cnf_file_name,
                                                  tmp_path, args.exp_name,
                                                  args.time_out)
                (sln_res, sln_time, true_vars) = solver_wrapper.solver.solve()
                print("SAT" if sln_res else "UNSAT")
                print("Solution time: ", sln_time)
            except SolvingException as e:
                raise PlanningException(e.message, solving_error_code)
            finally:
                if args.remove_tmp:
                    _remove_file(cnf_file_name)
                    # The solver (and hence its solution file) may not exist
                    # if instantiation failed; getattr keeps cleanup silent.
                    _remove_file(getattr(getattr(solver_wrapper, 'solver', None),
                                         'sln_file_name', None))

            if sln_res:
                encoding.set_true_variables(true_vars)
                try:
                    print("Extracting the plan...")
                    encoding.build_plan(horizon)
                    # problem.make_plan_from_strips(encoding.plan)
                    plan = encoding.plan
                except Exception:
                    print("Exception while extracting the plan!\n")
                    print(traceback.format_exc())
                    sys.exit(0)

                output_file = None
                if args.output_file_name is not None:
                    try:
                        output_file = open(args.output_file_name, "w")
                    except OSError:
                        print("Error: could not open plan file! Not saving plan.")

                num_actions = 0
                print("Plan:")
                for step, s_actions in enumerate(plan):
                    for action in s_actions:
                        a_str = str(action)
                        print(str(step) + ": " + a_str)
                        if output_file is not None:
                            output_file.write(str(step) + ": " + a_str + "\n")
                        num_actions += 1
                if output_file is not None:
                    output_file.close()

                print("Simulating plan for validation.")
                sim_res, plan_cost = problem.simulate_strips_plan(strips_problem, plan)
                if sim_res:
                    print("Plan valid. {} actions.".format(num_actions))
                else:
                    raise PlanningException("INVALID PLAN!", solving_error_code)
                step_end_time = time.time()
                print("Step time: {}".format(step_end_time - step_start_time))
                break

            step_end_time = time.time()
            print("Step time: {}\n".format(step_end_time - step_start_time))
        end_time = time.time()
        print("Total time: {}\n".format(end_time - start_time))
    except PlanningException as e:
        print("Planning Error: {}\n".format(e.message))
        sys.exit(1)
    sys.exit(0)
from tokenizer import Tokenizer
from parsers import Parser

# Driver script: read sentences from stdin, run each one through the
# tokenizer and the parser, and count how many are accepted vs rejected.

tokenizer = Tokenizer()
parser = Parser()
accept_nb = reject_nb = 0
while True:
    # Fixed: the original loop had no exit condition, so end-of-input raised
    # an unhandled EOFError and the summary below was never printed cleanly.
    try:
        sentence = input()
    except EOFError:
        break
    if len(sentence) > 0:
        # Reset both state machines before processing the next sentence.
        parser.set_parser()
        tokenizer.set_tokenizer(sentence)
        check = True
        while not tokenizer.is_end():
            token = tokenizer.next()
            # print(token)
            if token['status'] == 'ERROR':
                # Lexical error: stop tokenizing; is_end() stays False, so
                # the sentence is counted as rejected below.
                break
            if not parser.parsing(token['status']):
                check = False
                break
        if tokenizer.is_end() and check and parser.is_accept():
            # print('accept')
            accept_nb += 1
        else:
            # print('reject')
            reject_nb += 1
print('accept: %d, reject: %d' % (accept_nb, reject_nb))
def __init__(self):
    """Create the two collaborators this object drives.

    ``pres`` is the Parser front end and ``proc`` the Processor; both are
    constructed once here and reused afterwards.
    """
    self.pres = Parser()
    self.proc = Processor()