def Getting_plug_ins_id(self):
    # chrome_options = webdriver.ChromeOptions()
    # chrome_options.add_extension('{}lib/Ghelper_1.4.6.crx'.format(self.root))
    # browser_driver = webdriver.Chrome()
    # browser_driver.get("https://www.baidu.com")
    # browser.get('http://ip138.com')
    if os.path.getsize('{}lib/id.json'.format(self.root)) <= 0:
        warning('Opening URL: chrome://extensions/')
        self.browser.get('chrome://extensions/')
        warning('Copy the extension id...')
        warning('Click "Details" to view the id...')
        warning('Example: chrome://extensions/?id=mcholjcecoiejoamfejfaadoefkcodok')
        warning('If the id has already been entered, press Enter to skip this step...')
        ipt1 = input('ID>')
        if ipt1:
            data1 = {'id': '{}'.format(ipt1)}
            self.Save_json(data=data1)
            info('ID saved...')
    info('ID already exists...')
    data2 = self.Read_json()
    i = 0
    for data3 in data2:
        if i == 0:
            if not os.path.getsize('{}lib/id.json'.format(self.root)) <= 0:
                data3 = json.loads(data3)
                # print('Extension id: {}'.format(data3['id']))
                return data3['id']
        i += 1

def main():
    args = argparse_()
    sftp = args.sFtp_config
    if sftp:
        config = sftp_config()
        config.main()
        info('SFTP service configuration finished.')

def requests_(self, content):
    try:
        self.Login_Google_CRX()
        if not os.path.getsize('{}lib/cookies.txt'.format(self.root)) == 0:
            cookies = self.Read_text('cookies.txt')
            for cookie in cookies:
                cookie = eval(cookie.strip())
                if cookie:
                    self.cookie = random.choice(cookie)
                    info(('cookie = ', self.cookie))
        time.sleep(10)
        self.browser.get('https://www.google.com')
        if self.cookie:
            info(('cookie2 = ', self.cookie))
            self.browser.add_cookie(self.cookie)
            self.browser.get('https://www.google.com')
        Search_G = self.browser.find_element_by_xpath(
            '//*[@id="tsf"]/div[2]/div/div[1]/div/div[1]/input').send_keys(content)
        Search_ENTER = self.browser.find_element_by_xpath(
            '//*[@id="tsf"]/div[2]/div/div[1]/div/div[1]/input').send_keys(Keys.ENTER)
    except Exception as e:
        # print(traceback.format_exc())
        if self.option_ != 'n':
            self.Verification_Handle()

def sock_read(sock):
    data = sock.recv(4096)
    if data:
        on_recv(data)
    else:
        lib.info("disconnect: {}", sock)
        sel.unregister(sock)
        sock.close()

def sock_connect(self, addr):
    sock = socket.create_connection(addr)
    self.sel.register(sock, selectors.EVENT_READ, self.sock_read)
    fd = sock.fileno()
    self.nodes[fd] = Node(sock)
    lib.info("connect to: {}({})", addr, fd)
    msg = Message("version", Version(0).tobytes())
    self.send_msg(fd, msg)

def debug(self, fd, cmd):
    lib.info("dbg: {}\n", cmd)
    handler = getattr(self, "debug_" + cmd, None)
    if not handler:
        err = "unknown debug cmd <{}>".format(cmd)
        lib.err(err)
        self.send_debug_msg(fd, err)
        return
    handler(fd)

def sock_read(self, sock):
    data = sock.recv(4096)
    fd = sock.fileno()
    if data:
        self.on_recv(fd, data)
    else:
        lib.info("disconnect: {}", fd)
        self.sel.unregister(sock)
        sock.close()

def deleteDoc(self, docName):
    if docName in self.docs:
        d = self.docs[docName]
        for tag in d.tags.values():
            os.remove(tag.path)
        os.rmdir(d.path)
        self.docs.pop(d.name)
        lib.info('Doc "' + docName + '" has been removed')
    else:
        lib.info('No such Doc named "' + docName + '"')

def on_recv(data):
    if slicedmsg and len(slicedmsg) > 0:
        data = slicedmsg + data
    while True:
        if not data or len(data) <= 0:
            break
        vs, size = VarStr.load(data)
        data = data[size:]
        lib.info("{}\n", vs.string.decode("utf-8"))

def sock_accept(self, sock):
    conn, addr = sock.accept()
    conn.setblocking(False)
    self.sel.register(conn, selectors.EVENT_READ, self.sock_read)
    _, port = conn.getsockname()
    debug = port == config.debug_port
    fd = conn.fileno()
    self.nodes[fd] = Node(conn, debug)
    lib.info("new connection: {}({}), debug: {}", addr, fd, debug)

def port_scan(ip, filename):
    """ nmap scan """
    try:
        green(f'Run cmd -> {cmd2.format(ip, libs.root, filename)}')
        libs.commands_(cmd=[cmd2.format(ip, libs.root, filename)])
        time.sleep(10)
        data1 = libs.commands_(cmd=[
            'sudo python2 {}lib/nmap_xml.py {}lib/nmap_xml/{}'.format(
                libs.root, libs.root, filename)
        ]).strip()
        # data1 = exec('data1 = ' + data1)
        data1 = eval(data1)
        foo.nScan_Result = data1
        for i in range(0, 2):
            for d1 in foo.nScan_Result[i]:
                if 'ip' in d1 and 'port' in d1 and 'state' in d1 and 'agreement' in d1:
                    ip = d1.get('ip')
                    port = d1.get('port')
                    state = d1.get('state')
                    agreement = d1.get('agreement')
                    if ip:
                        print('')
                        if state != 'closed' and state != 'filtered':
                            info(ip + ':' + port + ' /' + state + ' ' + '-' + agreement)
                            if port == '443' or port == '80':
                                w = open('{}lib/Nmap_Result/nScan_Result.txt'.format(libs.root), 'a+')
                                w.write('{"ip":"%s","port":"%s","state":"%s","agreement":"%s"}'
                                        % (ip, port, state, agreement))
                                w.write('\n')
                                warning('Writing to lib/batch/nScan_Result.txt ...')
                                w.close()
                        else:
                            if state == 'closed':
                                error(ip + ':' + port + ' /' + state + ' ' + '-' + agreement)
                            elif state == 'filtered':
                                warning(ip + ':' + port + ' /' + state + ' ' + '-' + agreement)
    except Exception as e:
        # error(traceback.format_exc())
        pass

def __init__(self, name, path):
    self.name = name
    self.path = path
    self.tags = {}
    lib.info('Initializing Doc ' + self.name + ' ' + self.path)
    self.currentTag = None
    for item in os.listdir(self.path):
        if os.path.isfile(os.path.join(self.path, item)) and os.path.splitext(item)[1] == '.mtf':
            t = tag.Tag(os.path.splitext(item)[0], os.path.join(self.path, item))
            self.tags[t.name] = t
            self.currentTag = t

def handle(self, fd, msg):
    msgtypes = {
        "version": "Version", "verack": "VerAck", "addr": "Addr",
        "inv": "Inv", "getdata": "GetData", "notfound": "NotFound",
        "getblocks": "GetBlocks", "getheaders": "GetHeaders",
        "tx": "Tx", "block": "Block", "headers": "Headers",
        "getaddr": "GetAddr", "mempool": "MemPool",
        "checkorder": "CheckOrder", "submitorder": "SubmitOrder",
        "reply": "Reply", "ping": "Ping", "pong": "Pong",
        "reject": "Reject", "filterload": "FilterLoad",
        "filteradd": "FilterAdd", "filterclear": "FilterClear",
        "merkleblock": "MerkleBlock", "alert": "Alert",
        "sendheaders": "SendHeaders", "feefilter": "FeeFilter",
        "sendcmpct": "SendCmpct", "cmpctblock": "CmpctBlock",
        "getblocktxn": "GetBlockTxn", "blocktxn": "BlockTxn"
    }
    cmd = msg.command
    if cmd not in msgtypes:
        lib.err("unknown command: <{}>({})", cmd, cmd.encode("utf-8"))
        return
    clsname = msgtypes[cmd]
    handler = getattr(self, "handle_" + clsname, None)
    if not handler:
        lib.err("no handler for <{}, {}>", cmd, clsname)
        return
    lib.info("<- <{}>", cmd)
    if clsname not in globals():
        lib.err("no class for <{}, {}>", cmd, clsname)
        return
    payload = globals()[clsname].load(msg.payload)
    if config.debug_enabled:
        payload.debug()
    return handler(fd, payload)

def __init__(self, name, path):
    self.name = name
    self.path = path
    try:
        f = open(self.path)
    except IOError:
        print('Failed to open file: ' + self.path)
        return None
    self.text = f.read()
    f.close()
    lib.info('Tag ' + self.name + ' [' + self.path + '] initialized')

def delete(self):
    # Delete the scan task
    try:
        r = requests.delete(url=self.server + '/scans/' + self.check_id(),
                            timeout=10, verify=False, headers=self.header)
        if r.status_code == 204:
            print('url = ', self.server + '/scans/' + self.check_id())
            print('status_code = ', r.status_code)
            # print(self.G + '[-] OK, task deleted...' + self.W)
            info('Task deleted')
    except Exception as e:
        pass

def __init__(self, workPath, win):
    if not os.path.exists(workPath):
        os.mkdir(workPath)
    self.workPath = workPath
    self.docs = {}
    self.win = win
    self.currentDoc = None
    lib.info('Initializing File DataBase')
    for item in os.listdir(self.workPath):
        if os.path.isdir(os.path.join(self.workPath, item)):
            doc = document.Doc(item, os.path.join(self.workPath, item))
            self.docs[doc.name] = doc
    lib.info('Done')

def Find_Keyword__(self, keyword1, keyword2):
    self.Website_search(keyword=keyword1)
    with open('{}lib/Search_Url.txt'.format(root), 'r') as r_:
        for link in r_.readlines():
            r = requests.get(link.strip())
            html = r.text
            scode = r.status_code
            if scode == 200:
                com_ = re.compile(keyword2)
                if com_.findall(html):
                    info('Url -> {}'.format(link))
                    with open('{}lib/Search_Url_.txt'.format(root), 'a+') as w:
                        w.write(link + '\n')

def r():
    if eXit:
        return False
    i = 1
    for target1 in datas:
        if i <= 5:
            if self.option:
                info(('Add scan target -> ', target1))
            self.add_task(target=target1, rule=rule)
        else:
            i = 0
            time.sleep(600)
        i += 1

def changeFile(self):
    item = self.ui.treeWidget.currentItem()
    lib.info('changeFile')
    if item:
        if item.parent():
            self.fileDB.setCurrentDoc(item.parent().text(0))
            self.fileDB.setCurrentTag(item.text(0))
            f = open(self.fileDB.currentDoc.currentTag.path)
            lib.info(self.fileDB.currentDoc.currentTag.path)
            self.ui.textEdit.setText(f.read())
            f.close()
        else:
            self.fileDB.setCurrentDoc(item.text(0))

def Verification_Handle(self):
    warning('Google verification detected...')
    if os.path.getsize('{}lib/cookies.txt'.format(self.root)) <= 3000:
        ipt1 = input('Finish the verification manually [y|n]')
        if ipt1:
            if ipt1 == 'y':
                cookies = self.browser.get_cookies()
                print('cookies = ', cookies)
                self.Save_text_('cookies.txt', cookies)
                info('Resuming crawl...')
            if ipt1 == 'n':
                self.option_ = 'n'
                return False
    return True

def main():
    """ Main function """
    cfg = parser_cfg()
    const.LOGIN_DATA['email'] = cfg['username']
    const.LOGIN_DATA['password'] = cfg['password']
    # Start Requests session
    session = requests.Session()
    session.headers.update(const.HEADERS)
    info("Login into [{0}]".format(const.BASE_URL))
    session.post(const.BASE_URL, data=const.LOGIN_DATA)
    info("Looking for new ebook [{0}]".format(const.LOOKUP_URL))
    lookup = session.get(const.LOOKUP_URL)
    claim_url = ""
    for line in lookup.iter_lines():
        match_obj = re.match(r'.*(freelearning-claim/[0-9]+/[0-9]+)', line)
        if match_obj:
            claim_url = const.BASE_URL + "/" + match_obj.group(1)
            break
    info("Claiming ebook from [{0}]".format(claim_url))
    session.get(claim_url)
    book_code = claim_url.split("/")[-2]
    download_url = const.BASE_URL + "/ebook_download/" + book_code + "/pdf"
    info("Downloading ebook from [{0}]".format(download_url))
    download_ebook = session.get(download_url)
    with open("{0}.pdf".format(book_code), "wb") as pdfile:
        pdfile.write(download_ebook.content)
    sysexit("Done", color='green')

def delete_(self):
    c = 0
    # print("[*] Start clearing tasks")
    warning('Start clearing tasks')
    while True:
        result = requests.get(self.server + "/targets?c=" + str(c),
                              headers=self.header, timeout=30, verify=False)
        results = json.loads(result.content)
        c = c + 100
        if results['targets'] == []:
            info('All tasks cleared')
            break
        for s in results["targets"]:
            r = requests.delete(url=self.server + '/targets/' + s['target_id'],
                                timeout=10, verify=False, headers=self.header)
            warning("Deleting target_id: %s" % s['target_id'])

def Login_Google_CRX(self):
    try:
        self.browser.get(self.login_url)
        self.browser.minimize_window()
        email = username
        password = passwd
        time.sleep(10)
        log_email = self.browser.find_element_by_id('email').send_keys(email)
        log_password = self.browser.find_element_by_id('password').send_keys(password)
        log_password = self.browser.find_element_by_id('password').send_keys(Keys.ENTER)
        info('Login succeeded...')
    except Exception as e:
        error(('Login_Google_CRX = ', traceback.format_exc()))
        pass

def main():
    video_id = prompt.query("Enter the video ID: ")
    title_url = const.BASEURL.format(video_id)
    video_resp = requests.get(title_url, headers=const.HEADERS)
    if video_resp.status_code != requests.codes.ok:
        err("Failed to get URL")
        return
    result = video_resp.json()
    content_info = result['resultObj']['contentInfo'][0]
    # Video Details
    vid_title = content_info['contentTitle'].replace(" ", "-")
    vid_episode = content_info['episodeNumber']
    vid_ep_title = content_info['episodeTitle'].replace(" ", "-")
    info("You are downloading \"{0}\"".format(vid_title))
    # Downloading actual content now
    file_url = const.CDNURL.format(video_id)
    cdn_resp = requests.get(file_url, headers=const.HEADERS)
    if cdn_resp.status_code != requests.codes.ok:
        err("Failed to get info about content")
        return
    cdn_content = cdn_resp.json()['resultObj']
    src = cdn_content['src']
    file_url = src.replace("https", "hlsvariant://https")
    file_url = file_url.replace("2000,_STAR.", "2000,3000,4500,_STAR.")
    if vid_title:
        localFile = str(vid_title)
    if vid_episode:
        localFile += "_" + str(vid_episode)
    if vid_ep_title:
        localFile += "_" + str(vid_ep_title)
    localFile += ".mp4"
    info("Downloading with filename: {0}".format(localFile))
    video_options = "1080p (best), 180p (worst), 234p, 360p, 404p, 720p, 900p"
    quality = prompt.query("Enter quality {0}: ".format(video_options))
    choice = prompt.query("Enter 'S' to Stream and 'D' to Download ")
    command = "livestreamer \"{0}\" \"{1}\" ".format(file_url, quality)
    if choice == "D" or choice == 'd':
        command += "-o {0}".format(localFile)
    elif choice == "S" or choice == 's':
        info("Streaming ...")
    else:
        err("Invalid command")
        return
    info("Starting system command : {0}".format(command))
    os.system(command)

def parser_cfg():
    """ Helper function to parse the configuration file """
    config_file = "packt_config.ini"
    config_path = os.path.join(os.path.expanduser('~'), config_file)
    config = ConfigParser.ConfigParser()
    if not os.path.exists(config_path):
        info("Trying local configuration file")
        config_path = os.path.join(os.getcwd(), config_file)
        if not os.path.exists(config_path):
            sysexit("Unable to find configuration file {0}".format(config_path))
    if const.DEBUG:
        debug("Using configuration file from {0}".format(config_path))
    config.read(config_path)
    return dict(config.items('packt'))

def getVideoFileUrl(video_id):
    title_url = const.BASEURL.format(video_id)
    video_resp = requests.get(title_url, headers=const.HEADERS)
    if video_resp.status_code != requests.codes.ok:
        err("Failed to get URL")
        return
    result = video_resp.json()
    content_info = result['resultObj']['contentInfo'][0]
    # Video Details
    vid_title = content_info['contentTitle'].replace(" ", "-")
    vid_episode = content_info['episodeNumber']
    vid_ep_title = content_info['episodeTitle'].replace(" ", "-")
    info("You are downloading \"{0}\"".format(vid_title))
    # Downloading actual content now
    file_url = const.CDNURL.format(video_id)
    print(file_url)
    cdn_resp = requests.get(file_url, headers=const.HEADERS)
    if cdn_resp.status_code != requests.codes.ok:
        err("Failed to get info about content")
        return
    cdn_content = cdn_resp.json()['resultObj']
    src = cdn_content['src']
    file_url = src.replace("http", "hlsvariant://http")
    file_url = file_url.replace("2000,_STAR.", "2000,3000,4500,_STAR.")
    info(file_url)
    if vid_title:
        localFile = str(vid_title)
    if vid_episode:
        localFile += "_" + str(vid_episode)
    if vid_ep_title:
        localFile += "_" + str(vid_ep_title)
    localFile += ".mp4"
    localFile = localFile.replace("'", '-')
    localFile = localFile.replace('"', '-')
    localFile = localFile.replace('(', '-')
    localFile = localFile.replace(')', '-')
    return file_url, localFile

def main(video_id=None, quality=None, saveOrStream='D'):
    if not video_id:
        idstr = prompt.query("Enter the video ID: ")
        video_id = getVideoId(idstr)
    if not video_id:
        err('invalid video id entered')
        return
    try:
        file_url, localFile = getVideoFileUrl(video_id)
        info("Downloading with filename: {0}".format(localFile))
        if not quality:
            video_options = "1080p (best), 180p (worst), 234p, 360p, 404p, 720p, 900p"
            quality = prompt.query("Enter quality {0}: ".format(video_options))
        if not saveOrStream:
            choice = prompt.query("Enter 'S' to Stream and 'D' to Download ")
        else:
            choice = saveOrStream
        command = "streamlink \"{0}\" \"{1}\" ".format(file_url, quality)
        if choice == "D" or choice == 'd':
            command += "-o \"{0}\"".format(localFile)
        elif choice == "S" or choice == 's':
            info("Streaming ...")
        else:
            err("Invalid command")
            return
        info("Starting system command : {0}".format(command))
        os.system(command)
    except Exception as e:
        err(str(e))

def setCurrentDoc(self, docName):
    if docName in self.docs:
        self.currentDoc = self.docs[docName]
        lib.info('Current Doc has changed to "' + self.currentDoc.name + '"')
        lib.info('Current Tag has changed to the last tag in the folder')
    else:
        lib.info('No such Doc named "' + docName + '"')

def start(self, appId, commandList, context):
    # Stop previous instances if any
    self.stop(appId)
    # Workaround on Windows machines: the environment path is not searched to find
    # the executable, hence we need to do this manually.
    if sys.platform == 'win32':
        fullPath = lib.which(commandList[0], cwd=context["cwd"])
        if fullPath:
            commandList[0] = fullPath
    # Start a subprocess with the executable information
    process = subprocess.Popen([sys.executable, __file__, appId, self.config["log"]] + commandList,
                               stdin=None, stdout=None, stderr=None, shell=False,
                               cwd=context["cwd"])
    # 2s timeout before checking the status (one second is too low).
    # This is to give enough time for the process to start.
    time.sleep(2)
    if process.poll() is not None and process.returncode != 0:
        raise Exception("Unable to start daemon '%s' in '%s'" % (" ".join(commandList), context["cwd"]))
    lib.info("Started daemon '%s'" % (appId))

def stop(self, appId=None):
    # Get the list of running processes
    childrenPids = set()
    runningProcesses = Daemon.getRunningProcesses(appId, childrenPids=childrenPids)
    # Delete the running processes if any
    for pid, process in runningProcesses.items():
        lib.info("Stopping daemon '%s' with pid %i" % (process["id"], process["ppid"]))
        # Stop the parent process first to make sure it will not restart the child process
        if process["ppid"]:
            try:
                Daemon.killProcess(process["ppid"])
            except:
                pass  # Ignore errors as the process might be gone by then
    # Delete all children PIDs if still alive
    if childrenPids:
        lib.info("Stopping children with pid(s): %s" % (", ".join([str(pid) for pid in childrenPids])))
        for pid in childrenPids:
            # Stop the process itself
            try:
                Daemon.killProcess(pid)
            except:
                pass  # Ignore errors as the process might be gone by then

def DNS_Query_Interface(self, domain):
    """ DNS interface query """
    # threadLock.acquire()
    # Method: add_cookie(cookie={'':'', '':''})
    try:
        datas_d1 = []
        datas_d2 = []
        warning(('Query Domain -> ' + domain))
        selenium_.browser_.get(dns_query1.format(domain))
        time.sleep(7)
        htmldoc = selenium_.browser_.find_element_by_xpath('/html/body/pre').text
        data = loads(htmldoc)
        # info(data['RDNS'])
        try:
            if 'FDNS_A' in data:
                data1 = data['FDNS_A']
                for data_1 in data1:
                    find1 = re.findall(regular(1), data_1)
                    find2 = re.findall(regular(2), data_1)
                    find3 = re.findall(regular(3), data_1)
                    d1 = data_1.split(',')
                    if find1:
                        ips = d1[0]
                        if find2 or find3:
                            domains = d1[1]
                            if ips or domains:
                                datas_d1.append([ips, domains])
        except:
            pass
        try:
            if 'RDNS' in data:
                data2 = data['RDNS']
                for data_2 in data2:
                    find1 = re.findall(regular(1), data_2)
                    find2 = re.findall(regular(2), data_2)
                    find3 = re.findall(regular(3), data_2)
                    d2 = data_2.split(',')
                    if find1:
                        ips = d2[0]
                        if find2 or find3:
                            domains = d2[1]
                            if ips or domains:
                                datas_d2.append([ips, domains])
        except:
            pass
        foo.Dns_Qery = [datas_d1, datas_d2]
        for d1 in foo.Dns_Qery[0]:
            ip = d1[0]
            if ip:
                domain = d1[1]
                thread1 = threading.Thread(target=self.port_scan, args=(ip, ip))
                time.sleep(1)
                thread1.start()
                info('Domain: {} -> resolved IP: {}'.format(domain, ip))
        for d1 in foo.Dns_Qery[1]:
            ip = d1[0]
            if ip:
                domain = d1[1]
                thread2 = threading.Thread(target=self.port_scan, args=(ip, ip))
                time.sleep(1)
                thread2.start()
                info('Domain: {} -> resolved IP: {}'.format(domain, ip))
        selenium_.browser_.quit()
        return True
    except Exception as e:
        selenium_.browser_.quit()
        error(traceback.format_exc())
        i = 0
        while True:
            if i == 3:
                if self.DNS_Query_Interface(domain=domain):
                    break
                i = 0
            i += 1

def send_debug_msg(self, fd, string):
    node = self.nodes[fd]
    data = VarStr(string.encode("utf-8")).tobytes()
    sent = node.sock.send(data)
    lib.info("-> dbg:{}/{}", sent, len(data))

def sock_connect():
    global sock
    sock = socket.create_connection(("localhost", config.debug_port))
    sel.register(sock, selectors.EVENT_READ, sock_read)
    lib.info("connect to: {}", sock)

def send_msg(self, fd, msg):
    node = self.nodes[fd]
    data = msg.tobytes()
    sent = node.sock.send(data)
    lib.info("-> {}:{}/{}", msg.command, sent, len(data))

def deleteTag(self, tagName):
    if tagName in self.tags:
        os.remove(self.tags[tagName].path)
        self.tags.pop(tagName)
        lib.info(u'Tag "' + tagName + u'" has been removed')

def Google_Search(self, keyword, number=26, time_sleep=3):
    """
    keyword: search keyword.
    number: number of pages.
    data[0] = Title
    data[1] = Link
    """
    # mutex.release()
    # thread2 = threading.Thread(target=self.requests_, args=())
    # thread1 = threading.Thread(target=self.Config_chromedriver())
    # thread2 = threading.Thread(target=self.requests_())
    # thread1.start()
    # thread2.start()
    try:
        # self.browser_.quit()
        result1 = []
        self.requests_(keyword)
        try:
            i = 0
            while True:
                # for i1 in range(2, 12):
                info('Page {}'.format(i + 1))
                if i + 1 < 27:
                    if i != 0:
                        if i + 1 < number + 1:
                            try:
                                time.sleep(time_sleep)
                                elements = self.browser.find_element_by_xpath(
                                    '//*[@id="pnnext"]/span[2]')
                                # time.sleep(1)
                                elements.click()
                                # time.sleep(3)
                            except Exception as e:
                                try:
                                    if self.option_ != 'n':
                                        self.Verification_Handle()
                                except Exception as e:
                                    pass
                                    # print(traceback.format_exc())
                                # print(traceback.format_exc())
                if i + 1 == number + 1:
                    # self.browser.close()
                    break
                if i + 1 == 27:
                    # self.browser.close()
                    break
                time.sleep(0.5)
                for i2 in range(1, 11):
                    try:
                        # Title
                        elements1 = self.browser.find_element_by_xpath(
                            '//*[@id="rso"]/div/div/div[{}]/div/div/div[1]/a[1]/h3'.format(i2))
                        # Link
                        elements2 = self.browser.find_element_by_xpath(
                            '//*[@id="rso"]/div/div/div[{}]/div/div/div[1]/a[1]'.format(i2)
                        ).get_attribute('href')
                        blue('Title ==> ' + elements1.text)
                        print_('Link ==> ' + elements2)
                        result1.append([elements1.text, elements2])
                    except Exception as e:
                        # print(traceback.format_exc())
                        pass
                # info(('Next page...'))
                i += 1
            # self.browser.quit()
            return result1
        except Exception as e:
            print(traceback.format_exc())
            pass
    except Exception as e:
        print(traceback.format_exc())
        pass

def setCurrentTag(self, tagName):
    if tagName in self.currentDoc.tags:
        self.currentDoc.currentTag = self.currentDoc.tags[tagName]
        lib.info('Current Tag has changed to "' + self.currentDoc.currentTag.name +
                 '" in Doc "' + self.currentDoc.name + '"')
    else:
        lib.info('No such Tag named "' + tagName + '" in Doc "' + self.currentDoc.name + '"')

def sovpile(inst):
    """
    Takes instructions and writes the Python equivalent in a new file
    named the same as the .sov one but with a .py extension.

    inst : instructions from the clean() function
    """
    num_bits = 0
    file0 = open(sys.argv[1].split(".")[0] + ".py", "w+")
    file0.write("import sys\n")
    file0.write("if sys.argv[1] == \"-m\":\n")
    for inst_name in inst:
        if inst_name == "in":
            sov = True
            argv_num = 2
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst[pos] == "in.":
                    sov = False
                else:
                    file0.write(lib.varinput(inst[pos], argv_num))
                pos = pos + 1
                argv_num = argv_num + 1
            num_bits = argv_num - 3
        elif inst_name == "var":
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst[pos] == "var.":
                    sov = False
                else:
                    if "*" in inst[pos]:
                        var_name = inst[pos].split("*")
                        file0.write(lib.var(var_name[1], 1))
                    else:
                        file0.write(lib.var(inst[pos], 0))
                pos = pos + 1
        elif "not" in inst_name:
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst_name == "not.":
                    sov = False
                else:
                    if pos + 2 < len(inst):
                        if inst[pos + 2] == "not.":
                            lib.info("Data correct for " + inst_name)
                            var0 = inst[pos]
                            result = inst[pos + 1]
                            file0.write(lib.dnot(var0, result))
                            sov = False
        elif "and" in inst_name and "nand" not in inst_name:
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst_name == "and.":
                    sov = False
                else:
                    if pos + 3 < len(inst):
                        if inst[pos + 3] == "and.":
                            lib.info("Data correct for " + inst_name)
                            var0 = inst[pos]
                            var1 = inst[pos + 1]
                            result = inst[pos + 2]
                            file0.write(lib.dand(var0, var1, result))
                            sov = False
                pos = pos + 1
        elif "nand" in inst_name:
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst_name == "nand.":
                    sov = False
                else:
                    if pos + 3 < len(inst):
                        if inst[pos + 3] == "nand.":
                            lib.info("Data correct for " + inst_name)
                            var0 = inst[pos]
                            var1 = inst[pos + 1]
                            result = inst[pos + 2]
                            file0.write(lib.dnand(var0, var1, result))
                            sov = False
                pos = pos + 1
        elif "or" in inst_name and "nor" not in inst_name:
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst_name == "or.":
                    sov = False
                else:
                    if pos + 3 < len(inst):
                        if inst[pos + 3] == "or.":
                            lib.info("Data correct for " + inst_name)
                            var0 = inst[pos]
                            var1 = inst[pos + 1]
                            result = inst[pos + 2]
                            file0.write(lib.dor(var0, var1, result))
                            sov = False
                pos = pos + 1
        elif "nor" in inst_name:
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst_name == "nor.":
                    sov = False
                else:
                    if pos + 3 < len(inst):
                        if inst[pos + 3] == "nor.":
                            lib.info("Data correct for " + inst_name)
                            var0 = inst[pos]
                            var1 = inst[pos + 1]
                            result = inst[pos + 2]
                            file0.write(lib.dnor(var0, var1, result))
                            sov = False
                pos = pos + 1
        elif inst_name == "print":
            sov = True
            pos = lib.position(inst_name, inst) + 1
            while sov:
                if inst[pos] == "print.":
                    sov = False
                else:
                    file0.write(lib.printvar(inst[pos]))
                pos = pos + 1
    ending = """
elif sys.argv[1] == "-a":
    try:
        import truth_table
    except ImportError:
        print("ERROR: truth_table.py couldn't be found, please use manual (-m) mode.")
        exit()
    truth_table.gen_truth({0}, sys.argv[0])
"""
    file0.write(ending.format(num_bits))