def rcompile_and_find(self, data, filename):
    '''Load QREGEX detection rules from filename and run each one against
    self.wordsstripped, appending a summary dict to data for every rule
    that reaches its required number of matches.'''
    with copen(filename, "r", encoding='utf8') as rules_file:
        for rule in loads(rules_file.read()):
            # any malformed rule is skipped silently (project-wide helper)
            with ignore_excpetion(Exception):
                if "Type" not in rule or "QREGEX" not in rule["Type"]:
                    continue
                hits = []
                match_count = 0
                for pattern in rule["Detection"]:
                    # "Normal" rules match anywhere; others are word-bounded
                    if rule["Options"]["Word"] == "Normal":
                        expr = r"{}".format(pattern)
                    else:
                        expr = r"\b{}\b".format(pattern)
                    found = rsearch(rcompile(expr, rule["Options"]["Flag"]), self.wordsstripped)
                    if found is not None:
                        hits.append(found.group())
                        match_count += 1
                if hits and match_count >= rule["Options"]["Required"]:
                    data.append({"Matched": match_count,
                                 "Required": rule["Options"]["Required"],
                                 "Behavior": rule["Name"],
                                 "Detected": ', '.join(hits)})
def get_tweets(self, path):
    '''Get Tweets by scrolling down; optionally download embedded photos.

    Expands the page, stores a PDF snapshot, and when the 'Photos' option
    is on, downloads every non-avatar/non-emoji <img> and records it in
    photos.csv / photos.json.
    '''
    path_no_ext = self.storage.modpath(path, 'tweets')
    self.chrome.expand_page(path_no_ext=path_no_ext,
                            limit=self.options['limitPages'])
    self.chrome.page_pdf(path_no_ext)
    if self.options['Photos']:
        cnt = 1
        pinfo = []  # one dict per downloaded photo (file, time, url)
        for html in self.chrome.get_outer_html(
                'TagName', 'img'):  # get all embedded media
            # skip avatars and emoji images, keep real photos
            if rsearch('class="avatar', html) is None and rsearch(
                    'class="Emoji', html) is None:
                url = self.ct.src(html)
                fname = 'photo_%05d%s' % (cnt, self.ct.ext(url))
                try:  # best effort: a failed download must not abort the scrape
                    self.storage.download(url, path, fname)
                except Exception:  # was a bare except: would swallow KeyboardInterrupt
                    continue
                cnt += 1
                pinfo.append(
                    {  # store counter, media type and url to media info list
                        'file': fname,
                        'time': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
                        'url': url
                    })
        if pinfo:
            self.storage.write_dicts(pinfo, ('file', 'time', 'url'), path,
                                     'photos.csv')
            self.storage.write_json(pinfo, path, 'photos.json')
def get_profile_name(self, html):
    '''Extract the display name from a profile-link HTML snippet.

    Returns the text inside the anchor, or 'undetected' when no pattern
    matches. (Fix: identity comparison `!= None` replaced by `is not None`.)
    '''
    # simple case: plain text directly inside the anchor: ">Name</a>"
    m = rsearch('>[^<]+</a>', html)
    if m is not None:
        return m.group()[1:-4]  # strip leading ">" and trailing "</a>"
    # name interrupted by one <span> (badge/emoji); strip the inner tags
    m = rsearch('>[^<]+<span[^>]*>[^<]+</span>[^<]*</a>', html)
    if m is not None:
        return rsub('<[^>]+>', '', m.group()[1:-4])
    return 'undetected'
def rm_search(self):
    '''Remove search filters etc. from the current page.

    (Fix: identity comparison `!= None` replaced by `is not None`.)
    '''
    # capture the class attribute of the grid cell that holds the sidebar
    m = rsearch('<div class="Grid-cell [^"]+',
                self.chrome.get_inner_html_by_id('page-container'))
    self.chrome.rm_outer_html('ClassName', 'SearchNavigation')
    if m is not None:
        # m.group()[12:] drops the '<div class="' prefix (12 chars),
        # leaving the raw class value to blank out
        self.chrome.set_outer_html('ClassName', m.group()[12:], 0, '')
async def translate(message, client, arguments):
    '''Translate `arguments` via the Google Translate API and post the result.

    An optional "--xx" flag in the input selects the target language;
    otherwise the module-level default `translate_to_lang` is used.
    (Fix: regex literals made raw strings — `"\\w"` in a plain string is an
    invalid escape sequence and a DeprecationWarning.)
    '''
    # check if the input contains a "--xx" target-language flag
    try:
        popped = rsearch(r"--([a-zA-Z0-9])\w+", arguments).group()
    except AttributeError:
        # no flag present: translate the whole text to the default target
        google = quote(str(arguments))
        language = translate_to_lang
    else:
        # flag found: strip it from the text and use it as the target
        google = quote(str(rsub(r"--([a-zA-Z0-9])\w+", "", arguments)))
        language = popped[2:]
    # create and fetch the API request
    query = "https://translation.googleapis.com/language/translate/v2?key=%s&target=%s&q=%s" % (
        google_api, language, google)
    response = loads(rget(query).text)
    # try to build the reply message
    try:
        detectedlanguage = response["data"]["translations"][0][
            "detectedSourceLanguage"]
        translatedtext = response["data"]["translations"][0]["translatedText"]
        letter = ":cloud: **| " + detectedlanguage.upper(
        ) + " -> " + language.upper() + " `" + translatedtext + "`**"
    # if the reply can't be built, report an invalid language target
    except KeyError:
        letter = ":cloud: **| Invalid language target!**"
    # send the message
    await client.send_message(message.channel, letter)
def find_encryption_patterns(self, data, domains):
    '''Scan each domain string for a hex blob (per self.detectionhex) and
    classify it by length as a known hash digest, appending a record to data.'''
    # digest length -> algorithm name; anything else is plain "HEX"
    length_to_name = {32: "md5", 40: "sha1", 64: "sha256", 128: "sha512"}
    for entry in domains:
        url = entry["domain"]
        match = rsearch(self.detectionhex, url)
        if match is None:
            continue
        blob = match.group()
        data.append({
            "Type": length_to_name.get(len(blob), "HEX"),
            "Detected": blob,
            "URL": url
        })
def getSenderAddress(self, mail):
    '''Return the address found inside angle brackets of mail.fr, or None.'''
    match = rsearch("<(.*?)>", mail.fr)
    return match.group(1) if match else None
def search(self, pattern, string):
    'Makes re.search usable'
    # tolerate a missing haystack
    if string is None:
        return None
    match = rsearch(pattern, string)
    # hand back the matched text rather than the match object
    return None if match is None else match.group()
def terminator(self):
    '''Check the date of posts to decide whether to abort scrolling.

    Returns True as soon as any <abbr> timestamp is at or before
    self.stop_utc; False when no limit is set or no post is old enough.
    (Fix: bare `except: pass` was used as control flow for a failed regex
    match; replaced by an explicit None check.)
    '''
    if self.stop_utc <= 0:  # no time limit configured
        return False
    for snippet in self.chrome.get_outer_html('TagName', 'abbr'):
        m = rsearch(' data-utime="[0-9]+" ', snippet)
        if m is None:  # abbr tag without a unix timestamp
            continue
        # slice off the ' data-utime="' prefix (13 chars) and trailing '" '
        if int(m.group()[13:-2]) <= self.stop_utc:
            return True
    return False
def capture_logic(packet):
    '''Per-packet sniffer callback.

    NOTE(review): this is a nested function — it closes over `self` and
    `_q_s` from the enclosing scope (presumably the sniffer object and a
    module-level server/logger singleton; confirm against the full file).
    It extracts every layer's fields and payloads, logs credential-looking
    payloads, then logs traffic according to `_q_s.method`, and answers
    SYN probes with a forged SYN/ACK.

    NOTE(review): `_raw` and `_hex` are assigned but never used below.
    '''
    _layers,hex_payloads,raw_payloads,_fields,_raw,_hex = [],{},{},{},'None','None'
    _layers = list(self.get_layers(packet))
    # collect fields and payloads of every layer; scan raw payloads
    for layer in _layers:
        try:
            _fields[layer] = packet[layer].fields
            if "load" in _fields[layer]:
                raw_payloads[layer] = _fields[layer]["load"]
                hex_payloads[layer] = hexlify(_fields[layer]["load"])
                # self.common appears to be a credentials pattern — TODO confirm
                if rsearch(self.common,raw_payloads[layer]):
                    _q_s.logs.info(["sniffer",{'action':'creds_check',"payload":raw_payloads[layer]}])
        except Exception as e:
            # per-layer failures must not stop processing of other layers
            _q_s.logs.error(["errors",{'error':'capture_logic',"type":"error -> "+repr(e)}])
    try:
        if _q_s.method == "ALL":
            # log every packet with all of its layers/fields/payloads
            try:
                _q_s.logs.info(["sniffer",{'action':'all','ip':_q_s.current_ip,'mac':_q_s.current_mac,'layers':_layers,'fields':_fields,"payload":hex_payloads}])
            except Exception as e:
                _q_s.logs.error(["errors",{'error':'capture_logic',"type":"error -> "+repr(e)}])
        elif _q_s.method == "TCPUDP":
            # only inbound packets (not our own) that actually carry a payload
            if packet.haslayer('IP') and len(hex_payloads) > 0 and packet['IP'].src != _q_s.current_ip:
                if packet.haslayer('TCP'):
                    try:
                        _q_s.logs.info(["sniffer",{'action':'tcppayload','ip':_q_s.current_ip,'mac':_q_s.current_mac,'src_ip':packet['IP'].src,'src_port':packet['TCP'].sport,'dst_ip':packet['IP'].dst,'dst_port':packet['TCP'].dport,"raw_payload":raw_payloads,"payload":hex_payloads}])
                    except Exception as e:
                        _q_s.logs.error(["errors",{'error':'capture_logic',"type":"error -> "+repr(e)}])
                elif packet.haslayer('UDP'):
                    try:
                        _q_s.logs.info(["sniffer",{'action':'udppayload','ip':_q_s.current_ip,'mac':_q_s.current_mac,'src_ip':packet['IP'].src,'src_port':packet['UDP'].sport,'dst_ip':packet['IP'].dst,'dst_port':packet['UDP'].dport,"raw_payload":raw_payloads,"payload":hex_payloads}])
                    except Exception as e:
                        _q_s.logs.error(["errors",{'error':'capture_logic',"type":"error -> "+repr(e)}])
        # inbound ICMP: log type/code plus a human-readable description
        if packet.haslayer('IP') and packet.haslayer('ICMP') and packet['IP'].src != _q_s.current_ip:
            _q_s.logs.info(["sniffer",{'action':'icmp','ip':_q_s.current_ip,'mac':_q_s.current_mac,'src_ip':packet['IP'].src,'dst_ip':packet['IP'].dst,'ICMP_Code':packet['ICMP'].code,'ICMP_Type':packet['ICMP'].type,'ICMP_MSG':self.find_ICMP(packet['ICMP'].type,packet['ICMP'].code)}])
        # inbound bare SYN (flags == 2) looks like a port scan:
        # log it and reply with a forged SYN/ACK
        if packet.haslayer('IP') and packet.haslayer('TCP') and packet['IP'].src != _q_s.current_ip:
            if packet['TCP'].flags == 2:
                _q_s.logs.info(["sniffer",{'action':'tcpscan','ip':_q_s.current_ip,'mac':_q_s.current_mac,'src_ip':packet['IP'].src,'src_port':packet['TCP'].sport,'dst_ip':packet['IP'].dst,'dst_port':packet['TCP'].dport,"raw_payload":raw_payloads,"payload":hex_payloads}])
                send(IP(dst=packet['IP'].src, src=packet['IP'].dst) / TCP(dport=packet['TCP'].sport, sport=packet['TCP'].dport, ack=(packet['TCP'].seq + 1), flags='SA'),verbose=False)
    except Exception as e:
        _q_s.logs.error(["errors",{'error':'capture_logic',"type":"error -> "+repr(e)}])
    stdout.flush()
def rkhunter_version_check():
    '''Run `rkhunter --version` and return the detected version string.

    Exits the process with status 255 when the version cannot be parsed or
    does not match WORKING_VERSION. (Fixes: index-based `range(len(...))`
    loop replaced by direct iteration; regex made a raw string; stale
    commented-out pattern removed.)
    '''
    cp = srun([RKHUNTER_PATH, '--version'], stdout=stdPIPE)
    for raw_line in cp.stdout.decode('utf-8').split('\n'):
        line = raw_line.strip()
        if not line:
            continue
        vmatch = rsearch(r'^Rootkit Hunter ([0-9.]+?)$', line)
        if vmatch:
            rkhunter_version = vmatch.groups()[0]
            if rkhunter_version != WORKING_VERSION:
                print('Incompatible version found! Aborting.')
                sexit(255)
            return rkhunter_version
    print('Unable to identify RKHunter Version! Aborting.')
    sexit(255)
    return False  # defensive; unreachable since sexit raises SystemExit
def navigate(self, url):
    '''Navigate to given URL. Open Chrome/Chromium and/or login if needed.

    Retries up to 10 times, polling up to 10 seconds per attempt for an
    <img> tag to appear in the 'content' element, re-logging-in between
    attempts. Raises Exception when all attempts fail.
    (Fixes: bare `except:` narrowed to `except Exception`; `!= None`
    replaced by `is not None`.)
    '''
    if not self.chrome.is_running():
        self.logger.debug('Facebook: Chrome/Chromium is not running!')
        self.login()
    self.logger.debug('Facebook: navigate to: %s' % url)
    for i in range(10):
        self.chrome.navigate(url)  # go to page
        for j in range(10):
            self.sleep(1)
            try:
                m = rsearch('<img', self.chrome.get_inner_html_by_id('content'))
                if m is not None:
                    return
            except Exception:  # content element not rendered yet
                pass
        self.login()
    raise Exception('Facebook might have blocked all given accounts.')
def singularize(cls, what):
    '''Singularizes english words (example: people => person, sheep => sheep, lines => line)'''
    # uncountable words pass through untouched
    for entry in reversed(cls._uncountable):
        suffix = entry[0]
        if suffix == what[-len(suffix):].lower():
            return what
    # irregular forms are stored as (singular, plural); match on the plural
    for entry in reversed(cls._irregular):
        singular_form, plural_form = entry[0], entry[1]
        if plural_form == what[-len(plural_form):].lower():
            return what[:-len(plural_form)] + singular_form
    # regex rules, last-indexed rule tried first
    for entry in reversed(cls._singular):
        pattern, replacement = entry[0], entry[1]
        if rsearch(pattern, what, I):
            # BUG FIX: `I` was previously passed as re.sub's positional
            # `count` argument, making the substitution case-SENSITIVE
            # (and capped at 2 replacements) while the search above was
            # case-insensitive. Pass it as `flags` instead.
            return rsub(pattern, replacement, what, flags=I)
    return what
def pluralize(cls, what):
    '''Pluralizes english words (example: person => people, news => news, post => posts)'''
    # uncountable words pass through untouched
    for entry in reversed(cls._uncountable):
        suffix = entry[0]
        if suffix == what[-len(suffix):].lower():
            return what
    # irregular forms are stored as (singular, plural); match on the singular
    for entry in reversed(cls._irregular):
        singular_form, plural_form = entry[0], entry[1]
        if singular_form == what[-len(singular_form):].lower():
            return what[:-len(singular_form)] + plural_form
    # regex rules, last-indexed rule tried first
    for entry in reversed(cls._plural):
        pattern, replacement = entry[0], entry[1]
        if rsearch(pattern, what, I):
            # BUG FIX: `I` was previously passed as re.sub's positional
            # `count` argument, making the substitution case-SENSITIVE
            # (and capped at 2 replacements) while the search above was
            # case-insensitive. Pass it as `flags` instead.
            return rsub(pattern, replacement, what, flags=I)
    return what
section = None for index in range(0, len(output_lst)): line = output_lst[index].strip() if line == '': continue # search for rkhunter version if 'information' in hunter_report.keys() and 'version' not in hunter_report['information'].keys(): if rkhunter_version: hunter_report['information']['version'] = rkhunter_version # generic parser if line.startswith('Performing '): perf, rest = line.split(' ', 1) section = rest.replace("'", "") hunter_report[section] = {} continue generic_match = rsearch('^(?P<checkname>.+?)\s+?\[ (?P<checkresult>.*?) \]$', line) if generic_match and section: if generic_match.group('checkname').count("'") > 1: checkname = generic_match.group('checkname').replace("'", "") else: checkname = generic_match.group('checkname') if generic_match.group('checkresult').count("'") > 1: checkresult = generic_match.group('checkresult').replace("'", "") else: checkresult = generic_match.group('checkresult') if checkname in WARNING_CHECK_LST and checkresult == 'Warning': #print('>> checking warning value of: %s' % checkname) warning_mess = check_rkhunter_log(checkname) elif section == 'file properties checks' and checkresult == 'Warning': warning_mess = check_rkhunter_log(checkname) else:
def _delete_left_bracket(self, stem: str, morpheme: str, accent: str, current_AP: str) -> List[List[str]]:
    """
    This function is so far for nouns only.
    It explicitly uses noun AP names.

    handling yers and defining if the word has neocircumflex:
    if morpheme is genitive -ā,
    stem has mobile vowel, ending is accented,
    gender is not feminine (i.e. m/n),
    and, at last, word is not komunizam-like exception (a.p. q),
    then word DOES have neocircumflex retraction
    and yer is FORCEDLY clarified to an 'a' sound;
    other yers will be handled afterwards by the common yer rule

    ...

    without yers, in cases like jèzik : jȅzīkā:
    stem has no mobile vowel, stem is accented,
    word is not feminine, accented vowel is not the first one
    """
    # NOTE(review): retraction is a list of candidate retraction "cases"
    # handed to _apply_neocirk below; 0 appears to mean "no retraction" —
    # confirm against _apply_neocirk's definition.
    result = []
    if morpheme.startswith('<'):  # so far only '-ā' is like that
        # 1. finding vowel places that will be of importance
        # (lvi = last vowel index, pvi = penultimate vowel index — presumably;
        # confirm against indices())
        lvi, _, pvi = indices(stem)

        # 2 handling óvca > ovácā and óvan > ovánā
        if c.macron in stem:
            if ((self.label("f") and current_AP in ('c:', 'g:'))
                    or (self.label("m") and current_AP in ('a¿', )
                        and not 'œ' in stem)):
                stem = stem.replace(c.macron, '')

        # 3 handling yers and predefining retractions
        retraction = [0]
        if self.label('m'):
            if (has(stem, 'ъ', 'ꚜ') and current_AP in accent
                    and current_AP == 'b:'):
                retraction = [2, 1]  # Макѐдо̄на̄ца̄ & Македóна̄ца̄
            elif (has(stem, 'ъ', 'ꚜ') and current_AP in accent
                    and current_AP in ('a:', 'c:', 'f.')):
                retraction = [2]  # но̏ва̄ца̄
            elif 'd' in current_AP and 'œв' in stem:  # у́до̄ва̄
                retraction = [1]
            elif (pvi is not None and not has(stem, 'ъ', 'ꚜ')
                    and current_AP not in accent):
                if current_AP == 'a.':
                    retraction = [1]  # је̏зӣка̄
            # NOTE(review): the next three branches test yer/'œв' contents and
            # so cannot nest under the preceding `not has(stem, 'ъ', 'ꚜ')`
            # guard without being dead code; they chain at this level.
            elif 'b.' in current_AP and 'ъц' in stem and 'œ' in stem:
                retraction = [1]  # о̀че̄ва̄
            elif 'œв' in stem and 'c?' in current_AP:
                retraction = [2, 1, 0]  # бо̏го̄ва̄, бо̀го̄ва̄, бого́ва̄
            elif 'œв' in stem and 'b' in current_AP:
                retraction = [
                    2, 1
                ]  # гро̏ше̄ва̄ & гро̀ше̄ва̄, би̏ко̄ва̄ & бѝко̄ва̄
        elif self.label('f'):
            if (pvi is not None and not has(stem, 'ъ', 'ꚜ')
                    and current_AP not in accent
                    and current_AP not in ('a¡', )):
                retraction = [1]  # па̏ртӣја̄
        elif self.label('n'):
            if ((current_AP in ('g.', 'b:', 'a.'))  # sèlo:sêlа̄, písmo:pîsа̄mа̄, kòleno:kо̏lе̄nа̄
                    or (
                        current_AP in ('b.', 'b:')
                        and has(stem, 'ъ', 'ꚜ')
                        and (rsearch('[лрмнвјљњ][ъꚜ]', stem)  # NB vesálа̄!
                             or stem.endswith('ц')  # nе̏bа̄cа̄
                             ))  # bŕvno: bȑvа̄nа̄, pisàmce:pìsamа̄cа̄, písmo:pîsа̄mа̄
                    ):
                retraction = [2]

        # yers are clarified to 'a' here only when no 'œ' is present;
        # otherwise the common yer rule applies via deyerify
        if not 'œ' in stem:  # TODO one day think about better condition
            stem = stem.replace('ъ', 'а').replace('ꚜ', 'а')
        else:
            stem = deyerify(stem)

        # 4. a renewed set of indices, since ъ/ꚜ has become а
        lvi, _, pvi = indices(stem)

        # 5. handling strange new exceptions like komunizam
        if '·' in stem and current_AP == 'q.' and lvi is not None:
            stem = insert(stem.replace('·', ''), {lvi: '·'})

        # 6. we insert macron after lvi if last vowel is short
        # (note: stem[lvi:] with lvi None slices the whole stem, so the
        # order of these two conditions is safe)
        if not c.macron in stem[lvi:] and lvi is not None:
            stem = swap(insert(stem, {lvi + 1: c.macron}), c.macron, '·')

        # 7. if we have neocircumflex retraction, we apply it
        for case in retraction:
            result.append(_apply_neocirk(stem, lvi, pvi, morpheme, case))
    else:
        result = [[stem, morpheme]]
    # strip the '<' marker from every affix before returning
    return [[base, aff.replace('<', '')] for (base, aff) in result]