def gen_test():
    """Check generate() against the RS fixture.

    Returns:
        int: 0 when every regex expands to its expected list, -1 on the
        first mismatch (after printing a diagnostic).
    """
    for regex, result in RS.items():
        generated = list(generate(regex))
        if generated != result:
            # Compare once and reuse the result; the original re-ran
            # list(generate(regex)) inside a bare except just to print it.
            print('[E] Assertion error! "%s"\n\t%r != %r' % (regex, generated, result))
            return -1
    return 0
def __call__(self, parser, namespace, values, option_string=None):
    """argparse action: split comma-separated `values` into filenames vs.
    literal data strings and attach a started cInputGenerator to the namespace.

    Tokens that name a readable file go to `filenames`; everything else is
    treated as literal input data.
    """
    filenames = []
    data = []
    inpts = values.split(',')
    for inp in inpts:
        inp = inp.rstrip()
        if not inp:
            continue
        try:
            # Probe whether the token is a readable file.
            fd = openany(inp, 'r')
            fd.close()
            filenames.append(inp)
        except Exception:  # narrowed from bare except (don't catch SystemExit/KeyboardInterrupt)
            data.append(inp)
    expand_cb = None
    if namespace.wildcards:
        # Expand regex-style wildcards into concrete strings on demand.
        expand_cb = lambda t: exrex.generate(t)
    inputs = cTuples.cInputGenerator(
        filenames=filenames,
        data=data,
        circle=True,
        expand_cb=expand_cb,
        prepare_cb=lambda t: t.rstrip() or None,
    )
    inputs.setDaemon(True)
    inputs.start()
    setattr(namespace, self.dest, inputs)
def create_file_list(filename, link):
    """Escape the even-indexed fragments of `link` in place, expand the joined
    pattern with exrex, and write the resulting URLs via list_write()."""
    for idx, fragment in enumerate(link):
        if idx % 2 == 0:
            # Even slots are literal text; odd slots stay as regex parts.
            link[idx] = re.escape(fragment)
    pattern = ''.join(link)
    urls = list(exrex.generate(pattern))
    return list_write(filename, urls)
def generate(regex):
    # Expand `regex` with exrex and emit <test url="..."/> lines, but only
    # when the expansion yields at most 1000 URLs; larger expansions print nothing.
    # NOTE: Python 2 print statement — this snippet is Python 2 only.
    i = 0
    urls = []
    for url in exrex.generate(regex):
        i += 1
        if i > 1000:
            # Too many expansions: stop collecting (i ends up at 1001).
            break
        urls.append(url)
    if i <= 1000:
        for url in urls:
            print "<test url=\"%s\" />" % url
def simplify_test():
    """Check that simplify() preserves the language of each RS fixture regex.

    Returns:
        int: 0 when every simplified regex still generates the expected list,
        -1 on the first mismatch (after printing a diagnostic).
    """
    for regex, result in RS.items():
        new_regex = simplify(regex)
        if not IS_PY3:
            # Python 2 needs bytes for downstream sre parsing.
            new_regex = new_regex.encode('utf-8')
        r = list(generate(new_regex))
        if r != result:
            # Direct comparison instead of assert + bare except.
            print('[E] Assertion error! "%s"\n\t%r != %r' % (regex, r, result))
            return -1
    # Explicit success return for consistency with gen_test().
    return 0
def transform_synthesized_rows(self, synthesized_rows, table_name, num_rows): """Add primary key and reverse transform synthetized data. Args: synthesized_rows(pandas.DataFrame): Generated data from model table_name(str): Name of the table. num_rows(int): Number of rows sampled. Return: pandas.DataFrame: Formatted synthesized data. """ # get primary key column name meta = self.dn.tables[table_name].meta orig_meta = self._get_table_meta(self.dn.meta, table_name) primary_key = meta.get('primary_key') if primary_key: node = meta['fields'][primary_key] regex = node['regex'] generator = self.primary_key.get(table_name) if not generator: generator = exrex.generate(regex) values = [x for i, x in zip(range(num_rows), generator)] self.primary_key[table_name] = generator if len(values) != num_rows: raise ValueError( 'Not enough unique values for primary key of table {} with regex {}' ' to generate {} samples.'.format(table_name, regex, num_rows) ) synthesized_rows[primary_key] = pd.Series(values) if (node['type'] == 'number') and (node['subtype'] == 'integer'): synthesized_rows[primary_key] = pd.to_numeric(synthesized_rows[primary_key]) sample_info = (primary_key, synthesized_rows) self.sampled = self.update_mapping_list(self.sampled, table_name, sample_info) # filter out parameters labels = list(self.dn.tables[table_name].data) synthesized_rows = self._fill_text_columns(synthesized_rows, labels, table_name) # reverse transform data reversed_data = self.dn.ht.reverse_transform_table( synthesized_rows, orig_meta, missing=False) synthesized_rows.update(reversed_data) return synthesized_rows[labels]
def connect():
    # Connect to the challenge server, solve its SHA1 proof-of-work by brute
    # forcing the announced regex with exrex, then return the authenticated
    # zio handle positioned after the "your answer" prompt.
    io = zio(('119.254.101.232', 8888))
    # Server banner looks like: SHA(<regex>) = <hexdigest>
    regex = re.compile('SHA\((.*?)\) = ([\d\w]+)')
    _reg, _hash = regex.findall(io.read_until('\n'))[0]
    for candidate in exrex.generate(_reg):
        shasum = sha1(candidate).hexdigest()
        if shasum == _hash:
            io.write(candidate + '\n')
            break
    io.read_until('your answer\n')
    return io
def __init__(self, bucket_name, filter, local_path='download/', filter_path='enriched/bad/run='):
    # S3 downloader setup. `filter` shadows the builtin but is kept for
    # caller compatibility (keyword callers depend on the name).
    self.filter = filter_path + filter
    # Credentials come from the environment; KeyError if unset.
    self.aws_access_key_id = os.environ['ACCESS_KEY_ID']
    self.aws_secret_access_key = os.environ['SECRET_ACCESS_KEY']
    self.local_path = local_path
    self.bucket_name = bucket_name
    # `filter` may be a regex: expand it into the concrete key prefixes.
    self.prefixes = list(exrex.generate(filter_path + r'%s' % filter))
class CheriMipsCallbacksManager(CallbacksManager):
    """
    A concrete CallbacksManager that handles callbacks for CHERI-mips traces.
    """
    # Map each instruction class to the list of opcode names belonging to it.
    # Regex patterns are eagerly expanded by exrex into explicit opcode strings.
    iclass_map = {
        IClass.I_CAP_LOAD: list(chain(
            exrex.generate("cl[dc][ri]?|cl[bhw][u]?[ri]?"),
            exrex.generate("cll[cd]|cll[bhw][u]?"),
            ["clcbi"])),
        IClass.I_CAP_STORE: list(chain(
            exrex.generate("cs[bhwdc][ri]?"),
            exrex.generate("csc[cbhwd]"),
            ["cscbi"])),
        IClass.I_CAP_CAST: [
            "ctoptr", "cfromptr", "cfromddc"],
        IClass.I_CAP_ARITH: [
            "cincoffset", "csetoffset", "csub", "cmove"],
        IClass.I_CAP_BOUND: [
            "csetbounds", "csetboundsexact", "candperm"],
        IClass.I_CAP_FLOW: [
            "cbtu", "cbts", "cjr", "cjalr",
            "ccall", "creturn", "cbez", "cbnz"],
        IClass.I_CAP_CPREG: [
            "csetdefault", "cgetdefault", "cgetepcc", "csetepcc",
            "cgetkcc", "csetkcc", "cgetkdc", "csetkdc", "cgetpcc",
            "cgetpccsetoffset"],
        IClass.I_CAP_CMP: [
            "ceq", "cne", "clt", "cle", "cltu", "cleu", "cexeq"],
        IClass.I_CAP_OTHER: [
            "cgetperm", "cgettype", "cgetbase", "cgetlen",
            "cgettag", "cgetsealed", "cgetoffset",
            "cseal", "cunseal", "ccleartag",
            "cclearregs", "cgetcause", "csetcause", "ccheckperm",
            "cchecktype", "clearlo", "clearhi", "cclearlo", "cclearhi",
            "fpclearlo", "fpclearhi", "creadhwr", "cwritehwr", "cgetnull"]
    }
    # I_CAP is the union of every other instruction class above.
    iclass_map[IClass.I_CAP] = list(chain(*iclass_map.values()))
def dic_creat(hosts):
    # Build password candidates from the dot-separated pieces of the hostname.
    # NOTE(review): reconstructed from collapsed source; the host loop below is
    # assumed to run after the rule loop (last non-comment rule wins) — confirm.
    web_dics = hosts.split('.')
    special = list(exrex.generate('[!@#]+'))
    # Take the useful parts into the dictionary generator and build the dict;
    # the core generation rules live in a config file for easier extension.
    f_rule = open('rule.ini', 'r')
    for i in f_rule:
        if '#' != i[0]:
            rule = i
    for web_dic in web_dics:
        if web_dic not in web_white:
            f_pass = open('pass_0.txt', 'r')
            for dic_pass in f_pass:
                dics = list(
                    exrex.generate(
                        rule.format(web_dic=web_dic,
                                    special=special,
                                    dic_pass=dic_pass.strip('\n'))))
                for dic in dics:
                    print dic
def make_pass(dics):
    # For every stem in `dics`, combine it with each base password from
    # pass.txt as "<stem>@<password>" and print the expanded candidates.
    # NOTE: Python 2 print statement — this snippet is Python 2 only.
    for dic in dics:
        # Read the base password list (reopened for every stem).
        f_pass = open('pass.txt', 'r')
        for pwd in f_pass:
            #print pwd
            pwd = pwd.strip('\n')  # drop the trailing newline
            # dic + @ + pwd
            final_pwds = list(exrex.generate(dic + '[@]' + pwd))
            for final_pwd in final_pwds:
                print final_pwd
def items(self):
    """Return (expanded pattern, language_code) pairs for every translatable
    Path whose pattern has no open-ended group."""
    querysets = [
        # catch only this one that doesn't have open group in pattern
        Path.objects.exclude(translations__pattern__contains='.*'
                             ).language(lang_code)
        for lang_code, _ in settings.LANGUAGES
    ]
    return [
        (expanded, path.language_code)
        for path in chain(*querysets)
        for expanded in exrex.generate(path.pattern)
    ]
def generate(regexp):
    """Expand a pseudo-regexp into its concrete strings.

    Returns:
        tuple(list, list): (generated strings with DOUBLE warnings interleaved,
        []) on success, or ([], error message lines) on failure.
    """
    #check parentheses and brackets..
    if regexp.count("(") != regexp.count(")") or regexp.count("[") != regexp.count("]"):
        msg = "ERROR in \n%s\n%s" % (regexp, "unbalanced parentheses")
        print(msg)
        return ([], msg.split("\n"))
    #Because the format isn't actually regexp..
    org_regexp = regexp
    regexp = re.sub("\.", "\.", regexp)
    regexp = re.sub("\?", "\?", regexp)
    regexp = re.sub("\[", "(", regexp)
    regexp = re.sub("\]", ")?", regexp)
    try:
        gens = list(exrex.generate(regexp))
        #build a dictionary to check for doubles
        gen_count = {}
        for gen in gens:
            gen = gen.strip()
            gen = re.sub(" +", " ", gen)
            if gen in gen_count:
                gen_count[gen] += 1
            else:
                gen_count[gen] = 1
        mod_gens = []
        for gen in gens:
            gen = gen.strip()
            gen = re.sub(" +", " ", gen)
            if gen_count[gen] > 1:
                mod_gens.append("WARNING --- DOUBLE %d" % gen_count[gen])
            mod_gens.append(gen)
        return (mod_gens, [])
    except Exception:  # narrowed from bare except: keep KeyboardInterrupt/SystemExit alive
        msg = "ERROR in \n%s\n%s" % (org_regexp, sys.exc_info()[1])
        print(msg)
        return ([], msg.split("\n"))
def regex_to_str(self, all_combo: bool = False):
    """Convert a regex to a matching string

    Args:
        all_combo (bool, optional): Generate all combos that match regex.
            Defaults to False.

    Returns:
        Chepy: The Chepy object.
    """
    # Convert the current state once and branch on the requested output shape.
    pattern = self._convert_to_str()
    if all_combo:
        self.state = list(exrex.generate(pattern))
    else:
        self.state = exrex.getone(pattern)
    return self
def revers_page_paths(page_name=None, lang_args=None):
    """Expand each page path pattern into concrete URLs.

    Returns a list of (url, language_code) pairs; capture groups are filled
    from `lang_args` (a mapping of language code -> values) when provided.
    """
    expanded = []
    for path in get_path_patterns(page_name=page_name):
        lang = path.language_code
        args = lang_args.get(lang, None)
        pattern = path.pattern
        if args:
            # Substitute concrete values for the pattern's capture groups.
            pattern = replace_capture_groups_with_values(pattern, args)
        for url in exrex.generate(pattern):
            expanded.append((url, lang))
    return expanded
def _get_primary_keys(self, table_name, num_rows):
    """Return the primary key and amount of values for the requested table.

    Args:
        table_name(str): Name of the table to get the primary keys.
        num_rows(int): Number of primary_keys to generate.

    Returns:
        tuple(str,pandas.Series): If the table has a primary key.
        tuple(None, None): If the table doesn't have a primary key.

    Raises:
        ValueError: If there aren't enough remaining values to generate.
    """
    meta = self.dn.get_meta_data(table_name)
    primary_key = meta.get('primary_key')
    primary_key_values = None
    if primary_key:
        node = meta['fields'][primary_key]
        regex = node['regex']
        generator = self.primary_key.get(table_name)
        if generator is None:
            # First request for this table: create the exrex generator and
            # cache the total number of values the regex can ever produce.
            generator = exrex.generate(regex)
            self.primary_key[table_name] = generator
            remaining = exrex.count(regex)
            self.remaining_primary_key[table_name] = remaining
        else:
            remaining = self.remaining_primary_key[table_name]
        if remaining < num_rows:
            raise ValueError(
                'Not enough unique values for primary key of table {} with regex {}'
                ' to generate {} samples.'.format(table_name, regex, num_rows))
        # Book the consumed budget before drawing the values.
        self.remaining_primary_key[table_name] -= num_rows
        primary_key_values = pd.Series(
            [x for i, x in zip(range(num_rows), generator)])
        if (node['type'] == 'number') and (node['subtype'] == 'integer'):
            # Regex output is strings; coerce integer-typed keys.
            primary_key_values = primary_key_values.astype(int)
    return primary_key, primary_key_values
def regex_test(corr, inp, folder, file_check, fault, limit, time):
    # Expand the regex `inp` into a temporary file of test inputs and hand the
    # file to check(). limit == 0 means "generate everything"; otherwise the
    # file starts with the limit count followed by exactly `limit` inputs,
    # topped up with random matches when the regex yields fewer.
    # NOTE: sys.maxint is Python 2 only — this snippet is Python 2 code.
    import exrex
    inp_temp = tempfile.TemporaryFile(dir=folder)
    if limit == 0:
        inp_gen = exrex.generate(inp, limit=sys.maxint)
        for inp_n in inp_gen:
            inp_temp.write(inp_n + "\n")
    else:
        inp_gen = exrex.generate(inp, limit=limit)
        n = 0
        inp_temp.write(str(limit) + "\n")
        for inp_n in inp_gen:
            n += 1
            if (n > limit):
                break
            inp_temp.write(inp_n + "\n")
        if (n < limit):
            # Too few deterministic expansions: pad with random samples.
            for x in range(n, limit):
                inp_one = exrex.getone(inp, limit=limit)
                inp_temp.write(inp_one + "\n")
    #pass inp_temp
    how = check(corr=corr, inp=inp_temp, folder=folder, file_check=file_check, fault=fault, time=time)
    inp_temp.close()
    return how
def force(start_depth, pattern, check_callback, cache_callback):
    # Brute force the expansions of `pattern`, resuming at `start_depth`.
    # Progress is printed per guess; every 1000 attempts the position is
    # checkpointed through cache_callback so a later run can resume.
    attempts = start_depth
    gen = exrex.generate(pattern)
    # Skip the already-tried prefix of the generator.
    for i in range(start_depth):
        next(gen)
    for guess in gen:
        attempts += 1
        if check_callback(guess) == 1:
            # Success: report the guess and how many total attempts were made.
            return (guess, attempts)
        print("- {} - permutations: {}".format(guess, attempts))
        if attempts % 1000 == 0:
            cache_callback(attempts)
    # Pattern space exhausted without a hit.
    return False
def extract_value_lists_from_string(property_name, schema_json) -> OrderedDict:
    """Build an OrderedDict mapping `property_name` to candidate values.

    Pattern properties are expanded with exrex (capped at 100 samples); enum
    properties use the enum entries, plus None when the type allows "null".

    Args:
        property_name: key under which the values are stored.
        schema_json: JSON-schema fragment with 'type' and optionally
            'pattern' or 'enum'.

    Returns:
        OrderedDict: {property_name: [candidate values]} (empty when the
        schema has neither 'pattern' nor 'enum').
    """
    value_list = OrderedDict()
    property_type = schema_json['type']
    if "pattern" in schema_json:
        # extracting all pattern values, capped to keep expansion bounded
        limit = 100
        value_list[property_name] = list(
            exrex.generate(schema_json['pattern'], limit))
    elif "enum" in schema_json:
        # Copy the enum so appending None below cannot mutate the caller's
        # schema (the original aliased schema_json['enum'] directly).
        enum_values = list(schema_json['enum'])
        value_list[property_name] = enum_values
        if "null" in property_type:
            value_list[property_name].append(None)
    return value_list
def dic_create(url):
    # Build a password dictionary for `url`: truncate web_pass.txt, then append
    # one candidate per rule expansion of every (host word, base password) pair.
    # `rule` and `web_band` are module-level — presumably a format template and
    # a blacklist of host words; TODO confirm against the caller.
    web_pass = open("web_pass.txt", 'w')  # truncate any previous output
    web_pass.close()
    web_dics = url.split('.')
    for web_dic in web_dics:
        if web_dic not in web_band:
            f_pass = open('pass.txt', 'r')
            for dic_pass in f_pass:
                dics = list(
                    exrex.generate(
                        rule.format(web_dic=web_dic,
                                    dic_pass=dic_pass.strip("\n"))))
                for dic in dics:
                    # Append each candidate (file reopened per write).
                    web_pass = open("web_pass.txt", 'a+')
                    web_pass.write(dic + '\n')
                    web_pass.close()
def expand_this(pattern):
    """ Cleans then expands any and all patterns
    >>> expand_this("@num{1,2}")
    [['@num'], ['@num', '@num']]
    >>> expand_this("spam{3}")
    [['spam', 'spam', 'spam']]
    >>> expand_this("(a|f) b c")
    [['a', 'b', 'c'], ['f', 'b', 'c']]
    >>> expand_this("a{10}")
    [['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']]
    >>> expand_this("@banana_2{1,2}")
    [['@banana_2'], ['@banana_2', '@banana_2']]
    """
    pattern = make_clean_input(pattern)
    # Each exrex expansion becomes a token list after whitespace normalisation.
    patterns = [strip_extra_spaces(_).split() for _ in exrex.generate(pattern)]
    return patterns
def generate(regexp):
    """Expand a pseudo-regexp into its concrete strings.

    Returns:
        tuple(list, list): (generated strings with DOUBLE warnings interleaved,
        []) on success, or ([], error message lines) on failure.
    """
    #check parentheses and brackets..
    if regexp.count("(") != regexp.count(")") or regexp.count(
            "[") != regexp.count("]"):
        msg = "ERROR in \n%s\n%s" % (regexp, "unbalanced parentheses")
        print(msg)
        return ([], msg.split("\n"))
    #Because the format isn't actually regexp..
    org_regexp = regexp
    regexp = re.sub("\.", "\.", regexp)
    regexp = re.sub("\?", "\?", regexp)
    regexp = re.sub("\[", "(", regexp)
    regexp = re.sub("\]", ")?", regexp)
    try:
        gens = list(exrex.generate(regexp))
        #build a dictionary to check for doubles
        gen_count = {}
        for gen in gens:
            gen = gen.strip()
            gen = re.sub(" +", " ", gen)
            if gen in gen_count:
                gen_count[gen] += 1
            else:
                gen_count[gen] = 1
        mod_gens = []
        for gen in gens:
            gen = gen.strip()
            gen = re.sub(" +", " ", gen)
            if gen_count[gen] > 1:
                mod_gens.append("WARNING --- DOUBLE %d" % gen_count[gen])
            mod_gens.append(gen)
        return (mod_gens, [])
    except Exception:  # narrowed from bare except: keep KeyboardInterrupt/SystemExit alive
        msg = "ERROR in \n%s\n%s" % (org_regexp, sys.exc_info()[1])
        print(msg)
        return ([], msg.split("\n"))
def get_letters(regex, limit, position, current_str):
    """Collect the possible characters at `position` for strings matching
    `regex` that are also compatible with `current_str` ('~' = wildcard).

    Relies on the module-level `limitting_num` to bound how many candidate
    strings are sampled/generated.
    """
    possible_strs = list()
    if limitting_num > 10000000:
        # Too many candidates to enumerate: sample randomly instead.
        # Floor division so this also works on Python 3 (the original used
        # `/`, which yields a float and breaks range()).
        for i in range(limitting_num // 100):
            possible_strs.append(exrex.getone(regex, limit=limit + 1))
    else:
        gen = exrex.generate(regex, limit=limit + 1)
        for i in range(limitting_num):
            candidate = next(gen, None)  # avoid shadowing builtin `str`
            if candidate is not None:
                possible_strs.append(candidate)
            else:
                break
    # removing all strings shorter than we need
    remove_outliers(possible_strs, limit)
    # Keep only strings that agree with current_str at every known position.
    is_bad = False
    chosen_strs = list()
    for string in possible_strs:
        for j in range(len(string)):
            if string[j] != current_str[j] and current_str[j] != '~':
                is_bad = True
        if is_bad:
            is_bad = False
            continue
        else:
            chosen_strs.append(string)
    # Collect all possible variations of position' letter
    resulting_set = list()
    for i in chosen_strs:
        if resulting_set.count(i[position]) == 0:
            resulting_set.append(i[position])
    return resulting_set
def generate(str_length=10, patterns=None):
    """Generate every string over {a..f} of length `str_length` and bucket it.

    Args:
        str_length: length of the generated strings.
        patterns: iterable of (compiled_regex, label) pairs; the LAST matching
            pair wins. Defaults to no patterns (everything lands in "neg").

    Returns:
        dict: {"pos": [(s, label, pattern)], "neg": [(s, False, -1)]}.
    """
    if patterns is None:
        # Avoid the shared mutable-default-argument pitfall of `patterns=[]`.
        patterns = []
    print("# total possible strings: ",
          exrex.count('(a|b|c|d|e|f){{{}}}'.format(str_length)))
    data = {"pos": [], "neg": []}
    # `candidate` instead of the original `str`, which shadowed the builtin.
    for candidate in exrex.generate('(a|b|c|d|e|f){{{}}}'.format(str_length)):
        label = False
        latest = -1
        for p in patterns:
            if p[0].match(candidate):
                label = p[1]
                latest = p[0]
        if label:
            data["pos"].append((candidate, label, latest))
        else:
            data["neg"].append((candidate, label, latest))
    return data
def expand(expression):
    """ Use regex entry expansion to populate the register. """
    expresults = list(exrex.generate(expression))
    # no regex
    if len(expresults) == 1:
        # The expression was literal text (a single expansion), not a pattern
        # with alternatives.
        results = list()
        results.append(expression)
        if settings.LANGUAGE == 'DE':
            # German genitive form: if no s at the end of one component
            if not re.search(r's$', expression) and not re.search(
                    r'\s', expression):
                temp = expression + 's'
                results.append(temp)
        return results
    # serialize
    else:
        return expresults
def dic_create(url_dics):
    # Build the final candidate list for the given URL words using the LAST
    # non-comment rule from dic_rule.ini combined with the common dictionary.
    # `dic_bad` and `filter_list` are module-level blacklists — TODO confirm.
    with open('D:\\django_test\\webtool\\tool\\dic_rule.ini', 'r') as f:
        for i in f.readlines():
            i = i.strip('\n')
            if i[0] != '#':
                rule = str(i)  # each non-comment line overwrites; last wins
    with open('D:\\django_test\\webtool\\tool\\comm_dic.txt', 'r') as f:
        comm_dics = f.readlines()
    dic_final = []
    for url_dic in url_dics:
        if url_dic not in dic_bad:
            for comm_dic in comm_dics:
                dics = list(exrex.generate(rule.format(url_dic=url_dic,
                                                       comm_dic=comm_dic.strip('\n'))))
                for dic in dics:
                    # Keep candidates longer than 4 chars whose first char is
                    # not filtered.
                    if (len(dic) > 4) and (dic[0] not in filter_list):
                        dic_final.append(dic)
    #print dic_final
    # Always append the full dotted hostname as a candidate too.
    fin = '.'.join(url_dics)
    dic_final.append(fin)
    return dic_final
def test_dfas_with_generated_strings(self): """ This test uses the exrex module to generate strings from a regular expression Each DFA yaml file includes it's own regular expression that describes the language. """ # Skip this test if exrex isn't installed try: import exrex except ImportError: msg = "Skipping exrex based test because it's missing. Do 'pip install exrex'" print(msg) self.skipTest(msg) for test in self.tests: # Take at most MAX_GENERATED strings from the exrex generator for input_string in islice(exrex.generate(test.yaml["regex"]), self.MAX_GENERATED): # Run them through the DFA, raise exception if it doesn't accept if not dfa_accepts(test.dfa, input_string): raise(UnexpectedRejection(test.yaml["description"], input_string, test.yaml["regex"]))
def dic_create(hosted):
    """Print password candidates for `hosted` (a hostname).

    The hostname is split on '.' into words; each non-blacklisted word is
    combined with every base password from 'password' through every
    non-comment rule template from 'config', and each exrex expansion is
    printed.
    """
    web_dicts = hosted.split('.')
    # Context managers instead of the original leaked open() handles.
    with open('password', 'r') as f_pass:
        f_pass_lines = f_pass.readlines()
    with open('config', 'r') as f_rule:
        rule_lines = f_rule.readlines()
    for rule in rule_lines:
        if rule.startswith('#'):
            # Rule lines starting with '#' are comments.
            continue
        print(rule)
        for web_dict in web_dicts:
            if web_dict not in web_white:
                for passw in f_pass_lines:
                    # combine the passwords with the parsed hosts
                    dicts = list(exrex.generate(
                        rule.format(web_dict=web_dict, passw=passw.strip())))
                    for dic in dicts:
                        print(dic)
def gen_fuzz_subdomains(expression, rule):
    """
    Generate subdomains based on fuzz mode

    :param str expression: generate subdomains's expression
    :param str rule: regexp rule
    :return list subdomains: list of subdomains
    """
    subdomains = list()
    fuzz_count = exrex.count(rule)
    if fuzz_count > 10000000:
        # Only a warning: generation still proceeds with the oversized rule.
        logger.log('ALERT', f'The dictionary generated by this rule is too large:{fuzz_count} > 10000000')
    logger.log('DEBUG', f'Dictionary size based on fuzz mode: {fuzz_count}')
    for fuzz_string in exrex.generate(rule):
        fuzz_string = fuzz_string.lower()
        if not fuzz_string.isalnum():
            # Skip candidates that are not purely alphanumeric (not DNS-safe).
            continue
        fuzz_domain = expression.replace('*', fuzz_string)
        subdomains.append(fuzz_domain)
    # NOTE(review): random.choice raises IndexError when every candidate was
    # filtered out above — confirm callers never pass such a rule.
    random_domain = random.choice(subdomains)
    logger.log('ALERT', f'Please check whether {random_domain} is correct or not')
    return subdomains
def extract_zip(file, password, count):
    # Try each candidate password against the archive. On success: read the
    # next hint (a regex) from hint.txt, expand it into the next password
    # list, renumber the nested archive, drop the previous one, and return
    # the new password list.
    # NOTE: `except Exception, e` is Python 2 syntax — Python 2 only.
    print("GOOD")
    zip_ref = ZipFile(file)
    for i in password:
        try:
            zip_ref.extractall(pwd=i)
            print("Found Password is ...... : " + i + "\n")
            print(zip_ref.namelist())
            if os.path.exists('hint.txt'):
                f = open("hint.txt", 'r')
                # The first line of the hint is a regex for the next password.
                password = list(exrex.generate(f.readline()))
                f.close()
            if os.path.exists('archive.zip'):
                os.rename('archive.zip', 'archive' + str(count) + '.zip')
                if count > 1:
                    os.remove("archive" + str(count - 1) + ".zip")
            return password
        except Exception, e:
            # Wrong password: fall through to the next candidate.
            pass
def gen_fuzz_subdomains(expression, rule):
    """
    Generate brute-force subdomains based on fuzz mode.

    :param str expression: subdomain generation expression
    :param str rule: regexp rule used to generate subdomains
    :return: subdomains for brute forcing
    """
    subdomains = list()
    fuzz_count = exrex.count(rule)
    if fuzz_count > 10000000:
        # Only a warning: generation still proceeds with the oversized rule.
        logger.log('ALERT', f'请注意该规则生成的字典太大:{fuzz_count} > 10000000')
    logger.log('DEBUG', f'fuzz模式下生成的字典大小:{fuzz_count}')
    for fuzz_string in exrex.generate(rule):
        fuzz_string = fuzz_string.lower()
        if not fuzz_string.isalnum():
            # Skip candidates that are not purely alphanumeric (not DNS-safe).
            continue
        fuzz_domain = expression.replace('*', fuzz_string)
        subdomains.append(fuzz_domain)
    # Sample one generated domain so the user can sanity-check the rule.
    random_domain = random.choice(subdomains)
    logger.log('ALERT', f'请注意检查基于fuzz模式生成的{random_domain}是否正确')
    return subdomains
def make_pass(pwds):
    # NOTE: Python 2 print statement — this snippet is Python 2 only.
    # Truncate the output dictionary file.
    fout = open('pass_out.txt', 'w')
    fout.close()
    # Assume each password has three parts: 1. the string YXZ,
    # 2. a numeric password, 3. an underscore or hash sign.
    for pwd in pwds:
        # Three assumed arrangements (with case variants):
        # Yxz123456_  123456yxz_  _yxZ123456
        rules = [
            '({pwd})([Yx][Xx][Zz])(_|#)', '([Yx][Xx][Zz])({pwd})(_|#)',
            '(_|#)({pwd})([Yx][Xx][Zz])'
        ]
        # Generate the passwords for each arrangement.
        for rule in rules:
            final_pwds = list(exrex.generate(rule.format(pwd=pwd)))
            for final_pwd in final_pwds:
                print final_pwd
                # Append each candidate to the output dictionary.
                fout = open('pass_out.txt', 'a+')
                fout.write(final_pwd + '\n')
                fout.close()
def dic_creat(hosts): web_dics = hosts.split('.') #取出每一个可能的字符串,如demo,webdic,分别放入字典生成的地方,生成字典 #把生成字典的规则,写在配置文件里,易于后期修改使用 rule_ini = open('web_dic.ini', 'r') for i in rule_ini: if '#' not in i: rule = i f_pass = open('pass_1.txt', 'w') f_pass.close() # print i rule_ini.close() for web_dic in web_dics: if web_dic not in web_white: f = open('name.txt', 'r') # print rule for i in f: dics = list( exrex.generate( rule.format(web_dic=web_dic, name=i.strip('\n')))) for dic in dics: try: if len(dic) == 0 or int(dic): pass except: # print dic f_pass = open('pass_1.txt', 'a+') f_pass.write(dic + '\n') f_pass.close() f.close() print '[*]Create dic complete!'
def make_dict(dics, pwds):
    # Combine every base word in `dics` with every password in `pwds` through
    # a rule template and append the expanded candidates to password.txt.
    # NOTE(review): reconstructed from collapsed source; `lock`, `kw1`,
    # `options` and `cmd` come from module level — confirm against the caller.
    for dic in dics:
        lock.acquire()
        kw2 = {"d": dic.strip()}
        for pwd in pwds:
            if '*' in pwd or '?' in pwd:
                # Escape wildcard characters so they stay literal in the regex.
                pwd = pwd.replace("*", "\\*")
                pwd = pwd.replace("?", "\\?")
            kw2 = {"d": r'' + dic.strip()}
            kw3 = {"w": r'' + pwd.strip()}
            kw = dict(kw1, **kw2, **kw3)
            rule = options[cmd.position].format(**kw)
            if cmd.rule:
                # A user-supplied rule template overrides the preset one:
                # d -> word, w -> password, c -> repeated character class.
                rule = cmd.rule.replace("d", "{d}").replace("w", "{w}").replace("c", "[{c}]{{{n}}}")
                rule = rule.format(**kw)
            new_dict = list(exrex.generate(rule))
            for _ in new_dict:
                with open("password.txt", "a") as f:
                    f.write(_ + "\r")
        try:
            # `pwds` may be a file handle: rewind so the next word re-reads it.
            pwds.seek(0)
        except:
            pass
        lock.release()
# Read one regex fragment per stdin line (skipping lines with unsupported
# */+ quantifiers), OR the fragments together, expand with exrex into IP
# address strings, then print either the sorted addresses (--list-only) or
# collapsed CIDR ranges. `is_valid`, `sort_ip_list`, `create_range`,
# `gen_cidr` and `args` are defined elsewhere in this script.
pattern = ''
for line in sys.stdin:
    if re.search(r"[*+]", line):
        print(
            "Ignoring line '{}' because it contains unsupported quantifiers like + or *"
            .format(line.rstrip()))
        continue
    pattern += line.strip(' \t\n') + "|"
pattern = str(pattern).rstrip('|')
a = set([])
try:
    #for i in list(exrex.generate(pattern, limit=sys.maxsize)):
    for i in list(exrex.generate(pattern, limit=(1 << 32))):
        if not is_valid(i):
            continue
        a.add(ipaddress.ip_address(i))
except re.error:
    print("\nPattern error\n")
    exit(1)
if args.list_only:
    for i in sort_ip_list(list(a)):
        print(i)
else:
    # Collapse the address set into ranges, then emit minimal CIDR blocks
    # (single hosts printed without the /32 suffix).
    for i in create_range(a):
        start = i.split('-')[0]
        end = i.split('-')[-1]
        for x in gen_cidr(start, end):
            print(str(x).replace('/32', ''))
import exrex

# Expand the pattern (optional a/g/b prefixes followed by "aab") and show
# every matching string.
matches = list(exrex.generate('a?g?b?aab'))
print(matches)
def _expand_hostlist(self):
    # Expand the raw hostlist expression into a sorted list of concrete hosts.
    # Each whitespace-separated hostgroup is either handled by a registered
    # parser (selected by its first character) or treated as a host regex that
    # exrex expands; a leading '-' negates (removes) the group's hosts.
    expanded_hostlist = list()  # expanded
    if (self._hostlist[0] == "'" and self._hostlist[-1] == "'") or (
        self._hostlist[0] == '"' and self._hostlist[-1] == '"'
    ):
        # Strip one layer of shell-style quoting.
        self._hostlist = self._hostlist[1:-1]
    # yeah, maybe we'll need that dirty hack in the future
    if len(self._hostlist) == 1 and self._hostlist[0] == "-":
        # special case for reading stdin instead of command args
        self._read_hostlist_from_stdin()
    hostgroups = self._hostlist.split()
    for hostgroup in hostgroups:
        hostlist_addition = list()
        negation = False
        hostgroup_modifier = hostgroup[0]  # the first symbol
        hostgroup_remainder = hostgroup[1:]
        if hostgroup_modifier == "-":
            # Negation: the real modifier is the next character.
            negation = True
            hostgroup_modifier = hostgroup[1]
            hostgroup_remainder = hostgroup[2:]
            hostgroup = hostgroup[1:]
        if hostgroup_modifier not in self._available_parsers:
            if not hostgroup_modifier.isalnum():
                raise exceptions.ExpandingHostlistError(
                    "Couldn't find corresponding parser for {0} modifier.".format(hostgroup_modifier)
                )
            else:
                # hostgroup is a host or a regex, not a group
                escaped_hostgroup = self._escape_unsafe_characters(hostgroup)
                self._die_if_unsafe_characters(escaped_hostgroup)
                # Cap expansion at 1000 hosts to avoid pathological regexes.
                hostlist_addition = list(exrex.generate(escaped_hostgroup, limit=1000))
                # hostlist_addition.append(hostgroup)
        else:
            # we must call a parser
            parser = self._available_parsers[hostgroup_modifier]
            parser_name = parser.__name__
            self._config[parser_name] = self._update_config(
                parser_name, hostgroup_modifier=hostgroup_modifier, hostgroup=hostgroup_remainder
            )
            obj = parser(**self._config[parser_name])
            try:
                hostlist_addition = obj.parse()
                if len(hostlist_addition) == 0:
                    raise exceptions.ExpandingHostlistError(
                        "Parser {0} didn't return any hosts for hostgroup {1}".format(
                            parser.__name__, hostgroup_remainder
                        )
                    )
            except classes.SWKParsingError as e:
                raise exceptions.ExpandingHostlistError(
                    "Parser {0} died with message: {1}".format(parser.__name__, str(e))
                )
        if not negation:
            # don't add host twice
            expanded_hostlist.extend([x for x in hostlist_addition if x not in expanded_hostlist])
        else:
            # delete host from list if present
            expanded_hostlist = [x for x in expanded_hostlist if x not in hostlist_addition]
    logging.debug("Expanded hostlist: {0}".format(expanded_hostlist))
    return sorted(expanded_hostlist)
'https': 'socks5://127.0.0.1:9050' }) if opts.user_agents: lee_user_agents(opts.user_agents) buscadores = [] fabrica = FabricaBuscador() for x in opts.buscadores.split(','): buscadores.append(fabrica.get_buscador(x.strip())) q = [''] if len(args) > 0: q = [ x.strip().replace('[OP_SUM]', '+') for x in args[0].replace('\+', '[OP_SUM]').split('+') ] queries = [y for x in q for y in list(exrex.generate(x))] if opts.regex else q expansiones = expandir(queries, opts) if opts.verbose: print("Expansiones: %s" % str(expansiones)) print("Proxies en uso: %s" % proxies) intervalo = int_or_0(opts.intervalo) num_res = int_or_0(opts.num_res) resultados = {} for d, q in expansiones: proxy = None if len(proxies) > 0: i = randint(0, len(proxies) - 1) proxy = proxies[i] for b in buscadores: if not b: continue
#!/usr/bin/python3 -u
import sys,exrex

# Expand the regex given as argv[1]; the optional argv[2] is a state file
# recording candidates already printed in earlier runs.
candidates = set(exrex.generate(sys.argv[1]))
if len(sys.argv) > 2:
    # Ensure the state file exists, then drop anything already tried.
    with open(sys.argv[2], 'a') as fh:
        pass
    with open(sys.argv[2], 'r') as fh:
        candidates -= set(fh.read().splitlines())
tried = []
try:
    for candidate in candidates:
        print(candidate)
        tried.append(candidate)
finally:
    # Always report progress and persist what was printed, even on interrupt.
    print("%s: Tried %d new entries" % (sys.argv[0], len(tried)), file=sys.stderr)
    if len(sys.argv) > 2:
        with open(sys.argv[2], 'a') as fh:
            for candidate in tried:
                fh.write(candidate + "\n")
import exrex
import re

# Expand the alternation: exactly two repetitions of "hai", or "world!".
expansions = exrex.generate('(hai){2}|world!')
print(list(expansions))
zip_ref.extractall(pwd=i) print("Found Password is ...... : " + i + "\n") print(zip_ref.namelist()) if os.path.exists('hint.txt'): f = open("hint.txt", 'r') password = list(exrex.generate(f.readline())) f.close() if os.path.exists('archive.zip'): os.rename('archive.zip', 'archive' + str(count) + '.zip') if count > 1: os.remove("archive" + str(count - 1) + ".zip") return password except Exception, e: pass if __name__ == '__main__': password = [] encZip = 'RegularZips.zip' password = list(exrex.generate("^ 7 y RU[A-Z]KKx2 R4\d[a-z]B N$")) password = extract_zip(encZip, password, 1) j = 0 while 1: j += 1 try: password = extract_zip("archive" + str(j) + ".zip", password, j + 1) except: print("DONE")
def execute(mode, code, input_str):
    # Interpreter dispatch: the single-letter `mode` selects how `code` is
    # combined with `input_str`. A result starting with "`" is itself a
    # program and is executed recursively.
    result = ""
    if mode == "l":
        # Lookup table: rows split on unescaped ';', cells on unescaped '&'.
        rows = (pcre.split(r"(?<![^\\]\\)&", row) for row in pcre.split(r"(?<![^\\]\\);", code))
        table = handle_table(rows)
        if input_str in table:
            result = table[input_str]
        else:
            result = table["?"]  # "?" acts as the default/fallback key
    elif mode == "f":
        # printf-style formatting with the literal-evaluated input.
        result = code % ast.literal_eval(input_str)
    elif mode == "F":
        literal = ast.literal_eval(input_str)
        if isinstance(literal, tuple):
            result = code % literal
            # Pass the total element length on as the next input.
            input_str = str(sum((len(str(x)) for x in literal)))
        else:
            result = code % literal
            input_str = str(len(str(literal)))
    elif mode == "g":
        for string in exrex.generate(code):
            print(unescape(string))
        return  # Generate is always terminal
    elif mode == "h":
        if type(input_str) is str:
            input_str = pcre.escape(input_str)
        for string in exrex.generate(code % input_str):
            print(unescape(string))
        return
    elif mode == "p":
        # Repeat ~...~ (int input) or ~...~ / %...% (pair input) sections.
        literal = ast.literal_eval(input_str)
        if isinstance(literal, int):
            result = pcre.sub(r"(?<![^\\]\\)~(.+?)(?<![^\\]\\)~", r"\1" * literal, code, flags=pcre.DOTALL)
        else:
            result = pcre.sub(r"(?<![^\\]\\)%(.+?)(?<![^\\]\\)%", r"\1" * literal[1],
                              pcre.sub(r"~(.+?)~", r"\1" * literal[0], code, flags=pcre.DOTALL),
                              flags=pcre.DOTALL)
    elif mode == "P":
        # Repeat the character preceding each unescaped '~'.
        result = pcre.sub(r"(.)(?<![^\\]\\)~", r"\1" * ast.literal_eval(input_str), code, flags=pcre.DOTALL)
    elif mode == "e":
        rows = (pcre.split(r"(?<![^\\]\\)&", row) for row in pcre.split(r"(?<![^\\]\\);", code))
        table = handle_table(rows)
        # NOTE(review): `i` is undefined in this branch and `char` is unused —
        # this loop looks broken; confirm intended behavior.
        for char in i:
            result += table[i]
    elif mode == "o":
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        print(unescape(pieces[0]))
        result = handle_pieces(pieces[1:], "")
    elif mode == "s":
        # Apply substitution pairs from the first piece to the input.
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        subs = pcre.split(r"(?<![^\\]\\)&", pieces[0])
        input_str = handle_subs(input_str, subs)
        result = handle_pieces(pieces[1:], input_str)
    elif mode == "d":
        # Delete every substring matching each pattern from the input.
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        subs = pcre.split(r"(?<![^\\]\\)&", pieces[0])
        for sub in subs:
            input_str = pcre.sub(sub, "", input_str)
        result = handle_pieces(pieces[1:], input_str)
    elif mode == "S":
        # Like "s" but prints the substituted input directly.
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        subs = pcre.split(r"(?<![^\\]\\)&", pieces[0])
        sub_length = len(subs)
        output = unescape(handle_subs(input_str, subs))
        result = handle_pieces(pieces[1:], "")
        print(output)
    elif mode == "i":
        result = code + input_str
    elif mode == "I":
        result = code + "\n" + input_str
    else:
        result = code
    if len(result) > 0 and result[0] == "`":
        # The result is a nested program: "`<mode><code>" optionally followed
        # by "!"-separated inline input.
        input_pieces = pcre.split(r"(?<![^\\]\\)!", result)
        if len(input_pieces) >= 2:
            execute(result[1], input_pieces[0][2:], "!".join(input_pieces[1:]))
        else:
            execute(result[1], result[2:], get_input(input_str))
    else:
        print(unescape(result))
def execute(mode, code, input_str):
    # Older variant of the interpreter dispatch: builds lookup tables inline
    # and uses unicode-escape decoding instead of an unescape() helper.
    result = ""
    if mode == "l":
        rows = [pcre.split(r"(?<![^\\]\\)&", row) for row in pcre.split(r"(?<![^\\]\\);", code)]
        table = {}
        for row in rows:
            # Every cell except the last maps to the row's last cell.
            table.update(dict(zip(row[:-1], [row[-1]] * (len(row) - 1))))
        if input_str in table:
            result = table[input_str]
        else:
            result = table["?"]  # "?" acts as the default/fallback key
    elif mode == "f":
        result = code % ast.literal_eval(input_str)
    elif mode == "F":
        literal = ast.literal_eval(input_str)
        if isinstance(literal, tuple):
            result = code % literal
            input_str = str(sum([len(str(x)) for x in literal]))
        else:
            result = code % literal
            input_str = str(len(str(literal)))
    elif mode == "g":
        for string in exrex.generate(code):
            print(string.encode("utf-8").decode("unicode-escape"))
        return  # Generate is always terminal
    elif mode == "h":
        if type(input_str) is str:
            input_str = pcre.escape(input_str)
        for string in exrex.generate(code % input_str):
            print(string.encode("utf-8").decode("unicode-escape"))
        return
    elif mode == "p":
        literal = ast.literal_eval(input_str)
        if isinstance(literal, int):
            result = pcre.sub(r"(?<![^\\]\\)~(.+?)(?<![^\\]\\)~", r"\1" * literal, code, flags=pcre.DOTALL)
        else:
            result = pcre.sub(r"(?<![^\\]\\)%(.+?)(?<![^\\]\\)%", r"\1" * literal[1],
                              pcre.sub(r"~(.+?)~", r"\1" * literal[0], code, flags=pcre.DOTALL),
                              flags=pcre.DOTALL)
    elif mode == "P":
        result = pcre.sub(r"(.)(?<![^\\]\\)~", r"\1" * ast.literal_eval(input_str), code, flags=pcre.DOTALL)
    elif mode == "e":
        rows = [pcre.split(r"(?<![^\\]\\)&", row) for row in pcre.split(r"(?<![^\\]\\);", code)]
        table = {}
        for row in rows:
            table.update(dict(zip(row[:-1], [row[-1]] * (len(row) - 1))))
        # NOTE(review): `i` is undefined here and `char` is unused — this
        # branch looks broken; confirm intended behavior.
        for char in i:
            result += table[i]
    elif mode == "o":
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        print(pieces[0].encode("utf-8").decode("unicode-escape"))
        result = "`" + "`".join(pieces[1:])
    elif mode == "s":
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        subs = pcre.split(r"(?<![^\\]\\)&", pieces[0])
        sub_length = len(subs)
        # Apply (pattern, replacement) pairs in order.
        for i in range(0, len(subs), 2):
            input_str = pcre.sub(subs[i], subs[i + 1], input_str)
        if len(pieces) > 1:
            result = "`" + "`".join(pieces[1:])
        else:
            result = input_str
    elif mode == "d":
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        subs = pcre.split(r"(?<![^\\]\\)&", pieces[0])
        for sub in subs:
            input_str = pcre.sub(sub, "", input_str)
        if len(pieces) > 1:
            result = "`" + "`".join(pieces[1:])
        else:
            result = input_str
    elif mode == "S":
        # Like "s" but prints the substituted input directly.
        pieces = pcre.split(r"(?<![^\\]\\)`", code)
        subs = pcre.split(r"(?<![^\\]\\)&", pieces[0])
        sub_length = len(subs)
        output = input_str
        for i in range(0, len(subs), 2):
            output = pcre.sub(subs[i], subs[i + 1], output)
        if len(pieces) > 1:
            result = "`" + "`".join(pieces[1:])
        else:
            result = ""
        print(output.encode("utf-8").decode("unicode-escape"))
    elif mode == "i":
        result = code + input_str
    elif mode == "I":
        result = code + "\n" + input_str
    else:
        result = code
    if len(result) > 0 and result[0] == "`":
        # The result is a nested program: "`<mode><code>" optionally followed
        # by "!"-separated inline input.
        input_pieces = pcre.split(r"(?<![^\\]\\)!", result)
        if len(input_pieces) >= 2:
            execute(result[1], input_pieces[0][2:], "!".join(input_pieces[1:]))
        else:
            execute(result[1], result[2:], get_input(input_str))
    else:
        print(result.encode("utf-8").decode("unicode-escape"))
import exrex

# Expand the password regex into a wordlist file, one candidate per line.
output_file = 'dict.lst'
regex = 'bev[A-Z][0-9]{2}[a-z]{2}[`~!@#$%^&*()_+}{|":;.,/?><\']1995'
generated_strings = list(exrex.generate(regex))
with open(output_file, 'w') as f:
    # `candidate` instead of the original loop variable `str`, which shadowed
    # the builtin; writelines batches the writes.
    f.writelines(candidate + '\n' for candidate in generated_strings)