def __init__(self):
    """Set up the uploader thread and its persistent JSON state files."""
    threading.Thread.__init__(self)
    # Number of uploads currently in flight.
    self.concurrent_uploads = 0
    self.max_item_size = settings.max_item_size
    # Persistent on-disk state: pending uploads and known items.
    self.uploads_file = file.File(
        os.path.join(settings.dir_last_upload, 'uploads.json'))
    self.uploads = {}
    self.items_file = file.File(
        os.path.join(settings.dir_last_upload, 'items.json'))
    self.items = {}
def distribute_services(self):
    """Split the service list across discovery targets and rsync each list out.

    Each target gets a ``<name>_services_list`` JSON file containing its
    share of ``self.services`` plus the nick it should use; the file is
    then pushed via rsync (which removes the local copy on success).
    """
    discovery_targets = self.get_discovery_targets()
    service_lists = tools.splitlist(self.services, len(discovery_targets))
    # Write (or extend) one JSON services file per target.
    for i, target in enumerate(discovery_targets):
        filename = '{name}_services_list'.format(name=target['name'])
        self.discovery_files[i] = file.File(filename)
        filename_content = self.discovery_files[i].read_json()
        if isinstance(filename_content, dict):
            # A previous, not-yet-synced list exists: append to it.
            filename_content['services'] += service_lists[i]
        else:
            filename_content = {}
            filename_content['services'] = service_lists[i]
        filename_content['nick'] = target['name']
        self.discovery_files[i].write_json(filename_content)
    # Push every list file to its target.
    for target in discovery_targets:
        filename = '{name}_services_list'.format(name=target['name'])
        if os.path.isfile(filename):
            # BUG FIX: the rsync source operand was a mangled placeholder;
            # it must be the local services file. --remove-source-files
            # deletes the file locally when the transfer succeeds.
            exit_code = os.system(
                'rsync -avz --no-o --no-g --progress --remove-source-files {filename} {target}'
                .format(filename=filename, target=target['rsync']))
            if exit_code != 0:
                settings.irc_bot.send(
                    'PRIVMSG',
                    'Serviceslist {filename} failed to sync'.format(
                        **locals()), settings.irc_channel_bot)
def __init__(self, port=None, device=None):
    """Load config.json and prepare per-run bookkeeping for the contact finder."""
    self._adb = Adb(port, device)
    # Counts consecutive lookup failures; three in a row pauses for half an hour.
    self._flag = 0
    self._success = []
    self._failed = []
    self._dict = {'success': self._success, 'failed': self._failed}
    self._file = file.File()
    # Parsed config.json settings.
    self._json = self._file.json()
    # Contact lookup mode: 'file' or 'loop'.
    self._mode = self._json['mode']
    # Loop bounds: start inclusive, end exclusive.
    self._loop = self._json['loop']
    # Input file path; one phone number per line.
    self._filePath = self._json['file']
    # Accounts reserved for automatic WeChat login switching.
    self._account = self._json['account']
    # Flush accumulated results from memory to disk after this many lookups.
    self._dump = self._json['dump']
    # Sleep duration (minutes) after enough account switches.
    self._sleep = self._json['sleep']
    # Number of account switches that triggers the sleep.
    self._sleep_flag = self._json['sleep-flag']
async def compress_chunk(i, read_chunks, compressed_chunks, code_start):
    """Compress read chunk *i* strictly in index order and log timing.

    Each call first awaits chunk ``i - 1`` (so chunks compress in order),
    then polls until the reader has filled ``read_chunks[i]``, compresses
    it into ``compressed_chunks[i]``, frees the raw data, and appends the
    chunk's start offset and duration to compress_time_file.txt.
    """
    global compress_time
    # Enforce in-order compression: chunk i waits for chunk i-1 to finish.
    if i != 0:
        await compress_chunk(i - 1, read_chunks, compressed_chunks, code_start)
    # Poll until the reader thread has produced this chunk.
    # (BUG FIX: compare against None with 'is not', not '!=').
    while read_chunks[i] is None:
        print("waiting for read")
        await asyncio.sleep(0.01)
    print("compressing chunk " + str(i))
    start = time.time()
    # Yield once so other coroutines can make progress before the
    # CPU-heavy compression step.
    await asyncio.sleep(0)
    q = file.File(0, read_chunks[i], 32, 32, 128, 0, 1)
    compressed_chunks[i] = q.GetContent().tobytes()
    q.DeleteContent()
    del q
    # Release the raw chunk so peak memory stays bounded.
    read_chunks[i] = None
    end = time.time()
    compress_time += end - start
    # Append per-chunk timing. BUG FIX: the original leaked the file
    # handle (open() without close); 'with' guarantees it is closed.
    with open("compress_time_file.txt", "a") as compress_time_file:
        compress_time_file.write("chunk " + str(i) + " start: " +
                                 str(start - code_start) + " duration: " +
                                 str(end - start) + "\n")
def scan(self):
    """Enumerate kernel extensions and return them in a results dictionary."""
    utils.logMessage(utils.MODE_INFO, 'running scan')
    # Results dictionary keyed by this plugin's name/description.
    results = self.initResults(KEXT_NAME, KEXT_DESCRIPTION)
    # Collect every bundle found under the known kext directories.
    kexts = []
    for kextDir in KEXT_DIRECTORIES:
        utils.logMessage(utils.MODE_INFO, 'scanning %s' % kextDir)
        kexts.extend(glob.glob(kextDir + '*'))
    # Keep only bundles that actually contain a binary; pass the bundle
    # path so the File object can reach Info.plist etc.
    for kextBundle in kexts:
        if not utils.getBinaryFromBundle(kextBundle):
            continue
        results['items'].append(file.File(kextBundle))
    return results
def distribute_urls(self):
    """Split queued video/normal URLs across grab targets and rsync the lists out.

    Snapshots both URL queues, splits each snapshot over the available
    grab targets, writes one timestamped JSON file per target, and pushes
    it via rsync (which removes the local copy on success).
    """
    # Snapshot the queues, then drop the snapshotted prefix so URLs
    # appended concurrently remain queued for the next round.
    urls_video = list(self.urls_video)
    urls_normal = list(self.urls_normal)
    self.urls_video = list(self.urls_video[len(urls_video):])
    self.urls_normal = list(self.urls_normal[len(urls_normal):])
    # Reduce the queued dicts to their bare URL strings.
    urls_video = [url['url'] for url in urls_video]
    urls_normal = [url['url'] for url in urls_normal]
    lists = [{'sort': '-videos', 'list': urls_video},
             {'sort': '', 'list': urls_normal}]
    for list_ in lists:
        grab_targets = self.get_grab_targets()
        urls_lists = tools.splitlist(list_['list'], len(grab_targets))
        for i, target in enumerate(grab_targets):
            filename = '{name}{sort}_temp_{i}_{timestamp}'.format(
                name=target['name'], sort=list_['sort'], i=i,
                timestamp=time.time())
            self.grab_files[i] = file.File(filename)
            self.grab_files[i].write_json({'urls': urls_lists[i],
                                           'nick': target['name']})
            # BUG FIX: the rsync source operand was a mangled placeholder;
            # sync the file just written. --remove-source-files deletes it
            # locally on success.
            exit_code = os.system(
                'rsync -avz --no-o --no-g --progress --remove-source-files {filename} {target}'.format(
                    filename=filename, target=target['rsync']))
            if exit_code != 0:
                settings.irc_bot.send(
                    'PRIVMSG',
                    'URLslist {filename} failed to sync.'.format(**locals()),
                    settings.irc_channel_bot)
def __init__(self, port=None, device=None):
    """Read config.json, build the ADB wrapper, and initialise lookup state."""
    # Counts consecutive lookup failures; three in a row pauses for half an hour.
    self._flag = 0
    self._success = []
    self._failed = []
    self._dict = {'success': self._success, 'failed': self._failed}
    self._file = file.File()
    self._done_list = self._file.get_done_list()
    # Parsed config.json settings.
    self._json = self._file.json()
    self._adb = Adb(password=self._json['password'], port=port, device=device)
    # Contact lookup mode: 'file' or 'loop'.
    self._mode = self._json['mode']
    # Loop bounds: start inclusive, end exclusive.
    self._loop = self._json['loop']
    # Input file path; one phone number per line.
    self._input = self._json['file']
    # Accounts reserved for automatic WeChat login switching.
    self._account = self._json['account']
    # Flush accumulated results from memory to disk after this many lookups.
    self._dump = self._json['dump']
    # Sleep duration (minutes) after enough account switches.
    self._sleep = self._json['sleep-time']
    # Number of account switches that triggers the sleep.
    self._sleep_flag = self._json['sleep-flag']
    self._switch_when = self._json['switch-when']
    self._added_number = 0
def scanLaunchItems(self, directories):
    """Return File objects for auto-run launch daemons/agents under *directories*."""
    results = []
    # Expand '~' so every user's launch directories are covered.
    directories = utils.expandPaths(directories)
    # Gather all plists from the launch daemon/agent directories.
    launchItems = []
    for directory in directories:
        utils.logMessage(utils.MODE_INFO, 'scanning %s' % directory)
        launchItems.extend(glob.glob(directory + '*'))
    # Reduce to services that auto-run; each entry pairs (plist, binary).
    autoRunItems = self.autoRunBinaries(launchItems)
    for autoRunItem in autoRunItems:
        results.append(file.File(autoRunItem[0], autoRunItem[1]))
    return results
def host_add_file(path, name, size=None):
    """Move *path* into the share folder and stream the file to every client.

    path: current location of the file on disk.
    name: name the file is shared (and announced) under.
    size: optional byte count to advertise; defaults to the on-disk size.
    """
    global masterlist
    global sock_list
    head, tail = os.path.split(path)
    masterlist.append(file.File(name))
    try:
        # Move the file into the shared working directory.
        shutil.move(path, os.path.join(files.get_working_directory(), tail))
    except Exception as e:
        print("Error in host_add_file: %s" % str(e))
    # Sends the file to all client nodes
    for sock in sock_list:
        # Full path of the file including name and filetype.
        truepath = os.path.join(files.get_working_directory(), name)
        print(truepath)
        fsize = size if size else os.path.getsize(truepath)
        # Header: ADD <name> ETX <size> EOT.
        message = ADD + name + ETX + str(fsize) + EOT
        print("Sending: %s to %s" % (message, str(sock.getpeername())))
        # BUG FIX: use sendall(); plain send() may transmit only part of
        # the buffer and silently corrupt the protocol stream.
        sock.sendall(message.encode(ENCODING))
        # 'with' closes the handle; the explicit close() was redundant.
        with open(truepath, 'rb') as k:
            bytessent = 0
            while bytessent < fsize:
                data = k.read(BUFFER_SIZE)
                if not data:
                    # File shorter than the advertised size: stop instead
                    # of spinning forever on empty reads.
                    break
                sock.sendall(data)
                bytessent += len(data)
        print("Sent Successfully!")
def scan(self):
    """Find startup items (a script named after its directory) and report them."""
    utils.logMessage(utils.MODE_INFO, 'running scan')
    results = self.initResults(STARTUP_ITEM_NAME, STARTUP_ITEM_DESCRIPTION)
    # Each base directory holds one sub-directory per startup item.
    for startupItemBaseDirectory in STARTUP_ITEM_BASE_DIRECTORIES:
        for startupItemDirectory in glob.glob(startupItemBaseDirectory + '*'):
            # The item itself is a file (script) that matches the name
            # of its enclosing sub-directory.
            startupItem = startupItemDirectory + '/' + os.path.split(startupItemDirectory)[1]
            if os.path.exists(startupItem):
                results['items'].append(file.File(startupItem))
    return results
def scan():
    """Return a File object for every known file name, with its path and mtime.

    BUG FIX: the accumulator was named ``list``, shadowing the builtin.
    """
    files_found = []
    for name in get_file_names():
        path = get_filepath(name)
        # Capture the modification time alongside name and location.
        files_found.append(file.File(name, get_mod_time(path), path))
    return files_found
def find_file(self):
    """Return the connection entry matching this address, creating one if absent."""
    # Reuse an existing entry registered for the same address.
    for conn in connection_list:
        if conn.addr == self.addr:
            print('Using file - {}'.format(conn.addr))
            return conn
    # No match: register a fresh File for this address.
    new_conn = file.File(self.addr)
    connection_list.append(new_conn)
    return new_conn
def compress_chunk(i, read_chunks, compressed_chunks):
    """Compress read chunk *i*, append the bytes, and free the raw input."""
    global compress_time
    print("compressing chunk " + str(i))
    started = time.time()
    compressor = file.File(0, read_chunks[i], 32, 32, 128, 0, 1)
    compressed_chunks.append(compressor.GetContent().tobytes())
    compressor.DeleteContent()
    del compressor
    # Release the raw chunk so memory usage stays bounded.
    read_chunks[i] = None
    # Track cumulative time spent compressing across all chunks.
    compress_time += time.time() - started
def __init__(self):
    """Initialise the grabber thread with empty URL queues and target state."""
    threading.Thread.__init__(self)
    # Pending URL lists and counters.
    self.url_lists = []
    self.url_count_new = 0
    self.url_count = 0
    # Separate queues for video and normal URLs.
    self.urls_video = []
    self.urls_normal = []
    # Known grab targets are read from the targets file.
    self.targets = file.File(settings.targets)
    self.grab_files = {}
    self.running = True
def scanPlists(plists, key, isLoaded=False):
    # Scan plists for a 'DYLD_INSERT_LIBRARIES' environment variable under
    # *key* and collect a File object for each hit.
    #
    # plists:   iterable of plist paths (or already-loaded plists).
    # key:      dictionary key under which the env vars live.
    # isLoaded: when True, *plists* are already-parsed plist objects.
    #
    # NOTE(review): *results* is built but never returned in the visible
    # span — callers appear to receive None; confirm against the full file.
    #results
    results = []
    #sanity check
    if not plists:
        #bail
        return None
    #iterate over all plist
    # ->check for 'DYLD_INSERT_LIBRARIES' enviroment variable
    for plist in plists:
        # Any malformed plist is skipped via the broad except below.
        try:
            #load contents of plist if needed
            if not isLoaded:
                #save path
                plistPath = plist
                #load it and check
                loadedPlist = utils.loadPlist(plist)
                if not loadedPlist:
                    #skip
                    continue
            #otherwise it's already loaded
            # ->use as is
            else:
                #set
                loadedPlist = plist
                #get path
                plistPath = utils.getPathFromPlist(loadedPlist)
            #check for/extract 'DYLD_INSERT_LIBRARIES'
            if key in loadedPlist and 'DYLD_INSERT_LIBRARIES' in loadedPlist[
                    key]:
                #create file obj and append to results
                results.append(
                    file.File(loadedPlist[key]['DYLD_INSERT_LIBRARIES'],
                              plistPath))
        # Deliberate best-effort: ignore any per-plist failure
        # (Python 2 except syntax).
        except Exception, e:
            #ignore
            pass
def __init__(self, port=None, device=None):
    """Load config.json and set up state for adding friends across WeChat clones."""
    self._adb = Adb(port, device)
    # Counts consecutive lookup failures; three in a row pauses for half an hour.
    self._flag = 0
    # How many friends the current account has added so far.
    self._addfriendnum = 0
    self._success = []
    self._failed = []
    self._dict = {'success': self._success, 'failed': self._failed}
    self.file = file.File()
    # Parsed config.json settings.
    self._json = self.file.json()
    # Contact lookup mode: 'file' or 'loop'.
    self._mode = self._json['mode']
    # Loop bounds: start inclusive, end exclusive.
    self._loop = self._json['loop']
    # Input file path; one phone number per line.
    self._file = self._json['file']
    # Accounts reserved for automatic WeChat login switching.
    self._account = self._json['account']
    # Flush accumulated results from memory to disk after this many lookups.
    self._dump = self._json['dump']
    # Sleep duration (minutes) after enough account switches.
    self._sleep = self._json['sleep']
    # Number of account switches that triggers the sleep.
    self._sleep_flag = self._json['sleep-flag']
    # Number of WeChat clone instances.
    self._wechat_count = self._json['wechatcount']
    # Total friends to add per account.
    self._addfriendcount = self._json['friends']
    # Clone index to start from: -1 runs the main WeChat app first, 0 the
    # first clone, 1 the second, and so on; configured via 'startwechat'
    # in config.json (open the matching clone before running).
    self._wechat = self._json['startwechat']
    self._old_wechat = self._json['startwechat']
    self.clearnum = self._json['cleanmemory']
    # Overall friend quota across the remaining clone instances.
    self.friendcount = self._addfriendcount * (self._wechat_count - self._old_wechat)
    self.list_index = 0
    print('self.friendcount', self.friendcount)
    self.phonelist = []
    self._end = False
def grab(self):
    # Main grab loop: repeatedly pick up new list files from
    # settings.dir_new_lists and hand each to a daemon worker thread.
    while True:
        for filename in os.listdir(settings.dir_new_lists):
            # Block while grabbing is paused globally.
            while not settings.grab_running:
                time.sleep(1)
            print filename
            filename_urls = os.path.join(settings.dir_old_lists,
                                         filename + '_urls')
            filejson = file.File(
                os.path.join(settings.dir_new_lists, filename)).read_json()
            # Adopt the nick the list was assigned to, if different.
            if filejson['nick'] != settings.irc_nick:
                settings.irc_bot.set_nick(filejson['nick'])
            # Persist the URL list, then archive the processed list file.
            file.File(filename_urls).write_lines(filejson['urls'])
            os.rename(os.path.join(settings.dir_new_lists, filename),
                      os.path.join(settings.dir_old_lists, filename))
            # One daemon worker per list file.
            self.grabs[filename] = threading.Thread(
                target=self.grab_single, args=(filename_urls, ))
            self.grabs[filename].daemon = True
            self.grabs[filename].start()
        # NOTE(review): sleep placement inferred from collapsed source —
        # presumably the directory is polled every 10 s; confirm.
        time.sleep(10)
def call_query(event, context):
    """Lambda handler: fetch a dataset's .metadata file from S3 and deserialize it."""
    s3 = boto3.client("s3")
    # Lambda only allows writes under /tmp.
    os.chdir("/tmp")
    os.mkdir(event['key1'])
    # Download <key1>.metadata into /tmp/<key1>/.
    s3.download_file(
        "succinct-datasets",
        event['key1'] + ".metadata",
        "/tmp/" + event['key1'] + "/" + event['key1'] + ".metadata")
    # Deserialize the downloaded dataset.
    dataset = file.File(event['key1'])
    print("File deserialization and querying is complete")
def __init__(self, service_name):
    """Prepare the per-service worker thread and its URL bookkeeping."""
    threading.Thread.__init__(self)
    self.service_name = service_name
    # Service metadata; populated later by the refresh logic.
    self.service_refresh = None
    self.service_urls = None
    self.service_regex = None
    self.service_regex_video = None
    self.service_regex_live = None
    self.service_version = None
    self.service_wikidata = None
    # URLs already logged, mirrored in a done-file on disk.
    self.service_log_urls = []
    self.service_file_log_urls = file.File(
        os.path.join(settings.dir_donefiles, self.service_name))
    self.service_urls_age = time.time()
def scan(self):
    """Report running processes without a dock icon (assumed not user-started)."""
    utils.logMessage(utils.MODE_INFO, 'running scan')
    results = self.initResults(UNCLASSIFIED_NAME, UNCLASSIFIED_DESCRIPTION)
    # Snapshot all running processes and annotate them.
    processes = utils.getProcessList()
    # Resolve each process's top parent (besides launchd, pid 0x1).
    utils.setFirstParent(processes)
    # Tag each process as dock (user-facing) or not.
    utils.setProcessType(processes)
    # Processes without a dock icon are assumed not started by the user.
    nonDockProcs = self.getNonDockProcs(processes)
    # Use a set for O(1) duplicate-path checks (was an O(n) list scan).
    reportedPaths = set()
    for pid in nonDockProcs:
        path = nonDockProcs[pid]['path']
        # Ignore duplicate paths.
        if path in reportedPaths:
            continue
        # Ignore /opt/X11/: owned by root, so considered benign.
        if path.startswith('/opt/X11/'):
            continue
        results['items'].append(file.File(path))
        reportedPaths.add(path)
    return results
def __init__(self):
    """Initialise the grabber thread, its queues, and the warrior-lists directory."""
    threading.Thread.__init__(self)
    self.url_lists = []
    self.url_count_new = 0
    self.url_count = 0
    # Separate queues for video and normal URLs.
    self.urls_video = []
    self.urls_normal = []
    self.warriorfiles = []
    # Known grab targets are read from the targets file.
    self.targets = file.File(settings.targets)
    self.grab_files = {}
    self.running = True
    # Ensure the warrior lists directory exists before use.
    if not os.path.isdir('warriorlists'):
        os.makedirs('warriorlists')
def scanPlists(plists, key, isLoaded=False):
    # Scan plists for a 'DYLD_INSERT_LIBRARIES' entry under *key* and
    # collect a File object for each hit.
    #
    # NOTE(review): the original comment claimed this checks 'RunAtLoad'
    # and 'ProgramArguments', but the code only looks for
    # 'DYLD_INSERT_LIBRARIES'; comment corrected to match the code.
    # NOTE(review): *results* is built but never returned in the visible
    # span — callers appear to receive None; confirm against the full file.
    #results
    results = []
    #iterate over all plists
    for plist in plists:
        # Any malformed plist is skipped via the broad except below.
        try:
            #load contents of plist if needed
            if not isLoaded:
                #save path
                plistPath = plist
                #load it and check
                loadedPlist = utils.loadPlist(plist)
                if not loadedPlist:
                    #skip
                    continue
            #otherwise its already loaded
            # ->use as is
            else:
                #set
                loadedPlist = plist
                #get path
                plistPath = utils.getPathFromPlist(loadedPlist)
            #check for/extract 'DYLD_INSERT_LIBRARIES'
            if key in loadedPlist and 'DYLD_INSERT_LIBRARIES' in loadedPlist[
                    key]:
                #create file obj and append to results
                results.append(
                    file.File(loadedPlist[key]['DYLD_INSERT_LIBRARIES'],
                              plistPath))
        # Deliberate best-effort: ignore any per-plist failure
        # (Python 2 except syntax).
        except Exception, e:
            #ignore
            pass
def new_wheel(self, event=None, name=None):
    """Open a new wheel tab in the notebook, optionally backed by *name*."""
    if name is None:
        tab_name = "⚫ untitled-%r" % COUNTER()
    else:
        tab_name = wheel_name(name)
    wheel = file.File(self.notebook)
    if name is None:
        # Untitled tabs remember their generated placeholder name.
        wheel.temporary_name = tab_name
    wheel.grid(row=0, column=0)
    if name:
        wheel.file_path = str(name)
    self.notebook.add(wheel, text=tab_name)
    # Move focus to the freshly created tab.
    self.notebook.select(wheel)
    wheel.draw_default_wheel()
    # New wheels start out with unsaved changes.
    wheel.saved = False
    widgets.update_title(self, wheel)
    return wheel
def run(self): print "" self.output.info('Starting bruteforce module...') file.File(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run() admin.Admin(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run() backdoor.Backdoor(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run() bdir.Bdir(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run() bfile.Bfile(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run() dir.Dir(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run() log.Log(agent=self.agent, proxy=self.proxy, redirect=self.redirect, timeout=self.timeout, url=self.url, cookie=self.cookie).run()
def __init__(self, port=None, device=None):
    """Assemble the base adb command for an optional port/device and reset caches."""
    self._port = port
    self._device = device
    # Optional '-P <port>' / '-s <device>' adb flags (empty when unset).
    self._p = '' if (port is None) else '-P ' + str(port) + ' '
    self._s = '' if (device is None) else '-s ' + str(device) + ' '
    # Assembled adb command prefix with port/device flags applied.
    self._baseShell = adb_path() + 'adb ' + self._p + self._s
    # Directory containing this module (adb.py).
    self._basePath = file.File()._basePath
    # Cached UI dump XML, avoiding repeated file reads.
    self._xml = None
    # Cached node lookup result: list for type queries, dict for value queries.
    self._nodes = None
    self._x = None
    self._y = None
def upload_url_lists(self):
    """Forever: batch queued URLs into JSON files and rsync them to the main server."""
    while True:
        # Block while uploading is paused globally.
        while not settings.upload_running:
            time.sleep(1)
        # Snapshot the queue; anything appended meanwhile stays queued.
        urls = list(self.urls)
        self.urls = list(self.urls[len(urls):])
        if urls:
            target = self.target.read()
            file_name = settings.irc_nick + '_' + str(time.time())
            self.url_files[file_name] = file.File(file_name)
            self.url_files[file_name].write_json(urls)
            # --remove-source-files deletes the local file on success.
            os.system('rsync -avz --no-o --no-g --progress --remove-source-files {file_name} {target}'.format(
                **locals()))
            if os.path.isfile(file_name):
                # The file is still here, so rsync failed: report it,
                # requeue the URLs, and drop the local file.
                settings.irc_bot.send('PRIVMSG', '{file_name} synced unsuccessful to main server.'.format(
                    **locals()), settings.irc_channel_bot)
                self.urls += self.url_files[file_name].read_json()
                os.remove(file_name)
        time.sleep(60)
def scan(self):
    """Enumerate authorization plugin bundles and return them as results."""
    utils.logMessage(utils.MODE_INFO, 'running scan')
    # Results dictionary keyed by this plugin's name/description.
    results = self.initResults(AUTH_PLUGIN_NAME, AUTH_PLUGIN_DESCRIPTION)
    # Collect everything found under the known auth plugin directories.
    authPlugins = []
    for authPluginDir in AUTH_PLUGIN_DIRECTORIES:
        utils.logMessage(utils.MODE_INFO, 'scanning %s' % authPluginDir)
        authPlugins.extend(glob.glob(authPluginDir + '*'))
    for authPlugin in authPlugins:
        # Bundles are directories; skip anything else.
        if not os.path.isdir(authPlugin):
            continue
        # Skip bundles that have no binary.
        if not utils.getBinaryFromBundle(authPlugin):
            continue
        # Pass the bundle path so the File object can reach Info.plist etc.
        results['items'].append(file.File(authPlugin))
    return results
def scan(self):
    """Enumerate Spotlight importer bundles and return them as results."""
    utils.logMessage(utils.MODE_INFO, 'running scan')
    # Results dictionary keyed by this plugin's name/description.
    results = self.initResults(IMPORTER_NAME, IMPORTER_DESCRIPTION)
    # Collect everything found under the known importer directories.
    importers = []
    for importerDir in IMPORTERS_DIRECTORIES:
        utils.logMessage(utils.MODE_INFO, 'scanning %s' % importerDir)
        importers.extend(glob.glob(importerDir + '*'))
    for importerBundle in importers:
        # Bundles are directories; skip anything else.
        if not os.path.isdir(importerBundle):
            continue
        # Skip bundles that have no binary.
        if not utils.getBinaryFromBundle(importerBundle):
            continue
        # Pass the bundle path so the File object can reach Info.plist etc.
        results['items'].append(file.File(importerBundle))
    return results
def refresh(self):
    """Forever: pick up assigned-services files, apply them, then refresh services."""
    while True:
        assigned_dir = settings.dir_assigned_services
        for entry in os.listdir(assigned_dir):
            full_path = os.path.join(assigned_dir, entry)
            # Only regular files count as assignments.
            if not os.path.isfile(full_path):
                continue
            # Block while the service runner is paused globally.
            while not settings.run_services_running:
                time.sleep(1)
            services_new = file.File(full_path).read_json()
            self.assigned_services = services_new['services']
            # Adopt the nick the assignment was addressed to, if different.
            if services_new['nick'] != settings.irc_nick:
                settings.irc_bot.set_nick(services_new['nick'])
            print(self.assigned_services)
            settings.irc_bot.send('PRIVMSG', '{i} services assigned.'.format(
                i=len(self.assigned_services)), settings.irc_channel_bot)
            # Remove the processed assignment file, then apply it.
            os.remove(full_path)
            self.refresh_services()
        time.sleep(1)
def getWordList(self):
    """Return the file's content as a space-joined string of cleaned words.

    Words are lowercased, stripped of non-alphabetic characters, and
    filtered against the stop words in stopwords.txt. (Despite the name,
    the return value is a single joined string, matching the original.)
    """
    text = self.getFileContent().lower()
    # BUG FIX: the stop-word file content was previously used as one raw
    # string, so 'word in stopWords' was a substring test and wrongly
    # dropped any word that occurs inside any stop word (e.g. almost
    # every word containing 'a'). Split it into a set of whole words.
    stopWords = set(file.File("stopwords.txt").getFileContent().split())
    cleanWordList = []
    for word in text.split():
        # Keep only the alphabetic characters of each token.
        cleanWord = "".join(char for char in word if char.isalpha())
        if cleanWord and cleanWord not in stopWords:
            cleanWordList.append(cleanWord)
    return ' '.join(cleanWordList)