async def run(self):
    """Run the edge calculation.

    If ad_id is unset but graph_id is given, this is a full recalculation:
    all previously calculated edges of the graph except 'member' edges are
    deleted, then the calculation is re-run for every AD attached to the
    graph. Otherwise a single calculation is started for the current ad_id.

    Returns:
        (True, None) on success, (False, exception) on failure.
    """
    try:
        self.session = get_session(self.db_conn)
        if self.ad_id is None and self.graph_id is not None:
            # Recalc mode: wipe the derived edges; membership edges are kept.
            self.session.query(Edge).filter_by(graph_id=self.graph_id).filter(Edge.label != 'member').delete()
            self.session.commit()
            # Re-run the calculation once per AD linked to this graph.
            # (The original also fetched the GraphInfo row here but never
            # used it; that dead query has been removed.)
            for giad in self.session.query(GraphInfoAD).filter_by(graph_id=self.graph_id).all():
                self.ad_id = giad.ad_id
                _, err = await self.start_calc()
                if err is not None:
                    raise err
        else:
            _, err = await self.start_calc()
            if err is not None:
                raise err
        return True, None
    except Exception as e:
        logger.exception('edge calculation error!')
        return False, e
    finally:
        # Best-effort cleanup; session may never have been created.
        try:
            self.session.close()
        except Exception:
            pass
async def run(self):
    """Start the websocket server.

    Sets up the DB session, selects the graph backend implementation,
    prepares the work directory and serves websocket clients until the
    server is closed.

    Raises:
        Exception: if db_url is missing or the graph backend is unknown.
    """
    if self.db_url is None:
        raise Exception('db_url must be either sqlalchemy url or an established db session')
    # db_url may be a connection string or an already-built session object.
    if isinstance(self.db_url, str):
        self.db_session = get_session(self.db_url)
    else:
        self.db_session = self.db_url

    # Backend imports are deferred so only the selected backend's
    # dependencies need to be installed.
    backend = self.graph_backend.upper()
    if backend == 'NETWORKX':
        from jackdaw.nest.graph.backends.networkx.domaingraph import JackDawDomainGraphNetworkx
        self.graph_type = JackDawDomainGraphNetworkx
    elif backend == 'IGRAPH':
        from jackdaw.nest.graph.backends.igraph.domaingraph import JackDawDomainGraphIGraph
        self.graph_type = JackDawDomainGraphIGraph
    elif backend == 'GRAPHTOOLS':
        from jackdaw.nest.graph.backends.graphtools.domaingraph import JackDawDomainGraphGrapthTools
        self.graph_type = JackDawDomainGraphGrapthTools
    else:
        # Previously an unknown backend was silently skipped, leaving
        # graph_type unset and causing a confusing AttributeError later.
        raise Exception('Unknown graph backend: %s' % self.graph_backend)

    pathlib.Path(self.work_dir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(self.work_dir).joinpath('graphcache').mkdir(parents=True, exist_ok=True)

    self.msg_queue = asyncio.Queue()
    #handler = functools.partial(process_request, os.getcwd())
    self.server = await websockets.serve(
        self.handle_incoming,
        self.listen_ip,
        self.listen_port,
        ssl=self.ssl_ctx,
        process_request=self.preprocess_request,
        subprotocols=self.subprotocols)
    print('[+] Server is running!')
    await self.server.wait_closed()
async def run(self):
    """Receive websocket commands and dispatch each to its handler task.

    Runs until the connection is cancelled or a fatal error occurs; each
    recognized command is handled in its own asyncio task.
    """
    try:
        self.msg_queue = asyncio.Queue()
        self.db_session = get_session(self.db_url)
        # Command -> handler lookup; replaces a long if/elif chain.
        handlers = {
            NestOpCmd.GATHER: self.do_gather,
            NestOpCmd.KERBEROAST: self.do_kerberoast,
            NestOpCmd.SMBSESSIONS: self.do_smbsessions,
            NestOpCmd.PATHSHORTEST: self.do_pathshortest,
            NestOpCmd.PATHDA: self.do_pathda,
            NestOpCmd.GETOBJINFO: self.do_getobjinfo,
            NestOpCmd.LISTADS: self.do_listads,
            NestOpCmd.CHANGEAD: self.do_changead,
            NestOpCmd.LISTGRAPHS: self.do_listgraphs,
            NestOpCmd.CHANGEGRAPH: self.do_changegraph,
            NestOpCmd.TCPSCAN: self.do_tcpscan,
            #NestOpCmd.LOADAD: self.do_load_ad,
            NestOpCmd.LOADGRAPH: self.do_load_graph,
            NestOpCmd.ADDCRED: self.do_add_cred,
            NestOpCmd.GETCRED: self.do_get_cred,
            NestOpCmd.LISTCRED: self.do_list_cred,
            NestOpCmd.ADDTARGET: self.do_add_target,
            NestOpCmd.GETTARGET: self.do_get_target,
            NestOpCmd.LISTTARGET: self.do_list_target,
        }
        while True:
            try:
                cmd_raw = await self.websocket.recv()
                cmd = NestOpCmdDeserializer.from_json(cmd_raw)
                handler = handlers.get(cmd.cmd)
                if handler is None:
                    print('Unknown Command')
                else:
                    asyncio.create_task(handler(cmd))
            except asyncio.CancelledError:
                return
            except Exception as e:
                traceback.print_exc()
                return
    except asyncio.CancelledError:
        return
    except Exception as e:
        print(e)
def get_session(self):
    """Return a usable database session.

    Prefers building a fresh session from db_conn; falls back to the
    pre-established dbsession.

    Raises:
        Exception: if neither db_conn nor dbsession is available.
    """
    # Guard clauses instead of an if/elif/else ladder.
    if self.db_conn is not None:
        return get_session(self.db_conn)
    if self.dbsession is not None:
        return self.dbsession
    raise Exception('Either db_conn or dbsession MUST be supplied!')
def generate_targets(self):
    """Enqueue one share-gathering job per (machine, share) pair.

    Pulls machine/share pairs from the DB, skips excluded share names,
    and pushes a START command per share followed by a terminating END.
    """
    session = get_session(self.db_conn)
    pairs = session.query(
        JackDawADMachine.sAMAccountName, NetShare.id, NetShare.netname).filter(
        JackDawADMachine.ad_id == self.settings_base.ad_id).filter(
        JackDawADMachine.id == NetShare.machine_id)
    for acct_name, share_id, share_name in pairs.all():
        if share_name in self.exclude_shares:
            continue
        # Machine accounts end in '$'; strip it to get the host name.
        host = acct_name[:-1] if acct_name[-1] == '$' else acct_name
        unc_path = '\\\\%s\\%s' % (host, share_name)
        job_settings = copy.deepcopy(self.settings_base)
        job_settings.share_id = share_id
        job_settings.target = host
        job_settings.share = SMBShare(fullpath=unc_path)
        self.in_q.put((SMBShareGathererCmd.START, job_settings))
    session.close()
    # Sentinel: tells the consumer no more jobs are coming.
    self.in_q.put((SMBShareGathererCmd.END, None))
def setup(self):
    """Create the progress bar, DB session and LDAP enumerator workers."""
    logger.debug('mgr setup')
    self.total_progress = tqdm(desc='LDAP info entries', ascii=True)
    self.session = get_session(self.db_conn)
    # Spawn agent_cnt daemon workers sharing the same in/out queues.
    for _ in range(self.agent_cnt):
        worker = LDAPEnumeratorAgent(self.ldam_mgr, self.agent_in_q, self.agent_out_q)
        worker.daemon = True
        worker.start()
        self.agents.append(worker)
def main():
    """CLI entry point: calculate edges for one AD and flatten them to a file."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Calculate edges and flatten them in a file')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='Increase verbosity, can be stacked')

    subparsers = parser.add_subparsers(help='dunno')
    subparsers.required = True
    subparsers.dest = 'command'

    full_group = subparsers.add_parser('run', help='Full migration')
    full_group.add_argument('sql', help='SQL connection string.')
    full_group.add_argument('ad', type=int, help='AD id to calc the edges on')
    full_group.add_argument('outfile', help='output file path')
    # Fixed help text: it was a copy-paste of 'output file path'.
    full_group.add_argument('-w', '--worker-count', type=int, default=4,
                            help='number of parallel workers')

    args = parser.parse_args()

    if args.verbose == 0:
        logging.basicConfig(level=logging.INFO)
        logger.setLevel(logging.INFO)
    elif args.verbose == 1:
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    elif args.verbose > 1:
        # Level 1 is below DEBUG: maximum verbosity.
        logging.basicConfig(level=1)
        logger.setLevel(1)

    session = get_session(args.sql)

    if args.command == 'run':
        calc = EdgeCalc(session, args.ad, args.outfile, buffer_size=100,
                        dst_ad_id=None, worker_count=args.worker_count)
        calc.run()
    else:
        print('?????')
def main():
    """CLI entry point: calculate edges for one AD into a (new or given) graph."""
    import argparse
    import os
    parser = argparse.ArgumentParser(description='Calculate edges and flatten them in a file')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='Increase verbosity, can be stacked')

    subparsers = parser.add_subparsers(help='dunno')
    subparsers.required = True
    subparsers.dest = 'command'

    full_group = subparsers.add_parser('run', help='Full migration')
    full_group.add_argument('sql', help='SQL connection string.')
    full_group.add_argument('ad', type=int, help='AD id to calc the edges on')
    # Fixed help texts: both were copy-pasted from other options.
    full_group.add_argument('-g', '--graph-id', type=int, default=-1,
                            help='Graph id to store the edges under (-1 creates a new graph)')
    full_group.add_argument('-w', '--worker-count', type=int, default=4,
                            help='number of parallel workers')

    args = parser.parse_args()

    if args.verbose == 0:
        logging.basicConfig(level=logging.INFO)
        logger.setLevel(logging.INFO)
    elif args.verbose == 1:
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    elif args.verbose > 1:
        logging.basicConfig(level=1)
        logger.setLevel(1)

    # SQLite needs special handling downstream; signal it via env var.
    os.environ['JACKDAW_SQLITE'] = '0'
    if args.sql.lower().startswith('sqlite'):
        os.environ['JACKDAW_SQLITE'] = '1'

    session = get_session(args.sql)

    graph_id = args.graph_id
    if graph_id == -1:
        # No graph given: create a fresh GraphInfo row and use its id.
        gi = GraphInfo()
        session.add(gi)
        session.commit()
        session.refresh(gi)
        graph_id = gi.id

    if args.command == 'run':
        calc = EdgeCalc(session, args.ad, graph_id, buffer_size=100,
                        worker_count=args.worker_count)
        calc.run()
    else:
        print('?????')
async def run(self):
    """Start the websocket server with agent routing support.

    Sets up the DB session, selects the graph backend, prepares the work
    directory, starts the internal queue handlers (and optionally a local
    agent), then serves websocket clients until closed.

    Raises:
        Exception: if db_url is missing or the graph backend is unknown.
    """
    if self.db_url is None:
        raise Exception('db_url must be either sqlalchemy url or an established db session')
    # db_url may be a connection string or an already-built session object.
    if isinstance(self.db_url, str):
        self.db_session = get_session(self.db_url)
    else:
        self.db_session = self.db_url

    # Deferred imports: only the selected backend's deps are required.
    backend = self.graph_backend.upper()
    if backend == 'NETWORKX':
        from jackdaw.nest.graph.backends.networkx.domaingraph import JackDawDomainGraphNetworkx
        self.graph_type = JackDawDomainGraphNetworkx
    elif backend == 'IGRAPH':
        from jackdaw.nest.graph.backends.igraph.domaingraph import JackDawDomainGraphIGraph
        self.graph_type = JackDawDomainGraphIGraph
    elif backend == 'GRAPHTOOLS':
        from jackdaw.nest.graph.backends.graphtools.domaingraph import JackDawDomainGraphGrapthTools
        self.graph_type = JackDawDomainGraphGrapthTools
    else:
        # Previously an unknown backend was silently skipped, leaving
        # graph_type unset and causing a confusing AttributeError later.
        raise Exception('Unknown graph backend: %s' % self.graph_backend)

    pathlib.Path(self.work_dir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(self.work_dir).joinpath('graphcache').mkdir(parents=True, exist_ok=True)

    self.server_in_q = asyncio.Queue()
    self.server_out_q = asyncio.Queue()
    self.sspi_proxy_out_q = asyncio.Queue()

    asyncio.create_task(self.__handle_server_in())
    asyncio.create_task(self.__handle_wsnet_router_in())

    if self.enable_local_agent is True:
        # Fixed id '0' marks the built-in local agent.
        agentid = '0'  #str(uuid.uuid4())
        internal_agent = JackDawAgent(agentid, 'internal', platform.system().upper(), self.db_session)
        self.agents[agentid] = internal_agent
        asyncio.create_task(internal_agent.run())

    #handler = functools.partial(process_request, os.getcwd())
    self.server = await websockets.serve(
        self.handle_incoming,
        self.listen_ip,
        self.listen_port,
        ssl=self.ssl_ctx,
        process_request=self.preprocess_request,
        subprotocols=self.subprotocols)
    print('[+] Server is running!')
    await self.server.wait_closed()
async def run(self):
    """Run the full edge calculation pipeline for one AD.

    The steps are strictly ordered: each edge family is computed and
    committed before the (multiprocess) security-descriptor calculation
    runs, and the AD is only flagged 'edges_finished' at the very end.

    Returns:
        (True, None) on success, (False, exception) on failure.
    """
    try:
        self.session = get_session(self.db_conn)
        if self.ad_id is None and self.graph_id is not None:
            #recalc!
            # Recalculation: drop all derived edges ('member' edges kept),
            # then take the ad_id from the graph's GraphInfo row.
            self.session.query(Edge).filter_by(
                graph_id=self.graph_id).filter(
                Edge.label != 'member').delete()
            self.session.commit()
            res = self.session.query(GraphInfo).get(self.graph_id)
            self.ad_id = res.ad_id
        adinfo = self.session.query(ADInfo).get(self.ad_id)
        # 'DC=a,DC=b' -> 'a.b' (drop one leading '.' side effect is avoided
        # because 'DC=' is removed after ',' -> '.').
        self.domain_name = str(adinfo.distinguishedName).replace(
            ',', '.').replace('DC=', '')

        # Each *_edges() call computes one edge family synchronously.
        await self.log_msg('Adding gplink edges')
        self.gplink_edges()
        #await self.log_msg()
        #self.groupmembership_edges()
        await self.log_msg('Adding trusts edges')
        self.trust_edges()
        await self.log_msg('Adding sqladmin edges')
        self.sqladmin_edges()
        await self.log_msg('Adding hassession edges')
        self.hasession_edges()
        await self.log_msg('Adding localgroup edges')
        self.localgroup_edges()
        await self.log_msg('Adding password sharing edges')
        self.passwordsharing_edges()
        self.session.commit()

        # Security-descriptor edges run in worker processes; errors are
        # surfaced via the returned err and re-raised here.
        _, err = await self.calc_sds_mp()
        if err is not None:
            raise err

        # Mark the AD as fully processed only after everything succeeded.
        adinfo = self.session.query(ADInfo).get(self.ad_id)
        adinfo.edges_finished = True
        self.session.commit()
        return True, None
    except Exception as e:
        logger.exception('edge calculation error!')
        return False, e
    finally:
        # Best-effort close; session may never have been created.
        try:
            self.session.close()
        except:
            pass
def __target_generator(self):
    """Yield (machine_id, target) pairs from all configured target sources.

    Sources, in order: explicit target list, targets file, LDAP machine
    enumeration (optionally resolving DB ids via lookup_ad), and a full
    AD machine listing via target_ad. machine_id is -1 when unknown.

    NOTE(review): `session` is only bound when db_conn is not None, but
    the lookup_ad and target_ad branches use it unconditionally — looks
    like callers are expected to always supply db_conn when those are
    set; verify against callers.
    """
    if self.db_conn is not None:
        session = get_session(self.db_conn)
    for target in self.targets:
        tid = -1  # no DB id known for hand-supplied targets
        yield (tid, target)

    if self.targets_file is not None:
        tid = -1
        with open(self.targets_file, 'r') as f:
            for line in f:
                line = line.strip()
                yield (tid, line)

    if self.ldap_conn is not None:
        # 805306369 = machine accounts.
        ldap_filter = r'(&(sAMAccountType=805306369))'
        attributes = ['sAMAccountName']
        for entry in self.ldap_conn.pagedsearch(ldap_filter, attributes):
            tid = -1
            if self.lookup_ad is not None:
                self.ad_id = self.lookup_ad
                # Map the LDAP account name to its stored objectSid.
                res = session.query(JackDawADMachine)\
                    .filter_by(ad_id = self.lookup_ad)\
                    .with_entities(JackDawADMachine.objectSid)\
                    .filter(JackDawADMachine.sAMAccountName == entry['attributes']['sAMAccountName'])\
                    .first()
                if res is not None:
                    tid = res[0]
            # [:-1] strips the trailing '$' of the machine account name.
            yield (tid, entry['attributes']['sAMAccountName'][:-1])

    if self.target_ad is not None:
        self.ad_id = self.target_ad
        info = session.query(JackDawADInfo).get(self.target_ad)
        info.smb_enumeration_state = 'STARTED'
        self.domain_name = str(info.distinguishedName).replace(
            ',', '.').replace('DC=', '')
        session.commit()
        for target_id, dns in session.query(JackDawADMachine).filter_by(
                ad_id=self.target_ad).with_entities(
                JackDawADMachine.objectSid, JackDawADMachine.dNSHostName):
            yield (target_id, dns)

    if self.db_conn is not None:
        session.close()
async def setup(self):
    """Create queues, progress bar, DB session and async LDAP agents."""
    logger.debug('mgr setup')
    # Bound the output queue; default the bound to the agent count.
    qs = self.queue_size
    if qs is None:
        qs = self.agent_cnt
    self.agent_in_q = asyncio.Queue()  #AsyncProcessQueue()
    self.agent_out_q = asyncio.Queue(qs)  #AsyncProcessQueue(1000)
    if self.progress_queue is None:
        self.total_progress = tqdm(desc='LDAP info entries', ascii=True)
    self.session = get_session(self.db_conn)
    # One coroutine task per agent, all sharing the same queues.
    for _ in range(self.agent_cnt):
        worker = LDAPEnumeratorAgent(self.ldam_mgr, self.agent_in_q, self.agent_out_q)
        self.agents.append(asyncio.create_task(worker.arun()))
def __target_generator(self):
    """Yield (machine_id, target_name) pairs from all configured sources.

    Sources, in order: explicit target list, targets file, LDAP machine
    enumeration (optionally resolving DB ids via lookup_ad), and the DB
    machine listing for target_ad. machine_id is -1 when unknown.

    NOTE(review): `session` is only bound when db_conn is not None, but
    the lookup_ad and target_ad branches use it unconditionally — verify
    that callers always supply db_conn in those configurations.
    """
    if self.db_conn is not None:
        session = get_session(self.db_conn)
    for target in self.targets:
        tid = -1  # no DB id known for hand-supplied targets
        yield (tid, target)

    if self.targets_file is not None:
        tid = -1
        with open(self.targets_file, 'r') as f:
            for line in f:
                line = line.strip()
                yield (tid, line)

    if self.ldap_conn is not None:
        # 805306369 = machine accounts.
        ldap_filter = r'(&(sAMAccountType=805306369))'
        attributes = ['sAMAccountName']
        for entry in self.ldap_conn.pagedsearch(ldap_filter, attributes):
            tid = -1
            if self.lookup_ad is not None:
                res = session.query(Machine)\
                    .filter_by(ad_id = self.lookup_ad)\
                    .with_entities(Machine.id)\
                    .filter(Machine.sAMAccountName == entry['attributes']['sAMAccountName'])\
                    .first()
                if res is not None:
                    tid = res[0]
            # [:-1] strips the trailing '$' of the machine account name.
            yield (tid, entry['attributes']['sAMAccountName'][:-1])

    if self.target_ad is not None:
        for target_id, target_name in session.query(Machine).filter_by(
                ad_id=self.target_ad).with_entities(
                Machine.id, Machine.sAMAccountName):
            yield (target_id, target_name[:-1])

    if self.db_conn is not None:
        session.close()
def get_results(self):
    """Drain the result queue, updating progress bars and persisting results.

    A None queue item terminates the loop. Per item:
    error-only -> log + error bar; result -> bar update + DB store (or
    debug log when no DB); neither -> host finished.
    """
    session = None
    if self.db_conn is not None:
        session = get_session(self.db_conn)

    # None is the termination sentinel.
    while (item := self.out_q.get()) is not None:
        tid, target, result, error = item

        if result is None and error is not None:
            #something went error
            logger.debug('[AIOSMBScanner][TargetError][%s] %s' % (target.get_ip(), error))
            if self.use_progress_bar is True:
                self.prg_errors.update()

        if result is not None:
            if self.use_progress_bar is True:
                # Tick the bar matching the result's concrete type.
                for klass, bar in ((NetSession, self.prg_sessions),
                                   (NetShare, self.prg_shares),
                                   (LocalGroup, self.prg_groups)):
                    if isinstance(result, klass):
                        bar.update()
                        break
            if session is None:
                logger.debug(target, str(result), error)
            else:
                session.add(result)
                session.commit()

        if result is None and error is None:
            logger.debug('Finished: %s' % target.ip)
            if self.use_progress_bar is True:
                self.prg_hosts.update()
# Ad-hoc debugging script: load a graph-tools backed domain graph from a
# local SQLite database and prepare SID lookups (path-finding code below
# is mostly commented out / work in progress).
from jackdaw.dbmodel.edgelookup import EdgeLookup
from jackdaw.dbmodel.edge import Edge
from jackdaw.dbmodel.aduser import ADUser
from jackdaw.nest.graph.backends.graphtools.domaingraph import JackDawDomainGraphGrapthTools
from jackdaw.dbmodel import get_session
import pprint

# Hard-coded local test fixtures.
sql = 'sqlite:////home/devel/Desktop/1.db'
ad_id = 1
graph_id = 1
work_dir = '/home/devel/Desktop/projects/jackdaw/graphs'
print(sql)
session = get_session(sql)
a = JackDawDomainGraphGrapthTools(session, graph_id, work_dir)
a.load()
print('Loaded!')
#src_sid = 'S-1-5-21-796845957-1547161642-839522115-1286'
# Destination: the Domain Admins group SID (RID 512) of the test domain.
dst_sid = 'S-1-5-21-796845957-1547161642-839522115-512'
da_sids = {}
#target_sids = {}
#
#for res in session.query(ADUser.objectSid)\
#	.filter_by(ad_id = ad_id)\
#	.filter(ADUser.servicePrincipalName != None).all():
#
async def run(self):
    """Run the SMB information gathering (shares/sessions/localgroups).

    Spawns the gatherer agent and target generator as tasks, drains the
    output queue until a None sentinel, persisting results and reporting
    progress via tqdm bars and/or the progress queue.

    Returns:
        (True, None) on success, (False, exception) on failure.
    """
    try:
        logger.debug(
            '[+] Starting SMB information acqusition. This might take a while...'
        )
        self.session = get_session(self.db_conn)
        self.in_q = asyncio.Queue(self.queue_size)
        self.out_q = asyncio.Queue(self.queue_size)
        self.rdns_in_q = asyncio.Queue()
        # Optional reverse-DNS worker feeding back into out_q.
        if self.rdns_resolver is not None:
            self.rdns_task = asyncio.create_task(
                rdns_worker(self.rdns_resolver, self.rdns_in_q, self.out_q))
        info = self.session.query(ADInfo).get(self.ad_id)
        info.smb_enumeration_state = 'STARTED'
        # 'DC=a,DC=b' -> 'a.b'
        self.domain_name = str(info.distinguishedName).replace(
            ',', '.').replace('DC=', '')
        self.session.commit()
        self.total_targets = self.session.query(func.count(
            Machine.id)).filter_by(ad_id=self.ad_id).scalar()
        if self.show_progress is True:
            self.prg_hosts = tqdm(desc='HOSTS', ascii=True, total=self.total_targets)
            self.prg_shares = tqdm(desc='Shares', ascii=True)
            self.prg_sessions = tqdm(desc='Sessions', ascii=True)
            self.prg_groups = tqdm(desc='LocalGroup', ascii=True)
            self.prg_errors = tqdm(desc='Errors', ascii=True)
        if self.progress_queue is not None:
            msg = GathererProgress()
            msg.type = GathererProgressType.SMB
            msg.msg_type = MSGTYPE.STARTED
            msg.adid = self.ad_id
            msg.domain_name = self.domain_name
            await self.progress_queue.put(msg)
        self.gatherer = AIOSMBGathererAgent(
            self.in_q,
            self.out_q,
            self.smb_mgr,
            gather=self.gathering_type,
            localgroups=self.localgroups,
            concurrent_connections=self.concurrent_connections,
        )
        self.gatherer_task = asyncio.create_task(self.gatherer.run())
        self.job_generator_task = asyncio.create_task(
            self.generate_targets())
        # Main result loop: one tuple per event; None terminates.
        while True:
            await asyncio.sleep(0)
            x = await self.out_q.get()
            if x is None:
                break
            tid, target, result, error = x
            if result is None and error is not None:
                #something went error
                if tid is None and target is None:
                    continue
                logger.debug('[AIOSMBScanner][TargetError][%s] %s' %
                             (target.get_ip(), error))
                if self.show_progress is True:
                    self.prg_errors.update()
                if self.progress_queue is not None:
                    self.prg_errors_cnt += 1
                # Persist the failure so it shows up in the DB as well.
                err = NetError()
                err.ad_id = self.ad_id
                err.machine_sid = tid
                err.error = str(error)
                self.session.add(err)
            if result is not None:
                if self.show_progress is True:
                    if isinstance(result, NetSession):
                        self.prg_sessions.update()
                    elif isinstance(result, NetShare):
                        self.prg_shares.update()
                    elif isinstance(result, LocalGroup):
                        self.prg_groups.update()
                if self.progress_queue is not None:
                    # Counters always advance; full result objects are only
                    # streamed when stream_data is enabled.
                    if isinstance(result, NetSession):
                        self.prg_sessions_cnt += 1
                        if self.stream_data is True:
                            msg = GathererProgress()
                            msg.type = GathererProgressType.SMBSESSION
                            msg.msg_type = MSGTYPE.FINISHED
                            msg.adid = self.ad_id
                            msg.domain_name = self.domain_name
                            msg.data = result
                            await self.progress_queue.put(msg)
                    elif isinstance(result, NetShare):
                        self.prg_shares_cnt += 1
                        if self.stream_data is True:
                            msg = GathererProgress()
                            msg.type = GathererProgressType.SMBSHARE
                            msg.msg_type = MSGTYPE.FINISHED
                            msg.adid = self.ad_id
                            msg.domain_name = self.domain_name
                            msg.data = result
                            await self.progress_queue.put(msg)
                    elif isinstance(result, LocalGroup):
                        self.prg_groups_cnt += 1
                        if self.stream_data is True:
                            msg = GathererProgress()
                            msg.type = GathererProgressType.SMBLOCALGROUP
                            msg.msg_type = MSGTYPE.FINISHED
                            msg.adid = self.ad_id
                            msg.domain_name = self.domain_name
                            msg.data = result
                            await self.progress_queue.put(msg)
                result.ad_id = self.ad_id
                # Queue session IPs for reverse-DNS resolution if enabled.
                if isinstance(
                        result, NetSession) and self.rdns_resolver is not None:
                    await self.rdns_in_q.put(result.ip)
                self.session.add(result)
                self.session.commit()
            if result is None and error is None:
                # Both None: the target finished without further results.
                logger.debug('Finished: %s' % target.ip)
                if self.show_progress is True:
                    self.prg_hosts.update()
                if self.progress_queue is not None:
                    self.prg_hosts_cnt += 1
                    # Emit an aggregated progress message every step_size hosts.
                    if self.prg_hosts_cnt % self.progress_step_size == 0:
                        msg = GathererProgress()
                        msg.type = GathererProgressType.SMB
                        msg.msg_type = MSGTYPE.PROGRESS
                        msg.adid = self.ad_id
                        msg.domain_name = self.domain_name
                        msg.errors = self.prg_errors_cnt
                        msg.sessions = self.prg_sessions_cnt
                        msg.shares = self.prg_shares_cnt
                        msg.groups = self.prg_groups_cnt
                        msg.total = self.total_targets
                        msg.total_finished = self.prg_hosts_cnt
                        msg.step_size = self.progress_step_size
                        await self.progress_queue.put(msg)

        info = self.session.query(ADInfo).get(self.ad_id)
        info.smb_enumeration_state = 'FINISHED'
        self.session.commit()

        # Shut down the rDNS worker; give it 10s then cancel.
        if self.rdns_task is not None:
            await self.rdns_in_q.put(None)
            try:
                await asyncio.wait_for(asyncio.gather(*[self.rdns_task]), 10)
            except asyncio.TimeoutError:
                self.rdns_task.cancel()

        logger.debug('[+] SMB information acquisition finished!')
        if self.progress_queue is not None:
            msg = GathererProgress()
            msg.type = GathererProgressType.SMB
            msg.msg_type = MSGTYPE.FINISHED
            msg.adid = self.ad_id
            msg.domain_name = self.domain_name
            await self.progress_queue.put(msg)

        if self.show_progress is True:
            # Final redraw, then freeze the bars.
            self.prg_hosts.refresh()
            self.prg_shares.refresh()
            self.prg_sessions.refresh()
            self.prg_groups.refresh()
            self.prg_errors.refresh()
            self.prg_hosts.disable = True
            self.prg_shares.disable = True
            self.prg_sessions.disable = True
            self.prg_groups.disable = True
            self.prg_errors.disable = True

        return True, None
    except Exception as e:
        return False, e
async def run(self):
    """Receive websocket commands and dispatch each to its handler task.

    Commands whose token matches an active agent task are forwarded to
    that task's queue; everything else is dispatched via a lookup table.
    The disconnected event is always set on exit so waiters can clean up.
    """
    try:
        self.disconnected_evt = asyncio.Event()
        self.server_in_q = asyncio.Queue()
        self.server_in_task = asyncio.create_task(self.__handle_server_in())
        self.db_session = get_session(self.db_url)

        # Command -> handler lookup. Replaces a long if/elif chain which
        # also contained a duplicate (dead) SMBSESSIONS branch.
        handlers = {
            NestOpCmd.GATHER: self.do_gather,
            NestOpCmd.KERBEROAST: self.do_kerberoast,
            NestOpCmd.ASREPROAST: self.do_asreproast,
            NestOpCmd.SMBSESSIONS: self.do_smbsessions,
            NestOpCmd.PATHSHORTEST: self.do_pathshortest,
            NestOpCmd.PATHDA: self.do_pathda,
            NestOpCmd.GETOBJINFO: self.do_getobjinfo,
            NestOpCmd.LISTADS: self.do_listads,
            NestOpCmd.CHANGEAD: self.do_changead,
            NestOpCmd.LISTGRAPHS: self.do_listgraphs,
            NestOpCmd.CHANGEGRAPH: self.do_changegraph,
            NestOpCmd.TCPSCAN: self.do_tcpscan,
            #NestOpCmd.LOADAD: self.do_load_ad,
            NestOpCmd.LOADGRAPH: self.do_load_graph,
            NestOpCmd.ADDCRED: self.do_add_cred,
            NestOpCmd.GETCRED: self.do_get_cred,
            NestOpCmd.LISTCRED: self.do_list_cred,
            NestOpCmd.ADDTARGET: self.do_add_target,
            NestOpCmd.GETTARGET: self.do_get_target,
            NestOpCmd.LISTTARGET: self.do_list_target,
            NestOpCmd.LISTAGENTS: self.do_list_agents,
            NestOpCmd.WSNETROUTERCONNECT: self.do_wsnetrouter_connect,
            NestOpCmd.WSNETLISTROUTERS: self.do_wsnetrouter_list,
            NestOpCmd.SMBFILES: self.do_smbfiles,
            NestOpCmd.PATHKERB: self.do_pathkerbroast,
            NestOpCmd.PATHASREP: self.do_pathasreproast,
            NestOpCmd.PATHOWNED: self.do_pathowned,
            NestOpCmd.KERBEROSTGT: self.do_kerberos_tgt,
            NestOpCmd.KERBEROSTGS: self.do_kerberos_tgs,
            NestOpCmd.SMBDCSYNC: self.do_smbdcsync,
            NestOpCmd.RDPCONNECT: self.do_rdpconnect,
            NestOpCmd.LDAPSPNS: self.do_ldapspns,
        }

        while self.websocket.open:
            try:
                cmd_raw = await self.websocket.recv()
                try:
                    print('CMD INCOMING: %s' % cmd_raw)
                    cmd = NestOpCmdDeserializer.from_json(cmd_raw)
                except Exception as e:
                    traceback.print_exc()
                    await self.error('MSG Parsing failed. Reason: %s' % e)
                    continue

                await self.info('Got command: %s' % cmd.cmd.name)
                if cmd.token in self.task_in_queue:
                    # this command is for an active agent task
                    await self.task_in_queue[cmd.token].put(cmd)
                    continue

                handler = handlers.get(cmd.cmd)
                if handler is None:
                    print('Unknown Command')
                else:
                    asyncio.create_task(handler(cmd))
            except asyncio.CancelledError:
                return
            except Exception as e:
                traceback.print_exc()
                return
    except asyncio.CancelledError:
        return
    except Exception as e:
        print(e)
    finally:
        # Always signal disconnection so waiting tasks can shut down.
        self.disconnected_evt.set()
async def run(args):
    """Async CLI driver: execute the sub-command selected on the command line.

    Configures logging from -v count, validates --sql, then branches on
    args.command (enum / dbinit / adinfo / ldap / shares-sessions-
    localgroups-smball / files / creds / passwords / uncracked / cracked
    / nest).
    """
    print(__banner__)
    # Verbosity ladder: 0 = info, 1 = debug, >1 = everything (level 1).
    if args.verbose == 0:
        logging.basicConfig(level=logging.INFO)
        jdlogger.setLevel(logging.INFO)
        msldaplogger.setLevel(logging.WARNING)
        smblogger.setLevel(logging.CRITICAL)
    elif args.verbose == 1:
        logging.basicConfig(level=logging.DEBUG)
        jdlogger.setLevel(logging.DEBUG)
        msldaplogger.setLevel(logging.INFO)
        smblogger.setLevel(logging.INFO)
    elif args.verbose > 1:
        logging.basicConfig(level=1)
        msldaplogger.setLevel(logging.DEBUG)
        jdlogger.setLevel(1)
        smblogger.setLevel(1)

    if not args.sql:
        print(
            'SQL connection identification is missing! You need to provide the --sql parameter'
        )
        sys.exit()

    db_conn = args.sql
    # Signal SQLite-specific handling downstream via env var.
    if args.sql.lower().startswith('sqlite'):
        os.environ['JACKDAW_SQLITE'] = '1'

    if args.command == 'enum':
        # Full enumeration: LDAP first, then SMB, optionally shares.
        smb_mgr = construct_smbdef(args)
        ldap_mgr = construct_ldapdef(args)
        mgr = LDAPEnumeratorManager(db_conn, ldap_mgr, agent_cnt=args.ldap_workers)
        adifo_id = await mgr.run()
        jdlogger.info('ADInfo entry successfully created with ID %s' % adifo_id)
        mgr = SMBGathererManager(smb_mgr, worker_cnt=args.smb_workers,
                                 queue_size=args.smb_queue_size)
        mgr.gathering_type = ['all']
        mgr.db_conn = db_conn
        mgr.target_ad = adifo_id
        await mgr.run()
        if args.smb_share_enum is True:
            settings_base = SMBShareGathererSettings(adifo_id, smb_mgr, None, None, None)
            settings_base.dir_depth = args.smb_folder_depth
            mgr = ShareGathererManager(settings_base, db_conn=db_conn,
                                       worker_cnt=args.smb_workers)
            mgr.run()
    elif args.command == 'dbinit':
        create_db(db_conn)
    elif args.command == 'adinfo':
        # Print a table of all stored ADs.
        session = get_session(db_conn)
        from jackdaw.dbmodel.adinfo import JackDawADInfo
        from jackdaw.utils.table import print_table
        rows = [['Ad ID', 'domain name', 'scantime']]
        for did, distinguishedName, creation in session.query(
                JackDawADInfo).with_entities(JackDawADInfo.id,
                                             JackDawADInfo.distinguishedName,
                                             JackDawADInfo.fetched_at).all():
            # 'DC=a,DC=b' -> 'a.b'
            name = distinguishedName.replace('DC=', '')
            name = name.replace(',', '.')
            rows.append([str(did), name, creation.isoformat()])
        print_table(rows)
    elif args.command == 'ldap':
        ldap_mgr = construct_ldapdef(args)
        ldap_conn = ldap_mgr.get_client()
        mgr = LDAPEnumeratorManager(db_conn, ldap_mgr,
                                    agent_cnt=args.ldap_workers,
                                    queue_size=args.ldap_queue_size,
                                    ad_id=args.ad_id)
        adifo_id = await mgr.run()
        jdlogger.info('ADInfo entry successfully created with ID %s' % adifo_id)
    elif args.command in ['shares', 'sessions', 'localgroups', 'smball']:
        # 'smball' is an alias for gathering everything.
        if args.command == 'smball':
            args.command = 'all'
        smb_mgr = construct_smbdef(args)
        mgr = SMBGathererManager(smb_mgr, worker_cnt=args.smb_workers,
                                 queue_size=args.smb_queue_size)
        mgr.gathering_type = [args.command]
        mgr.db_conn = db_conn
        mgr.lookup_ad = args.lookup_ad
        if args.ldap_url:
            ldap_mgr = construct_ldapdef(args)
            ldap_conn = ldap_mgr.get_client()
            mgr.ldap_conn = ldap_conn
        if args.ad_id:
            mgr.target_ad = args.ad_id
        if args.target_file:
            mgr.targets_file = args.target_file
        await mgr.run()
    elif args.command == 'files':
        if args.src == 'domain':
            if not args.ad_id:
                raise Exception('ad-id parameter is mandatory in ldap mode')
            mgr = SMBConnectionURL(args.smb_url)
            settings_base = SMBShareGathererSettings(args.ad_id, mgr, None, None, None)
            settings_base.dir_depth = args.smb_folder_depth
            settings_base.dir_with_sd = args.with_sid
            settings_base.file_with_sd = args.with_sid
            mgr = ShareGathererManager(settings_base, db_conn=db_conn,
                                       worker_cnt=args.smb_workers)
            mgr.run()
        # elif args.src == 'file':
        #	if not args.target_file:
        #		raise Exception('target-file parameter is mandatory in file mode')
        #
        #	args.target_file
        #	args.lookup_ad
        #	args.with_sid
        #	args.smb_workers
        #
        # elif args.src == 'ldap':
        #	if not args.ldap_url:
        #		raise Exception('ldap-url parameter is mandatory in ldap mode')
        #	args.lookup_ad
        #	args.with_sid
        #	args.smb_workers
        #
        #
        #
        # elif args.src == 'cmd':
    elif args.command == 'creds':
        # NOTE(review): args.db_conn — the SQL string lives in args.sql /
        # local db_conn; verify the parser actually defines a db_conn arg.
        creds = JackDawCredentials(args.db_conn, args.domain_id)
        creds.add_credentials_impacket(args.impacket_file)
    elif args.command == 'passwords':
        creds = JackDawCredentials(args.db_conn)
        creds.add_cracked_passwords(args.potfile, args.disable_usercheck,
                                    args.disable_passwordcheck)
    elif args.command == 'uncracked':
        creds = JackDawCredentials(args.db_conn, args.domain_id)
        creds.get_uncracked_hashes(args.hash_type, args.history)
    elif args.command == 'cracked':
        creds = JackDawCredentials(args.db_conn, args.domain_id)
        creds.get_cracked_info()
    elif args.command == 'nest':
        from jackdaw.nest.wrapper import NestServer
        debug = bool(args.verbose)
        server = NestServer(args.sql, bind_ip=args.ip,
                            bind_port=args.port, debug=debug)
        server.run()
async def run(self):
    """Run the SMB file/share enumeration for one AD.

    Spawns the file gatherer agent and target generator, drains the
    output queue until a None sentinel, buffering results and flushing
    them in batches. Progress is reported via tqdm bars and/or the
    progress queue.

    Returns:
        (True, None) on success, (False, exception) on failure.
    """
    try:
        logger.debug(
            '[+] Starting SMB file enumeration. This might take a while...'
        )
        self.session = get_session(self.db_conn)
        self.in_q = asyncio.Queue(self.queue_size)
        self.out_q = asyncio.Queue(self.queue_size)
        # Accept either a connection URL string or a ready manager.
        if isinstance(self.smb_mgr, str):
            self.smb_mgr = SMBConnectionURL(self.smb_mgr)

        info = self.session.query(ADInfo).get(self.ad_id)
        info.smb_enumeration_state = 'STARTED'
        # 'DC=a,DC=b' -> 'a.b'
        self.domain_name = str(info.distinguishedName).replace(
            ',', '.').replace('DC=', '')
        self.session.commit()

        self.total_targets = self.session.query(func.count(
            Machine.id)).filter(Machine.ad_id == self.ad_id).scalar()

        if self.show_progress is True:
            self.prg_hosts = tqdm(desc='HOSTS ', ascii=True, total=self.total_targets)
            self.prg_shares = tqdm(desc='Shares ', ascii=True)
            self.prg_dirs = tqdm(desc='Dirs ', ascii=True)
            self.prg_files = tqdm(desc='Files ', ascii=True)
            self.prg_size = tqdm(desc='Total size', unit='B', unit_scale=True, ascii=True)
            self.prg_errors = tqdm(desc='Errors ', ascii=True)

        if self.progress_queue is not None:
            msg = GathererProgress()
            msg.type = GathererProgressType.SMBENUM
            msg.msg_type = MSGTYPE.STARTED
            msg.adid = self.ad_id
            msg.domain_name = self.domain_name
            await self.progress_queue.put(msg)

        self.gatherer = AIOSMBFileGathererAgent(
            self.in_q,
            self.out_q,
            self.smb_mgr,
            depth=self.depth,
            concurrent_connections=self.concurrent_connections,
        )
        self.gatherer_task = asyncio.create_task(self.gatherer.run())
        self.job_generator_task = asyncio.create_task(
            self.generate_targets())

        # Main result loop; a None item terminates it.
        while True:
            await asyncio.sleep(0)
            x = await self.out_q.get()
            if x is None:
                break
            tid, target, result, error = x
            if result is None and error is not None:
                #something went error
                if tid is None and target is None:
                    continue
                logger.debug('[AIOSMBScanner][TargetError][%s] %s' %
                             (target.get_ip(), error))
                if self.show_progress is True:
                    self.prg_errors.update()
                if self.progress_queue is not None:
                    self.prg_errors_cnt += 1
                # Persist the failure so it shows up in the DB as well.
                err = NetError()
                err.ad_id = self.ad_id
                err.machine_sid = tid
                err.error = str(error)
                self.session.add(err)
            if result is not None:
                if self.show_progress is True:
                    if result.otype == 'dir':
                        self.prg_dirs.update()
                    elif result.otype == 'file':
                        self.prg_files.update()
                        self.prg_size.update(result.size)
                    elif result.otype == 'share':
                        # Fixed: was self.prg_share (nonexistent attribute,
                        # raised AttributeError on share results).
                        self.prg_shares.update()
                result.ad_id = self.ad_id
                # Buffer and flush in batches to keep commits cheap.
                self.result_buffer.append(result)
                if len(self.result_buffer) >= self.result_buffer_size:
                    self.flush_buffer()
            if result is None and error is None:
                # Both None: the target finished without further results.
                logger.debug('Finished: %s' % target.ip)
                if self.show_progress is True:
                    self.prg_hosts.update()

        #flushing remaining buffer
        if len(self.result_buffer) > 0:
            self.flush_buffer()

        logger.debug('[+] SMB file enumeration finished!')
        if self.progress_queue is not None:
            msg = GathererProgress()
            msg.type = GathererProgressType.SMBENUM
            msg.msg_type = MSGTYPE.FINISHED
            msg.adid = self.ad_id
            msg.domain_name = self.domain_name
            await self.progress_queue.put(msg)

        if self.show_progress is True:
            # Final redraw, then freeze the bars.
            self.prg_hosts.refresh()
            self.prg_shares.refresh()
            self.prg_errors.refresh()
            self.prg_dirs.refresh()
            self.prg_files.refresh()
            self.prg_size.refresh()
            self.prg_hosts.disable = True
            self.prg_shares.disable = True
            self.prg_errors.disable = True
            self.prg_dirs.disable = True
            self.prg_files.disable = True
            self.prg_size.disable = True

        return True, None
    except Exception as e:
        import traceback
        traceback.print_exc()
        return False, e
def run(args):
    """CLI entry point (sync, legacy): configure logging then dispatch on args.command.

    Expects args to carry at least: verbose, sql, command, plus the
    per-command options referenced in each branch.
    """
    if args.verbose == 0:
        logging.basicConfig(level=logging.INFO)
        jdlogger.setLevel(logging.INFO)
        msldaplogger.setLevel(logging.WARNING)
        smblogger.setLevel(logging.CRITICAL)
    elif args.verbose == 1:
        logging.basicConfig(level=logging.DEBUG)
        jdlogger.setLevel(logging.DEBUG)
        msldaplogger.setLevel(logging.INFO)
        smblogger.setLevel(logging.INFO)
    elif args.verbose > 1:
        # maximum verbosity: level 1 is below DEBUG, everything is emitted
        logging.basicConfig(level=1)
        msldaplogger.setLevel(logging.DEBUG)
        jdlogger.setLevel(1)
        smblogger.setLevel(1)

    if not args.sql:
        print(
            'SQL connection identification is missing! You need to provide the --sql parameter'
        )
        sys.exit()

    db_conn = args.sql

    if args.command == 'enum':
        # full enumeration: LDAP first (creates the ADInfo row), then SMB
        smb_mgr = construct_smbdef(args)
        ldap_mgr = construct_ldapdef(args)
        mgr = LDAPEnumeratorManager(db_conn, ldap_mgr, agent_cnt=args.ldap_workers)
        adifo_id = mgr.run()
        print('ADInfo entry successfully created with ID %s' % adifo_id)
        mgr = SMBGathererManager(smb_mgr, worker_cnt=args.smb_workers)
        mgr.gathering_type = ['all']
        mgr.db_conn = db_conn
        mgr.target_ad = adifo_id
        mgr.run()
    elif args.command == 'dbinit':
        create_db(db_conn)
    elif args.command == 'adinfo':
        # list all stored AD scans as a table
        session = get_session(db_conn)
        from jackdaw.dbmodel.adinfo import JackDawADInfo
        from jackdaw.utils.table import print_table
        rows = [['Ad ID', 'domain name', 'scantime']]
        for did, distinguishedName, creation in session.query(
                JackDawADInfo).with_entities(JackDawADInfo.id,
                                             JackDawADInfo.distinguishedName,
                                             JackDawADInfo.fetched_at).all():
            # turn "DC=corp,DC=local" into "corp.local"
            name = distinguishedName.replace('DC=', '')
            name = name.replace(',', '.')
            rows.append([str(did), name, creation.isoformat()])
        print_table(rows)
    elif args.command == 'ldap':
        ldap_mgr = construct_ldapdef(args)
        ldap_conn = ldap_mgr.get_connection()
        ldap_conn.connect()
        mgr = LDAPEnumeratorManager(db_conn, ldap_mgr, agent_cnt=args.ldap_workers)
        adifo_id = mgr.run()
        print('ADInfo entry successfully created with ID %s' % adifo_id)
    elif args.command in ['shares', 'sessions', 'localgroups']:
        smb_mgr = construct_smbdef(args)
        mgr = SMBGathererManager(smb_mgr)
        mgr.gathering_type = [args.command]
        mgr.db_conn = db_conn
        mgr.lookup_ad = args.lookup_ad
        if args.ldap_url:
            ldap_mgr = construct_ldapdef(args)
            ldap_conn = ldap_mgr.get_connection()
            ldap_conn.connect()
            mgr.ldap_conn = ldap_conn
        if args.ad_id:
            mgr.target_ad = args.ad_id
        if args.target_file:
            mgr.targets_file = args.target_file
        mgr.run()
    elif args.command == 'creds':
        # fixed: was args.db_conn -- the connection string lives in the local
        # db_conn (= args.sql); args has no db_conn attribute (cf. the async
        # dispatcher in this file, which uses db_conn in these branches)
        creds = JackDawCredentials(db_conn, args.domain_id)
        creds.add_credentials_impacket(args.impacket_file)
    elif args.command == 'passwords':
        creds = JackDawCredentials(db_conn)  # fixed: was args.db_conn
        creds.add_cracked_passwords(args.potfile, args.disable_usercheck,
                                    args.disable_passwordcheck)
    elif args.command == 'uncracked':
        creds = JackDawCredentials(db_conn, args.domain_id)  # fixed: was args.db_conn
        creds.get_uncracked_hashes(args.hash_type, args.history)
    elif args.command == 'cracked':
        creds = JackDawCredentials(db_conn, args.domain_id)  # fixed: was args.db_conn
        creds.get_cracked_info()
    elif args.command == 'nest':
        from jackdaw.nest.wrapper import NestServer
        debug = bool(args.verbose)
        server = NestServer(args.sql,
                            bind_ip=args.ip,
                            bind_port=args.port,
                            debug=debug)
        server.run()
def get_dbsession(self): if not self.dbsession: self.dbsession = get_session(self.db_conn)
async def run(self):
    """Start the websocket server: set up the DB session, graph backend and
    work directories, spawn the queue-handler tasks and the optional local
    agent, optionally connect to a WSNET router, then serve until closed."""
    if self.db_url is None:
        raise Exception('db_url must be either sqlalchemy url or an established db session')
    # db_url may be a connection string or an already-established session object
    if isinstance(self.db_url, str):
        self.db_session = get_session(self.db_url)
    else:
        self.db_session = self.db_url
    # Select the graph backend implementation by name (case-insensitive).
    # NOTE(review): an unrecognized backend name leaves self.graph_type unset
    # and will only fail later at first use -- confirm this is intended.
    if self.graph_backend.upper() == 'networkx'.upper():
        from jackdaw.nest.graph.backends.networkx.domaingraph import JackDawDomainGraphNetworkx
        self.graph_type = JackDawDomainGraphNetworkx
    elif self.graph_backend.upper() == 'igraph'.upper():
        from jackdaw.nest.graph.backends.igraph.domaingraph import JackDawDomainGraphIGraph
        self.graph_type = JackDawDomainGraphIGraph
    elif self.graph_backend.upper() == 'graphtools'.upper():
        from jackdaw.nest.graph.backends.graphtools.domaingraph import JackDawDomainGraphGrapthTools
        self.graph_type = JackDawDomainGraphGrapthTools
    # make sure the working directory and its graph cache exist
    pathlib.Path(self.work_dir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(self.work_dir).joinpath('graphcache').mkdir(parents=True, exist_ok=True)
    self.server_in_q = asyncio.Queue()
    self.server_out_q = asyncio.Queue()
    self.sspi_proxy_out_q = asyncio.Queue()
    # background consumers for the queues above
    asyncio.create_task(self.__handle_server_in())
    asyncio.create_task(self.__handle_wsnet_router_in())
    if self.enable_local_agent is True:
        # agent id '0' is reserved for the in-process ("internal") agent
        agentid = '0' #str(uuid.uuid4())
        internal_agent = JackDawAgent(self, agentid, 'internal', platform.system().upper(), self.db_session, self.work_dir)
        self.agents[agentid] = internal_agent
        asyncio.create_task(internal_agent.run())
    #handler = functools.partial(process_request, os.getcwd())
    self.server = await websockets.serve(
        self.handle_incoming,
        self.listen_ip,
        self.listen_port,
        ssl=self.ssl_ctx,
        process_request=self.preprocess_request,
        subprotocols=self.subprotocols
    )
    print('[+] Server is running on ws://%s:%s' % (self.listen_ip, self.listen_port))
    if self.wsnet_router is not None:
        try:
            # give the server a moment before dialing out to the router
            await asyncio.sleep(5)
            print('[+] Adding WSNET router...')
            cmd = NestOpWSNETRouterconnect()
            # NOTE(review): hard-coded placeholder token -- confirm whether the
            # router actually validates this value
            cmd.token = 'asdfasdfasdfadf'
            cmd.url = self.wsnet_router
            proxyid, err = await self.__add_wsnet_router(cmd)
            if err is not None:
                raise err
            print('[+] Connected to WSNET router! Proxyid: %s' % proxyid)
        except Exception as e:
            # router connection is best-effort; the server keeps running
            print('[-] Failed to connect to wsnetrouter. Reason: %s' % e)
    await self.server.wait_closed()
async def run(args):
    """Async CLI entry point: configure logging, set the JACKDAW_SQLITE env
    toggle, then dispatch to the subcommand selected by args.command."""
    try:
        # NOTE(review): the banner is printed when args.silent is True, and
        # show_progress=args.silent below -- this only makes sense if the
        # argparse flag is inverted (e.g. store_false); confirm.
        if args.silent is True:
            print(__banner__)

        if args.verbose == 0:
            logging.basicConfig(level=logging.INFO)
            jdlogger.setLevel(logging.INFO)
            msldaplogger.setLevel(logging.CRITICAL)
            smblogger.setLevel(100)
        elif args.verbose == 1:
            logging.basicConfig(level=logging.DEBUG)
            jdlogger.setLevel(logging.DEBUG)
            msldaplogger.setLevel(logging.WARNING)
            smblogger.setLevel(logging.CRITICAL)
        elif args.verbose > 1:
            # maximum verbosity: level 1 is below DEBUG
            logging.basicConfig(level=1)
            msldaplogger.setLevel(logging.DEBUG)
            jdlogger.setLevel(1)
            smblogger.setLevel(1)

        # every command except 'auto' requires an explicit DB connection string
        if not args.sql and args.command != 'auto':
            print(
                'SQL connection identification is missing! You need to provide the --sql parameter'
            )
            sys.exit()

        work_dir = './workdir'
        ldap_url = None
        smb_url = None
        # not every subparser defines these attributes
        if hasattr(args, 'ldap_url'):
            ldap_url = args.ldap_url
        if hasattr(args, 'smb_url'):
            smb_url = args.smb_url

        db_conn = args.sql
        # JACKDAW_SQLITE toggles sqlite-specific behavior elsewhere in the project
        if db_conn is not None:
            os.environ['JACKDAW_SQLITE'] = '0'
            if args.sql.lower().startswith('sqlite'):
                os.environ['JACKDAW_SQLITE'] = '1'
        else:
            os.environ['JACKDAW_SQLITE'] = '1'

        if args.command == 'enum':
            # full collection: LDAP + SMB + kerberoast + edge calculation
            with multiprocessing.Pool() as mp_pool:
                gatherer = Gatherer(db_conn, work_dir, ldap_url, smb_url, kerb_url=args.kerberoast, ldap_worker_cnt=args.ldap_workers, smb_worker_cnt=args.smb_workers, mp_pool=mp_pool, smb_gather_types=['all'], progress_queue=None, show_progress=args.silent, calc_edges=True, ad_id=None, dns=args.dns, no_work_dir=args.no_work_dir)
                res, err = await gatherer.run()
                if err is not None:
                    raise err
        elif args.command == 'auto':
            _, err = await run_auto(ldap_worker_cnt=args.ldap_workers, smb_worker_cnt=args.smb_workers, dns=args.dns, work_dir=work_dir, show_progress=args.silent, no_work_dir=args.no_work_dir)
            if err is not None:
                print(err)
        elif args.command == 'dbinit':
            create_db(db_conn)
        elif args.command == 'adinfo':
            # list all stored AD scans as a table
            session = get_session(db_conn)
            from jackdaw.dbmodel.adinfo import ADInfo
            from jackdaw.utils.table import print_table
            rows = [['Ad ID', 'domain name', 'scantime']]
            for did, distinguishedName, creation in session.query(
                    ADInfo).with_entities(ADInfo.id,
                                          ADInfo.distinguishedName,
                                          ADInfo.fetched_at).all():
                # turn "DC=corp,DC=local" into "corp.local"
                name = distinguishedName.replace('DC=', '')
                name = name.replace(',', '.')
                rows.append([str(did), name, creation.isoformat()])
            print_table(rows)
        elif args.command == 'ldap':
            with multiprocessing.Pool() as mp_pool:
                gatherer = Gatherer(db_conn, work_dir, ldap_url, smb_url, ldap_worker_cnt=args.ldap_workers, smb_worker_cnt=None, mp_pool=mp_pool, smb_gather_types=['all'], progress_queue=None, show_progress=args.silent, calc_edges=args.calculate_edges, ad_id=args.ad_id, no_work_dir=args.no_work_dir)
                await gatherer.run()
        elif args.command == 'kerberoast':
            gatherer = Gatherer(db_conn, work_dir, None, None, kerb_url=args.kerberos_url, ldap_worker_cnt=None, smb_worker_cnt=None, mp_pool=None, smb_gather_types=[], progress_queue=None, show_progress=False, calc_edges=False, ad_id=args.ad_id)
            await gatherer.run()
            print('Kerberoast Finished!')
        elif args.command in ['shares', 'sessions', 'localgroups', 'smball']:
            if args.command == 'smball':
                args.command = 'all'
            # NOTE(review): smb_gather_types receives the bare string
            # args.command here, while other branches pass a list (['all']);
            # confirm Gatherer accepts both forms
            gatherer = Gatherer(
                db_conn,
                work_dir,
                ldap_url,
                smb_url,
                ad_id=args.ad_id,
                ldap_worker_cnt=None,
                smb_worker_cnt=args.smb_workers,
                mp_pool=None,
                smb_gather_types=args.command,
                progress_queue=None,
                show_progress=args.silent,
                calc_edges=False,
                dns=args.dns,
            )
            await gatherer.run()
        elif args.command == 'dns':
            gatherer = Gatherer(
                db_conn,
                work_dir,
                None,
                None,
                ad_id=args.ad_id,
                ldap_worker_cnt=None,
                smb_worker_cnt=None,
                mp_pool=None,
                smb_gather_types=None,
                progress_queue=None,
                show_progress=args.silent,
                calc_edges=False,
                dns=args.dns,
            )
            await gatherer.run()
        elif args.command == 'version':
            print('Jackdaw version: %s' % jdversion)
            print('MSLDAP version : %s' % ldapversion)
            print('AIOSMB version : %s' % smbversion)
        elif args.command == 'files':
            raise Exception('not yet implemented!')
            #if args.src == 'domain':
            #	if not args.ad_id:
            #		raise Exception('ad-id parameter is mandatory in ldap mode')
            #
            #	mgr = SMBConnectionURL(args.smb_url)
            #	settings_base = SMBShareGathererSettings(args.ad_id, mgr, None, None, None)
            #	settings_base.dir_depth = args.smb_folder_depth
            #	settings_base.dir_with_sd = args.with_sid
            #	settings_base.file_with_sd = args.with_sid
            #
            #	mgr = ShareGathererManager(settings_base, db_conn = db_conn, worker_cnt = args.smb_workers)
            #	mgr.run()
        elif args.command == 'creds':
            creds = JackDawCredentials(db_conn, args.domain_id)
            creds.add_credentials_impacket(args.impacket_file)
        elif args.command == 'passwords':
            creds = JackDawCredentials(db_conn)
            creds.add_cracked_passwords(args.potfile, args.disable_usercheck, args.disable_passwordcheck)
        elif args.command == 'uncracked':
            creds = JackDawCredentials(db_conn, args.domain_id)
            creds.get_uncracked_hashes(args.hash_type, args.history)
        elif args.command == 'cracked':
            creds = JackDawCredentials(db_conn, args.domain_id)
            creds.get_cracked_info()
        elif args.command == 'recalc':
            # recompute edges for an existing graph (no collection)
            with multiprocessing.Pool() as mp_pool:
                gatherer = Gatherer(db_conn, work_dir, None, None, mp_pool=mp_pool, progress_queue=None, show_progress=args.silent, calc_edges=True, store_to_db=True, ad_id=None, graph_id=args.graphid)
                await gatherer.run()
        elif args.command == 'nest':
            from jackdaw.nest.wrapper import NestServer
            debug = bool(args.verbose)
            server = NestServer(
                args.sql,
                bind_ip=args.ip,
                bind_port=args.port,
                debug=debug,
                work_dir=args.work_dir,
                graph_backend=args.backend,
            )
            server.run()
        elif args.command == 'ws':
            from jackdaw.nest.ws.server import NestWebSocketServer
            server = NestWebSocketServer(args.listen_ip, args.listen_port, args.sql, args.work_dir, args.backend, ssl_ctx=None)
            await server.run()
        elif args.command == 'bhimport':
            from jackdaw.utils.bhimport import BHImport
            print(
                'DISCLAIMER! This feature is still beta! Bloodhound acquires way less data than Jackdaw therefore not all functionality will work after import. Any errors during import will be silently ignored, use "-vvv" verbosity level to see all errors.'
            )
            bh = BHImport.from_zipfile(args.bhfile)
            bh.db_conn = db_conn
            if args.verbose > 1:
                bh.set_debug(True)
            bh.run()
            print('Import complete!')
    except Exception as e:
        # top-level boundary: log the full traceback and exit normally
        jdlogger.exception('main')
async def run(self):
    """Run the LDAP collection phase.

    Fresh run (resumption is False): dump members/SDs to gzip temp files via
    BaseCollector, then process them with collect_sd/collect_members.
    Resumption run: rebuild the temp files from the DB for whichever phases
    did not finish.

    Returns:
        (ad_id, graph_id, error) -- error is None on success; on failure the
        first two elements are None and error holds the exception.
    """
    try:
        logger.debug(
            '[+] Starting LDAP information acqusition. This might take a while...'
        )
        self.session = get_session(self.db_conn)
        if self.work_dir is None:
            self.work_dir = pathlib.Path('./workdir')
            self.work_dir.mkdir(parents=True, exist_ok=True)
        if isinstance(self.work_dir, str) is True:
            self.work_dir = pathlib.Path(self.work_dir)
        self.members_target_file_name = str(
            self.work_dir.joinpath('temp_members_list.gz'))
        # fixed: wrapped in str() for consistency with members_target_file_name
        self.sd_target_file_name = str(
            self.work_dir.joinpath('temp_sd_list.gz'))
        if self.resumption is False:
            self.members_file_handle = gzip.GzipFile(
                self.members_target_file_name, mode='wb')
            self.sd_file_handle = gzip.GzipFile(self.sd_target_file_name,
                                                mode='wb')
            bc = BaseCollector(
                self.session,
                self.ldap_mgr,
                agent_cnt=self.agent_cnt,
                progress_queue=self.progress_queue,
                show_progress=self.show_progress,
                members_file_handle=self.members_file_handle,
                sd_file_handle=self.sd_file_handle,
                stream_data=self.stream_data)
            self.ad_id, self.graph_id, err = await bc.run()
            # fixed: was `if err is False:` which never fired -- err is either
            # None (success) or an Exception (failure), so collection errors
            # were silently ignored and the run continued with bad state
            if err is not None:
                return None, None, err
            if self.base_collection_finish_evt is not None:
                self.base_collection_finish_evt.set()
            self.members_file_handle.close()
            self.sd_file_handle.close()
            _, err = await self.collect_sd()
            if err is not None:
                raise err
            _, err = await self.collect_members()
            if err is not None:
                raise err
        else:
            adinfo = self.session.query(ADInfo).get(self.ad_id)
            self.graph_id = adinfo.graph_id
            if adinfo.ldap_sds_finished is True and adinfo.ldap_members_finished is True:
                # nothing left to resume
                return self.ad_id, self.graph_id, None
            if adinfo.ldap_sds_finished is False:
                # restart the SD phase from scratch for this AD
                self.session.query(JackDawSD).filter_by(
                    ad_id=self.ad_id).delete()
                self.session.commit()
            if adinfo.ldap_members_finished is False:
                # NOTE(review): this deletes ALL edges, not only this AD's /
                # graph's -- confirm whether a filter is needed here
                self.session.query(Edge).delete()
                self.session.commit()
            if adinfo.ldap_members_finished is False:
                self.members_file_handle = gzip.GzipFile(
                    self.members_target_file_name, mode='wb')
            if adinfo.ldap_sds_finished is False:
                self.sd_file_handle = gzip.GzipFile(
                    self.sd_target_file_name, mode='wb')
            # re-dump every object type from the DB into the temp files
            res = self.session.query(ADInfo).get(self.ad_id)
            data = {
                'dn': res.distinguishedName,
                'sid': res.objectSid,
                'guid': res.objectGUID,
                'object_type': 'domain'
            }
            if adinfo.ldap_sds_finished is False:
                self.sd_file_handle.write(
                    json.dumps(data).encode() + b'\r\n')
            q = self.session.query(ADUser).filter_by(ad_id=self.ad_id)
            for res in windowed_query(q, ADUser.id, 100):
                data = {
                    'dn': res.dn,
                    'sid': res.objectSid,
                    'guid': res.objectGUID,
                    'object_type': 'user'
                }
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
                if adinfo.ldap_members_finished is False:
                    self.members_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
            q = self.session.query(Machine).filter_by(ad_id=self.ad_id)
            for res in windowed_query(q, Machine.id, 100):
                data = {
                    'dn': res.dn,
                    'sid': res.objectSid,
                    'guid': res.objectGUID,
                    'object_type': 'machine'
                }
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
                if adinfo.ldap_members_finished is False:
                    self.members_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
            q = self.session.query(Group).filter_by(ad_id=self.ad_id)
            for res in windowed_query(q, Group.id, 100):
                data = {
                    'dn': res.dn,
                    'sid': res.objectSid,
                    'guid': res.objectGUID,
                    'object_type': 'group'
                }
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
                if adinfo.ldap_members_finished is False:
                    self.members_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
            # OUs and GPOs only participate in the SD phase
            q = self.session.query(ADOU).filter_by(ad_id=self.ad_id)
            for res in windowed_query(q, ADOU.id, 100):
                data = {
                    'dn': res.dn,
                    'sid': None,
                    'guid': res.objectGUID,
                    'object_type': 'ou'
                }
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
            q = self.session.query(GPO).filter_by(ad_id=self.ad_id)
            for res in windowed_query(q, GPO.id, 100):
                data = {
                    'dn': res.dn,
                    'sid': None,
                    'guid': res.objectGUID,
                    'object_type': 'gpo'
                }
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.write(
                        json.dumps(data).encode() + b'\r\n')
            if adinfo.ldap_members_finished is False:
                self.members_file_handle.close()
            if adinfo.ldap_sds_finished is False:
                self.sd_file_handle.close()
        logger.debug('[+] LDAP information acqusition finished!')
        return self.ad_id, self.graph_id, None
    except Exception as e:
        return None, None, e
def init_dbsession(self): if self.db_session is not None: return self.db_session = get_session(self.db_conn)
async def run(self): try: if self.progress_queue is not None: msg = GathererProgress() msg.type = GathererProgressType.KERBEROAST msg.msg_type = MSGTYPE.STARTED msg.adid = self.ad_id msg.domain_name = self.domain_name await self.progress_queue.put(msg) self.session = get_session(self.db_conn) if self.domain_name is None: info = self.session.query(ADInfo).get(self.ad_id) self.domain_name = str(info.distinguishedName).replace(',','.').replace('DC=','') _, err = await self.get_targets() if err is not None: raise err if len(self.targets_asreq) == 0 and len(self.targets_spn) == 0: logger.debug('No targets found!') return True, None if self.kerb_url == 'auto': if platform.system() == 'Windows': _, err = await self.asreproast() if err is not None: raise err _, err = await self.kerberoast_sspi() if err is not None: raise err return True, None else: raise Exception('No kerberos URL was provided and not running on Windows!') elif self.kerb_url.startswith('kerberos'): self.kerb_mgr = KerberosClientURL.from_url(self.kerb_url) _, err = await self.asreproast() if err is not None: raise err _, err = await self.kerberoast() if err is not None: raise err elif self.kerb_url.startswith('ws'): if self.kerb_url.find('type=sspiproxy'): await self.kerberoast_sspiproxy() else: await self.kerberoast_multiplexor() return True, None except Exception as e: return None, e finally: if self.progress_queue is not None: msg = GathererProgress() msg.type = GathererProgressType.KERBEROAST msg.msg_type = MSGTYPE.FINISHED msg.adid = self.ad_id msg.domain_name = self.domain_name await self.progress_queue.put(msg)
async def run(self):
    """Drive the SMB enumeration: spawn the gatherer and target generator,
    then consume the out queue, persisting results to the DB (when a db_conn
    is set) and reporting progress either via local tqdm bars or an external
    progress queue."""
    logger.info(
        '[+] Starting SMB information acqusition. This might take a while...'
    )
    self.in_q = asyncio.Queue(self.queue_size)
    self.out_q = asyncio.Queue(self.queue_size)
    if self.progress_queue is None:
        # no external progress queue -> local tqdm progress bars
        self.prg_hosts = tqdm(desc='HOSTS', ascii=True)
        self.prg_shares = tqdm(desc='Shares', ascii=True)
        self.prg_sessions = tqdm(desc='Sessions', ascii=True)
        self.prg_groups = tqdm(desc='LocalGroup', ascii=True)
        self.prg_errors = tqdm(desc='Errors', ascii=True)
    else:
        msg = SMBEnumeratorProgress()
        msg.msg_type = 'STARTED'
        msg.adid = self.target_ad
        msg.domain_name = self.domain_name
        await self.progress_queue.put(msg)
    #self.results_thread = threading.Thread(target = self.get_results)
    #self.results_thread.daemon = True
    #self.results_thread.start()
    self.gatherer = AIOSMBGatherer(
        self.in_q,
        self.out_q,
        self.smb_mgr,
        gather=self.gathering_type,
        localgroups=self.localgroups,
        concurrent_connections=self.concurrent_connections,
        progress_queue=self.progress_queue)
    self.gatherer_task = asyncio.create_task(self.gatherer.run())
    self.job_generator_task = asyncio.create_task(self.generate_targets())
    session = None
    if self.db_conn is not None:
        session = get_session(self.db_conn)
    while True:
        x = await self.out_q.get()
        if x is None:
            # None is the gatherer's end-of-stream sentinel
            break
        # each queue item is (task id, target, result-or-None, error-or-None)
        tid, target, result, error = x
        if result is None and error is not None:
            #something went error
            logger.debug('[AIOSMBScanner][TargetError][%s] %s' % (target.get_ip(), error))
            if self.progress_queue is None:
                self.prg_errors.update()
            else:
                self.prg_errors_cnt += 1
        if result is not None:
            if self.progress_queue is None:
                if isinstance(result, NetSession):
                    self.prg_sessions.update()
                elif isinstance(result, NetShare):
                    self.prg_shares.update()
                elif isinstance(result, LocalGroup):
                    self.prg_groups.update()
            else:
                if isinstance(result, NetSession):
                    self.prg_sessions_cnt += 1
                elif isinstance(result, NetShare):
                    self.prg_shares_cnt += 1
                elif isinstance(result, LocalGroup):
                    self.prg_groups_cnt += 1
            if session is None:
                # NOTE(review): logger.debug with extra positional args treats
                # them as %-format parameters for a message with no
                # placeholders -- probably meant a single formatted string
                logger.debug(target, str(result),
                             error)
            else:
                # NOTE(review): uses self.ad_id here but self.target_ad
                # elsewhere in this method -- confirm both attributes exist
                result.ad_id = self.ad_id
                session.add(result)
                session.commit()
        if result is None and error is None:
            # (None, None) marks one host fully finished
            logger.debug('Finished: %s' % target.ip)
            if self.progress_queue is None:
                self.prg_hosts.update()
            else:
                self.prg_hosts_cnt += 1
                msg = SMBEnumeratorProgress()
                msg.adid = self.target_ad
                msg.domain_name = self.domain_name
                msg.errors = self.prg_errors_cnt
                msg.sessions = self.prg_sessions_cnt
                msg.shares = self.prg_shares_cnt
                msg.groups = self.prg_groups_cnt
                msg.hosts = self.prg_hosts_cnt
                await self.progress_queue.put(msg)
    logger.info('[+] SMB information acquisition finished!')
    if self.progress_queue is not None:
        msg = SMBEnumeratorProgress()
        msg.msg_type = 'FINISHED'
        msg.adid = self.target_ad
        msg.domain_name = self.domain_name
        await self.progress_queue.put(msg)
    if session is not None and self.target_ad is not None:
        # mark the AD row so a later run can see SMB enumeration completed
        info = session.query(JackDawADInfo).get(self.target_ad)
        info.smb_enumeration_state = 'FINISHED'
        session.commit()
async def run(self): try: self.session = get_session(self.db_conn) info = self.session.query(ADInfo).get(self.ad_id) self.domain_name = str(info.distinguishedName).replace( ',', '.').replace('DC=', '') self.total_targets = self.session.query(func.count( Machine.id)).filter(Machine.ad_id == self.ad_id).scalar() self.job_generator_task = asyncio.create_task( self.generate_targets()) for _ in range(self.worker_cnt): self.rdns_tasks.append(asyncio.create_task(self.rdns_worker())) if self.progress_queue is not None: msg = GathererProgress() msg.type = GathererProgressType.DNS msg.msg_type = MSGTYPE.STARTED msg.adid = self.ad_id msg.domain_name = self.domain_name await self.progress_queue.put(msg) while self.total_targets > (self.prg_hosts_cnt + self.prg_errors_cnt): await asyncio.sleep(0) sid, result, error = await self.out_q.get() if error is not None: err = NetError() err.ad_id = self.ad_id err.machine_sid = sid err.error = str(error) self.session.add(err) self.prg_errors_cnt += 1 continue self.session.add(result) if self.prg_hosts_cnt % self.progress_step_size == 0: self.session.commit() self.prg_hosts_cnt += 1 if self.progress_queue is not None: if self.prg_hosts_cnt % self.progress_step_size == 0: msg = GathererProgress() msg.type = GathererProgressType.DNS msg.msg_type = MSGTYPE.PROGRESS msg.adid = self.ad_id msg.domain_name = self.domain_name msg.errors = self.prg_errors_cnt msg.total = self.total_targets msg.total_finished = self.prg_hosts_cnt msg.step_size = self.progress_step_size await self.progress_queue.put(msg) if self.progress_queue is not None: msg = GathererProgress() msg.type = GathererProgressType.DNS msg.msg_type = MSGTYPE.FINISHED msg.adid = self.ad_id msg.domain_name = self.domain_name await self.progress_queue.put(msg) return True, None except Exception as e: logger.debug('[DNSGatherer] Exception %s' % e) return False, e