def passwordsharing_edges(self):
    logger.info('Adding password sharing edges')
    cnt = 0

    def get_sid_by_nthash(ad_id, nt_hash):
        return self.session.query(ADUser.objectSid, Credential.username)\
            .filter_by(ad_id = ad_id)\
            .filter(Credential.username == ADUser.sAMAccountName)\
            .filter(Credential.nt_hash == nt_hash)

    dup_nthashes_qry = self.session.query(Credential.nt_hash)\
        .filter(Credential.history_no == 0)\
        .filter(Credential.ad_id == self.ad_id)\
        .filter(Credential.username != 'NA')\
        .filter(Credential.domain != '<LOCAL>')\
        .group_by(Credential.nt_hash)\
        .having(func.count(Credential.nt_hash) > 1)

    for res in dup_nthashes_qry.all():
        sidd = {}
        for sid, _ in get_sid_by_nthash(self.ad_id, res[0]).all():
            sidd[sid] = 1

        for sid1 in sidd:
            for sid2 in sidd:
                if sid1 == sid2:
                    continue
                self.add_edge(sid1, sid2, label = 'pwsharing')
                cnt += 1

    logger.info('Added %s password sharing edges' % cnt)
async def __handle_server_in(self):
    try:
        while True:
            try:
                operator_id, cmd, op_cmd_q = await self.server_out_q.get()
                if cmd.cmd == NestOpCmd.CANCEL:
                    if cmd.agent_id not in self.agent_cancellable_tasks:
                        await self.operators[operator_id].server_in_q.put(NestOpErr(cmd.token, 'Agent id not found!'))
                        continue
                    if cmd.token not in self.agent_cancellable_tasks[cmd.agent_id]:
                        await self.operators[operator_id].server_in_q.put(NestOpErr(cmd.token, 'Token incorrect!'))
                        continue
                    self.agent_cancellable_tasks[cmd.agent_id][cmd.token].cancel()

                elif cmd.cmd == NestOpCmd.LISTAGENTS:
                    for agentid in self.agents:
                        agentreply = self.agents[agentid].get_list_reply(cmd)
                        await self.operators[operator_id].server_in_q.put(agentreply)
                    await self.operators[operator_id].server_in_q.put(NestOpOK(cmd.token))

                elif cmd.cmd == NestOpCmd.WSNETLISTROUTERS:
                    for router_id in self.sspi_proxies:
                        notify = NestOpWSNETRouter()
                        notify.token = cmd.token
                        notify.url = self.sspi_proxies[router_id].url
                        notify.router_id = router_id
                        await self.operators[operator_id].server_in_q.put(notify)
                    await self.operators[operator_id].server_in_q.put(NestOpOK(cmd.token))

                elif cmd.cmd == NestOpCmd.WSNETROUTERCONNECT:
                    # operator is requesting the server to create a connection to a wsnet router
                    proxy_id, err = await self.__add_wsnet_router(cmd)
                    if err is not None:
                        await self.operators[operator_id].server_in_q.put(NestOpErr(cmd.token, str(err)))
                        continue
                    await self.operators[operator_id].server_in_q.put(NestOpOK(cmd.token))

                    # notify every connected operator about the new router
                    notify = NestOpWSNETRouter()
                    notify.token = 0
                    notify.url = cmd.url
                    notify.router_id = proxy_id
                    for op_id in self.operators:
                        await self.operators[op_id].server_in_q.put(notify)

                else:
                    logger.info('Dispatching message from operator "%s" to Agent "%s" Command: %s' % (operator_id, cmd.agent_id, cmd.cmd))
                    # operator asks for a full gather to be executed on an agent
                    if cmd.agent_id not in self.agents:
                        await self.operators[operator_id].server_in_q.put(NestOpErr(cmd.token, 'Agent id not found!'))
                        continue
                    agent = self.agents[cmd.agent_id]
                    await agent.cmd_in_q.put((self.operators[operator_id], cmd, self.operators[operator_id].server_in_q, op_cmd_q))

            except Exception as e:
                print('Error processing command!')
                traceback.print_exc()
    except Exception as e:
        traceback.print_exc()
def run(self):
    create_db(self.db_con)
    self.result_process = SMResProc(self.outQ, self.db_con, dns_server = self.dns_server)
    self.result_process.daemon = True
    self.result_process.start()

    for i in range(self.agent_proccnt):
        p = ShareEnumProc(self.inQ, self.outQ, self.agent_threadcnt)
        p.daemon = True
        p.start()
        self.agents.append(p)

    logger.info('=== Enumerating shares ===')
    for t in self.hosts:
        self.inQ.put(t)

    # each agent process runs agent_threadcnt worker threads, so each needs
    # that many termination sentinels
    for a in self.agents:
        for i in range(self.agent_threadcnt):
            self.inQ.put(None)

    for a in self.agents:
        a.join()

    self.outQ.put(None)
    self.result_process.join()
def run(self):
    logger.info('[+] Starting SMB information acquisition. This might take a while...')
    self.in_q = AsyncProcessQueue()
    self.out_q = AsyncProcessQueue()

    if self.use_progress_bar is True:
        self.prg_hosts = tqdm(desc='HOSTS', ascii = True)
        self.prg_shares = tqdm(desc='Shares', ascii = True)
        self.prg_sessions = tqdm(desc='Sessions', ascii = True)
        self.prg_groups = tqdm(desc='LocalGroup', ascii = True)
        self.prg_errors = tqdm(desc='Errors', ascii = True)

    self.results_thread = threading.Thread(target = self.get_results)
    self.results_thread.daemon = True
    self.results_thread.start()

    self.gatherer = AIOSMBGatherer(self.in_q, self.out_q, self.smb_mgr, gather = self.gathering_type, localgroups = self.localgroups, concurrent_connections = self.concurrent_connections)
    self.gatherer.start()

    for target in self.__target_generator():
        self.total_targets += 1
        if self.use_progress_bar is True:
            self.prg_hosts.total = self.total_targets
        self.in_q.put(target)

    self.in_q.put(None)
    #if self.use_progress_bar is True:
    #    self.prg_hosts.total = self.total_targets
    self.results_thread.join()
    logger.info('[+] SMB information acquisition finished!')
def add_credentials_impacket(self, impacket_file):
    self.get_dbsession()
    ctr = 0
    ctr_fail = 0
    try:
        for cred in Credential.from_impacket_file(impacket_file, self.domain_id):
            try:
                self.dbsession.add(cred)
                # commit in batches so a huge dump file doesn't build up one giant transaction
                if ctr % 10000 == 0:
                    logger.info(ctr)
                    self.dbsession.commit()
            except exc.IntegrityError as e:
                ctr_fail += 1
                self.dbsession.rollback()
                continue
            else:
                ctr += 1

        self.dbsession.commit()
        logger.info('Added %d users. Failed inserts: %d' % (ctr, ctr_fail))
    except Exception as e:
        logger.exception('Failed to import credentials from impacket file')
    finally:
        self.dbsession.close()
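# Illustrative only: add_credentials_impacket() takes a secretsdump.py-style dump
# file; Credential.from_impacket_file() presumably parses lines in the usual
#   domain\username:rid:lmhash:nthash:::
# layout. A made-up example line (the hashes below are the well-known empty-LM /
# empty-NT values, not real secrets):
#
#   TEST\Administrator:500:aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0:::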
def run(self):
    create_db(self.db_conn)
    self.result_process = SessMonResProc(self.outQ, self.db_conn, dns_server = None)
    self.result_process.daemon = True
    self.result_process.start()

    for i in range(self.agent_proccnt):
        p = SessMonProc(self.inQ, self.outQ, self.agent_threadcnt)
        p.daemon = True
        p.start()
        self.agents.append(p)

    while True:
        #print('=== Polling sessions ===')
        logger.info('=== Polling sessions ===')
        for t in self.hosts:
            self.inQ.put(t)
        if self.monitor_time != -1:
            time.sleep(self.monitor_time)
            break
        time.sleep(10)

    for a in self.agents:
        for i in range(self.agent_threadcnt):
            self.inQ.put(None)

    for a in self.agents:
        a.join()

    self.outQ.put(None)
    self.result_process.join()
async def handle_operator(self, websocket, path):
    remote_ip, remote_port = websocket.remote_address
    logger.info('Operator connected from %s:%s' % (remote_ip, remote_port))
    operator = NestOperator(websocket, self.db_url, self.msg_queue, self.work_dir, self.graph_type)
    self.operators[operator] = 1
    await operator.run()
    logger.info('Operator disconnected! %s:%s' % (remote_ip, remote_port))
def setup(self):
    logger.info('mgr setup')
    self.session = get_session(self.db_conn)
    for _ in range(self.agent_cnt):
        agent = LDAPEnumeratorAgent(self.ldam_mgr, self.agent_in_q, self.agent_out_q)
        agent.daemon = True
        agent.start()
        self.agents.append(agent)
async def handle_operator(self, websocket, path):
    remote_ip, remote_port = websocket.remote_address
    logger.info('Operator connected from %s:%s' % (remote_ip, remote_port))
    operator_id = str(uuid.uuid4())
    operator = NestOperator(operator_id, websocket, self.db_url, self.server_out_q, self.work_dir, self.graph_type)
    self.operators[operator_id] = operator
    await operator.run()
    logger.info('Operator disconnected! %s:%s' % (remote_ip, remote_port))
def stop_agents(self):
    logger.info('mgr stop')
    self.session.commit()
    self.session.close()
    for _ in self.agents:
        self.agent_in_q.put(None)
    for agent in self.agents:
        agent.join()
    logger.info('stopped all agents!')
async def do_get_cred(self, cmd):
    logger.info('do_get_cred')
    sc = self.db_session.query(StoredCred).get(cmd.cid)
    cr = NestOpCredRes()
    cr.token = cmd.token
    cr.cid = sc.id
    cr.description = sc.description
    await self.websocket.send(cr.to_json())
    await self.send_ok(cmd)
    logger.info('OK!')
def gplink_edges(self):
    logger.info('Adding gplink edges')
    q = self.session.query(ADOU.objectGUID, GPO.objectGUID)\
        .filter_by(ad_id = self.ad_id)\
        .filter(ADOU.objectGUID == Gplink.ou_guid)\
        .filter(Gplink.gpo_dn == GPO.cn)
    cnt = 0
    for res in q.all():
        self.add_edge(res[0], res[1], 'gplink')
        cnt += 1
    logger.info('Added %s gplink edges' % cnt)
async def spam_sessions(self, temp_tok_testing, temp_adid_testing, machine_sids_testing, usernames_testing):
    ###### TESTING!!!!! DELETE THIS!!!!
    logger.info('SPEM!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1')
    for _ in range(1000):
        await asyncio.sleep(0.01)
        reply = NestOpSMBSessionRes()
        reply.token = temp_tok_testing
        reply.adid = temp_adid_testing
        reply.machinesid = random.choice(machine_sids_testing)
        reply.username = random.choice(usernames_testing)
        await self.websocket.send(reply.to_json())
def load(dbsession, graph_id, graph_cache_dir, use_cache = False):
    logger.info('Loading Graphcache file to memory')
    graph_file = graph_cache_dir.joinpath(JackDawDomainGraphIGraph.graph_file_name)
    g = JackDawDomainGraphIGraph(dbsession, graph_id, graph_dir = graph_cache_dir, use_cache = use_cache)
    with open(graph_file, 'r') as f:
        g.graph = igraph.Graph.Read_Edgelist(f, directed = True)
    g.setup()
    logger.info('Loaded Graphcache file to memory OK')
    return g
async def do_get_target(self, cmd):
    logger.info('do_get_target')
    sc = self.db_session.query(CustomTarget).get(cmd.tid)
    cr = NestOpTargetRes()
    cr.token = cmd.token
    cr.tid = sc.id
    cr.hostname = sc.hostname
    cr.description = sc.description
    await self.websocket.send(cr.to_json())
    await self.send_ok(cmd)
    logger.info('OK!')
async def do_list_cred(self, cmd):
    logger.info('do_list_cred')
    ownerid = None
    for res in self.db_session.query(StoredCred.id, StoredCred.description).filter_by(ownerid = ownerid).all():
        await asyncio.sleep(0)
        cr = NestOpCredRes()
        cr.token = cmd.token
        cr.cid = res.id
        cr.description = res.description
        await self.websocket.send(cr.to_json())
    await self.send_ok(cmd)
    logger.info('OK!')
async def handle_incoming(self, websocket, path):
    print(path)
    if path == '/':
        await self.handle_operator(websocket, path)
    elif path.startswith('/guac/rdp'):
        await self.handle_guac(websocket, path, 'rdp')
    elif path.startswith('/guac/ssh'):
        await self.handle_guac(websocket, path, 'ssh')
    elif path.startswith('/guac/vnc'):
        await self.handle_guac(websocket, path, 'vnc')
    else:
        logger.info('Cannot handle path %s' % path)
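# Illustrative only: a minimal sketch of wiring handle_incoming() into the
# websockets library, assuming the legacy handler signature (websocket, path)
# that the method above uses. listen_ip/listen_port are placeholders and
# "server" stands for whatever object owns handle_incoming().
import asyncio
import websockets

async def serve_ws(server, listen_ip = '127.0.0.1', listen_port = 8700):
    # every new client connection is dispatched to server.handle_incoming(websocket, path)
    async with websockets.serve(server.handle_incoming, listen_ip, listen_port):
        await asyncio.Future()  # run until the task is cancelled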
async def do_add_cred(self, cmd):
    logger.info('do_add_cred')
    sc = StoredCred(cmd.username, cmd.password, cmd.description, cmd.domain, ownerid = None) #TODO: fill out owner id
    self.db_session.add(sc)
    self.db_session.commit()
    self.db_session.refresh(sc)
    cr = NestOpCredRes()
    cr.token = cmd.token
    cr.cid = sc.id
    cr.description = sc.description
    await self.websocket.send(cr.to_json())
    await self.send_ok(cmd)
    logger.info('OK!')
def load(dbsession, graph_id, graph_cache_dir):
    logger.info('Loading Graphcache file to memory')
    graph_file = graph_cache_dir.joinpath(JackDawDomainGraphGrapthTools.graph_file_name)
    g = JackDawDomainGraphGrapthTools(dbsession, graph_id)
    g.graph = graph_tool.load_graph_from_csv(str(graph_file), directed = True, string_vals = False, hashed = False)
    g.setup()
    logger.debug('Graph loaded to memory')
    logger.info('Loaded Graphcache file to memory OK')
    return g
async def do_list_target(self, cmd):
    logger.info('do_list_target')
    ownerid = None
    for res in self.db_session.query(CustomTarget).filter_by(ownerid = ownerid).all():
        await asyncio.sleep(0)
        cr = NestOpTargetRes()
        cr.token = cmd.token
        cr.tid = res.id
        cr.hostname = res.hostname
        cr.description = res.description
        await self.websocket.send(cr.to_json())
    await self.send_ok(cmd)
    logger.info('OK!')
def load(dbsession, graph_id, graph_cache_dir, use_cache = True):
    logger.info('Loading Graphcache file to memory')
    graph_file = graph_cache_dir.joinpath(JackDawDomainGraphNetworkx.graph_file_name)
    graph = nx.DiGraph()
    g = JackDawDomainGraphNetworkx(dbsession, graph_id, graph_dir = graph_cache_dir, use_cache = use_cache)
    g.graph = nx.read_edgelist(str(graph_file), nodetype = int, create_using = graph)
    g.setup()
    logger.info('Loaded Graphcache file to memory OK')
    return g
async def do_add_target(self, cmd):
    logger.info('do_add_target')
    ownerid = None
    sc = CustomTarget(cmd.hostname, cmd.description, ownerid = ownerid) #TODO: fill out owner id
    self.db_session.add(sc)
    self.db_session.commit()
    self.db_session.refresh(sc)
    cr = NestOpTargetRes()
    cr.token = cmd.token
    cr.tid = sc.id
    cr.hostname = sc.hostname
    cr.description = sc.description
    await self.websocket.send(cr.to_json())
    await self.send_ok(cmd)
    logger.info('OK!')
async def do_get_target(self, cmd):
    try:
        logger.info('do_get_target')
        sc = self.db_session.query(CustomTarget).get(cmd.tid)
        cr = NestOpTargetRes()
        cr.token = cmd.token
        cr.tid = sc.id
        cr.adid = 0
        cr.hostname = sc.hostname
        cr.description = sc.description
        await self.websocket.send(cr.to_json())
        await self.send_ok(cmd)
        logger.info('OK!')
    except Exception as e:
        traceback.print_exc()
        await self.send_error(cmd, str(e))
def create(dbsession, graph_id, graph_dir, sqlite_file = None):
    logger.info('Create called!')
    graph_id = int(graph_id)
    graph_file = graph_dir.joinpath(JackDawDomainGraphIGraph.graph_file_name)

    logger.debug('Creating a new graph file: %s' % graph_file)

    adids = dbsession.query(GraphInfoAD.ad_id).filter_by(graph_id = graph_id).all()
    if adids is None:
        raise Exception('No ADIDS were found for graph %s' % graph_id)

    using_sqlite_tool = False
    if sqlite_file is not None:
        logger.info('Trying sqlite3 dumping method...')
        # This is a hack.
        # Problem: using sqlalchemy to dump a large table (to get the graph data file) is extremely resource intensive
        # Solution: if sqlite is used as the database backend we can use the sqlite3 cmdline utility to do the dumping much faster
        #
        sf = str(sqlite_file)
        gf = str(graph_file)
        if platform.system() == 'Windows':
            sf = sf.replace('\\', '\\\\')
            gf = gf.replace('\\', '\\\\')
        qry_str = '.open %s\r\n.mode csv\r\n.output %s\r\n.separator " "\r\nSELECT src,dst FROM adedges, adedgelookup WHERE adedges.graph_id = %s AND adedgelookup.id = adedges.src AND adedgelookup.oid IS NOT NULL;\r\n.exit' % (sf, gf, graph_id)
        with open('buildnode.sql', 'w', newline = '') as f:
            f.write(qry_str)

        import subprocess
        import shlex

        cmd = 'cat buildnode.sql | sqlite3'
        if platform.system() == 'Windows':
            cmd = 'type buildnode.sql | sqlite3'

        process = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True)
        _, stderr = process.communicate()
        process.wait()
        if process.returncode == 0:
            using_sqlite_tool = True
            logger.info('sqlite3 dumping method OK!')
        else:
            logger.warning('Failed to use the sqlite3 tool to speed up graph datafile generation. Reason: %s' % stderr)

    if using_sqlite_tool is False:
        for ad_id in adids:
            ad_id = ad_id[0]
            t2 = dbsession.query(func.count(Edge.id)).filter_by(graph_id = graph_id).filter(EdgeLookup.id == Edge.src).filter(EdgeLookup.oid != None).scalar()
            q = dbsession.query(Edge).filter_by(graph_id = graph_id).filter(EdgeLookup.id == Edge.src).filter(EdgeLookup.oid != None)

            with open(graph_file, 'w', newline = '') as f:
                for edge in tqdm(windowed_query(q, Edge.id, 10000), desc = 'edge', total = t2):
                    r = '%s %s\r\n' % (edge.src, edge.dst)
                    f.write(r)

    logger.info('Graphcache file created!')
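# Illustrative only: a rough usage sketch for the create()/load() pair in this
# section, assuming both are exposed as staticmethods of JackDawDomainGraphIGraph
# (the references to the class attribute graph_file_name suggest this). The
# connection string, cache directory, sqlite file name and graph_id below are
# placeholder values, not taken from the project.
from pathlib import Path

db_conn = 'sqlite:///jackdaw.db'              # placeholder SQLAlchemy connection string
cache_dir = Path('./graphcache')              # placeholder cache directory
cache_dir.mkdir(parents = True, exist_ok = True)

dbsession = get_session(db_conn)              # same helper used elsewhere in these snippets
JackDawDomainGraphIGraph.create(dbsession, 1, cache_dir, sqlite_file = 'jackdaw.db')
graph = JackDawDomainGraphIGraph.load(dbsession, 1, cache_dir)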
async def do_get_cred(self, cmd):
    try:
        logger.info('do_get_cred')
        sc = self.db_session.query(CustomCred).get(cmd.cid)
        cr = NestOpCredRes()
        cr.adid = 0
        cr.token = cmd.token
        cr.username = sc.username
        cr.stype = sc.stype
        cr.secret = sc.secret
        cr.domain = sc.domain
        cr.description = sc.description
        await self.websocket.send(cr.to_json())
        await self.send_ok(cmd)
        logger.info('OK!')
    except Exception as e:
        traceback.print_exc()
        await self.send_error(cmd, str(e))
async def do_list_target(self, cmd: NestOpListTarget):
    try:
        logger.info('do_list_target')
        ownerid = None
        for res in self.db_session.query(CustomTarget).filter_by(ownerid = ownerid).all():
            await asyncio.sleep(0)
            cr = NestOpTargetRes()
            cr.token = cmd.token
            cr.tid = res.id
            cr.adid = 0
            cr.hostname = res.hostname
            cr.description = res.description
            await self.websocket.send(cr.to_json())
        await self.send_ok(cmd)
        logger.info('OK!')
    except Exception as e:
        traceback.print_exc()
        await self.send_error(cmd, str(e))
async def do_list_cred(self, cmd):
    try:
        logger.info('do_list_cred')
        ownerid = None
        for res in self.db_session.query(CustomCred).filter_by(ownerid = ownerid).all():
            await asyncio.sleep(0)
            res = typing.cast(CustomCred, res)
            cr = NestOpCredRes()
            cr.adid = 0  # always 0 for custom creds
            cr.token = cmd.token
            cr.cid = res.id
            cr.domain = res.domain
            cr.username = res.username
            cr.stype = res.stype
            cr.secret = res.secret
            cr.description = res.description
            await self.websocket.send(cr.to_json())
        await self.send_ok(cmd)
        logger.info('OK!')
    except Exception as e:
        traceback.print_exc()
        await self.send_error(cmd, str(e))
async def do_add_cred(self, cmd):
    try:
        logger.info('do_add_cred')
        print(cmd.domain)
        sc = CustomCred(cmd.username, cmd.stype, cmd.secret, cmd.description, cmd.domain, ownerid = None) #TODO: fill out owner id
        self.db_session.add(sc)
        self.db_session.commit()
        self.db_session.refresh(sc)
        cr = NestOpCredRes()
        cr.token = cmd.token
        cr.cid = sc.id
        cr.adid = 0
        cr.description = sc.description
        await self.websocket.send(cr.to_json())
        await self.send_ok(cmd)
        logger.info('OK!')
    except Exception as e:
        traceback.print_exc()
        await self.send_error(cmd, str(e))
async def run(self):
    logger.info('[+] Starting SMB information acquisition. This might take a while...')
    self.in_q = asyncio.Queue(self.queue_size)
    self.out_q = asyncio.Queue(self.queue_size)

    if self.progress_queue is None:
        self.prg_hosts = tqdm(desc='HOSTS', ascii = True)
        self.prg_shares = tqdm(desc='Shares', ascii = True)
        self.prg_sessions = tqdm(desc='Sessions', ascii = True)
        self.prg_groups = tqdm(desc='LocalGroup', ascii = True)
        self.prg_errors = tqdm(desc='Errors', ascii = True)
    else:
        msg = SMBEnumeratorProgress()
        msg.msg_type = 'STARTED'
        msg.adid = self.target_ad
        msg.domain_name = self.domain_name
        await self.progress_queue.put(msg)

    #self.results_thread = threading.Thread(target = self.get_results)
    #self.results_thread.daemon = True
    #self.results_thread.start()

    self.gatherer = AIOSMBGatherer(self.in_q, self.out_q, self.smb_mgr, gather = self.gathering_type, localgroups = self.localgroups, concurrent_connections = self.concurrent_connections, progress_queue = self.progress_queue)
    self.gatherer_task = asyncio.create_task(self.gatherer.run())
    self.job_generator_task = asyncio.create_task(self.generate_targets())

    session = None
    if self.db_conn is not None:
        session = get_session(self.db_conn)

    while True:
        x = await self.out_q.get()
        if x is None:
            break

        tid, target, result, error = x
        if result is None and error is not None:
            # something went wrong
            logger.debug('[AIOSMBScanner][TargetError][%s] %s' % (target.get_ip(), error))
            if self.progress_queue is None:
                self.prg_errors.update()
            else:
                self.prg_errors_cnt += 1

        if result is not None:
            if self.progress_queue is None:
                if isinstance(result, NetSession):
                    self.prg_sessions.update()
                elif isinstance(result, NetShare):
                    self.prg_shares.update()
                elif isinstance(result, LocalGroup):
                    self.prg_groups.update()
            else:
                if isinstance(result, NetSession):
                    self.prg_sessions_cnt += 1
                elif isinstance(result, NetShare):
                    self.prg_shares_cnt += 1
                elif isinstance(result, LocalGroup):
                    self.prg_groups_cnt += 1

            if session is None:
                logger.debug('%s %s %s' % (target, str(result), error))
            else:
                result.ad_id = self.ad_id
                session.add(result)
                session.commit()

        if result is None and error is None:
            logger.debug('Finished: %s' % target.ip)
            if self.progress_queue is None:
                self.prg_hosts.update()
            else:
                self.prg_hosts_cnt += 1
                msg = SMBEnumeratorProgress()
                msg.adid = self.target_ad
                msg.domain_name = self.domain_name
                msg.errors = self.prg_errors_cnt
                msg.sessions = self.prg_sessions_cnt
                msg.shares = self.prg_shares_cnt
                msg.groups = self.prg_groups_cnt
                msg.hosts = self.prg_hosts_cnt
                await self.progress_queue.put(msg)

    logger.info('[+] SMB information acquisition finished!')
    if self.progress_queue is not None:
        msg = SMBEnumeratorProgress()
        msg.msg_type = 'FINISHED'
        msg.adid = self.target_ad
        msg.domain_name = self.domain_name
        await self.progress_queue.put(msg)

    if session is not None and self.target_ad is not None:
        info = session.query(JackDawADInfo).get(self.target_ad)
        info.smb_enumeration_state = 'FINISHED'
        session.commit()
def construct(self, construct):
    """
    Fills the network graph from database to memory
    """
    #self.ad_id = ad_id
    session = self.get_session()
    adinfo = session.query(JackDawADInfo).get(construct.ad_id)

    self.domain_sid = str(adinfo.objectSid)

    #self.calc_acl_edges(session, construct)

    #adding group nodes
    logger.debug('Adding group nodes')
    cnt = 0
    for group in adinfo.groups:
        self.add_sid_to_node(group.sid, 'group', construct, name = group.name)
        cnt += 1
    logger.debug('Added %s group nodes' % cnt)

    logger.debug('Adding user nodes')
    cnt = 0
    for user in adinfo.users:
        self.add_sid_to_node(user.objectSid, 'user', construct, name = user.sAMAccountName)
        cnt += 1
    logger.debug('Added %s user nodes' % cnt)

    logger.debug('Adding machine nodes')
    cnt = 0
    for user in adinfo.computers:
        self.add_sid_to_node(user.objectSid, 'machine', construct, name = user.sAMAccountName)
        cnt += 1
    logger.debug('Added %s machine nodes' % cnt)

    logger.debug('Adding hassession edges')
    cnt = 0
    for res in (session.query(JackDawADUser.objectSid, JackDawADMachine.objectSid)
            .filter(NetSession.username == JackDawADUser.sAMAccountName)
            .filter(NetSession.source == JackDawADMachine.sAMAccountName)
            .distinct(NetSession.username)):
        self.add_edge(res[0], res[1], construct, label = 'hasSession')
        self.add_edge(res[1], res[0], construct, label = 'hasSession')
        cnt += 2
    logger.debug('Added %s hassession edges' % cnt)

    logger.debug('Adding localgroup edges')
    cnt = 0
    for res in (session.query(JackDawADUser.objectSid, JackDawADMachine.objectSid, LocalGroup.groupname)
            .filter(JackDawADMachine.id == LocalGroup.machine_id)
            .filter(JackDawADMachine.ad_id == construct.ad_id)
            .filter(JackDawADUser.ad_id == construct.ad_id)
            .filter(JackDawADUser.objectSid == LocalGroup.sid)
            .all()):
        label = None
        weight = 1
        if res[2] == 'Remote Desktop Users':
            label = 'canRDP'
        elif res[2] == 'Distributed COM Users':
            label = 'executeDCOM'
        elif res[2] == 'Administrators':
            label = 'adminTo'
        if label is None:
            # unrecognized local group name, nothing to add
            continue
        self.add_edge(res[0], res[1], construct, label = label, weight = weight)
        cnt += 1
    logger.debug('Added %s localgroup edges' % cnt)

    # TODO: implement this!
    #if self.show_constrained_delegations == True:
    #    pass

    # TODO: implement this!
    #if self.show_unconstrained_delegations == True:
    #    pass

    # TODO: implement this!
    #for relation in construct.custom_relations:
    #    relation.calc()
    #    self.add_edge(res.sid, res.target_sid)

    #print('adding membership edges')
    #adding membership edges
    logger.debug('Adding membership edges')
    cnt = 0
    q = session.query(JackDawTokenGroup).filter_by(ad_id = construct.ad_id)
    for tokengroup in windowed_query(q, JackDawTokenGroup.id, 10000):
        #for tokengroup in adinfo.group_lookups:
        self.add_sid_to_node(tokengroup.sid, 'unknown', construct)
        self.add_sid_to_node(tokengroup.member_sid, 'unknown', construct)

        # users, machines and groups all get the same 'member' edge
        if tokengroup.is_user == True or tokengroup.is_machine == True or tokengroup.is_group == True:
            try:
                self.add_edge(tokengroup.sid, tokengroup.member_sid, construct, label = 'member')
                cnt += 1
            except AssertionError as e:
                logger.exception('Failed to add membership edge')

    logger.debug('Added %s membership edges' % cnt)

    #adding ACL edges
    #self.calc_acl_edges(session, construct)
    #self.calc_acl_edges(adinfo, construct)
    self.calc_acl_edges_mp(session, construct.ad_id, construct)

    logger.info('Adding password sharing edges')
    cnt = 0

    def get_sid_by_nthash(ad_id, nt_hash):
        return session.query(JackDawADUser.objectSid, Credential.username)\
            .filter_by(ad_id = ad_id)\
            .filter(Credential.username == JackDawADUser.sAMAccountName)\
            .filter(Credential.nt_hash == nt_hash)

    dup_nthashes_qry = session.query(Credential.nt_hash)\
        .filter(Credential.history_no == 0)\
        .filter(Credential.ad_id == construct.ad_id)\
        .filter(Credential.username != 'NA')\
        .filter(Credential.domain != '<LOCAL>')\
        .group_by(Credential.nt_hash)\
        .having(func.count(Credential.nt_hash) > 1)

    for res in dup_nthashes_qry.all():
        sidd = {}
        for sid, _ in get_sid_by_nthash(construct.ad_id, res[0]).all():
            sidd[sid] = 1

        for sid1 in sidd:
            for sid2 in sidd:
                if sid1 == sid2:
                    continue
                self.add_edge(sid1, sid2, construct, label = 'pwsharing')
                cnt += 1

    logger.info('Added %s password sharing edges' % cnt)