Ejemplo n.º 1
0
	def hasession_edges(self):
		"""Add bidirectional 'hasSession' edges from network session data.

		Matches NetSession rows to AD accounts by sAMAccountName and to
		machines by (case-insensitive) DNS host name, one pair per distinct
		session username, and adds an edge in both directions for each match.
		"""
		logger.debug('Adding hassession edges')
		cnt = 0
		#for user sessions
		q = self.session.query(ADUser.objectSid, Machine.objectSid)\
			.filter(NetSession.username == ADUser.sAMAccountName)\
			.filter(func.lower(NetSession.source) == func.lower(Machine.dNSHostName))\
			.distinct(NetSession.username)
		
		for res in windowed_query(q, ADUser.id, self.buffer_size, False):
			# res = (user objectSid, machine objectSid); edge added both ways
			self.add_edge(res[0], res[1],'hasSession')
			self.add_edge(res[1], res[0],'hasSession')
			cnt += 2
		#for machine account sessions
		# NOTE(review): this selects Machine.objectSid twice with no alias, so
		# res[0] == res[1] and both add_edge calls below create self-loops —
		# confirm whether an aliased(Machine) was intended for the session source.
		q = self.session.query(Machine.objectSid, Machine.objectSid)\
			.filter(NetSession.username == Machine.sAMAccountName)\
			.filter(func.lower(NetSession.source) == func.lower(Machine.dNSHostName))\
			.distinct(NetSession.username)

		for res in windowed_query(q, Machine.id, self.buffer_size, False):
			
			self.add_edge(res[0], res[1],'hasSession')
			self.add_edge(res[1], res[0],'hasSession')
			cnt += 2
		logger.debug('Added %s hassession edges' % cnt)
Ejemplo n.º 2
0
	def localgroup_edges(self):
		"""Add edges for AD users who are members of well-known machine local
		groups (RDP / DCOM / Administrators / Remote Management).

		Joins LocalGroup membership rows to ADUser and Machine records of the
		current AD and emits one labeled user->machine edge per membership.
		"""
		logger.debug('Adding localgroup edges')
		# Well-known local group name -> edge label.
		group_edge_labels = {
			'Remote Desktop Users': 'canRDP',
			'Distributed COM Users': 'executeDCOM',
			'Administrators': 'adminTo',
			'Remote Management Users': 'psremote',
		}
		cnt = 0
		q = self.session.query(ADUser.objectSid, Machine.objectSid, LocalGroup.groupname
					).filter(Machine.objectSid == LocalGroup.machine_sid
					).filter(Machine.ad_id == self.ad_id
					).filter(ADUser.ad_id == self.ad_id
					).filter(ADUser.objectSid == LocalGroup.sid
					)
		for res in windowed_query(q, ADUser.id, self.buffer_size, False):
			label = group_edge_labels.get(res[2])
			if label is None:
				# BUGFIX: the original fell through with label=None here and
				# still called add_edge, creating unlabeled edges for every
				# unrecognized local group. Skip those memberships instead.
				continue
			self.add_edge(res[0], res[1], label)
			cnt += 1

		logger.debug('Added %s localgroup edges' % cnt)
Ejemplo n.º 3
0
    def create(dbsession, ad_id, graph_id, graph_dir):
        """Write the edge list of *graph_id* to the graph data file.

        Each output line is '<src> <dst>\\r\\n' for every edge whose source
        node resolves to a known oid. *ad_id* is unused but kept for
        interface compatibility with callers.
        """
        graph_file = graph_dir.joinpath(
            JackDawDomainGraphNetworkx.graph_file_name)

        logger.debug('Creating a new graph file: %s' % graph_file)

        # Removed the dead 'fi' lookup (S-1-5-32-545) that was explicitly
        # marked '## remove this' — its result was never used.

        # Count first so tqdm can display a meaningful total.
        t2 = dbsession.query(func.count(Edge.id)).filter_by(
            graph_id=graph_id).filter(EdgeLookup.id == Edge.src).filter(
                EdgeLookup.oid != None).scalar()
        q = dbsession.query(Edge).filter_by(graph_id=graph_id).filter(
            EdgeLookup.id == Edge.src).filter(EdgeLookup.oid != None)

        with open(graph_file, 'w', newline='') as f:
            for edge in tqdm(windowed_query(q, Edge.id, 10000),
                             desc='edge',
                             total=t2):
                f.write('%s %s\r\n' % (edge.src, edge.dst))
        logger.debug('Graph created!')
Ejemplo n.º 4
0
	def create(dbsession, graph_id, graph_dir, sqlite_file = None):
		"""Create the graph data file for *graph_id* in *graph_dir*.

		If *sqlite_file* is given, first tries to dump the edge table with the
		sqlite3 command-line utility (far faster than iterating the ORM); on
		failure, falls back to a windowed SQLAlchemy query.
		"""
		logger.info('Create called!')
		graph_id = int(graph_id)
		graph_file = graph_dir.joinpath(JackDawDomainGraphIGraph.graph_file_name)

		logger.debug('Creating a new graph file: %s' % graph_file)
		
		adids = dbsession.query(GraphInfoAD.ad_id).filter_by(graph_id = graph_id).all()
		if adids is None:
			raise Exception('No ADIDS were found for graph %s' % graph_id)
		
		using_sqlite_tool = False
		if sqlite_file is not None:
			logger.info('Trying sqlite3 dumping method...')
			# HACK: dumping a large table via SQLAlchemy is extremely resource
			# intensive. When sqlite is the DB backend, the sqlite3 cmdline
			# utility can do the dump much faster, driven by a script file.
			sf = str(sqlite_file)
			gf = str(graph_file)
			if platform.system() == 'Windows':
				sf = sf.replace('\\', '\\\\')
				gf = gf.replace('\\', '\\\\')
			qry_str = '.open %s\r\n.mode csv\r\n.output %s\r\n.separator " "\r\nSELECT src,dst FROM adedges, adedgelookup WHERE adedges.graph_id = %s AND adedgelookup.id = adedges.src AND adedgelookup.oid IS NOT NULL;\r\n.exit' % (sf, gf, graph_id)
			script_file = 'buildnode.sql'
			try:
				with open(script_file, 'w', newline='') as f:
					f.write(qry_str)

				import os
				import subprocess

				cmd = 'cat buildnode.sql | sqlite3'
				if platform.system() == 'Windows':
					cmd = 'type buildnode.sql | sqlite3'
				process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
				_, stderr = process.communicate()
				process.wait()

				if process.returncode == 0:
					using_sqlite_tool = True
					logger.info('sqlite3 dumping method OK!')
				else:
					logger.warning('Failed to use the sqlite3 tool to speed up graph datafile generation. Reason: %s' % stderr)
			finally:
				# BUGFIX: the temporary script file used to be left behind.
				try:
					os.remove(script_file)
				except OSError:
					pass

		if using_sqlite_tool is False:
			t2 = dbsession.query(func.count(Edge.id)).filter_by(graph_id = graph_id).filter(EdgeLookup.id == Edge.src).filter(EdgeLookup.oid != None).scalar()
			q = dbsession.query(Edge).filter_by(graph_id = graph_id).filter(EdgeLookup.id == Edge.src).filter(EdgeLookup.oid != None)

			# BUGFIX: the original re-opened graph_file with mode 'w' once per
			# AD id, truncating the file on every iteration. The query does
			# not depend on ad_id, so a single dump writes the complete data.
			with open(graph_file, 'w', newline = '') as f:
				for edge in tqdm(windowed_query(q, Edge.id, 10000), desc = 'edge', total = t2):
					f.write('%s %s\r\n' % (edge.src, edge.dst))
		logger.info('Graphcache file created!')
Ejemplo n.º 5
0
	def allowedtoact_edges(self):
		"""Add one 'allowedtoact' edge (target -> machine) for every
		MachineAllowedToAct row of the current AD."""
		logger.debug('Adding allowedtoact edges')
		query = self.session.query(MachineAllowedToAct.machine_sid, MachineAllowedToAct.target_sid)\
				.filter_by(ad_id = self.ad_id)
		edge_count = 0
		for machine_sid, target_sid in windowed_query(query, MachineAllowedToAct.id, self.buffer_size, False):
			self.add_edge(target_sid, machine_sid, 'allowedtoact')
			edge_count += 1
		logger.debug('Added %s allowedtoact edges' % edge_count)
Ejemplo n.º 6
0
	def gplink_edges(self):
		"""Add one 'gplink' edge (OU -> GPO) for every Gplink row that joins
		an OU of the current AD to a GPO by its distinguished name."""
		logger.debug('Adding gplink edges')
		query = self.session.query(ADOU.objectGUID, GPO.objectGUID)\
				.filter_by(ad_id = self.ad_id)\
				.filter(ADOU.objectGUID == Gplink.ou_guid)\
				.filter(Gplink.gpo_dn == GPO.cn)
		edge_count = 0
		for ou_guid, gpo_guid in windowed_query(query, GPO.id, self.buffer_size, False):
			self.add_edge(ou_guid, gpo_guid, 'gplink')
			edge_count += 1
		logger.debug('Added %s gplink edges' % edge_count)
Ejemplo n.º 7
0
	async def resumption_target_gen_member(self,q, id_filed, obj_type, jobtype):
		"""Serialize every (dn, sid, guid) row of *q* as a JSON line into the
		members target file, counting each toward the poll total.

		*jobtype* is accepted for interface compatibility but not used here.
		"""
		for dn, sid, guid in windowed_query(q, id_filed, 10, is_single_entity = False):
			entry = json.dumps({
				'dn' : dn,
				'sid' : sid,
				'guid' : guid,
				'object_type' : obj_type
			})
			self.members_target_file_handle.write(entry.encode() + b'\r\n')
			self.total_members_to_poll += 1
Ejemplo n.º 8
0
    def run(self):
        """Run the multiprocess SD edge calculation pipeline.

        Spawns one writer process and `self.worker_count` worker processes
        connected via mp queues, streams every JackDawSD row of the current
        AD into the input queue, then shuts the pipeline down with one None
        sentinel per worker plus one for the writer.
        """
        try:
            logger.debug('[ACL] Starting sd edge calc')
            self.inqueue = mp.Queue(self.buffer_size)
            self.outqueue = mp.Queue(self.buffer_size)
            logger.debug('[ACL] Starting processes')

            self.writer = mp.Process(target=edge_calc_writer,
                                     args=(self.outqueue,
                                           self.output_file_path, self.ad_id,
                                           self.append_to_file))
            self.writer.daemon = True
            self.writer.start()

            self.workers = [
                mp.Process(target=edge_calc_worker,
                           args=(self.inqueue, self.outqueue))
                for _ in range(self.worker_count)
            ]
            for proc in self.workers:
                proc.daemon = True
                proc.start()

            logger.debug('[ACL] data generation')

            total = self.session.query(func.count(
                JackDawSD.id)).filter_by(ad_id=self.ad_id).scalar()
            q = self.session.query(JackDawSD).filter_by(ad_id=self.ad_id)

            for adsd in tqdm(windowed_query(q, JackDawSD.id, 10), total=total):
                self.inqueue.put(adsd)

            # BUGFIX: 'procno' was an undefined name here (NameError). One
            # sentinel per worker is what is needed to stop them all.
            for _ in range(self.worker_count):
                self.inqueue.put(None)
            logger.debug('Gen done!')
            # (Removed a log line that referenced the undefined name 'p_cnt',
            # and a leftover debug print(1) in the worker-start loop.)

            logger.debug('[ACL] joining workers')
            for proc in self.workers:
                proc.join()

            logger.debug('[ACL] workers finished, waiting for writer')
            self.outqueue.put(None)
            self.writer.join()

            logger.debug('[ACL] All Finished!')

        except Exception:
            logger.exception('[ACL]')
Ejemplo n.º 9
0
    async def generate_targets(self):
        """Feed one (objectSid, dns_name) tuple per machine of the current AD
        into self.in_q, terminated by a single None sentinel."""
        try:
            machine_qry = self.session.query(Machine).filter_by(ad_id=self.ad_id)
            for machine in windowed_query(machine_qry, Machine.id, 100):
                try:
                    hostname = machine.dNSHostName
                    if hostname is None or hostname == '':
                        # no DNS name recorded: build one from the account name
                        # (minus its trailing character) and the domain name
                        hostname = '%s.%s' % (str(machine.sAMAccountName[:-1]),
                                              str(self.domain_name))
                    await self.in_q.put((machine.objectSid, hostname))
                except:
                    continue

            # signaling the end of target generation
            await self.in_q.put(None)
        except Exception:
            logger.exception('smb generate_targets')
Ejemplo n.º 10
0
    def run(self):
        """Stream every JackDawSD row of the current AD into self.inqueue,
        showing progress with tqdm."""
        try:
            logger.debug('[ACL] Starting sd edge calc')
            logger.debug('[ACL] data generation')

            sd_total = self.session.query(func.count(
                JackDawSD.id)).filter_by(ad_id=self.ad_id).scalar()
            sd_qry = self.session.query(JackDawSD).filter_by(ad_id=self.ad_id)

            windowed = windowed_query(sd_qry, JackDawSD.id, self.worker_count)
            for adsd in tqdm(windowed, total=sd_total):
                self.inqueue.put(adsd)

            logger.debug('[ACL] All Finished!')

        except:
            logger.exception('[ACL]')
Ejemplo n.º 11
0
    def shortest_paths(self, src_sid=None, dst_sid=None):
        """Collect shortest path(s) into a GraphData result.

        Modes:
          - dst_sid only: shortest path to dst from every node, skipping the
            well-known group with SID domain_sid + '-513'.
          - src_sid and dst_sid: shortest path from src to dst.

        Raises Exception when a SID cannot be resolved or when only src_sid
        is given (not implemented).
        """
        nv = GraphData()
        if src_sid is None and dst_sid is None:
            raise Exception('src_sid or dst_sid must be set')
        elif src_sid is None and dst_sid is not None:
            dst = self.__resolve_sid_to_id(dst_sid)
            if dst is None:
                raise Exception('SID not found!')

            total = self.dbsession.query(func.count(
                EdgeLookup.id)).filter_by(ad_id=self.domain_id).filter(
                    EdgeLookup.oid != self.domain_sid + '-513').scalar()
            q = self.dbsession.query(
                EdgeLookup.id).filter_by(ad_id=self.domain_id).filter(
                    EdgeLookup.oid != self.domain_sid + '-513')
            for nodeid in tqdm(windowed_query(q, EdgeLookup.id, 1000),
                               desc='running',
                               total=total):
                # BUGFIX: windowed_query yields 1-tuples for a single-column
                # query; pass the bare id, as all_shortest_paths() does.
                for i, res in enumerate(shortest_path(self.graph, nodeid[0],
                                                      dst)):
                    if res == []:
                        continue
                    # NOTE(review): only even-indexed results are collected —
                    # presumably shortest_path yields alternating entries;
                    # confirm against its implementation.
                    if i % 2 == 0:
                        self.__result_path_add(nv, res)

        elif src_sid is not None and dst_sid is not None:
            dst = self.__resolve_sid_to_id(dst_sid)
            if dst is None:
                raise Exception('SID not found!')

            src = self.__resolve_sid_to_id(src_sid)
            if src is None:
                raise Exception('SID not found!')

            for i, res in enumerate(shortest_path(self.graph, src, dst)):
                if res == []:
                    continue
                if i % 2 == 0:
                    self.__result_path_add(nv, res)

        else:
            raise Exception('Not implemented!')

        return nv
Ejemplo n.º 12
0
	def load(self):
		"""Load the graph's edge list into an igraph directed graph.

		Dumps all edges of self.graph_id to a temporary NCOL file (one
		'src dst label' line per edge), reads it with igraph, then removes
		the file. Also caches the domain SID/id on self.
		"""
		graphinfo = self.dbsession.query(GraphInfo).get(self.graph_id)
		domaininfo = self.dbsession.query(ADInfo).get(graphinfo.ad_id)
		self.domain_sid = domaininfo.objectSid
		self.domain_id = domaininfo.id

		fname = 'tempfile.bla'
		t2 = self.dbsession.query(func.count(Edge.id)).filter_by(graph_id = self.graph_id).scalar()
		q = self.dbsession.query(Edge).filter_by(graph_id = self.graph_id)

		try:
			with open(fname, 'w', newline = '') as f:
				for edge in tqdm(windowed_query(q, Edge.id, 10000), desc = 'edge', total = t2):
					f.write('%s %s %s\r\n' % (edge.src, edge.dst, edge.label))

			self.graph = igraph.Graph.Read_Ncol(fname, directed=True)
		finally:
			# BUGFIX: the temp file used to leak whenever the dump or
			# Read_Ncol raised; always clean it up.
			try:
				os.unlink(fname)
			except OSError:
				pass

		# replaced a leftover debug print('Added!') with a logger call
		logger.debug('Graph loaded!')
Ejemplo n.º 13
0
    def all_shortest_paths(self, src_sid=None, dst_sid=None):
        """Collect all shortest paths into a GraphData result.

        Modes:
          - dst_sid only: all shortest paths to dst from every node, skipping
            the well-known group with SID domain_sid + '-513'.
          - src_sid and dst_sid: all shortest paths from src to dst.

        Raises Exception when a SID cannot be resolved or when only src_sid
        is given (not implemented, same as shortest_paths()).
        """
        nv = GraphData()
        if src_sid is None and dst_sid is None:
            raise Exception('src_sid or dst_sid must be set')
        elif src_sid is None and dst_sid is not None:
            dst = self.__resolve_sid_to_id(dst_sid)
            if dst is None:
                raise Exception('SID not found!')

            total = self.dbsession.query(func.count(
                EdgeLookup.id)).filter_by(ad_id=self.domain_id).filter(
                    EdgeLookup.oid != self.domain_sid + '-513').scalar()
            q = self.dbsession.query(
                EdgeLookup.id).filter_by(ad_id=self.domain_id).filter(
                    EdgeLookup.oid != self.domain_sid + '-513')
            for nodeid in tqdm(windowed_query(q, EdgeLookup.id, 1000),
                               desc='running',
                               total=total):
                for path in all_shortest_paths(self.graph, nodeid[0], dst):
                    self.__result_path_add(nv, path)

        elif src_sid is not None and dst_sid is not None:
            src = self.__resolve_sid_to_id(src_sid)
            if src is None:
                raise Exception('SID not found!')

            dst = self.__resolve_sid_to_id(dst_sid)
            if dst is None:
                raise Exception('SID not found!')

            for path in all_shortest_paths(self.graph, src, dst):
                self.__result_path_add(nv, path)

        else:
            # BUGFIX: the original silently returned an empty result for the
            # src-only case; fail loudly, consistent with shortest_paths().
            # (Also removed leftover debug print() calls throughout.)
            raise Exception('Not implemented!')

        return nv
Ejemplo n.º 14
0
    async def generate_targets(self):
        """Feed one (objectSid, dns_name) tuple per machine of the current AD
        into self.in_q, applying the configured target filters, terminated by
        a single None sentinel.

        Supported filter: 'live' — only machines whose password was set
        within the last 90 days.
        """
        try:
            q = self.session.query(Machine).filter_by(ad_id=self.ad_id)
            # renamed loop variable: 'filter' shadowed the builtin
            for target_filter in self.target_filters:
                if target_filter == 'live':
                    filter_after = datetime.datetime.today(
                    ) - datetime.timedelta(days=90)
                    q = q.filter(Machine.pwdLastSet >= filter_after)

            for machine in windowed_query(q, Machine.id, 100):
                try:
                    dns_name = machine.dNSHostName
                    if dns_name is None or dns_name == '':
                        # no DNS name: build one from the account name (minus
                        # its trailing character) and the domain name
                        dns_name = '%s.%s' % (str(machine.sAMAccountName[:-1]),
                                              str(self.domain_name))
                    await self.in_q.put((machine.objectSid, dns_name))
                except:
                    continue

            # signaling the end of target generation
            await self.in_q.put(None)
        except Exception:
            logger.exception('smb generate_targets')
Ejemplo n.º 15
0
    def create(dbsession, graph_id, graph_dir):
        """Write the edge list of *graph_id* to the graph data file, covering
        every AD that belongs to the graph.

        Each output line is '<src> <dst>\\r\\n' for every edge whose source
        node resolves to a known oid.
        """
        graph_file = graph_dir.joinpath(
            JackDawDomainGraphNetworkx.graph_file_name)

        logger.debug('Creating a new graph file: %s' % graph_file)

        adids = dbsession.query(
            GraphInfoAD.ad_id).filter_by(graph_id=graph_id).all()
        if adids is None:
            raise Exception('No ADIDS were found for graph %s' % graph_id)

        # BUGFIX: the output file used to be re-opened with mode 'w' inside
        # the per-AD loop, truncating earlier ADs' edges. Open it once.
        with open(graph_file, 'w', newline='') as f:
            for ad_id in adids:
                ad_id = ad_id[0]
                # NOTE(review): these queries filter by graph_id only; ad_id
                # is never used — confirm whether an ad_id filter was intended.
                t2 = dbsession.query(func.count(Edge.id)).filter_by(
                    graph_id=graph_id).filter(EdgeLookup.id == Edge.src).filter(
                        EdgeLookup.oid != None).scalar()
                q = dbsession.query(Edge).filter_by(graph_id=graph_id).filter(
                    EdgeLookup.id == Edge.src).filter(EdgeLookup.oid != None)

                for edge in tqdm(windowed_query(q, Edge.id, 10000),
                                 desc='edge',
                                 total=t2):
                    f.write('%s %s\r\n' % (edge.src, edge.dst))
        logger.debug('Graph created!')
Ejemplo n.º 16
0
    async def do_load_graph(self, cmd):
        """Stream the full contents of a stored graph to the client.

        For every AD belonging to cmd.graphid, sends (in order): computers,
        users, local groups, SMB shares, SMB sessions, groups, and edges over
        the websocket, batching list-type replies in chunks of 100. Sends an
        OK on success or an error reply on failure.
        """
        try:
            # loads an AD scan and sends all results to the client
            logger.info('do_load_graph')
            # sanity check if the AD exists
            qry_res = self.db_session.query(
                GraphInfoAD.ad_id).filter_by(graph_id=cmd.graphid).all()
            if qry_res is None:
                await self.send_error(cmd, 'No AD ID exists with that ID')
                return
            res = []
            for r in qry_res:
                res.append(r[0])  #ugly, pls fix!

            for adid in res:
                #sending machines
                logger.info('computer!')
                compbuff = NestOpComputerBuffRes()
                compbuff.token = cmd.token
                qry = self.db_session.query(Machine).filter_by(ad_id=adid)
                for computer in windowed_query(qry, Machine.id, 100):
                    await asyncio.sleep(0)
                    reply = NestOpComputerRes()
                    reply.token = cmd.token
                    reply.name = computer.sAMAccountName
                    reply.adid = computer.ad_id
                    reply.sid = computer.objectSid
                    reply.domainname = computer.dNSHostName
                    reply.osver = computer.operatingSystem
                    reply.ostype = computer.operatingSystemVersion
                    reply.description = computer.description
                    if computer.isAdmin is not None:
                        reply.is_admin = int(computer.isAdmin)
                    reply.isinactive = 1
                    if computer.lastLogonTimestamp is not None:
                        if (datetime.datetime.utcnow() -
                                computer.lastLogonTimestamp).days > (6 * 30):
                            reply.isinactive = 0

                    if computer.UAC_SERVER_TRUST_ACCOUNT is True:
                        reply.computertype = 'DOMAIN_CONTROLLER'
                    elif computer.operatingSystem is not None:
                        if computer.operatingSystem.lower().find(
                                'windows') != -1:
                            if computer.operatingSystem.lower().find(
                                    'server') != -1:
                                reply.computertype = 'SERVER'
                            else:
                                reply.computertype = 'WORKSTATION'
                        else:
                            reply.computertype = 'NIX'
                    else:
                        reply.computertype = 'DUNNO'

                    compbuff.computers.append(reply)
                    if len(compbuff.computers) >= 100:
                        await self.websocket.send(compbuff.to_json())
                        compbuff = NestOpComputerBuffRes()
                        compbuff.token = cmd.token

                if len(compbuff.computers) > 0:
                    await self.websocket.send(compbuff.to_json())
                    compbuff = NestOpComputerBuffRes()
                    compbuff.token = cmd.token

            for adid in res:
                #sending users
                logger.info('users!')
                userbuff = NestOpUserBuffRes()
                userbuff.token = cmd.token
                qry = self.db_session.query(ADUser).filter_by(ad_id=adid)
                for user in windowed_query(qry, ADUser.id, 100):
                    await asyncio.sleep(0)
                    reply = NestOpUserRes()
                    reply.token = cmd.token
                    reply.name = user.sAMAccountName
                    reply.adid = user.ad_id
                    reply.sid = user.objectSid
                    reply.kerberoast = 1 if user.servicePrincipalName is not None else 0
                    reply.asreproast = int(user.UAC_DONT_REQUIRE_PREAUTH)
                    reply.nopassw = int(user.UAC_PASSWD_NOTREQD)
                    reply.cleartext = int(
                        user.UAC_ENCRYPTED_TEXT_PASSWORD_ALLOWED)
                    reply.smartcard = int(user.UAC_SMARTCARD_REQUIRED)
                    reply.active = int(user.canLogon)
                    reply.description = user.description
                    if user.adminCount is not None:
                        reply.is_admin = int(user.adminCount)
                    else:
                        reply.is_admin = 0
                    userbuff.users.append(reply)
                    if len(userbuff.users) >= 100:
                        await self.websocket.send(userbuff.to_json())
                        userbuff = NestOpUserBuffRes()
                        userbuff.token = cmd.token

                if len(userbuff.users) > 0:
                    await self.websocket.send(userbuff.to_json())
                    userbuff = NestOpUserBuffRes()
                    userbuff.token = cmd.token

            for adid in res:
                #sending localgroups
                logger.info('localgroups!')
                for lgroup in self.db_session.query(LocalGroup).filter_by(
                        ad_id=adid).all():
                    await asyncio.sleep(0)
                    reply = NestOpSMBLocalGroupRes()
                    reply.token = cmd.token
                    reply.adid = lgroup.ad_id
                    reply.machinesid = lgroup.machine_sid
                    reply.usersid = lgroup.sid
                    reply.groupname = lgroup.groupname
                    await self.websocket.send(reply.to_json())

            for adid in res:
                #sending smb shares
                logger.info('SHARES!')
                sharebuffer = NestOpSMBShareBuffRes()
                sharebuffer.token = cmd.token
                qry = self.db_session.query(NetShare).filter_by(ad_id=adid)
                for share in windowed_query(qry, NetShare.id, 100):
                    await asyncio.sleep(0)
                    reply = NestOpSMBShareRes()
                    reply.token = cmd.token
                    reply.adid = share.ad_id
                    reply.machinesid = share.machine_sid
                    reply.netname = share.netname
                    # BUGFIX: the reply was built but never appended, so no
                    # share was ever sent (every other section appends).
                    sharebuffer.shares.append(reply)
                    if len(sharebuffer.shares) >= 100:
                        await self.websocket.send(sharebuffer.to_json())
                        sharebuffer = NestOpSMBShareBuffRes()
                        sharebuffer.token = cmd.token

                if len(sharebuffer.shares) > 0:
                    await self.websocket.send(sharebuffer.to_json())
                    sharebuffer = NestOpSMBShareBuffRes()
                    sharebuffer.token = cmd.token

            for adid in res:
                #sending smb sessions
                logger.info('SESSIONS!')
                for session in self.db_session.query(NetSession).filter_by(
                        ad_id=adid).all():
                    await asyncio.sleep(0)
                    reply = NestOpSMBSessionRes()
                    reply.token = cmd.token
                    reply.adid = session.ad_id
                    reply.machinesid = session.machine_sid
                    reply.username = session.username
                    await self.websocket.send(reply.to_json())

            for adid in res:
                #sending groups
                logger.info('GROUPS!')
                groupbuffer = NestOpGroupBuffRes()
                groupbuffer.token = cmd.token
                qry = self.db_session.query(Group).filter_by(ad_id=adid)
                for group in windowed_query(qry, Group.id, 100):
                    await asyncio.sleep(0)
                    reply = NestOpGroupRes()
                    reply.token = cmd.token
                    reply.adid = group.ad_id
                    reply.name = group.sAMAccountName
                    reply.dn = group.dn
                    reply.guid = group.objectGUID
                    reply.sid = group.objectSid
                    reply.description = group.description
                    if group.adminCount is not None:
                        reply.is_admin = int(group.adminCount)
                    else:
                        reply.is_admin = 0

                    groupbuffer.groups.append(reply)

                    if len(groupbuffer.groups) >= 100:
                        await self.websocket.send(groupbuffer.to_json())
                        groupbuffer = NestOpGroupBuffRes()
                        groupbuffer.token = cmd.token

                if len(groupbuffer.groups) > 0:
                    await self.websocket.send(groupbuffer.to_json())
                    groupbuffer = NestOpGroupBuffRes()
                    groupbuffer.token = cmd.token

            for adid in res:
                #sending edges
                logger.info('EDGES!')
                edgebuffer = NestOpEdgeBuffRes()
                edgebuffer.token = cmd.token

                qry = self.db_session.query(Edge).filter_by(ad_id=adid)
                for edge in windowed_query(qry, Edge.id, 100):
                    await asyncio.sleep(0)
                    reply = NestOpEdgeRes()
                    reply.token = cmd.token
                    reply.adid = edge.ad_id
                    reply.graphid = edge.graph_id
                    reply.src = self.lookup_oid(edge.src, edge.ad_id,
                                                cmd.token)
                    reply.dst = self.lookup_oid(edge.dst, edge.ad_id,
                                                cmd.token)
                    reply.label = edge.label
                    # NOTE(review): only src is validated here; a failed dst
                    # lookup still gets sent — confirm whether dst should be
                    # checked the same way.
                    if reply.src is None or reply.src == '':
                        continue

                    edgebuffer.edges.append(reply)
                    if len(edgebuffer.edges) >= 100:
                        await self.websocket.send(edgebuffer.to_json())
                        edgebuffer = NestOpEdgeBuffRes()
                        edgebuffer.token = cmd.token

                if len(edgebuffer.edges) > 0:
                    await self.websocket.send(edgebuffer.to_json())
                    edgebuffer = NestOpEdgeBuffRes()
                    edgebuffer.token = cmd.token

            await self.send_ok(cmd)
            logger.info('OK!')
        except Exception as e:
            await self.send_error(cmd, "Error! Reason: %s" % e)
            # BUGFIX: log message named the wrong handler ('do_load_ad')
            logger.exception('do_load_graph')
Ejemplo n.º 17
0
	async def calc_sds_mp(self):
		"""Calculate SD (security descriptor) edges and upload them to the DB.

		Two phases:
		  1. Stream JackDawSD rows in windows, batch them through
		     calc_sds_batch which writes 'src,dst,label,_' CSV lines to a
		     temp file, reporting progress to self.progress_queue.
		  2. Re-read the temp file and bulk-insert the edges into the Edge
		     table, again reporting progress.

		Returns (True, None) on success, (False, exception) on failure.
		The temp CSV file is always removed in the finally block.
		"""
		await self.log_msg('Calculating SD edges')
		logger.debug('starting calc_sds_mp')
		try:
			# NOTE(review): cnt is never incremented anywhere in this method,
			# yet it is used further down as the upload progress bar's total
			# (which will therefore be 0) — looks like it was meant to count
			# written edges; TODO confirm.
			cnt = 0
			total = self.session.query(func.count(JackDawSD.id)).filter(JackDawSD.ad_id == self.ad_id).scalar()
			logger.debug('calc_sds_mp total SDs %s' % str(total))
			q = self.session.query(JackDawSD).filter_by(ad_id = self.ad_id)

			if self.progress_queue is not None:
				msg = GathererProgress()
				msg.type = GathererProgressType.SDCALC
				msg.msg_type = MSGTYPE.STARTED
				msg.adid = self.ad_id
				msg.domain_name = self.domain_name
				await self.progress_queue.put(msg)

			sdcalc_pbar = None
			if self.show_progress is True:
				sdcalc_pbar = tqdm(desc ='Writing SD edges to file', total=total, disable=self.disable_tqdm)

			# Intermediate CSV buffer for the calculated edges.
			sdfilename = 'sdcalc.csv'
			if self.work_dir is not None:
				sdfilename = str(self.work_dir.joinpath('sdcalc.csv'))

			testfile = open(sdfilename, 'w+', newline = '') #tempfile.TemporaryFile('w+', newline = '')
			buffer = []
			if self.mp_pool is None:
				try:
					self.mp_pool = mp.Pool()
				except ImportError:
					self.mp_pool = None
					
			logger.debug('calc_sds_mp starting calc')
			# tf = total rows processed; last_stat_cnt = rows reported so far
			tf = 0
			last_stat_cnt = 0
			try:
				# Phase 1: batch rows through calc_sds_batch into the CSV file.
				for adsd in windowed_query(q, JackDawSD.id, self.buffer_size):
					tf += 1
					# detach from the ORM session via a dict round-trip
					adsd = JackDawSD.from_dict(adsd.to_dict())
					buffer.append(adsd)
					if len(buffer) == self.buffer_size:
						self.calc_sds_batch(buffer, testfile)
						buffer = []
						
						if sdcalc_pbar is not None:
							sdcalc_pbar.update(self.buffer_size)
								
					if self.progress_queue is not None and tf % self.progress_step_size == 0:
						last_stat_cnt += self.progress_step_size
						now = datetime.datetime.utcnow()
						td = (now - self.progress_last_updated).total_seconds()
						self.progress_last_updated = now
						msg = GathererProgress()
						msg.type = GathererProgressType.SDCALC
						msg.msg_type = MSGTYPE.PROGRESS
						msg.adid = self.ad_id
						msg.domain_name = self.domain_name
						msg.total = total
						msg.total_finished = tf
						if td > 0:
							msg.speed = str(self.progress_step_size // td)
						msg.step_size = self.progress_step_size
						await self.progress_queue.put(msg)
						await asyncio.sleep(0)
				
				# flush the final partial batch
				if len(buffer) > 0:
					self.calc_sds_batch(buffer, testfile)
					if self.progress_queue is not None:
						now = datetime.datetime.utcnow()
						td = (now - self.progress_last_updated).total_seconds()
						self.progress_last_updated = now
						msg = GathererProgress()
						msg.type = GathererProgressType.SDCALC
						msg.msg_type = MSGTYPE.PROGRESS
						msg.adid = self.ad_id
						msg.domain_name = self.domain_name
						msg.total = total
						msg.total_finished = tf
						if td > 0:
							msg.speed = str(len(buffer) // td)
						msg.step_size = tf - last_stat_cnt
						await self.progress_queue.put(msg)
						await asyncio.sleep(0)

					buffer = []

				if self.progress_queue is not None:
					msg = GathererProgress()
					msg.type = GathererProgressType.SDCALC
					msg.msg_type = MSGTYPE.FINISHED
					msg.adid = self.ad_id
					msg.domain_name = self.domain_name
					await self.progress_queue.put(msg)

				
				if self.show_progress is True and sdcalc_pbar is not None:
					sdcalc_pbar.refresh()
					sdcalc_pbar.disable = True

			except Exception as e:
				logger.exception('SD calc exception!')
				raise e
			finally:
				# only close a pool we created ourselves
				if self.foreign_pool is False and self.mp_pool is not None:
					self.mp_pool.close()

			if self.progress_queue is not None:
				msg = GathererProgress()
				msg.type = GathererProgressType.SDCALCUPLOAD
				msg.msg_type = MSGTYPE.STARTED
				msg.adid = self.ad_id
				msg.domain_name = self.domain_name
				await self.progress_queue.put(msg)
			
			# Phase 2: re-read the CSV buffer and bulk-insert edges to the DB.
			logger.debug('Writing SD edge file contents to DB')
			await self.log_msg('Writing SD edge file contents to DB')
			sdcalcupload_pbar = None
			if self.show_progress is True:
				sdcalcupload_pbar = tqdm(desc = 'Writing SD edge file contents to DB', total = cnt, disable=self.disable_tqdm)

			engine = self.session.get_bind()
			print(engine)

			testfile.seek(0,0)
			last_stat_cnt = 0
			i = 0

			insert_buffer = []
			for line in testfile:
				i += 1
				line = line.strip()
				src_id, dst_id, label, _ = line.split(',')
				insert_buffer.append(
					{
						"ad_id": self.ad_id,
						'graph_id' : self.graph_id,
						'src' : int(src_id),
						'dst' : int(dst_id),
						'label' : label
					}
				)
				# bulk-insert every buffer_size*100 rows to bound memory use
				if i % (self.buffer_size*100) == 0:
					engine.execute(Edge.__table__.insert(), insert_buffer)
					if self.show_progress is True:
						sdcalcupload_pbar.update(self.buffer_size*100)
					insert_buffer = []
					
				if self.progress_queue is not None and i % self.progress_step_size == 0:
					last_stat_cnt += self.progress_step_size
					now = datetime.datetime.utcnow()
					td = (now - self.progress_last_updated).total_seconds()
					self.progress_last_updated = now
					msg = GathererProgress()
					msg.type = GathererProgressType.SDCALCUPLOAD
					msg.msg_type = MSGTYPE.PROGRESS
					msg.adid = self.ad_id
					msg.domain_name = self.domain_name
					msg.total = self.sd_edges_written
					msg.total_finished = i
					if td > 0:
						msg.speed = str(self.progress_step_size // td)
					msg.step_size = self.progress_step_size
					await self.progress_queue.put(msg)
					await asyncio.sleep(0)
			
			# insert any remaining rows
			if len(insert_buffer) > 0:
				engine.execute(Edge.__table__.insert(), insert_buffer)
				if self.show_progress is True:
					sdcalcupload_pbar.update(len(insert_buffer))
				insert_buffer = []

			if self.progress_queue is not None:
				msg = GathererProgress()
				msg.type = GathererProgressType.SDCALCUPLOAD
				msg.msg_type = MSGTYPE.FINISHED
				msg.adid = self.ad_id
				msg.domain_name = self.domain_name
				await self.progress_queue.put(msg)
			
			self.session.commit()
			return True, None
		except Exception as e:
			logger.exception('sdcalc!')
			return False, e
		finally:
			os.remove(sdfilename)
Example #18
0
    def _dump_ldap_objects(self, adinfo, query, key_col, object_type, with_members=False):
        """Re-emit one JSON line per DB row into the resumption temp files.

        Used only on resumption: objects already stored in the DB are written
        back to the SD temp file (and, when ``with_members`` is True, also to
        the members temp file) in the same line format a fresh collection
        produces, gated by adinfo's ldap_*_finished flags.

        :param adinfo: ADInfo row; ldap_sds_finished / ldap_members_finished
                       decide which file(s) actually get written
        :param query: SQLAlchemy query yielding rows with dn/objectGUID attrs
        :param key_col: column handed to windowed_query for keyset paging
        :param object_type: value of the 'object_type' field on each line
        :param with_members: also write each line to the members file
        """
        for row in windowed_query(query, key_col, 100):
            line = json.dumps({
                'dn': row.dn,
                # ADOU/GPO rows carry no SID; original code emitted None there
                'sid': getattr(row, 'objectSid', None),
                'guid': row.objectGUID,
                'object_type': object_type
            }).encode() + b'\r\n'
            if adinfo.ldap_sds_finished is False:
                self.sd_file_handle.write(line)
            if with_members is True and adinfo.ldap_members_finished is False:
                self.members_file_handle.write(line)

    async def run(self):
        """Run the LDAP information acquisition.

        Fresh run (resumption False): creates the temp member/SD gzip files,
        runs BaseCollector to fill them, then runs collect_sd() and
        collect_members(). Resumed run: rebuilds whichever temp file belongs
        to an unfinished phase from the rows already in the DB.

        Returns:
            (ad_id, graph_id, error) — error is None on success; on failure
            the first two elements are None and error holds the exception.
        """
        try:
            logger.debug(
                '[+] Starting LDAP information acqusition. This might take a while...'
            )
            self.session = get_session(self.db_conn)

            # Normalize work_dir into an existing pathlib.Path
            if self.work_dir is None:
                self.work_dir = pathlib.Path('./workdir')
                self.work_dir.mkdir(parents=True, exist_ok=True)
            if isinstance(self.work_dir, str) is True:
                self.work_dir = pathlib.Path(self.work_dir)

            self.members_target_file_name = str(
                self.work_dir.joinpath('temp_members_list.gz'))
            # str() added for consistency — this was the only target name left
            # as a bare Path (gzip.GzipFile accepts both, so behavior is same)
            self.sd_target_file_name = str(
                self.work_dir.joinpath('temp_sd_list.gz'))

            if self.resumption is False:
                # Fresh collection: BaseCollector streams into both temp files
                self.members_file_handle = gzip.GzipFile(
                    self.members_target_file_name, mode='wb')
                self.sd_file_handle = gzip.GzipFile(self.sd_target_file_name,
                                                    mode='wb')
                bc = BaseCollector(
                    self.session,
                    self.ldap_mgr,
                    agent_cnt=self.agent_cnt,
                    progress_queue=self.progress_queue,
                    show_progress=self.show_progress,
                    members_file_handle=self.members_file_handle,
                    sd_file_handle=self.sd_file_handle,
                    stream_data=self.stream_data)
                self.ad_id, self.graph_id, err = await bc.run()
                # NOTE(review): every other error check here uses `is not None`;
                # `err is False` only triggers on a literal False — confirm that
                # BaseCollector.run really signals failure this way.
                if err is False:
                    return None, None, err

                if self.base_collection_finish_evt is not None:
                    self.base_collection_finish_evt.set()
                self.members_file_handle.close()
                self.sd_file_handle.close()

                _, err = await self.collect_sd()
                if err is not None:
                    raise err

                _, err = await self.collect_members()
                if err is not None:
                    raise err

            else:
                # Resumption: rebuild temp files from what is already in the DB
                adinfo = self.session.query(ADInfo).get(self.ad_id)
                self.graph_id = adinfo.graph_id
                if adinfo.ldap_sds_finished is True and adinfo.ldap_members_finished is True:
                    # Nothing left to do for this domain
                    return self.ad_id, self.graph_id, None

                # Drop partial results of whichever phase did not finish
                if adinfo.ldap_sds_finished is False:
                    self.session.query(JackDawSD).filter_by(
                        ad_id=self.ad_id).delete()
                    self.session.commit()

                if adinfo.ldap_members_finished is False:
                    # NOTE(review): deletes ALL Edge rows, not just this ad_id's —
                    # the other deletes filter by ad_id; verify this is intended.
                    self.session.query(Edge).delete()
                    self.session.commit()

                if adinfo.ldap_members_finished is False:
                    self.members_file_handle = gzip.GzipFile(
                        self.members_target_file_name, mode='wb')
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle = gzip.GzipFile(
                        self.sd_target_file_name, mode='wb')

                # The domain object itself only goes to the SD file (reuses the
                # adinfo row fetched above; the original re-queried it)
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.write(json.dumps({
                        'dn': adinfo.distinguishedName,
                        'sid': adinfo.objectSid,
                        'guid': adinfo.objectGUID,
                        'object_type': 'domain'
                    }).encode() + b'\r\n')

                # Users, machines and groups go to both files; OUs and GPOs
                # only to the SD file (they have no SID)
                self._dump_ldap_objects(
                    adinfo, self.session.query(ADUser).filter_by(ad_id=self.ad_id),
                    ADUser.id, 'user', with_members=True)
                self._dump_ldap_objects(
                    adinfo, self.session.query(Machine).filter_by(ad_id=self.ad_id),
                    Machine.id, 'machine', with_members=True)
                self._dump_ldap_objects(
                    adinfo, self.session.query(Group).filter_by(ad_id=self.ad_id),
                    Group.id, 'group', with_members=True)
                self._dump_ldap_objects(
                    adinfo, self.session.query(ADOU).filter_by(ad_id=self.ad_id),
                    ADOU.id, 'ou')
                self._dump_ldap_objects(
                    adinfo, self.session.query(GPO).filter_by(ad_id=self.ad_id),
                    GPO.id, 'gpo')

                if adinfo.ldap_members_finished is False:
                    self.members_file_handle.close()
                if adinfo.ldap_sds_finished is False:
                    self.sd_file_handle.close()

            logger.debug('[+] LDAP information acqusition finished!')
            return self.ad_id, self.graph_id, None
        except Exception as e:
            return None, None, e