def enumerate_gpos(self):
    gpos = []
    resolver = AceResolver(self.addomain, self.addomain.objectresolver)
    entries = self.addc.get_gpos()
    for entry in entries:
        gpo = {
            "Properties": {
                "highvalue": ADUtils.get_entry_property(entry, 'isCriticalSystemObject', default=False),
                "name": ADUtils.get_entry_property(entry, 'displayName'),
                "domain": '.'.join(str(ADUtils.get_entry_property(entry, 'distinguishedName')).split('DC')[1:]).translate({ord(c): '' for c in '=,'}),
                "objectid": str(ADUtils.get_entry_property(entry, 'objectGUID')).translate({ord(c): '' for c in '}{'}),
                "distinguishedname": ADUtils.get_entry_property(entry, 'distinguishedName'),
                "description": None,
                "gpcpath": ADUtils.get_entry_property(entry, 'gPCFileSysPath')
            },
            "ObjectIdentifier": str(ADUtils.get_entry_property(entry, 'objectGUID')).translate({ord(c): '' for c in '}{'}),
            "Aces": []
        }
        _, aces = parse_binary_acl(gpo, 'gpo',
                                   ADUtils.get_entry_property(entry, 'nTSecurityDescriptor'),
                                   self.addc.objecttype_guid_map)
        gpo['Aces'] = resolver.resolve_aces(aces)
        gpos.append(gpo)
    self.dump_gpos(gpos)
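# Illustrative sketch (not part of the collector): the two string transforms
# used in enumerate_gpos() above, shown on a hypothetical GPO entry. The _demo_
# helper, the sample DN and the sample GUID are made up purely for
# demonstration; the real values come from LDAP via ADUtils.get_entry_property.
def _demo_gpo_normalization():
    dn = 'CN={31B2F340-016D-11D2-945F-00C04FB984F9},CN=Policies,CN=System,DC=contoso,DC=local'
    guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
    # Everything after the first 'DC' component of the DN, joined with dots and
    # stripped of '=' and ',' characters, yields the domain name.
    domain = '.'.join(dn.split('DC')[1:]).translate({ord(c): '' for c in '=,'})
    # The curly braces are stripped so the GUID matches BloodHound's objectid format.
    objectid = guid.translate({ord(c): '' for c in '}{'})
    return domain, objectid  # ('contoso.local', '31B2F340-016D-11D2-945F-00C04FB984F9')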
class MembershipEnumerator(object): """ Class to enumerate memberships in the domain. Contains the dumping functions which methods from the bloodhound.ad module. """ def __init__(self, addomain, addc, collect, disable_pooling): """ Membership enumeration. Enumerates all groups/users/other memberships. """ self.addomain = addomain self.addc = addc # Store collection methods specified self.collect = collect self.disable_pooling = disable_pooling self.aclenumerator = AclEnumerator(addomain, addc, collect) self.aceresolver = AceResolver(addomain, addomain.objectresolver) self.result_q = None def get_membership(self, member): """ Attempt to resolve the membership (DN) of a group to an object """ # First assume it is a user try: resolved_entry = self.addomain.users[member] except KeyError: # Try if it is a group try: resolved_entry = self.addomain.groups[member] except KeyError: # Try if it is a computer try: entry = self.addomain.computers[member] # Computers are stored as raw entries resolved_entry = ADUtils.resolve_ad_entry(entry) except KeyError: use_gc = ADUtils.ldap2domain( member) != self.addomain.domain qobject = self.addomain.objectresolver.resolve_distinguishedname( member, use_gc=use_gc) if qobject is None: return None resolved_entry = ADUtils.resolve_ad_entry(qobject) # Store it in the cache if resolved_entry['type'] == 'User': self.addomain.users[member] = resolved_entry if resolved_entry['type'] == 'Group': self.addomain.groups[member] = resolved_entry # Computers are stored as raw entries if resolved_entry['type'] == 'Computer': self.addomain.computers[member] = qobject return { "ObjectIdentifier": resolved_entry['objectid'], "ObjectType": resolved_entry['type'].capitalize() } @staticmethod def get_primary_membership(entry): """ Construct primary membership from RID to SID (BloodHound 3.0 only) """ try: primarygroupid = int(entry['attributes']['primaryGroupID']) except (TypeError, KeyError): # Doesn't have a primarygroupid, means it is probably a Group instead of a user return None return '%s-%d' % ('-'.join( entry['attributes']['objectSid'].split('-')[:-1]), primarygroupid) @staticmethod def add_user_properties(user, entry): """ Resolve properties for user objects """ props = user['Properties'] # print entry # Is user enabled? 
Checked by seeing if the UAC flag 2 (ACCOUNT_DISABLED) is not set props['enabled'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 2 == 0 props['lastlogon'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'lastLogon', default=0, raw=True)) props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True)) if props['lastlogontimestamp'] == 0: props['lastlogontimestamp'] = -1 props['pwdlastset'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True)) props['dontreqpreauth'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00400000 == 0x00400000 props['pwdneverexpires'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00010000 == 0x00010000 props['sensitive'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00100000 == 0x00100000 props['serviceprincipalnames'] = ADUtils.get_entry_property( entry, 'servicePrincipalName', []) props['hasspn'] = len(props['serviceprincipalnames']) > 0 props['displayname'] = ADUtils.get_entry_property(entry, 'displayName') props['email'] = ADUtils.get_entry_property(entry, 'mail') props['title'] = ADUtils.get_entry_property(entry, 'title') props['homedirectory'] = ADUtils.get_entry_property( entry, 'homeDirectory') props['description'] = ADUtils.get_entry_property(entry, 'description') props['userpassword'] = ADUtils.ensure_string( ADUtils.get_entry_property(entry, 'userPassword')) props['admincount'] = ADUtils.get_entry_property( entry, 'adminCount', 0) == 1 if len( ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])) > 0: props['allowedtodelegate'] = ADUtils.get_entry_property( entry, 'msDS-AllowedToDelegateTo', []) props['sidhistory'] = [ LDAP_SID(bsid).formatCanonical() for bsid in ADUtils.get_entry_property(entry, 'sIDHistory', []) ] # v4 props whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0) if isinstance(whencreated, int): props['whencreated'] = whencreated else: props['whencreated'] = calendar.timegm(whencreated.timetuple()) props['unixpassword'] = ADUtils.ensure_string( ADUtils.get_entry_property(entry, 'unixuserpassword')) props['unicodepassword'] = ADUtils.ensure_string( ADUtils.get_entry_property(entry, 'unicodepwd')) # Non-default schema? # props['sfupassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'msSFU30Password')) props['sfupassword'] = None def enumerate_users(self, timestamp=""): filename = timestamp + 'users.json' # Should we include extra properties in the query? 
with_properties = 'objectprops' in self.collect acl = 'acl' in self.collect entries = self.addc.get_users(include_properties=with_properties, acl=acl) logging.debug('Writing users to file: %s', filename) # Use a separate queue for processing the results self.result_q = queue.Queue() results_worker = threading.Thread( target=OutputWorker.membership_write_worker, args=(self.result_q, 'users', filename)) results_worker.daemon = True results_worker.start() if acl and not self.disable_pooling: self.aclenumerator.init_pool() # This loops over a generator, results are fetched from LDAP on the go for entry in entries: resolved_entry = ADUtils.resolve_ad_entry(entry) # Skip trust objects if resolved_entry['type'] == 'trustaccount': continue user = { "AllowedToDelegate": [], "ObjectIdentifier": ADUtils.get_entry_property(entry, 'objectSid'), "PrimaryGroupSID": MembershipEnumerator.get_primary_membership(entry), "Properties": { "name": resolved_entry['principal'], "domain": self.addomain.domain.upper(), "domainsid": self.addomain.domain_object.sid, "distinguishedname": ADUtils.get_entry_property(entry, 'distinguishedName').upper(), "unconstraineddelegation": ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000, "trustedtoauth": ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x01000000 == 0x01000000, "passwordnotreqd": ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00000020 == 0x00000020 }, "Aces": [], "SPNTargets": [], "HasSIDHistory": [], "IsDeleted": ADUtils.get_entry_property(entry, 'isDeleted', default=False) } if with_properties: MembershipEnumerator.add_user_properties(user, entry) if 'allowedtodelegate' in user['Properties']: for host in user['Properties']['allowedtodelegate']: try: target = host.split('/')[1] except IndexError: logging.warning('Invalid delegation target: %s', host) continue try: sid = self.addomain.computersidcache.get( target.lower()) user['AllowedToDelegate'].append(sid) except KeyError: if '.' in target: user['AllowedToDelegate'].append( target.upper()) # Parse SID history if len(user['Properties']['sidhistory']) > 0: for historysid in user['Properties']['sidhistory']: user['HasSIDHistory'].append( self.aceresolver.resolve_sid(historysid)) # If this is a GMSA, process it's ACL. 
We don't bother with threads/processes here # since these accounts shouldn't be that common and neither should they have very complex # DACLs which control who can read their password if ADUtils.get_entry_property( entry, 'msDS-GroupMSAMembership', default=b'', raw=True) != b'': self.parse_gmsa(user, entry) self.addomain.users[entry['dn']] = resolved_entry # If we are enumerating ACLs, we break out of the loop here # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses if acl: if self.disable_pooling: # Debug mode, don't run this pooled since it hides exceptions self.process_acldata( parse_binary_acl( user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map)) else: # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file self.aclenumerator.pool.apply_async( parse_binary_acl, args=(user, 'user', ADUtils.get_entry_property( entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata) else: # Write it to the queue -> write to file in separate thread # this is solely for consistency with acl parsing, the performance improvement is probably minimal self.result_q.put(user) self.write_default_users() # If we are parsing ACLs, close the parsing pool first # then close the result queue and join it if acl and not self.disable_pooling: self.aclenumerator.pool.close() self.aclenumerator.pool.join() self.result_q.put(None) else: self.result_q.put(None) self.result_q.join() logging.debug('Finished writing users') def enumerate_groups(self, timestamp=""): highvalue = [ "S-1-5-32-544", "S-1-5-32-550", "S-1-5-32-549", "S-1-5-32-551", "S-1-5-32-548" ] def is_highvalue(sid): if sid.endswith("-512") or sid.endswith("-516") or sid.endswith( "-519") or sid.endswith("-520"): return True if sid in highvalue: return True return False # Should we include extra properties in the query? with_properties = 'objectprops' in self.collect acl = 'acl' in self.collect filename = timestamp + 'groups.json' entries = self.addc.get_groups(include_properties=with_properties, acl=acl) logging.debug('Writing groups to file: %s', filename) # Use a separate queue for processing the results self.result_q = queue.Queue() results_worker = threading.Thread( target=OutputWorker.membership_write_worker, args=(self.result_q, 'groups', filename)) results_worker.daemon = True results_worker.start() if acl and not self.disable_pooling: self.aclenumerator.init_pool() for entry in entries: resolved_entry = ADUtils.resolve_ad_entry(entry) self.addomain.groups[entry['dn']] = resolved_entry try: sid = entry['attributes']['objectSid'] except KeyError: #Somehow we found a group without a sid? 
logging.warning('Could not determine SID for group %s', entry['attributes']['distinguishedName']) continue group = { "ObjectIdentifier": sid, "Properties": { "domain": self.addomain.domain.upper(), "domainsid": self.addomain.domain_object.sid, "name": resolved_entry['principal'], "distinguishedname": ADUtils.get_entry_property(entry, 'distinguishedName').upper() }, "Members": [], "Aces": [], "IsDeleted": ADUtils.get_entry_property(entry, 'isDeleted', default=False) } if sid in ADUtils.WELLKNOWN_SIDS: # Prefix it with the domain group['ObjectIdentifier'] = '%s-%s' % ( self.addomain.domain.upper(), sid) if with_properties: group['Properties']['admincount'] = ADUtils.get_entry_property( entry, 'adminCount', default=0) == 1 group['Properties'][ 'description'] = ADUtils.get_entry_property( entry, 'description') whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0) group['Properties']['whencreated'] = calendar.timegm( whencreated.timetuple()) for member in entry['attributes']['member']: resolved_member = self.get_membership(member) if resolved_member: group['Members'].append(resolved_member) # If we are enumerating ACLs, we break out of the loop here # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses if acl: if self.disable_pooling: # Debug mode, don't run this pooled since it hides exceptions self.process_acldata( parse_binary_acl( group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map)) else: # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file self.aclenumerator.pool.apply_async( parse_binary_acl, args=(group, 'group', ADUtils.get_entry_property( entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata) else: # Write it to the queue -> write to file in separate thread # this is solely for consistency with acl parsing, the performance improvement is probably minimal self.result_q.put(group) self.write_default_groups() # If we are parsing ACLs, close the parsing pool first # then close the result queue and join it if acl and not self.disable_pooling: self.aclenumerator.pool.close() self.aclenumerator.pool.join() self.result_q.put(None) else: self.result_q.put(None) self.result_q.join() logging.debug('Finished writing groups') def enumerate_computers_dconly(self, timestamp=""): ''' Enumerate computer objects. This function is only used if no collection was requested that required connecting to computers anyway. 
''' filename = timestamp + 'computers.json' acl = 'acl' in self.collect entries = self.addc.ad.computers.values() logging.debug('Writing computers ACL to file: %s', filename) # Use a separate queue for processing the results self.result_q = queue.Queue() results_worker = threading.Thread( target=OutputWorker.membership_write_worker, args=(self.result_q, 'computers', filename)) results_worker.daemon = True results_worker.start() if acl and not self.disable_pooling: self.aclenumerator.init_pool() # This loops over the cached entries for entry in entries: if not 'attributes' in entry: continue if 'dNSHostName' not in entry['attributes']: continue hostname = entry['attributes']['dNSHostName'] if not hostname: continue samname = entry['attributes']['sAMAccountName'] cobject = ADComputer(hostname=hostname, samname=samname, ad=self.addomain, addc=self.addc, objectsid=entry['attributes']['objectSid']) cobject.primarygroup = MembershipEnumerator.get_primary_membership( entry) computer = cobject.get_bloodhound_data(entry, self.collect, skip_acl=True) # If we are enumerating ACLs, we break out of the loop here # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses if acl: if self.disable_pooling: # Debug mode, don't run this pooled since it hides exceptions self.process_acldata( parse_binary_acl( computer, 'computer', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map)) else: # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file self.aclenumerator.pool.apply_async( parse_binary_acl, args=(computer, 'computer', ADUtils.get_entry_property( entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata) else: # Write it to the queue -> write to file in separate thread # this is solely for consistency with acl parsing, the performance improvement is probably minimal self.result_q.put(computer) # If we are parsing ACLs, close the parsing pool first # then close the result queue and join it if acl and not self.disable_pooling: self.aclenumerator.pool.close() self.aclenumerator.pool.join() self.result_q.put(None) else: self.result_q.put(None) self.result_q.join() logging.debug('Finished writing computers') def parse_gmsa(self, user, entry): """ Parse GMSA DACL which states which users can read the password """ _, aces = parse_binary_acl( user, 'user', ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', raw=True), self.addc.objecttype_guid_map) processed_aces = self.aceresolver.resolve_aces(aces) for ace in processed_aces: if ace['RightName'] == 'Owner': continue ace['RightName'] = 'ReadGMSAPassword' user['Aces'].append(ace) def process_acldata(self, result): """ Process ACLs that resulted from parsing with cstruct """ data, aces = result # Parse aces data['Aces'] += self.aceresolver.resolve_aces(aces) self.result_q.put(data) def write_default_users(self): """ Write built-in users to users.json file """ domainsid = self.addomain.domain_object.sid domainname = self.addomain.domain.upper() user = { "AllowedToDelegate": [], "ObjectIdentifier": "%s-S-1-5-20" % domainname, "PrimaryGroupSID": None, "Properties": { "domain": domainname, "domainsid": self.addomain.domain_object.sid, "name": "NT AUTHORITY@%s" % domainname, }, "Aces": [], "SPNTargets": [], "HasSIDHistory": [], "IsDeleted": False, "IsACLProtected": False, } self.result_q.put(user) def write_default_groups(self): """ Put default groups in the groups.json file 
""" # Domain controllers rootdomain = self.addc.get_root_domain().upper() entries = self.addc.get_domain_controllers() group = { "IsDeleted": False, "IsACLProtected": False, "ObjectIdentifier": "%s-S-1-5-9" % rootdomain, "Properties": { "domain": rootdomain.upper(), "name": "ENTERPRISE DOMAIN CONTROLLERS@%s" % rootdomain, }, "Members": [], "Aces": [] } for entry in entries: resolved_entry = ADUtils.resolve_ad_entry(entry) memberdata = { "ObjectIdentifier": resolved_entry['objectid'], "ObjectType": resolved_entry['type'].capitalize() } group["Members"].append(memberdata) self.result_q.put(group) domainsid = self.addomain.domain_object.sid domainname = self.addomain.domain.upper() # Everyone evgroup = { "IsDeleted": False, "IsACLProtected": False, "ObjectIdentifier": "%s-S-1-1-0" % domainname, "Properties": { "domain": domainname, "domainsid": self.addomain.domain_object.sid, "name": "EVERYONE@%s" % domainname, }, "Members": [], "Aces": [] } self.result_q.put(evgroup) # Authenticated users augroup = { "IsDeleted": False, "IsACLProtected": False, "ObjectIdentifier": "%s-S-1-5-11" % domainname, "Properties": { "domain": domainname, "domainsid": self.addomain.domain_object.sid, "name": "AUTHENTICATED USERS@%s" % domainname, }, "Members": [], "Aces": [] } self.result_q.put(augroup) # Interactive iugroup = { "IsDeleted": False, "IsACLProtected": False, "ObjectIdentifier": "%s-S-1-5-4" % domainname, "Properties": { "domain": domainname, "domainsid": self.addomain.domain_object.sid, "name": "INTERACTIVE@%s" % domainname, }, "Members": [], "Aces": [] } self.result_q.put(iugroup) def enumerate_memberships(self, timestamp=""): """ Run appropriate enumeration tasks """ self.enumerate_users(timestamp) self.enumerate_groups(timestamp) if not ('localadmin' in self.collect or 'session' in self.collect or 'loggedon' in self.collect or 'experimental' in self.collect): self.enumerate_computers_dconly(timestamp)
class ADComputer(object): """ Computer connected to Active Directory """ def __init__(self, hostname=None, samname=None, ad=None, addc=None, objectsid=None): self.ad = ad self.addc = addc self.samname = samname self.rpc = None self.dce = None self.admins = [] self.dcom = [] self.rdp = [] self.psremote = [] self.trusts = [] self.services = [] self.sessions = [] self.loggedon = [] self.addr = None self.smbconnection = None # The SID of the local domain self.sid = None # The SID within the domain self.objectsid = objectsid self.primarygroup = None if addc: self.aceresolver = AceResolver(ad, ad.objectresolver) # Did connecting to this host fail before? self.permanentfailure = False # Process invalid hosts if not hostname: self.hostname = '%s.%s' % (samname[:-1].upper(), self.ad.domain.upper()) else: self.hostname = hostname def get_bloodhound_data(self, entry, collect, skip_acl=False): data = { 'ObjectIdentifier': self.objectsid, 'AllowedToAct': [], 'PrimaryGroupSID': self.primarygroup, 'LocalAdmins': { 'Collected': 'localadmin' in collect and not self.permanentfailure, 'FailureReason': None, 'Results': self.admins, }, 'PSRemoteUsers': { 'Collected': 'psremote' in collect and not self.permanentfailure, 'FailureReason': None, 'Results': self.psremote }, 'Properties': { 'name': self.hostname.upper(), 'domainsid': self.ad.domain_object.sid, 'domain': self.ad.domain.upper(), 'distinguishedname': ADUtils.get_entry_property(entry, 'distinguishedName').upper() }, 'RemoteDesktopUsers': { 'Collected': 'rdp' in collect and not self.permanentfailure, 'FailureReason': None, 'Results': self.rdp }, 'DcomUsers': { 'Collected': 'dcom' in collect and not self.permanentfailure, 'FailureReason': None, 'Results': self.dcom }, 'AllowedToDelegate': [], 'Sessions': { 'Collected': 'session' in collect and not self.permanentfailure, 'FailureReason': None, 'Results': self.sessions }, 'PrivilegedSessions': { 'Collected': 'loggedon' in collect and not self.permanentfailure, 'FailureReason': None, 'Results': self.loggedon }, # Unsupported for now 'RegistrySessions': { 'Collected': False, 'FailureReason': None, 'Results': [] }, 'Aces': [], 'HasSIDHistory': [], 'IsDeleted': ADUtils.get_entry_property(entry, 'isDeleted', default=False), 'Status': None } props = data['Properties'] # via the TRUSTED_FOR_DELEGATION (0x00080000) flag in UAC props['unconstraineddelegation'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000 props['enabled'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 2 == 0 props['trustedtoauth'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x01000000 == 0x01000000 if 'objectprops' in collect or 'acl' in collect: props['haslaps'] = ADUtils.get_entry_property( entry, 'ms-mcs-admpwdexpirationtime', 0) != 0 if 'objectprops' in collect: props['lastlogon'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'lastlogon', default=0, raw=True)) props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True)) if props['lastlogontimestamp'] == 0: props['lastlogontimestamp'] = -1 props['pwdlastset'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True)) whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0) if not isinstance(whencreated, int): whencreated = calendar.timegm(whencreated.timetuple()) props['whencreated'] = whencreated props['serviceprincipalnames'] = 
ADUtils.get_entry_property( entry, 'servicePrincipalName', []) props['description'] = ADUtils.get_entry_property( entry, 'description') props['operatingsystem'] = ADUtils.get_entry_property( entry, 'operatingSystem') # Add SP to OS if specified servicepack = ADUtils.get_entry_property( entry, 'operatingSystemServicePack') if servicepack: props['operatingsystem'] = '%s %s' % (props['operatingsystem'], servicepack) props['sidhistory'] = [ LDAP_SID(bsid).formatCanonical() for bsid in ADUtils.get_entry_property(entry, 'sIDHistory', []) ] delegatehosts = ADUtils.get_entry_property( entry, 'msDS-AllowedToDelegateTo', []) for host in delegatehosts: try: target = host.split('/')[1] except IndexError: logging.warning('Invalid delegation target: %s', host) continue try: sid = self.ad.computersidcache.get(target.lower()) data['AllowedToDelegate'].append(sid) except KeyError: if '.' in target: data['AllowedToDelegate'].append(target.upper()) if len(delegatehosts) > 0: props['allowedtodelegate'] = delegatehosts # Process resource-based constrained delegation _, aces = parse_binary_acl( data, 'computer', ADUtils.get_entry_property( entry, 'msDS-AllowedToActOnBehalfOfOtherIdentity', raw=True), self.addc.objecttype_guid_map) outdata = self.aceresolver.resolve_aces(aces) for delegated in outdata: if delegated['RightName'] == 'Owner': continue if delegated['RightName'] == 'GenericAll': data['AllowedToAct'].append({ 'ObjectIdentifier': delegated['PrincipalSID'], 'ObjectType': delegated['PrincipalType'] }) # Run ACL collection if this was not already done centrally if 'acl' in collect and not skip_acl: _, aces = parse_binary_acl( data, 'computer', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map) # Parse aces data['Aces'] = self.aceresolver.resolve_aces(aces) return data def try_connect(self): addr = None try: addr = self.ad.dnscache.get(self.hostname) except KeyError: try: q = self.ad.dnsresolver.query(self.hostname, 'A', tcp=self.ad.dns_tcp) for r in q: addr = r.address if addr == None: return False # Do exit properly on keyboardinterrupts except KeyboardInterrupt: raise except Exception as e: # Doesn't exist if "None of DNS query names exist" in str(e): logging.info( 'Skipping enumeration for %s since it could not be resolved.', self.hostname) else: logging.warning('Could not resolve: %s: %s', self.hostname, e) return False logging.debug('Resolved: %s' % addr) self.ad.dnscache.put(self.hostname, addr) self.addr = addr logging.debug('Trying connecting to computer: %s', self.hostname) # We ping the host here, this adds a small overhead for setting up an extra socket # but saves us from constructing RPC Objects for non-existing hosts. Also RPC over # SMB does not support setting a connection timeout, so we catch this here. 
if ADUtils.tcp_ping(addr, 445) is False: return False return True def dce_rpc_connect(self, binding, uuid, integrity=False): if self.permanentfailure: logging.debug('Skipping connection because of previous failure') return None logging.debug('DCE/RPC binding: %s', binding) try: self.rpc = transport.DCERPCTransportFactory(binding) self.rpc.set_connect_timeout(1.0) if hasattr(self.rpc, 'set_credentials'): self.rpc.set_credentials(self.ad.auth.username, self.ad.auth.password, domain=self.ad.auth.domain, lmhash=self.ad.auth.lm_hash, nthash=self.ad.auth.nt_hash, aesKey=self.ad.auth.aes_key) # Use strict validation if possible if hasattr(self.rpc, 'set_hostname_validation'): self.rpc.set_hostname_validation(True, False, self.hostname) # TODO: check Kerberos support # if hasattr(self.rpc, 'set_kerberos'): # self.rpc.set_kerberos(True, self.ad.auth.kdc) # Uncomment to force SMB2 (especially for development to prevent encryption) # will break clients only supporting SMB1 ofc # self.rpc.preferred_dialect(smb3structs.SMB2_DIALECT_21) # Re-use the SMB connection if possible if self.smbconnection: self.rpc.set_smb_connection(self.smbconnection) dce = self.rpc.get_dce_rpc() # Some interfaces require integrity (such as scheduled tasks) # others don't support it at all and error out. if integrity: dce.set_auth_level(RPC_C_AUTHN_LEVEL_PKT_INTEGRITY) # Try connecting, catch hostname validation try: dce.connect() except HostnameValidationExceptions as exc: logging.info( 'Ignoring host %s since its hostname does not match: %s', self.hostname, str(exc)) self.permanentfailure = True return None if self.smbconnection is None: self.smbconnection = self.rpc.get_smb_connection() # We explicity set the smbconnection back to the rpc object # this way it won't be closed when we call disconnect() self.rpc.set_smb_connection(self.smbconnection) # Hostname validation authname = self.smbconnection.getServerName() if authname.lower() != self.hostname.split('.')[0].lower(): logging.info( 'Ignoring host %s since its reported name %s does not match', self.hostname, authname) self.permanentfailure = True return None # Implement encryption? # dce.set_auth_level(NTLM_AUTH_PKT_PRIVACY) dce.bind(uuid) except DCERPCException as e: logging.debug(traceback.format_exc()) logging.warning('DCE/RPC connection failed: %s', str(e)) return None except KeyboardInterrupt: raise except Exception as e: logging.debug(traceback.format_exc()) logging.warning('DCE/RPC connection failed: %s', e) return None except: logging.warning('DCE/RPC connection failed (unknown error)') return None return dce def rpc_get_loggedon(self): """ Query logged on users via RPC. 
Requires admin privs """ binding = r'ncacn_np:%s[\PIPE\wkssvc]' % self.addr loggedonusers = set() dce = self.dce_rpc_connect(binding, wkst.MSRPC_UUID_WKST) if dce is None: logging.warning('Connection failed: %s', binding) return try: # 1 means more detail, including the domain resp = wkst.hNetrWkstaUserEnum(dce, 1) for record in resp['UserInfo']['WkstaUserInfo']['Level1'][ 'Buffer']: # Skip computer accounts if record['wkui1_username'][-2] == '$': continue # Skip sessions for local accounts if record['wkui1_logon_domain'][:-1].upper( ) == self.samname.upper(): continue domain = record['wkui1_logon_domain'][:-1].upper() domain_entry = self.ad.get_domain_by_name(domain) if domain_entry is not None: domain = ADUtils.ldap2domain( domain_entry['attributes']['distinguishedName']) logging.debug( 'Found logged on user at %s: %s@%s' % (self.hostname, record['wkui1_username'][:-1], domain)) loggedonusers.add((record['wkui1_username'][:-1], domain)) except DCERPCException as e: if 'rpc_s_access_denied' in str(e): logging.debug( 'Access denied while enumerating LoggedOn on %s, probably no admin privs', self.hostname) else: logging.debug('Exception connecting to RPC: %s', e) except Exception as e: if 'connection reset' in str(e): logging.debug('Connection was reset: %s', e) else: raise e dce.disconnect() return list(loggedonusers) def rpc_close(self): if self.smbconnection: self.smbconnection.logoff() def rpc_get_sessions(self): binding = r'ncacn_np:%s[\PIPE\srvsvc]' % self.addr dce = self.dce_rpc_connect(binding, srvs.MSRPC_UUID_SRVS) if dce is None: return try: resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10) except DCERPCException as e: if 'rpc_s_access_denied' in str(e): logging.debug( 'Access denied while enumerating Sessions on %s, likely a patched OS', self.hostname) return [] else: raise except Exception as e: if str(e).find('Broken pipe') >= 0: return else: raise sessions = [] for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']: userName = session['sesi10_username'][:-1] ip = session['sesi10_cname'][:-1] # Strip \\ from IPs if ip[:2] == '\\\\': ip = ip[2:] # Skip empty IPs if ip == '': continue # Skip our connection if userName == self.ad.auth.username: continue # Skip empty usernames if len(userName) == 0: continue # Skip machine accounts if userName[-1] == '$': continue # Skip local connections if ip in ['127.0.0.1', '[::1]']: continue # IPv6 address if ip[0] == '[' and ip[-1] == ']': ip = ip[1:-1] logging.info('User %s is logged in on %s from %s' % (userName, self.hostname, ip)) sessions.append({ 'user': userName, 'source': ip, 'target': self.hostname }) dce.disconnect() return sessions """ """ def rpc_get_domain_trusts(self): binding = r'ncacn_np:%s[\PIPE\netlogon]' % self.addr dce = self.dce_rpc_connect(binding, nrpc.MSRPC_UUID_NRPC) if dce is None: return try: req = nrpc.DsrEnumerateDomainTrusts() req['ServerName'] = NULL req['Flags'] = 1 resp = dce.request(req) except Exception as e: raise e for domain in resp['Domains']['Domains']: logging.info('Found domain trust from %s to %s', self.hostname, domain['NetbiosDomainName']) self.trusts.append({ 'domain': domain['DnsDomainName'], 'type': domain['TrustType'], 'flags': domain['Flags'] }) dce.disconnect() def rpc_get_services(self): """ Query services with stored credentials via RPC. 
These credentials can be dumped with mimikatz via lsadump::secrets or via secretsdump.py """ binding = r'ncacn_np:%s[\PIPE\svcctl]' % self.addr serviceusers = [] dce = self.dce_rpc_connect(binding, scmr.MSRPC_UUID_SCMR) if dce is None: return serviceusers try: resp = scmr.hROpenSCManagerW(dce) scManagerHandle = resp['lpScHandle'] # TODO: Figure out if filtering out service types makes sense resp = scmr.hREnumServicesStatusW( dce, scManagerHandle, dwServiceType=scmr.SERVICE_WIN32_OWN_PROCESS, dwServiceState=scmr.SERVICE_STATE_ALL) # TODO: Skip well-known services to save on traffic for i in range(len(resp)): try: ans = scmr.hROpenServiceW(dce, scManagerHandle, resp[i]['lpServiceName'][:-1]) serviceHandle = ans['lpServiceHandle'] svcresp = scmr.hRQueryServiceConfigW(dce, serviceHandle) svc_user = svcresp['lpServiceConfig'][ 'lpServiceStartName'][:-1] if '@' in svc_user: logging.info( "Found user service: %s running as %s on %s", resp[i]['lpServiceName'][:-1], svc_user, self.hostname) serviceusers.append(svc_user) except DCERPCException as e: if 'rpc_s_access_denied' not in str(e): logging.debug( 'Exception querying service %s via RPC: %s', resp[i]['lpServiceName'][:-1], e) except DCERPCException as e: logging.debug('Exception connecting to RPC: %s', e) except Exception as e: if 'connection reset' in str(e): logging.debug('Connection was reset: %s', e) else: raise e dce.disconnect() return serviceusers def rpc_get_schtasks(self): """ Query the scheduled tasks via RPC. Requires admin privileges. These credentials can be dumped with mimikatz via vault::cred """ # Blacklisted folders (Default ones) blacklist = [u'Microsoft\x00'] # Start with the root folder folders = ['\\'] tasks = [] schtaskusers = [] binding = r'ncacn_np:%s[\PIPE\atsvc]' % self.addr try: dce = self.dce_rpc_connect(binding, tsch.MSRPC_UUID_TSCHS, True) if dce is None: return schtaskusers # Get root folder resp = tsch.hSchRpcEnumFolders(dce, '\\') for item in resp['pNames']: data = item['Data'] if data not in blacklist: folders.append('\\' + data) # Enumerate the folders we found # subfolders not supported yet for folder in folders: try: resp = tsch.hSchRpcEnumTasks(dce, folder) for item in resp['pNames']: data = item['Data'] if folder != '\\': # Make sure to strip the null byte tasks.append(folder[:-1] + '\\' + data) else: tasks.append(folder + data) except DCERPCException as e: logging.debug('Error enumerating task folder %s: %s', folder, e) for task in tasks: try: resp = tsch.hSchRpcRetrieveTask(dce, task) # This returns a tuple (sid, logontype) or None userinfo = ADUtils.parse_task_xml(resp['pXml']) if userinfo: if userinfo[1] == u'Password': # Convert to byte string because our cache format is in bytes schtaskusers.append(str(userinfo[0])) logging.info( 'Found scheduled task %s on %s with stored credentials for SID %s', task, self.hostname, userinfo[0]) except DCERPCException as e: logging.debug('Error querying task %s: %s', task, e) except DCERPCException as e: logging.debug('Exception enumerating scheduled tasks: %s', e) dce.disconnect() return schtaskusers """ This magic is mostly borrowed from impacket/examples/netview.py """ def rpc_get_group_members(self, group_rid, resultlist): binding = r'ncacn_np:%s[\PIPE\samr]' % self.addr unresolved = [] dce = self.dce_rpc_connect(binding, samr.MSRPC_UUID_SAMR) if dce is None: return try: resp = samr.hSamrConnect(dce) serverHandle = resp['ServerHandle'] # Attempt to get the SID from this computer to filter local accounts later try: resp = samr.hSamrLookupDomainInSamServer( dce, 
serverHandle, self.samname[:-1]) self.sid = resp['DomainId'].formatCanonical() # This doesn't always work (for example on DCs) except DCERPCException as e: # Make it a string which is guaranteed not to match a SID self.sid = 'UNKNOWN' # Enumerate the domains known to this computer resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle) domains = resp['Buffer']['Buffer'] # Query the builtin domain (derived from this SID) sid = RPC_SID() sid.fromCanonical('S-1-5-32') logging.debug('Opening domain handle') # Open a handle to this domain resp = samr.hSamrOpenDomain(dce, serverHandle=serverHandle, desiredAccess=samr.DOMAIN_LOOKUP | MAXIMUM_ALLOWED, domainId=sid) domainHandle = resp['DomainHandle'] try: resp = samr.hSamrOpenAlias( dce, domainHandle, desiredAccess=samr.ALIAS_LIST_MEMBERS | MAXIMUM_ALLOWED, aliasId=group_rid) except samr.DCERPCSessionError as error: # Group does not exist if 'STATUS_NO_SUCH_ALIAS' in str(error): logging.debug('No group with RID %d exists', group_rid) return resp = samr.hSamrGetMembersInAlias(dce, aliasHandle=resp['AliasHandle']) for member in resp['Members']['Sids']: sid_string = member['SidPointer'].formatCanonical() logging.debug('Found %d SID: %s', group_rid, sid_string) if not sid_string.startswith(self.sid): # If the sid is known, we can add the admin value directly try: siddata = self.ad.sidcache.get(sid_string) if siddata is None: unresolved.append(sid_string) else: logging.debug('Sid is cached: %s', siddata['principal']) resultlist.append({ 'ObjectIdentifier': sid_string, 'ObjectType': siddata['type'].capitalize() }) except KeyError: # Append it to the list of unresolved SIDs unresolved.append(sid_string) else: logging.debug('Ignoring local group %s', sid_string) except DCERPCException as e: if 'rpc_s_access_denied' in str(e): logging.debug( 'Access denied while enumerating groups on %s, likely a patched OS', self.hostname) else: raise except Exception as e: if 'connection reset' in str(e): logging.debug('Connection was reset: %s', e) else: raise e dce.disconnect() return unresolved def rpc_resolve_sids(self, sids, resultlist): """ Resolve any remaining unknown SIDs for local accounts. """ # If all sids were already cached, we can just return if sids is None or len(sids) == 0: return binding = r'ncacn_np:%s[\PIPE\lsarpc]' % self.addr dce = self.dce_rpc_connect(binding, lsat.MSRPC_UUID_LSAT) if dce is None: return try: resp = lsad.hLsarOpenPolicy2( dce, lsat.POLICY_LOOKUP_NAMES | MAXIMUM_ALLOWED) except Exception as e: if str(e).find('Broken pipe') >= 0: return else: raise policyHandle = resp['PolicyHandle'] # We could look up the SIDs all at once, but if not all SIDs are mapped, we don't know which # ones were resolved and which not, making it impossible to map them in the cache. # Therefor we use more SAMR calls at the start, but after a while most SIDs will be reliable # in our cache and this function doesn't even need to get called anymore. 
        for sid_string in sids:
            try:
                resp = lsat.hLsarLookupSids(dce, policyHandle, [sid_string],
                                            lsat.LSAP_LOOKUP_LEVEL.enumItems.LsapLookupWksta)
            except DCERPCException as e:
                if str(e).find('STATUS_NONE_MAPPED') >= 0:
                    logging.warning('SID %s lookup failed, return status: STATUS_NONE_MAPPED', sid_string)
                    # Try next SID
                    continue
                elif str(e).find('STATUS_SOME_NOT_MAPPED') >= 0:
                    # Not all could be resolved, work with the ones that could
                    resp = e.get_packet()
                else:
                    raise
            domains = []
            for entry in resp['ReferencedDomains']['Domains']:
                domains.append(entry['Name'])
            for entry in resp['TranslatedNames']['Names']:
                domain = domains[entry['DomainIndex']]
                domain_entry = self.ad.get_domain_by_name(domain)
                if domain_entry is not None:
                    domain = ADUtils.ldap2domain(
                        domain_entry['attributes']['distinguishedName'])
                # TODO: what if it isn't? Should we fall back to LDAP?
                if entry['Name'] != '':
                    resolved_entry = ADUtils.resolve_sid_entry(entry, domain)
                    logging.debug('Resolved SID to name: %s', resolved_entry['principal'])
                    resultlist.append({
                        'ObjectIdentifier': sid_string,
                        'ObjectType': resolved_entry['type'].capitalize()
                    })
                    # Add it to our cache
                    self.ad.sidcache.put(sid_string, resolved_entry)
                else:
                    logging.warning('Resolved name is empty [%s]', entry)
        dce.disconnect()
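# Illustrative sketch of the SID prefix filter used in rpc_get_group_members
# above: a member whose SID starts with the machine's own local domain SID is a
# local account and is skipped, anything else is a domain principal that gets
# resolved (from the cache or via rpc_resolve_sids). The helper and the sample
# SIDs are hypothetical and only demonstrate the check.
def _demo_is_local_member(machine_sid, member_sid):
    return member_sid.startswith(machine_sid)

# _demo_is_local_member('S-1-5-21-1111-2222-3333', 'S-1-5-21-1111-2222-3333-500') -> True (local account)
# _demo_is_local_member('S-1-5-21-1111-2222-3333', 'S-1-5-21-9999-8888-7777-512') -> False (domain principal)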
def dump_domain(self, collect, filename='domains.json'):
    """
    Dump trusts. This is currently the only domain info we support, so this
    function handles the entire domain dumping.
    """
    if 'trusts' in collect:
        entries = self.addc.get_trusts()
    else:
        entries = []

    try:
        logging.debug('Opening file for writing: %s' % filename)
        out = codecs.open(filename, 'w', 'utf-8')
    except:
        logging.warning('Could not write file: %s' % filename)
        return

    # If the logging level is DEBUG, we indent the objects
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        indent_level = 1
    else:
        indent_level = None

    # Todo: fix this properly. Current code is quick fix to work with domains
    # that have custom casing in their DN
    domain_object = None
    for domain in self.addomain.domains.keys():
        if domain.lower() == self.addomain.baseDN.lower():
            domain_object = self.addomain.domains[domain]
            break

    if not domain_object:
        logging.error('Could not find domain object. Aborting domain enumeration')
        return

    # Initialize json structure
    datastruct = {
        "domains": [],
        "meta": {
            "type": "domains",
            "count": 0,
            "version": 3
        }
    }
    # Get functional level
    level_id = ADUtils.get_entry_property(domain_object, 'msds-behavior-version')
    try:
        functional_level = ADUtils.FUNCTIONAL_LEVELS[int(level_id)]
    except KeyError:
        functional_level = 'Unknown'

    domain = {
        "ObjectIdentifier": domain_object['attributes']['objectSid'],
        "Properties": {
            "name": self.addomain.domain.upper(),
            "domain": self.addomain.domain.upper(),
            "highvalue": True,
            "objectid": ADUtils.get_entry_property(domain_object, 'objectSid'),
            "distinguishedname": ADUtils.get_entry_property(domain_object, 'distinguishedName'),
            "description": ADUtils.get_entry_property(domain_object, 'description'),
            "functionallevel": functional_level
        },
        "Trusts": [],
        "Aces": [],
        # The below is all for GPO collection, unsupported as of now.
        "Links": [],
        "Users": [],
        "Computers": [],
        "ChildOus": []
    }

    if 'acl' in collect:
        resolver = AceResolver(self.addomain, self.addomain.objectresolver)
        _, aces = parse_binary_acl(domain, 'domain',
                                   ADUtils.get_entry_property(domain_object, 'nTSecurityDescriptor'),
                                   self.addc.objecttype_guid_map)
        domain['Aces'] = resolver.resolve_aces(aces)

    if 'trusts' in collect:
        num_entries = 0
        for entry in entries:
            num_entries += 1
            trust = ADDomainTrust(ADUtils.get_entry_property(entry, 'name'),
                                  ADUtils.get_entry_property(entry, 'trustDirection'),
                                  ADUtils.get_entry_property(entry, 'trustType'),
                                  ADUtils.get_entry_property(entry, 'trustAttributes'),
                                  ADUtils.get_entry_property(entry, 'securityIdentifier'))
            domain['Trusts'].append(trust.to_output())

        logging.info('Found %u trusts', num_entries)

    # Single domain only
    datastruct['meta']['count'] = 1
    datastruct['domains'].append(domain)
    json.dump(datastruct, out, indent=indent_level)

    logging.debug('Finished writing domain info')
    out.close()
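# Illustrative sketch of the functional level lookup performed in dump_domain()
# above. The table below is a hypothetical stand-in for ADUtils.FUNCTIONAL_LEVELS
# (whose exact labels may differ) and follows the documented msDS-Behavior-Version
# values for domain functional levels; anything unmapped falls back to 'Unknown'.
_DEMO_FUNCTIONAL_LEVELS = {
    0: '2000 Mixed/Native',
    1: '2003 Interim',
    2: '2003',
    3: '2008',
    4: '2008 R2',
    5: '2012',
    6: '2012 R2',
    7: '2016',
}

def _demo_functional_level(level_id):
    try:
        return _DEMO_FUNCTIONAL_LEVELS[int(level_id)]
    except (KeyError, TypeError, ValueError):
        return 'Unknown'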
class MembershipEnumerator(object): """ Class to enumerate memberships in the domain. Contains the dumping functions which methods from the bloodhound.ad module. """ def __init__(self, addomain, addc, collect, disable_pooling): """ Membership enumeration. Enumerates all groups/users/other memberships. """ self.addomain = addomain self.addc = addc # Store collection methods specified self.collect = collect self.disable_pooling = disable_pooling self.aclenumerator = AclEnumerator(addomain, addc, collect) self.aceresolver = AceResolver(addomain, addomain.objectresolver) def get_membership(self, member): # First assume it is a user try: resolved_entry = self.addomain.users[member] except KeyError: # Try if it is a group try: resolved_entry = self.addomain.groups[member] except KeyError: # Try if it is a computer try: entry = self.addomain.computers[member] # Computers are stored as raw entries resolved_entry = ADUtils.resolve_ad_entry(entry) except KeyError: use_gc = ADUtils.ldap2domain( member) != self.addomain.domain qobject = self.addomain.objectresolver.resolve_distinguishedname( member, use_gc=use_gc) if qobject is None: return resolved_entry = ADUtils.resolve_ad_entry(qobject) # Store it in the cache if resolved_entry['type'] == 'user': self.addomain.users[member] = resolved_entry if resolved_entry['type'] == 'group': self.addomain.groups[member] = resolved_entry # Computers are stored as raw entries if resolved_entry['type'] == 'computer': self.addomain.computers[member] = qobject return { "MemberName": resolved_entry['principal'], "MemberType": resolved_entry['type'].lower() } def get_primary_membership(self, entry): """ Looks up the primary membership based on RID. Resolves it if needed """ try: primarygroupid = int(entry['attributes']['primaryGroupID']) except (TypeError, KeyError): # Doesn't have a primarygroupid, means it is probably a Group instead of a user return try: group = self.addomain.groups[ self.addomain.groups_dnmap[primarygroupid]] return group['principal'] except KeyError: # Look it up # Construct group sid by taking the domain sid, removing the user rid and appending the group rid groupsid = '%s-%d' % ('-'.join( entry['attributes']['objectSid'].split('-')[:-1]), primarygroupid) group = self.addomain.objectresolver.resolve_sid(groupsid, use_gc=False) if group is None: logging.warning('Warning: Unknown primarygroupid %d', primarygroupid) return None resolved_entry = ADUtils.resolve_ad_entry(group) self.addomain.groups[group['attributes'] ['distinguishedName']] = resolved_entry self.addomain.groups_dnmap[primarygroupid] = group['attributes'][ 'distinguishedName'] return resolved_entry['principal'] @staticmethod def add_user_properties(user, entry): props = user['Properties'] # print entry # Is user enabled? 
Checked by seeing if the UAC flag 2 (ACCOUNT_DISABLED) is not set props['enabled'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 2 == 0 props['lastlogon'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'lastLogon', default=0, raw=True)) props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True)) props['pwdlastset'] = ADUtils.win_timestamp_to_unix( ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True)) props['dontreqpreauth'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00400000 == 0x00400000 props['sensitive'] = ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00100000 == 0x00100000 props['serviceprincipalnames'] = ADUtils.get_entry_property( entry, 'servicePrincipalName', []) props['hasspn'] = len(props['serviceprincipalnames']) > 0 props['displayname'] = ADUtils.get_entry_property(entry, 'displayName') props['email'] = ADUtils.get_entry_property(entry, 'mail') props['title'] = ADUtils.get_entry_property(entry, 'title') props['homedirectory'] = ADUtils.get_entry_property( entry, 'homeDirectory') props['description'] = ADUtils.get_entry_property(entry, 'description') props['userpassword'] = ADUtils.get_entry_property( entry, 'userPassword') props['admincount'] = ADUtils.get_entry_property( entry, 'adminCount', 0) == 1 def enumerate_users(self): filename = 'users.json' # Should we include extra properties in the query? with_properties = 'objectprops' in self.collect acl = 'acl' in self.collect entries = self.addc.get_users(include_properties=with_properties, acl=acl) logging.debug('Writing users to file: %s', filename) # Use a separate queue for processing the results self.result_q = queue.Queue() results_worker = threading.Thread( target=OutputWorker.membership_write_worker, args=(self.result_q, 'users', filename)) results_worker.daemon = True results_worker.start() if acl and not self.disable_pooling: self.aclenumerator.init_pool() # This loops over a generator, results are fetched from LDAP on the go for entry in entries: resolved_entry = ADUtils.resolve_ad_entry(entry) user = { "Name": resolved_entry['principal'], "PrimaryGroup": self.get_primary_membership(entry), "Properties": { "domain": self.addomain.domain.upper(), "objectsid": entry['attributes']['objectSid'], "highvalue": False, "unconstraineddelegation": ADUtils.get_entry_property( entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000 }, "Aces": [] } if with_properties: MembershipEnumerator.add_user_properties(user, entry) self.addomain.users[entry['dn']] = resolved_entry # If we are enumerating ACLs, we break out of the loop here # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses if acl: if self.disable_pooling: # Debug mode, don't run this pooled since it hides exceptions self.process_stuff( parse_binary_acl( user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True))) else: # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file self.aclenumerator.pool.apply_async( parse_binary_acl, args=(user, 'user', ADUtils.get_entry_property( entry, 'nTSecurityDescriptor', raw=True)), callback=self.process_stuff) else: # Write it to the queue -> write to file in separate thread # this is solely for consistency with acl parsing, the performance improvement is probably minimal self.result_q.put(user) # If we are 
parsing ACLs, close the parsing pool first # then close the result queue and join it if acl and not self.disable_pooling: self.aclenumerator.pool.close() self.aclenumerator.pool.join() self.result_q.put(None) else: self.result_q.put(None) self.result_q.join() logging.debug('Finished writing users') def enumerate_groups(self): highvalue = [ "S-1-5-32-544", "S-1-5-32-550", "S-1-5-32-549", "S-1-5-32-551", "S-1-5-32-548" ] def is_highvalue(sid): if sid.endswith("-512") or sid.endswith("-516") or sid.endswith( "-519") or sid.endswith("-520"): return True if sid in highvalue: return True return False # Should we include extra properties in the query? with_properties = 'objectprops' in self.collect acl = 'acl' in self.collect filename = 'groups.json' entries = self.addc.get_groups(include_properties=with_properties, acl=acl) logging.debug('Writing groups to file: %s' % filename) # Use a separate queue for processing the results self.result_q = queue.Queue() results_worker = threading.Thread( target=OutputWorker.membership_write_worker, args=(self.result_q, 'groups', filename)) results_worker.daemon = True results_worker.start() if acl and not self.disable_pooling: self.aclenumerator.init_pool() for entry in entries: resolved_entry = ADUtils.resolve_ad_entry(entry) self.addomain.groups[entry['dn']] = resolved_entry try: sid = entry['attributes']['objectSid'] except KeyError: #Somehow we found a group without a sid? logging.warning('Could not determine SID for group %s' % entry['attributes']['distinguishedName']) continue group = { "Name": resolved_entry['principal'], "Properties": { "domain": self.addomain.domain.upper(), "objectsid": sid, "highvalue": is_highvalue(sid) }, "Members": [], "Aces": [] } if with_properties: group['Properties']['admincount'] = ADUtils.get_entry_property( entry, 'adminCount', default=0) == 1 group['Properties'][ 'description'] = ADUtils.get_entry_property( entry, 'description') for member in entry['attributes']['member']: resolved_member = self.get_membership(member) if resolved_member: group['Members'].append(resolved_member) # If we are enumerating ACLs, we break out of the loop here # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses if acl: if self.disable_pooling: # Debug mode, don't run this pooled since it hides exceptions self.process_stuff( parse_binary_acl( group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True))) else: # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file self.aclenumerator.pool.apply_async( parse_binary_acl, args=(group, 'group', ADUtils.get_entry_property( entry, 'nTSecurityDescriptor', raw=True)), callback=self.process_stuff) else: # Write it to the queue -> write to file in separate thread # this is solely for consistency with acl parsing, the performance improvement is probably minimal self.result_q.put(group) # If we are parsing ACLs, close the parsing pool first # then close the result queue and join it if acl and not self.disable_pooling: self.aclenumerator.pool.close() self.aclenumerator.pool.join() self.result_q.put(None) else: self.result_q.put(None) self.result_q.join() logging.debug('Finished writing groups') def process_stuff(self, result): data, aces = result # Parse aces data['Aces'] = self.aceresolver.resolve_aces(aces) self.result_q.put(data) # logging.debug('returned stuff') def enumerate_memberships(self): self.enumerate_users() self.enumerate_groups()
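# Illustrative sketch gathering the userAccountControl bit masks tested by the
# enumerators above (add_user_properties, get_bloodhound_data). The flag names
# follow the standard UAC flag names; the _demo_ helper itself is hypothetical
# and not part of the collector.
_DEMO_UAC_FLAGS = {
    'ACCOUNTDISABLE': 0x00000002,                  # inverted into props['enabled']
    'PASSWD_NOTREQD': 0x00000020,                  # props['passwordnotreqd']
    'DONT_EXPIRE_PASSWORD': 0x00010000,            # props['pwdneverexpires']
    'TRUSTED_FOR_DELEGATION': 0x00080000,          # props['unconstraineddelegation']
    'NOT_DELEGATED': 0x00100000,                   # props['sensitive']
    'DONT_REQ_PREAUTH': 0x00400000,                # props['dontreqpreauth']
    'TRUSTED_TO_AUTH_FOR_DELEGATION': 0x01000000,  # props['trustedtoauth']
}

def _demo_decode_uac(value):
    """Return which UAC flags are set in the given integer value."""
    return {name: bool(value & mask) for name, mask in _DEMO_UAC_FLAGS.items()}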