def __init__(self, local_path=None):
    """Set up ATT&CK data sources.

    When ``local_path`` holds all three local STIX collection folders,
    read from disk; otherwise fall back to the public ATT&CK TAXII
    collections.  Either way, expose a combined ``COMPOSITE_DS``.
    """
    def _local(subdir):
        return os.path.join(local_path, subdir)

    have_local = local_path is not None and all(
        os.path.isdir(_local(d)) for d in
        (ENTERPRISE_ATTCK_LOCAL_DIR, PRE_ATTCK_LOCAL_DIR,
         MOBILE_ATTCK_LOCAL_DIR))

    if have_local:
        self.TC_ENTERPRISE_SOURCE = FileSystemSource(
            _local(ENTERPRISE_ATTCK_LOCAL_DIR))
        self.TC_PRE_SOURCE = FileSystemSource(_local(PRE_ATTCK_LOCAL_DIR))
        self.TC_MOBILE_SOURCE = FileSystemSource(
            _local(MOBILE_ATTCK_LOCAL_DIR))
    else:
        self.TC_ENTERPRISE_SOURCE = TAXIICollectionSource(
            Collection(ATTCK_STIX_COLLECTIONS + ENTERPRISE_ATTCK + "/"))
        self.TC_PRE_SOURCE = TAXIICollectionSource(
            Collection(ATTCK_STIX_COLLECTIONS + PRE_ATTCK + "/"))
        self.TC_MOBILE_SOURCE = TAXIICollectionSource(
            Collection(ATTCK_STIX_COLLECTIONS + MOBILE_ATTCK + "/"))

    self.COMPOSITE_DS = CompositeDataSource()
    self.COMPOSITE_DS.add_data_sources([
        self.TC_ENTERPRISE_SOURCE, self.TC_PRE_SOURCE, self.TC_MOBILE_SOURCE
    ])
def __init__(self, services, ac_data_svc):
    """Capture service handles, start logging, load config, open the CTI source."""
    self.ac_data_svc = ac_data_svc
    self.auth_svc = services.get('auth_svc')
    self.data_svc = services.get('data_svc')
    log = Logger('atomiccaldera')
    log.debug('Atomic-Caldera Plugin Logging started.')
    self.log = log
    # get_conf() populates self.ctipath, needed by the STIX source below.
    self.get_conf()
    self.fs = FileSystemSource(self.ctipath)
def get_technique_and_mitigation_relationships() -> List[CourseOfAction]:
    """Return every 'mitigates' relationship in the bundled ATT&CK data.

    NOTE(review): the query yields relationship objects; the
    ``List[CourseOfAction]`` annotation looks off — confirm with callers.
    """
    source = FileSystemSource(MitreApiInterface.ATTACK_DATA_PATH)
    return source.query([
        Filter("type", "=", "relationship"),
        Filter("relationship_type", "=", "mitigates"),
    ])
def get_technique_and_mitigation_relationships() -> List[CourseOfAction]:
    """Query all 'mitigates' relationships from the ATT&CK STIX data."""
    mitigates = [
        Filter('type', '=', 'relationship'),
        Filter('relationship_type', '=', 'mitigates'),
    ]
    source = FileSystemSource(MitreApiInterface.ATTACK_DATA_PATH)
    return source.query(mitigates)
def get_technique_and_mitigation_relationships(
        attack_data_path: Path) -> List[CourseOfAction]:
    """Fetch every 'mitigates' relationship from the data at *attack_data_path*."""
    source = FileSystemSource(attack_data_path)
    return source.query([
        Filter("type", "=", "relationship"),
        Filter("relationship_type", "=", "mitigates"),
    ])
def load_datasource(base=CTI_BASE):
    """Build a CompositeDataSource over the enterprise and mobile ATT&CK dirs."""
    print("Loading CTI datasources map...")
    sources = [
        FileSystemSource(os.path.join(base, name))
        for name in ("enterprise-attack", "mobile-attack")
    ]
    composite = CompositeDataSource()
    composite.add_data_sources(sources)
    return composite
def get_all_mitigations() -> Dict[str, CourseOfAction]:
    """Return all course-of-action objects, keyed by their STIX id."""
    source = FileSystemSource(MitreApiInterface.ATTACK_DATA_PATH)
    found = source.query([Filter('type', '=', 'course-of-action')])
    return {mitigation['id']: mitigation for mitigation in found}
def get_all_attack_techniques() -> Dict[str, AttackPattern]:
    """Return all attack-pattern objects, keyed by their STIX id."""
    source = FileSystemSource(MitreApiInterface.ATTACK_DATA_PATH)
    found = source.query([Filter('type', '=', 'attack-pattern')])
    return {technique['id']: technique for technique in found}
def get_all_mitigations(attack_data_path: Path) -> Dict[str, CourseOfAction]:
    """Map STIX id -> course-of-action for all mitigations at *attack_data_path*."""
    source = FileSystemSource(attack_data_path)
    found = source.query([Filter("type", "=", "course-of-action")])
    return {mitigation["id"]: mitigation for mitigation in found}
def get_all_attack_techniques(
        attack_data_path: Path) -> Dict[str, AttackPattern]:
    """Map STIX id -> attack-pattern for all techniques at *attack_data_path*."""
    source = FileSystemSource(attack_data_path)
    found = source.query([Filter("type", "=", "attack-pattern")])
    return {technique["id"]: technique for technique in found}
def __init__(self, services):
    """Wire services, logging, config, the CTI source, and the stockpile path."""
    self.data_svc = services.get('data_svc')
    self.auth_svc = services.get('auth_svc')
    self.log = Logger('abilitymanager')
    self.log.debug('Ability Manager Plugin logging started.')
    # get_conf() populates self.ctipath used by the STIX source below.
    self.get_conf()
    self.fs = FileSystemSource(self.ctipath)
    plugin_dir = os.path.dirname(os.path.realpath(__file__))
    # NOTE(review): double slash in 'stockpile//data' kept as-is (harmless on POSIX).
    self.stockPath = os.path.join(plugin_dir,
                                  '../../stockpile//data/abilities/')
def __init__(self):
    """Load the validator scheme/config YAMLs and the ATT&CK STIX source,
    then register required fields and warning checks with the validators."""
    # initialize the file system source for the MITRE ATT&CK data
    self.fs = FileSystemSource(MITRE_STIX_DATA_PATH)
    with open(VALIDATOR_YAML_PATH, "r") as yaml_file:
        self.scheme = yaml.safe_load(yaml_file)
    with open(CONFIGURATION_YAML_PATH, "r") as config_file:
        self.yara_config = yaml.safe_load(config_file)
    self.validators = Validators()
    self.required_fields = {}
    # import_yara_cfg() populates required_fields; must run before the
    # positional index below is sized from it.
    self.import_yara_cfg()
    self.required_fields_index = [
        Positional(i) for i in range(len(self.required_fields))
    ]
    self.category_types = self.__parse_scheme('category_types')
    self.mitre_group_alias = None
    self.required_fields_children = {}
    self.validators.update(self.required_fields, self.required_fields_index,
                           self.required_fields_children, self.category_types,
                           self.mitre_group_alias)
    # Warning checks run in this order; each returns advisory findings only.
    self.warning_functions = [
        self.warning_author_no_report_check, self.warning_author_no_hash_check,
        self.warning_actor_no_mitre_group
    ]
def __init__(self, stix_data_path, validator_yaml, validator_yaml_values):
    """Load validator YAMLs and the ATT&CK STIX source from explicit paths.

    :param stix_data_path: directory holding the MITRE ATT&CK STIX data
    :param validator_yaml: path to the yara-validator configuration YAML
    :param validator_yaml_values: path to the scheme/values YAML
    """
    # initialize the file system source for the MITRE ATT&CK data
    self.STIX_DATA_PATH = stix_data_path
    self.fs = FileSystemSource(self.STIX_DATA_PATH)
    self.validator_yaml_values = validator_yaml_values
    with open(validator_yaml_values, 'r', encoding='utf8') as yaml_file:
        self.scheme = yaml.safe_load(yaml_file)
    self.validator_yaml = validator_yaml
    with open(validator_yaml, 'r', encoding='utf8') as config_file:
        self.yara_config = yaml.safe_load(config_file)
    self.validators = Validators()
    self.required_fields = {}
    # Regexes controlling which metadata keys are collected vs. filtered out.
    self.metadata_keys_regex = r''
    self.metadata_keys_filter = r'^malware_type$|^actor_type$'
    # import_yara_cfg() populates required_fields; must run before the
    # positional index below is sized from it.
    self.import_yara_cfg()
    self.required_fields_index = [
        Positional(i) for i in range(len(self.required_fields))
    ]
    self.category_types = self.__parse_scheme('category_types')
    self.required_fields_children = {}
    self.validators.update(self.required_fields, self.required_fields_index,
                           self.required_fields_children, self.category_types)
    # Warning checks run in this order; each returns advisory findings only.
    self.warning_functions = [
        self.warning_author_no_report_check, self.warning_author_no_hash_check,
        self.warning_actor_no_mitre_group, self.warning_no_category_type,
        self.warning_common_metadata_errors
    ]
def fs_source():
    """Yield a FileSystemSource over FS_PATH, then remove the campaign dir."""
    source = FileSystemSource(FS_PATH)
    assert source.stix_dir == FS_PATH
    yield source
    # Teardown: best-effort removal of the campaign directory (errors ignored).
    shutil.rmtree(os.path.join(FS_PATH, "campaign"), True)
def get_map(base=CTI_BASE):
    """Build a ``{attack_id: info}`` map of all ATT&CK attack-patterns.

    Walks the enterprise- and mobile-attack STIX datasources under *base*,
    skipping revoked techniques and any entry without a recognized MITRE
    external ID.

    :param base: root directory of the MITRE CTI checkout
    :returns: dict keyed by ATT&CK ID (e.g. ``T1059``) with name,
        categories, description, platforms, and the attack_id itself
    """
    from stix2 import FileSystemSource, CompositeDataSource, Filter
    print("Loading CTI attack-pattern map...")
    enterprise_attack_fs = FileSystemSource(
        os.path.join(base, "enterprise-attack"))
    mobile_attack_fs = FileSystemSource(os.path.join(base, "mobile-attack"))
    composite_ds = CompositeDataSource()
    composite_ds.add_data_sources([enterprise_attack_fs, mobile_attack_fs])
    filt = Filter('type', '=', 'attack-pattern')
    attack_map = {}
    for item in composite_ds.query(filt):
        name = item['name']
        # BUG FIX: 'revoked' is optional on STIX objects; direct indexing
        # raised KeyError for techniques that never set it.  Treat a
        # missing 'revoked' as "not revoked".
        if item.get('revoked', False):
            print(
                f"[WARN] Ignored {name.upper()}: This attack-pattern has been revoked."
            )
            continue
        categories = [x['phase_name'] for x in item['kill_chain_phases']]
        desc = item['description']
        platforms = item['x_mitre_platforms']
        attack_id = None
        for er in item['external_references']:
            if er['source_name'] in [
                    "mitre-attack", "mobile-mitre-attack",
                    "mitre-mobile-attack"
            ]:
                attack_id = er['external_id']
        if attack_id:
            attack_map[attack_id] = {
                "name": name,
                "categories": categories,
                "description": desc,
                "platforms": platforms,
                "attack_id": attack_id
            }
            print(f"\tAdding {name.upper()} as ID: {attack_id}")
        else:
            print(f"[ERR] Ignored {name.upper()}: No attack ID found.")
    return attack_map
def __init__(
    self,
    base_cti_path: Path = Path(__file__).parents[1] / "cti",
    log_path: Path = Path("~/riskmap.log").expanduser(),
) -> None:
    """Configure the composite CTI sources and file-based logging.

    :param base_cti_path: root of the MITRE CTI checkout
    :param log_path: destination file for log messages
    """
    self.base_cti_path = base_cti_path
    self.log_path = log_path
    # One file-system source per collection, merged into a composite view.
    self.src = CompositeDataSource()
    for collection in ("enterprise-attack", "mobile-attack", "ics-attack",
                       "capec"):
        self.src.add_data_source(FileSystemSource(base_cti_path / collection))
    logger.remove()
    logger.add(sink=self.log_path, format="{message}")
def __init__(self, cti_folder):
    """Open the local MITRE CTI checkout, refresh it best-effort, and build
    a composite STIX source over the enterprise/pre/mobile collections.

    :param cti_folder: path (expected to end with a separator) to the
        cti git clone
    """
    self.cti_folder = cti_folder
    repo = Repo(cti_folder)
    origin = repo.remotes.origin
    try:
        origin.pull()
    except Exception:
        # Best-effort refresh only: when offline (or the repo cannot be
        # pulled) fall back to whatever data is already on disk.
        # (Previously bound the exception to an unused variable.)
        pass
    # NOTE(review): plain concatenation assumes cti_folder has a trailing
    # separator — confirm with callers.
    enterprise_attack_fs = FileSystemSource(cti_folder + "enterprise-attack")
    pre_attack_fs = FileSystemSource(cti_folder + "pre-attack")
    mobile_attack_fs = FileSystemSource(cti_folder + "mobile-attack")
    self.src = CompositeDataSource()
    self.src.add_data_sources(
        [enterprise_attack_fs, pre_attack_fs, mobile_attack_fs])
    # Columns exposed for the techniques view.
    self.columns_list = {
        "techniques": [
            'mitre_id', 'tactics', 'name', 'permissions_required', 'platforms'
        ]
    }
def get_all_techniques_for_groups(projects_path):
    """Return (techniques used by any group, all techniques).

    :param projects_path: directory containing the 'cti' checkout
    :returns: tuple of (group-attributed techniques, every technique)
    """
    fs = FileSystemSource(path.join(projects_path, 'cti/enterprise-attack'))
    all_techniques = get_all_techniques(fs)
    techniques = []
    for group_obj in get_all_groups(fs):
        techniques.extend(get_technique_by_group(fs, group_obj))
    return techniques, all_techniques
return relations def get_references_for_techniques(relations): # get all reports from relations reports = [] for i in relations: try: for j in i.external_references: reports.append(j.url) except: pass return reports fs = FileSystemSource('/home/zen/cti/enterprise-attack') techniques = get_all_techniques(fs) tech_reports = {} T = {} for i in techniques: tech_reports[i.name] = get_relations_for_techniques(fs, i) # do some filtering temp = {} for i in tech_reports: if len(tech_reports[i]) > 0: temp[i] = tech_reports[i] # get the references from the relationships tech_reports = {} for i in temp:
# itertools is part of the Python standard library (no install needed).
from itertools import chain
# FileSystemSource and Filter come from the third-party `stix2` package
# (pip install stix2).  Filter was previously used without being imported,
# which raised NameError when get_all_software ran.
from stix2 import FileSystemSource, Filter

fs = FileSystemSource('./cti/enterprise-attack')


def get_all_software(src):
    """Return every malware and tool object from *src*."""
    filts = [[Filter('type', '=', 'malware')], [Filter('type', '=', 'tool')]]
    return list(chain.from_iterable(src.query(f) for f in filts))


get_all_software(fs)
def get_all_techniques(projects_path):
    """Return all techniques from the enterprise-attack data under *projects_path*."""
    cti_dir = path.join(projects_path, 'cti/enterprise-attack')
    return get_techniques(FileSystemSource(cti_dir))
def test_filesystem_source_nonexistent_folder():
    """FileSystemSource must reject a stix_dir path that does not exist."""
    with pytest.raises(ValueError) as excinfo:
        FileSystemSource('nonexistent-folder')
    # FIX: check the exception message itself (.value), not str(excinfo),
    # which stringifies the ExceptionInfo/traceback wrapper.
    assert "for STIX data does not exist" in str(excinfo.value)
# Demo script: ad-hoc queries against a local ATT&CK enterprise STIX dump.
from stix2 import FileSystemSource

fs = FileSystemSource('./enterprise-attack')

from stix2 import Filter

# Technique filter (only used by the commented-out queries below).
filt = Filter('type', '=', 'attack-pattern')

# Print every malware object named 'Emotet'.
malwares = fs.query(Filter("type", "=", 'malware'))
[print(m) for m in malwares if m.name == 'Emotet']
# print(malwares[3].name)

# * Query relationships
all_rs = fs.query(Filter("type", "=", 'relationship'))
# print(all_rs[3])
# Keep only relationships whose source is the Emotet malware object.
relationships = [
    r for r in all_rs
    if r.source_ref == 'malware--32066e94-3112-48ca-b9eb-ba2b59d2f023'
]
print(relationships)
# print(type(relationships))

# * Query relationships
# all_rs = fs.query(Filter("type", "=", 'relationship'))
# [print(r) for r in all_rs if r.target_ref == 'malware--32066e94-3112-48ca-b9eb-ba2b59d2f023']

# * Query techniques
# techniques = fs.query([filt])
# print(techniques[0].x_mitre_data_sources)
# [print(t) for t in techniques]

# * Query software
# from itertools import chain
capecId = "" cweIds = [] for external_reference in result["external_references"]: if external_reference["source_name"] == "capec": # print(external_reference["external_id"], end=" ") capecId = external_reference["external_id"] if external_reference["source_name"] == "cwe": # print(external_reference["external_id"], end=" ") cweIds.append(external_reference["external_id"]) print(capecId, cweIds) txn = client.txn() res = txn.query(query, variables={'$name':capecId}) capecUid = json.loads(res.json)["q"][0]["uid"] for cweId in cweIds: res = txn.query(query, variables={'$name':cweId}) if len(json.loads(res.json)["q"]) > 0: cweUid = json.loads(res.json)["q"][0]["uid"] txn.mutate(set_nquads='<' + cweUid + '> <capec> <' + capecUid + '> .') txn.commit() print("cwe to capec relations created") if __name__ == "__main__": fs = FileSystemSource('./att&ck/cti-ATT-CK-v8.2/capec') client_stub = pydgraph.DgraphClientStub('localhost:9080') client = pydgraph.DgraphClient(client_stub) initGraphTypes(client) load(client, fs) # total: 581 link2cwe(client, fs)
for result in results: attackTechniqueId = "" capecIds = [] for external_reference in result["external_references"]: if external_reference["source_name"] == "mitre-attack": attackTechniqueId = external_reference["external_id"] elif external_reference["source_name"] == "capec": capecIds.append(external_reference["external_id"]) print(attackTechniqueId, capecIds) txn = client.txn() res = txn.query(query, variables={'$name':attackTechniqueId}) attackTechniqueUid = json.loads(res.json)["q"][0]["uid"] for capecId in capecIds: res = txn.query(query, variables={'$name':capecId}) if len(json.loads(res.json)["q"]) > 0: capecUid = json.loads(res.json)["q"][0]["uid"] txn.mutate(set_nquads='<' + capecUid + '> <attack_technique> <' + attackTechniqueUid + '> .') txn.commit() print("capec to att&ck technique relations created") if __name__ == '__main__': client_stub = pydgraph.DgraphClientStub('localhost:9080') client = pydgraph.DgraphClient(client_stub) fs = FileSystemSource('./att&ck/cti-ATT-CK-v8.2/enterprise-attack') initGraphTypes(client) load(client, fs) link2capec(client, fs)
def main(args, config): """ Wrapper to run all components of CDAS Calls context, agents, and asset builders to create simulation componenets. Passes resulting components to simulation module. Manages output. Parameters ---------- args : list The arguments passed in from argparse or read from the configuration file in the arguments method config : dict The configuration file opened and loaded from json """ # Set up the Output directory if os.path.isdir(args.output): q = (f"Overwrite the output folder {os.getcwd() + '/' + args.output}? " f"(y/n) ") else: q = f"Output path {os.getcwd() + '/' + args.output} does not exist.\n\ Create this directory? (y/n) " if not args.overwrite_output: answer = "" while answer not in ['y', 'n']: answer = input(q) else: answer = 'y' if answer == 'n': sys.exit(f"CDAS exited without completing") else: if os.path.isdir(args.output): for filename in os.listdir(args.output): file_path = os.path.join(args.output, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print('Failed to delete %s. %s' % (file_path, e)) else: os.mkdir(args.output) # Set up the STIX data stores # Check if it's okay to overwrite the contents of the temporary data store temp_path = pkg_resources.resource_filename(__name__, config['temp_path']) if os.path.isdir(temp_path): q = f"Overwrite temporary stix data folder ({temp_path})? 
(y/n) " overwrite = input(q) if overwrite == 'n': print(f"Rename the 'temp path' variable in config file and \ restart the simulation.") sys.exit() elif overwrite == 'y': shutil.rmtree(temp_path) os.mkdir(temp_path) else: overwrite = input(q) else: os.mkdir(temp_path) fs_gen = FileSystemStore(temp_path) fs_real = FileSystemSource( pkg_resources.resource_filename(__name__, "assets/mitre_cti/")) # Load or create country data countries = [] if args.randomize_geopol is True: print("Creating countries...") with open(args.random_geodata, encoding='utf-8') as f: context_options = json.load(f) # seed file f.close() map_matrix = context.Map(args.num_countries) for c in range(0, args.num_countries): countries.append( context.Country(fs_gen, context_options, map_matrix.map)) for c in countries: # This loop is used mainly to convert references to other countries # to the names of those countries instead of their ID numbers, # since, during the generation of each country it only has access # to map_matrix with ID numbers of the other countries # Convert the neighbors listed by id# to neighbor country names neighbors = {} for n in c.neighbors: n_name = next((x.name for x in countries if x.id == n), None) neighbors[n_name] = c.neighbors[n] c.neighbors = neighbors if len(c.neighbors) == 0: c.neighbors = "None (island nation)" # if country is a terrority, find its owner if c.government_type == "non-self-governing territory": gdps = [(int(gdp.gdp[1:].replace(',', '')), gdp.name) for gdp in countries] gdps.sort() # Territory owners are most likely to be high GDP countries # pick a random one from the top three GDP owner_name = np.random.choice([gdp[1] for gdp in gdps][-3:]) if c.name in [gdp[1] for gdp in gdps][-3:]: # if the territory itself is in the top three GDP, change # its gov type to a republic instead of a territory c.government_type = "federal parliamentary republic" else: c.government_type += f" of {str(owner_name)}" # update ethnic groups to include owner instead of 
random owner = next( (x.id for x in countries if x.name == owner_name), None) if str(owner) not in c.ethnic_groups: egs = {} for eg in c.ethnic_groups: try: int(eg) if str(owner) not in egs: egs[str(owner)] = c.ethnic_groups[eg] else: egs[eg] = c.ethnic_groups[eg] except ValueError: egs[eg] = c.ethnic_groups[eg] c.ethnic_groups = egs # update forces to include owner name if necessary msf = c.military_and_security_forces c.military_and_security_forces = msf.replace( "[COUNTRY]", owner_name) # update languages to include owner instead of random if str(owner) not in c.languages: langs = {} for eg in c.languages: try: int(eg) if str(owner) not in langs: langs[str(owner)] = c.languages[eg] else: langs[eg] = c.languages[eg] except ValueError: langs[eg] = c.languages[eg] c.languages = langs # Apply nationalities to ethnic groups listed by id# egs = {} for eg in c.ethnic_groups: try: egs[next( (x.nationality for x in countries if x.id == int(eg)), None)] = c.ethnic_groups[eg] except ValueError: egs[eg] = c.ethnic_groups[eg] c.ethnic_groups = egs # Convert languges listed by id# to country names egs = {} for eg in c.languages: try: eg_name = next( (x.name for x in countries if x.id == int(eg)), None) if eg_name.endswith(('a', 'e', 'i', 'o', 'u')): eg_name += "nese" else: eg_name += 'ish' egs[eg_name] = c.languages[eg] except ValueError: egs[eg] = c.languages[eg] c.languages = egs else: # Using country data files instead of random generation print("Loading countries...") for fn in os.listdir(args.country_data): with open(args.country_data + fn, 'r') as f: country_data = json.load(f) f.close() countries.append(context.Country(fs_gen, **country_data)) # Load or create actor data print("Creating threat actors...") with open(pkg_resources.resource_filename(__name__, "assets/stix_vocab.json"), encoding='utf-8') as json_file: stix_vocab = json.load(json_file) json_file.close() if config['agents']['randomize_threat_actors'] is True: apt_store = fs_gen with 
open(pkg_resources.resource_filename( __name__, config['agents']['random_variables']['actor_name_1']), encoding='utf-8') as f: adjectives = [line.rstrip() for line in f] f.close() with open(pkg_resources.resource_filename( __name__, config['agents']['random_variables']['actor_name_2']), encoding='utf-8') as f: nouns = [line.rstrip() for line in f] f.close() actors = 1 while actors <= config['agents']['random_variables']['num_agents']: agents.create_threatactor(stix_vocab, nouns, adjectives, countries, apt_store) actors += 1 else: # no randomization - use provided data set if config['agents']['non_random_vars']['apt_data'] == "mitre_cti": apt_store = fs_real else: apt_store = FileSystemStore( config['agents']['non_random_vars']['apt_data']) # Create organizations print('Creating organizations...') with open( pkg_resources.resource_filename( __name__, config['agents']['org_variables']['org_names'])) as f: org_names = f.read().splitlines() # organization name possibilities f.close() with open(pkg_resources.resource_filename(__name__, 'assets/NIST_assess.json'), encoding='utf-8') as json_file: assessment = json.load(json_file) json_file.close() for c in countries: orgs = 0 while orgs < config['agents']['org_variables']["orgs_per_country"]: agents.create_organization(stix_vocab, fs_gen, c, org_names, assessment) orgs += 1 # Run simulation print('Running simulation...') start = datetime.strptime(config["simulation"]['time_range'][0], '%Y-%m-%d') end = datetime.strptime(config["simulation"]['time_range'][1], '%Y-%m-%d') td = end - start actors = apt_store.query(Filter("type", "=", "intrusion-set")) orgs = fs_gen.query([ Filter("type", "=", "identity"), Filter("identity_class", "=", "organization") ]) tools = fs_real.query(Filter('type', '=', 'tool')) malwares = fs_real.query(Filter('type', '=', 'malware')) for r in range(1, int(config["simulation"]['number_of_rounds']) + 1): print(f'\tRound {r}') simulator.simulate( actors, orgs, tools, malwares, fs_gen, start, td.days / 
(config["simulation"]['number_of_rounds'] * len(actors))) start += timedelta(days=td.days / config["simulation"]['number_of_rounds']) # Create output files print('Saving output...') # Map country_names = {} for country in countries: country_names[str(country.id)] = country.name try: map_matrix.plot_map(args.output, **country_names) except NameError: pass for ot in args.output_types: print(f'\t{ot}') path = args.output + "/" + ot if ot == "stix": shutil.copytree(temp_path, path) else: os.mkdir(path) os.mkdir(path + '/countries/') os.mkdir(path + '/actors/') os.mkdir(path + '/reports/') os.mkdir(path + '/organizations/') for country in countries: country.save(path + '/countries/', ot) apts = apt_store.query(Filter("type", "=", "intrusion-set")) for apt in apts: agents.save(apt, path + '/actors/', ot, fs_gen, fs_real) events = fs_gen.query(Filter("type", "=", "sighting")) for e in events: simulator.save(e, apt_store, fs_real, path + '/reports/', ot) for org in orgs: agents.save_org(org, path + '/organizations/', ot, assessment) if ot == "html": html_src = pkg_resources.resource_filename( __name__, 'assets/html_templates') html_templates = os.listdir(html_src) for f in html_templates: shutil.copy(html_src + '/' + f, path) f = open(path + '/COUNTRY.html', 'r') c_template = f.read() f.close() for country in countries: f = open(path + '/countries/' + country.name + '.html', 'w') f.write(c_template.replace('COUNTRY', country.name)) f.close() os.remove(path + '/COUNTRY.html') shutil.rmtree(temp_path) print('Done')
from stix2 import FileSystemSource, Filter
from stix2.utils import get_type_from_id
import stix2 as sx
from itertools import chain
import sys
import globals as gv
import database_actions as db

# File-system sources for each ATT&CK collection, merged into one view.
enterprise_attack = FileSystemSource(gv._MITRE_GIT + "enterprise-attack")
mobile_attack = FileSystemSource(gv._MITRE_GIT + "mobile-attack")
pre_attack = FileSystemSource(gv._MITRE_GIT + "pre-attack")
composite_ds = sx.CompositeDataSource()
composite_ds.add_data_sources([enterprise_attack, pre_attack, mobile_attack])


def get_all_techniques(src):
    """Return every attack-pattern object in *src*."""
    return src.query([Filter('type', '=', 'attack-pattern')])


def get_technique_by_name(src, name):
    """Return attack-patterns whose name exactly matches *name*."""
    return src.query([
        Filter('type', '=', 'attack-pattern'),
        Filter('name', '=', name),
    ])


def get_techniques_by_content(src, content):
    """Return techniques whose description contains *content* (case-insensitive)."""
    needle = content.lower()
    return [
        tech for tech in get_all_techniques(src)
        if needle in tech.description.lower()
    ]
class AtomicCaldera: def __init__(self, services, ac_data_svc): self.ac_data_svc = ac_data_svc self.data_svc = services.get('data_svc') self.auth_svc = services.get('auth_svc') self.log = Logger('atomiccaldera') self.log.debug('Atomic-Caldera Plugin Logging started.') self.get_conf() self.fs = FileSystemSource(self.ctipath) def get_conf(self): confPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../conf/artconf.yml') try: with open(confPath, 'r') as c: conf = yaml.load(c, Loader=yaml.Loader) self.ctipath = os.path.expanduser( os.path.join(conf['ctipath'], 'enterprise-attack/')) self.artpath = os.path.expanduser(conf['artpath']) self.log.debug(self.ctipath) self.log.debug(self.artpath) except: pass @template('atomiccaldera.html') async def landing(self, request): await self.auth_svc.check_permissions(request) abilities = [] tactics = [] variables = [] try: abilities = await self.ac_data_svc.explode_art_abilities() for ab in abilities: if not ab['tactic'] in tactics: tactics.append(ab['tactic']) except Exception as e: self.log.error(e) try: variables = await self.ac_data_svc.explode_art_variables() except Exception as e: self.log.error(e) return { 'abilities': json.dumps(abilities), 'tactics': tactics, 'variables': json.dumps(variables) } async def getMITREPhase(self, attackID): filter = [ Filter('type', '=', 'attack-pattern'), Filter('external_references.external_id', '=', attackID) ] result = self.fs.query(filter) if result: return result[0].kill_chain_phases[0].phase_name else: return 'unknown' async def get_atomics(self): await self.ac_data_svc.build_db( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../conf/ac.sql')) artAbilities = [] artVars = [] if os.path.exists(self.artpath): for root, dirs, files in os.walk(self.artpath): for procFile in files: fullFile = os.path.join(root, procFile) if os.path.splitext(fullFile)[-1].lower() == '.yaml': self.log.debug('Processing {}'.format(fullFile)) try: artObj = ARTyaml() except: continue 
with open(fullFile, 'r') as yamlFile: try: artObj.load(yamlFile) except: continue # Loop through the tests if artObj.atomicTests: for atomic in artObj.atomicTests: for platform in atomic['supported_platforms']: if platform.lower() in [ 'windows', 'linux', 'macos' ]: name = atomic['name'] description = atomic['description'] if 'command' in atomic[ 'executor'].keys(): command = re.sub( r'x07', r'a', repr(atomic['executor'] ['command'])).strip() command = command.encode( 'utf-8').decode( 'unicode_escape') executor = atomic['executor'][ 'name'] if command[0] == '\'': command = command.strip('\'') elif command[0] == '\"': command = command.strip('\"') else: command = '' executor = '' try: if command != '': checkUnique = { 'technique': int(artObj.attackTech[1:]), 'command': b64encode( command.encode('utf-8') ).decode('utf-8') } except Exception as e: print(e) # Check to see if the command has been added to the database if (command != '' and not await self.ac_data_svc. check_art_ability(checkUnique) ): uuidBool = True while (uuidBool): ability_id = str(uuid.uuid4()) if not await self.ac_data_svc.check_art_ability( {'ability_id': ability_id }): uuidBool = False try: # Add the new ability to export artAbilities.append({ 'ability_id': ability_id, 'technique': artObj.attackTech[1:], 'name': name, 'description': description, 'tactic': await self.getMITREPhase( artObj.attackTech), 'attack_name': artObj.displayName, 'platform': platform, 'executor': executor, 'command': b64encode( command.encode('utf-8') ).decode('utf-8') }) except Exception as e: print(e) if 'input_arguments' in atomic.keys( ): for argument in atomic[ 'input_arguments'].keys( ): try: curVar = re.sub( r'x07', r'a', repr(atomic[ 'input_arguments'] [argument] ['default']) ).strip() if curVar[0] == '\'': curVar = curVar.strip( '\'') elif curVar[0] == '\"': curVar = curVar.strip( '\"') curVar = curVar.replace( '\\\\', '\\') artVars.append({ 'ability_id': ability_id, 'var_name': argument, 'value': b64encode( 
curVar.encode( 'utf-8')). decode('utf-8') }) except: pass else: self.log.debug('Paths are not valid') return {'abilities': [], 'variables': []} self.log.debug('Got to the end.') return {'abilities': artAbilities, 'variables': artVars} async def export_all_to_stockpile(self, data): try: abilities = await self.ac_data_svc.explode_art_abilities() except Exception as e: self.log.error(e) try: variables = await self.ac_data_svc.explode_art_variables() except Exception as e: self.log.error(e) if await self.export_to_stockpile(abilities, variables): return 'Abilities successfully exported.' else: return 'Failed to export abilities.' async def export_one_to_stockpile(self, data): abilities = [] variables = [] ability_id = {'ability_id': data.pop('ability_id')} try: abilities = await self.ac_data_svc.get_art_ability(ability_id) except Exception as e: self.log.error(e) try: variables = await self.ac_data_svc.get_art_variable(ability_id) except Exception as e: self.log.error(e) if await self.export_to_stockpile(abilities, variables): return 'Ability successfully exported.' else: return 'Failed to export ability.' 
async def export_to_stockpile(self, abilities, variables): # String representer foy PyYAML to format the command string yaml.add_representer(cmdStr, cmd_presenter) for ability in abilities: executor = ability['executor'] platform = ability['platform'] payload = '' # Fix the command formatting command = b64decode(ability['command']) command = command.decode('utf-8') if command[0] == '\'': command = command.strip('\'') elif command[0] == '\"': command = command.strip('\"') # Determin the executor # Fill in the variables for variable in variables: if variable['ability_id'] == ability['ability_id']: value = b64decode(variable['value']).decode('utf-8') if value[0] == '\'': value = value.strip('\'') elif value[0] == '\"': value = value.strip('\"') value = value.replace('\\\\', '\\') command = re.sub( r"\#{{{argName}}}".format( argName=str(variable['var_name'])), value.encode('unicode-escape').decode(), command) if (executor.lower() == 'sh' or executor.lower() == 'bash'): if platform.lower() == 'linux': platform = 'linux' elif platform.lower() == 'macos': platform = 'darwin' elif (executor.lower() == 'command_prompt' or executor.lower() == 'powershell'): if (executor.lower() == 'command_prompt'): executor = 'cmd' else: executor = 'psh' command = command.replace('\\n', '\n') # Future additions parserName = '' parserProperty = '' parserScript = '' # Build the YAML data #newYaml = [{ 'id': ability['ability_id'], # 'name': ability['name'], # 'description': ability['description'], # 'tactic': ability['tactic'], # 'technique': { 'attack_id': 'T{}'.format(str(ability['technique'])), 'name': ability['attack_name'] }, # 'platforms': { platform: { executor.lower(): { 'command': cmdStr(command), 'payload': payload, 'parser': { 'name': parserName, 'property': parserProperty, 'script': parserScript }}}}}] newYaml = [{ 'id': ability['ability_id'], 'name': ability['name'], 'description': ability['description'], 'tactic': ability['tactic'], 'technique': { 'attack_id': 
'T{}'.format(str(ability['technique'])), 'name': ability['attack_name'] }, 'platforms': { platform: { executor.lower(): { 'command': cmdStr(command), 'payload': payload } } } }] payloadPath = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../../stockpile/data/payloads/') abilityPath = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../../stockpile//data/abilities/') # Check and create payloads folder if it does not exist try: if not os.path.exists(payloadPath): os.makedirs(payloadPath) except Exception as e: self.log.error(e) return False # Write the BAT file if needed if payload != '': with open(os.path.join(payloadPath, payload), 'w') as payloadFile: payloadFile.write(batCommand) # Check and create ability folder if it does not exist try: if not os.path.exists( os.path.join(abilityPath, ability['tactic'])): os.makedirs(os.path.join(abilityPath, ability['tactic'])) except Exception as e: self.log.error(e) return False # Write the YAML file to the correct directory try: with open( os.path.join(abilityPath, ability['tactic'], '{}.yml'.format(ability['ability_id'])), 'w') as newYAMLFile: dump = yaml.dump(newYaml, default_style=None, default_flow_style=False, allow_unicode=True, encoding=None, sort_keys=False) newYAMLFile.write(dump) except Exception as e: self.log.error(e) return False return True async def get_art(self, request): self.log.debug('Landed in get_art.') try: atomics = await self.get_atomics() except Exception as e: self.log.error(e) pass return atomics async def import_art_abilities(self): try: atomics = await self.get_atomics() except Exception as e: self.log.error(e) return 'Failed to load abilities.' for ability in atomics['abilities']: await self.ac_data_svc.create_art_ability(ability) for variable in atomics['variables']: await self.ac_data_svc.create_art_variable(variable) return 'Successfully imported new abilities.' 
async def save_art_ability(self, data):
    """Persist edits to a single Atomic Red Team ability.

    :param data: dict with 'key', 'value' (lookup pair) and 'data'
        (the field updates to apply).
    :return: a status message string for the REST caller.
    """
    key = data.pop('key')
    value = data.pop('value')
    updates = data.pop('data')
    if await self.ac_data_svc.update_art_ability(key, value, updates):
        return 'Updated ability: {}'.format(value)
    else:
        return 'Update failed for ability: {}'.format(value)

async def save_art_variables(self, data):
    """Persist edits to ability variables.

    :param data: dict whose 'data' entry holds the variable updates.
    :return: a status message string for the REST caller.
    """
    updates = data.pop('data')
    if await self.ac_data_svc.update_art_variables(updates):
        return 'Updated variables successfully.'
    else:
        return 'Updates to variables failed.'

async def delete_all(self):
    """Delete all imported abilities, their exported files, and reset the DB.

    Removes each ability's exported YAML and .bat payload from the
    stockpile plugin tree, then drops and rebuilds the plugin database.

    :return: the status value reported by the data service's delete_all.
    """
    abilities = []
    # Both export trees live inside the sibling stockpile plugin.
    baseDir = os.path.dirname(os.path.realpath(__file__))
    payloadPath = os.path.join(baseDir, '../../stockpile/data/payloads/')
    abilityPath = os.path.join(baseDir, '../../stockpile/data/abilities/')
    try:
        abilities = await self.ac_data_svc.explode_art_abilities()
    except Exception as e:
        self.log.error(e)
    for ability in abilities:
        ymlFile = os.path.join(abilityPath, ability['tactic'],
                               '{}.yml'.format(ability['ability_id']))
        if os.path.exists(ymlFile):
            os.remove(ymlFile)
        batFile = os.path.join(payloadPath,
                               '{}.bat'.format(ability['ability_id']))
        if os.path.exists(batFile):
            os.remove(batFile)
    status = await self.ac_data_svc.delete_all()
    # Rebuild the empty schema from the bundled SQL file.
    await self.ac_data_svc.build_db(
        os.path.join(baseDir, '../conf/ac.sql'))
    return status

async def rest_api(self, request):
    """Dispatch a REST request to the handler for its method + index.

    :param request: the aiohttp request; its JSON body must contain an
        'index' key naming the operation.
    :return: an aiohttp JSON response with the handler's output, or
        null if the handler failed or the index was unknown.
    """
    self.log.debug('Starting Rest call.')
    await self.auth_svc.check_permissions(request)
    data = dict(await request.json())
    index = data.pop('index')
    self.log.debug('Index: {}'.format(index))
    options = dict(
        PUT=dict(ac_ability=lambda d: self.import_art_abilities(**d)),
        POST=dict(
            ac_ability=lambda d: self.ac_data_svc.explode_art_abilities(
                **d),
            ac_ability_save=lambda d: self.save_art_ability(data=d),
            ac_variables_save=lambda d: self.save_art_variables(data=d),
            ac_export_all=lambda d: self.export_all_to_stockpile(**d),
            ac_export_one=lambda d: self.export_one_to_stockpile(data=d)),
        DELETE=dict(delete_all=lambda d: self.delete_all(**d)))
    # Fix: `output` was previously unbound when the lookup or handler
    # raised (e.g. an unknown index -> KeyError), so json_response
    # raised NameError instead of returning a response.
    output = None
    try:
        output = await options[request.method][index](data)
    except Exception as e:
        self.log.debug('Stopped at api call.')
        self.log.error(e)
    return web.json_response(output)
else: # no link with capec tmpExternalReference = { "description": external_reference["description"], "refsource": external_reference["source_name"], "dgraph.type": "Reference" } if "url" in external_reference.keys(): tmpExternalReference["url"] = external_reference["url"] tmpResult["reference"].append(tmpExternalReference) data.append(tmpResult) # print(data) txn = client.txn() try: mu = pydgraph.Mutation(set_json=json.dumps(data[:]).encode('utf8')) txn.mutate(mu) txn.commit() except pydgraph.AbortedError: print("error") finally: txn.discard() print("ics att&ck technique data without relations loaded") if __name__ == '__main__': client_stub = pydgraph.DgraphClientStub('localhost:9080') client = pydgraph.DgraphClient(client_stub) fs = FileSystemSource('./att&ck/cti-ATT-CK-v8.2/ics-attack') load(client, fs)
def main(inputDir, ouptutDir, csvPath, varCsvPath, ctiPath):
    """Convert Atomic Red Team YAML tests into CALDERA ability YAML files.

    Walks inputDir for ART test YAML files, maps each test to its MITRE
    ATT&CK tactic via the CTI library at ctiPath, writes one ability
    YAML per new test under ouptutDir/abilities/<tactic>/, and keeps two
    CSV catalogs (csvPath for abilities, varCsvPath for their input
    arguments) so already-converted tests are not duplicated.

    :param inputDir: directory tree containing ART test YAML files.
    :param ouptutDir: output directory ('ouptut' typo kept — renaming
        would break keyword callers).
    :param csvPath: path of the ability catalog CSV (read and rewritten).
    :param varCsvPath: path of the variable catalog CSV (read and rewritten).
    :param ctiPath: root of the MITRE CTI checkout.
    """
    logging.debug('Starting main function.')
    # Load the MITRE library.
    fs = FileSystemSource(os.path.join(ctiPath, 'enterprise-attack/'))
    # Load the existing catalog CSV, if any (missing file -> empty list).
    # Fix: the bare `except:` clauses below also swallowed SystemExit /
    # KeyboardInterrupt; narrowed to Exception.
    try:
        csvFile = []
        with open(csvPath, 'r') as oldCSVFile:
            reader = csv.DictReader(oldCSVFile)
            for line in reader:
                csvFile.append(line)
        logging.debug('Successfully loaded catalog CSV file.')
    except Exception:
        csvFile = []
        logging.debug('Catalog CSV was not loaded, creating empty list.')
    # Load the existing variable CSV, if any (missing file -> empty list).
    try:
        varCsvFile = []
        with open(varCsvPath, 'r') as oldVarCSVFile:
            reader = csv.DictReader(oldVarCSVFile)
            for line in reader:
                varCsvFile.append(line)
        logging.debug('Successfully loaded variable CSV file.')
    except Exception:
        varCsvFile = []
        logging.debug('Variable CSV was not loaded, creating empty list.')
    # Walk the input directory to find the YAML files to process.
    # ----------------------------------------------------------
    for root, _dirs, files in os.walk(inputDir):
        for procFile in files:
            fullFile = os.path.join(root, procFile)
            if os.path.splitext(fullFile)[-1].lower() != '.yaml':
                continue
            print("Processing: {}".format(fullFile))
            # Load the YAML file.
            # NOTE(review): yaml.Loader executes arbitrary constructors;
            # only run this on trusted ART repositories (yaml.safe_load
            # would reject the files' custom tags otherwise unchanged).
            with open(fullFile, 'r') as yamlFile:
                try:
                    yamlData = yaml.load(yamlFile, Loader=yaml.Loader)
                    logging.debug(
                        'Successfully loaded: {}.'.format(fullFile))
                except Exception:
                    logging.debug('Unable to load: {}.'.format(fullFile))
                    raise SystemExit(
                        'Unable to load: {}.'.format(fullFile))
            # Get the description.
            # NOTE(review): displayName is only bound when the key is
            # present; a file with 'attack_technique' but no
            # 'display_name' would hit a NameError below — confirm the
            # ART schema guarantees the key.
            if 'display_name' in yamlData.keys():
                displayName = yamlData['display_name']
            # Get the attack ID and its kill-chain tactic.
            if 'attack_technique' in yamlData.keys():
                attackID = yamlData['attack_technique']
                tactic = getMITREPhase(fs, attackID)
                if tactic is None:
                    tactic = 'unknown'
            else:
                logging.debug('No attack in this YAML, continuing.')
                continue
            if 'atomic_tests' not in yamlData.keys():
                continue
            # Loop through each Atomic test (ART lists multiple tests
            # per YAML file).
            for atomic in yamlData['atomic_tests']:
                attackName = atomic['name']
                testDescription = atomic['description']
                varList = []
                # Some tests have no 'command' key; those fall through
                # with empty executor/command and are skipped below.
                if 'command' in atomic['executor'].keys():
                    # Ensure we don't reuse a UUID already catalogued.
                    uuidBool = True
                    while (uuidBool):
                        attackUUID = uuid.uuid4()
                        if not any(line['attackUUID'] == str(attackUUID)
                                   for line in csvFile):
                            uuidBool = False
                    executor = atomic['executor']['name']
                    # Grab the command; repr() + re-decode fixes the
                    # incorrect encoding of the '\a' (BEL) sequence.
                    command = re.sub(r'x07', r'a',
                                     repr(atomic['executor']['command']))
                    command = command.encode('utf-8').decode(
                        'unicode_escape')
                    if command[0] == '\'':
                        command = command.strip('\'')
                    elif command[0] == '\"':
                        command = command.strip('\"')
                    # Collect this test's input arguments (defaults).
                    if 'input_arguments' in atomic.keys():
                        for argument in atomic['input_arguments'].keys():
                            try:
                                # Same '\a' repair as for the command.
                                curVar = re.sub(
                                    r'x07', r'a',
                                    repr(atomic['input_arguments']
                                         [argument]['default']))
                            except Exception:
                                logging.error('Unable to encode command.')
                                raise SystemExit
                            varList.append({
                                'attackUUID': attackUUID,
                                'attackID': attackID,
                                'executor': executor,
                                'variable': argument,
                                'value': curVar
                            })
                else:
                    command = ''
                    executor = ''
                origCommand = command
                # Normalize executor names to agent platforms; anything
                # not sh/bash/cmd/powershell (including the empty
                # executor above) is treated as darwin and skipped.
                if (executor.lower() == 'sh'
                        or executor.lower() == 'bash'):
                    executor = 'linux'
                    command = command.replace('\\n', '\n')
                elif (executor.lower() == 'command_prompt'
                      or executor.lower() == 'powershell'):
                    if (executor.lower() == 'command_prompt'):
                        # Wrap cmd.exe commands with the BAT template.
                        with open('Cmd-Wrapper.txt', mode='r') as cmdFile:
                            cmdWrap = cmdFile.read()
                        # Fix: raw string avoids the invalid '\#' escape
                        # warning (identical pattern bytes).
                        # NOTE(review): `command` is used as the re.sub
                        # replacement, so backslashes in it are treated
                        # as escapes — confirm commands never contain
                        # group references like '\1'.
                        reCmd = re.sub(r"\#{command}", command, cmdWrap)
                        command = str(reCmd)
                    else:
                        command = command.replace('\\n', '\n')
                    executor = 'windows'
                else:
                    executor = 'darwin'
                    continue
                logging.debug('The command variable type is: {}'.format(
                    type(command)))
                logging.debug('Collected attack name: {}'.format(
                    attackName))
                logging.debug('Collected attack executor: {}'.format(
                    executor))
                logging.debug('Collected attack command: {}'.format(
                    command))
                # Skip commands already catalogued in the CSV.
                if not any((line['attackID'] == attackID)
                           and (line['origCommand'] == origCommand)
                           for line in csvFile):
                    logging.debug('Collecting new YAML info.')
                    # Assemble the ability dict that will be dumped to
                    # YAML; 'command' is formatted as a scalar string.
                    newYAML = [{
                        'id': str(attackUUID),
                        'name': displayName,
                        'description': '{} (Atomic Red Team)'.format(
                            testDescription.strip().replace(
                                '\n', ' ').replace('  ', ' ')),
                        'tactic': tactic,
                        'technique': {
                            'attack_id': attackID,
                            'name': attackName
                        },
                        'executors': {
                            executor: {
                                'command': cmdStr(command)
                            }
                        }
                    }]
                    logging.debug(newYAML)
                    # Ensure the abilities directory exists.
                    try:
                        abilityDir = os.path.join(ouptutDir, 'abilities/')
                        if not os.path.exists(abilityDir):
                            os.makedirs(abilityDir)
                            logging.debug(
                                'Ability directory created: {}'.format(
                                    abilityDir))
                        else:
                            logging.debug(
                                'Ability directory exists: {}'.format(
                                    abilityDir))
                    except Exception:
                        logging.error(
                            'Failed to create the abilty directory.')
                        raise SystemExit
                    # Ensure the per-tactic directory exists.
                    try:
                        tacticDir = os.path.join(abilityDir, tactic)
                        if not os.path.exists(tacticDir):
                            os.makedirs(tacticDir)
                            logging.debug(
                                'Tactic directory created: {}'.format(
                                    tacticDir))
                        else:
                            logging.debug(
                                'Tactic directory exists: {}'.format(
                                    tacticDir))
                    except Exception:
                        logging.error('Tactic is empty?')
                        raise SystemExit
                    # Write the ability YAML, named by its UUID.
                    try:
                        ymlPath = os.path.join(
                            abilityDir, tactic,
                            '{}.yml'.format(str(attackUUID)))
                        with open(ymlPath, 'w') as newYAMLFile:
                            dump = yaml.dump(newYAML,
                                             default_style=None,
                                             default_flow_style=False,
                                             allow_unicode=True,
                                             encoding=None,
                                             sort_keys=False)
                            newYAMLFile.write(dump)
                        logging.debug(
                            'YAML file written: {}'.format(ymlPath))
                    except Exception as e:
                        logging.error('Error creating YAML file.')
                        print(e)
                        raise SystemExit
                    # Record the converted ability in the catalog.
                    newLine = {
                        'attackUUID': attackUUID,
                        'attackID': attackID,
                        'origCommand': origCommand,
                        'command': command
                    }
                    csvFile.append(newLine)
                    # Record its variables in the variable catalog.
                    if len(varList) != 0:
                        for variable in varList:
                            varCsvFile.append({
                                'attackUUID': variable['attackUUID'],
                                'attackID': variable['attackID'],
                                'executor': variable['executor'],
                                'variable': variable['variable'],
                                'value': variable['value']
                            })
                else:
                    logging.debug('The technique already exists.')
    # Write the ability catalog CSV back to disk.
    with open(csvPath, 'w', newline='') as newCSVFile:
        fieldNames = ['attackUUID', 'attackID', 'origCommand', 'command']
        writer = csv.DictWriter(newCSVFile, fieldnames=fieldNames)
        writer.writeheader()
        for line in csvFile:
            writer.writerow(line)
    # Write the variable catalog CSV back to disk.
    with open(varCsvPath, 'w', newline='') as newVarCSVFile:
        fieldNames = [
            'attackUUID', 'attackID', 'executor', 'variable', 'value'
        ]
        writer = csv.DictWriter(newVarCSVFile, fieldnames=fieldNames)
        writer.writeheader()
        for line in varCsvFile:
            writer.writerow(line)