def insert_initial_detection_values(*args, **kwargs):
    """Insert Task category default database values from a yaml template file.

    Reads the Horatio detection-methods yaml and inserts one Detection row
    per entry, committing each row as it is added.
    """
    yaml_source = YamlInfo("aucr_app/plugins/Horatio/detection_methods.yml", "none", "none")
    for method_name in yaml_source.get():
        db.session.add(Detection(detection_method=method_name))
        db.session.commit()
def insert_initial_error_values(*args, **kwargs):
    """Insert HTTP error code default database values from a yaml template file.

    Each yaml key is an error name; its "message" entry becomes the stored
    error message. Rows are committed one at a time.
    """
    error_yaml = YamlInfo("aucr_app/plugins/errors/errors.yml", "none", "none")
    error_data = error_yaml.get()
    for error_name in error_data:
        row = Errors(error_name=error_name,
                     error_message=error_data[error_name]["message"])
        db.session.add(row)
        db.session.commit()
def insert_initial_classification_values(*args, **kwargs):
    """Insert Task category default database values from a yaml template file.

    Loads the unum classification yaml and stores one Classification row per
    entry, committing each row as it is added.
    """
    yaml_source = YamlInfo("aucr_app/plugins/unum/classification.yml", "none", "none")
    for classification_name in yaml_source.get():
        db.session.add(Classification(classification=classification_name))
        db.session.commit()
def get_group_permission_navbar():
    """Return group nav list from database.

    Builds a dict keyed by navbar section ("tasks", "analysis", "reports",
    "main") where each value is the list of navbar links the current user's
    groups are permitted to see, collected from every plugin's navbar.yml.

    :return: dict of section name -> list of navbar link items (empty
             sections are omitted).
    """
    if current_user:
        current_user_id = current_user.id
    else:
        current_user_id = 1  # fall back to the default/system user
    # BUG FIX: user_groups_ids was never initialized in this function, so the
    # assignment either raised NameError or mutated a shared module-level
    # dict across requests. Use a local dict instead.
    user_groups_ids = {"items": Group.query.filter_by(
        username_id=current_user_id).all()}
    user_groups_links = {}
    tasks_list = []
    analysis_list = []
    main_list = []
    reports_list = []
    for items in user_groups_ids["items"]:
        group_object = Groups.query.filter_by(id=items.groups_id).first()
        # Scan every plugin's navbar.yml and accumulate the links each
        # section grants to this group.
        for filename in glob.iglob('aucr_app/plugins/**/navbar.yml', recursive=True):
            run = YamlInfo(filename, "none", "none").get()
            # generate_navbar_list_item returns a falsy value when the yaml
            # contributes nothing for this section; keep the old list then.
            tasks_list = generate_navbar_list_item(
                "tasks", run, group_object.name, tasks_list) or tasks_list
            analysis_list = generate_navbar_list_item(
                "analysis", run, group_object.name, analysis_list) or analysis_list
            reports_list = generate_navbar_list_item(
                "reports", run, group_object.name, reports_list) or reports_list
            main_list = generate_navbar_list_item(
                "main", run, group_object.name, main_list) or main_list
    # Used to make sure empty lists are not populating the navbar dictionary
    if tasks_list:
        user_groups_links["tasks"] = tasks_list
    if reports_list:
        user_groups_links["reports"] = reports_list
    if analysis_list:
        user_groups_links["analysis"] = analysis_list
    if main_list:
        user_groups_links["main"] = main_list
    return user_groups_links
def aucr_app():
    """AUCR app flask function framework create and get things started.

    Creates and initializes the Flask application, pushes an application
    context, and ensures all database tables exist before returning the app.
    """
    YamlInfo("projectinfo.yml", "projectinfo", "LICENSE")
    application = init_app(create_app())
    application.secret_key = application.config['SECRET_KEY']
    application.app_context().push()
    db.create_all()
    return application
def get_mq_yaml_configs():
    """MQ aucr yaml config file from each plugin.

    Scans every plugin directory for an mqtasks.yml and collects the
    "tasks", "reports" and "analysis" entries into a single dict of lists.
    """
    sections = ("tasks", "reports", "analysis")
    collected = {section: [] for section in sections}
    for filename in glob.iglob('aucr_app/plugins/**/mqtasks.yml', recursive=True):
        config = YamlInfo(filename, "none", "none").get()
        for section in sections:
            if section in config:
                collected[section].append(config[section])
    return collected
def insert_initial_user_values(*args, **kwargs):
    """Create default database values from auth yaml template file.

    Creates the default "admin", "user" and "system" groups once, then one
    User row per entry in the auth yaml, each attached to the admin and
    user groups.
    """
    run = YamlInfo("aucr_app/plugins/auth/auth.yml", "none", "none")
    admin_data = run.get()
    # BUG FIX: the three default groups were previously created inside the
    # per-user loop, producing duplicate "admin"/"user"/"system" rows
    # whenever the yaml defined more than one account. Create them once.
    db.session.add_all([Groups(name="admin"),
                        Groups(name="user"),
                        Groups(name="system")])
    db.session.commit()
    for items in admin_data:
        hashed_password = generate_password_hash(admin_data[items]["password"]).decode('utf-8')
        default_admin = User(username=items,
                             password_hash=hashed_password,
                             email=admin_data[items]["email"])
        # TODO(review): group/user ids are hard-coded to the first rows; this
        # assumes a freshly-created database — verify for multi-user yamls.
        admin_group = Group(groups_id=1, username_id=1)
        user_group = Group(groups_id=2, username_id=1)
        db.session.add_all([admin_group, user_group, default_admin])
        db.session.commit()
def parser(raw_data: dict, output: str, input_file: str):
    """Output package to md format.

    Writes the package-level yml, extracts the export archive into
    ``output``, then for each content module writes ``module.yml``,
    ``tasks.md`` and ``tasks.yml``, finally rewriting ``tasks.md`` in the
    task order declared by the module's config.

    :param raw_data: parsed package export data (dict)
    :param output: destination directory for the generated package
    :param input_file: path to the export archive fed to extract_tar_data
    """
    package_export_content_modules = get_value(CONTENT_MOD_STRING, raw_data)[CONTENT_MOD_STRING]
    package_data = get_package_data(package_export_content_modules)
    main_package_data = package_export_package_info(raw_data)
    # File name is the sanitized package name plus the yaml extension.
    file_name = f"{strip_unsafe_file_names(main_package_data[N_STR].strip(' '))}{YAML_EXT}"
    raw_data[PACKAGE_STR][CONTENT_MODS] = package_data
    package_yml = PackageExport(raw_data[PACKAGE_STR]).to_yml()
    path_builder(output, True)
    write_to_file(f"{output}{DIR_CHARACTER}{file_name}", package_yml)
    media_path = f"{output}{DIR_CHARACTER}media"
    path_builder(media_path, True)
    # Unpack the archive so media/attachments are available under output.
    extract_tar_data(input_file, output)
    for values in package_export_content_modules:
        raw_task_data = get_task_data_listed(package_export_content_modules, values, output)
        info("Processing tasks with compile plugin now!")
        attachment_data = package_export_content_modules[values][CONTENT_MOD_EXPORT_TASK_ATTACHMENTS]
        answer_data = package_export_content_modules[values][EXPORT_TASKS]
        for package in raw_task_data:
            package_name_value = strip_unsafe_file_names(package)
            package_path = f"{output}{DIR_CHARACTER}{package_name_value}{DIR_CHARACTER}"
            path_builder(package_path, True)
            write_to_file(f"{package_path}module.yml",
                          ModuleExportContentModule(package_export_content_modules[values]).to_yml())
            module_task_yml = ""
            module_task_md = ""
            # Concatenate one markdown section (h1 title + body) per task,
            # and collect answer-key yaml when the task has one.
            for task_item in raw_task_data[package]:
                task_answer_key = yml_format_str(answer_data, task_item, attachment_data)
                module_task_md += (h_one_format(task_item) + raw_task_data[package][task_item]) + "\n"
                if task_answer_key:
                    module_task_yml += task_answer_key
            tasks_path = f"{package_path}tasks.md"
            write_to_file(tasks_path, module_task_md)
            tasks_answer_key_path = f"{package_path}tasks.yml"
            write_to_file(tasks_answer_key_path, module_task_yml)
            with open(tasks_path, 'r') as task_markdown_data:
                ordered_markdown_data = task_markdown_data.read()
            module_config_yaml = YamlInfo(f"{package_path}module.yml", "none", "none").get()
            # Rebuild tasks.md so sections follow the order (and optional
            # sub-task grouping) declared in the module config.
            fixed_order = ""
            if "tasks" in module_config_yaml:
                for tasks in module_config_yaml["tasks"]:
                    if isinstance(tasks, list):
                        # A list entry is a group of sub-tasks.
                        for task in tasks:
                            fixed_order += f"# {task}{NEW_LINE}"
                            fixed_order += \
                                f"{get_task_markdown_data(ordered_markdown_data, task)}{BREAK_LINE}{NEW_LINE}"
                    else:
                        fixed_order += f"# {tasks}{NEW_LINE}"
                        fixed_order += f"{get_task_markdown_data(ordered_markdown_data, tasks)}{BREAK_LINE}{NEW_LINE}"
            with open(tasks_path, 'w') as fixed_markdown:
                fixed_markdown.write(fixed_order)
def setUp(self):
    """Set up needed base environment data for unittests.

    Loads projectinfo.yml once and stores the parsed data on the test case.
    """
    project_info = YamlInfo("projectinfo.yml", "projectinfo", "LICENSE")
    self.project_data = project_info.get()
"""The Intelreaper python package.""" # coding=utf-8 import os from setuptools import setup, find_packages from yaml_info.yamlinfo import YamlInfo project_data = YamlInfo("projectinfo.yml", "projectinfo", "LICENSE").get() project_info_data = project_data["info"] project_version_data = project_data["version"] __version__ = "%(major)s.%(minor)s.%(revision)s.%(release)s" % project_version_data package_dir = (os.path.join('..')) setup( name=project_data["info"]["name"], version=__version__, include_package_data=True, packages=find_packages(exclude=['docs', 'tests', 'tools', 'utils']), url=project_data["info"]["url"], license=project_data["info"]["license"], author=project_data["info"]["authors"], author_email=project_data["info"]["authors_email"], description=project_data["info"]["description"], classifiers=project_data["info"]["classifiers"], package_dir={'.': ['intelreaper/', 'config.py']}, scripts=['ircli.py'], )
def compile_package_data(package_export_name: str, input_dir: str, export_dir: str):
    """Compile export package with input package/module directory.

    Reads the package yaml plus each content module's ``module.yml``,
    ``tasks.yml`` and ``tasks.md`` from ``input_dir``, builds the export
    JSON structure (tasks, questions, hints, attachments, rendered task
    descriptions), writes it under ``export_dir``, and hands the result to
    compile_export_package.

    :param package_export_name: name passed through to compile_export_package
    :param input_dir: directory holding the source package/module files
    :param export_dir: directory the compiled package tree is written into
    """
    package_dir = input_dir
    # The package name is the last path component of the input directory.
    dir_paths = package_dir.split(f"{DIR_CHARACTER}")
    total_length = len(dir_paths)
    package_name = dir_paths[(total_length - 1)]
    input_package_export_name = package_export_name
    package_config_path = f"{input_dir}{DIR_CHARACTER}{package_name}.yml"
    package_config_yaml = YamlInfo(package_config_path, "none", "none").get()
    build_json = {}
    build_json[PACKAGE_STR] = {}
    build_json[PACKAGE_STR] = package_config_yaml
    build_json['packageKey'] = str(uuid4())
    package_path = f"{export_dir}{DIR_CHARACTER}{package_name}"
    # Create a clean output directory: create the parent if missing, or wipe
    # and recreate the package dir if it already exists.
    try:
        mkdir(f"{package_path}")
    except FileNotFoundError:
        mkdir(f"{export_dir}")
        mkdir(f"{package_path}")
    except FileExistsError:
        rmtree(f"{package_path}")
        mkdir(f"{package_path}")
    mkdir(f"{package_path}{DIR_CHARACTER}content-modules")
    file_list = package_config_yaml["contentModules"]
    package_config_yaml[CONTENT_MODS] = []
    build_json[CONTENT_MOD_STRING] = {}
    # One iteration per content module listed in the package config.
    for file_name in file_list:
        task_task_exports = {}
        attachment_data_dict = {}
        mod_config_path = f"{input_dir}{DIR_CHARACTER}{file_name}{DIR_CHARACTER}module.yml"
        module_config_yaml = YamlInfo(mod_config_path, "none", "none").get()
        task_info_dict = {}
        question_data = {}
        questions_descriptions = {}
        task_task_list = []
        module_id = str(uuid4())
        module_id_string = f"{module_id}"
        content_module_path = f"{package_path}{DIR_CHARACTER}content-modules"
        module_path = f"{content_module_path}{DIR_CHARACTER}module-{module_id_string}"
        attachment_build_path = f"{module_path}{DIR_CHARACTER}attachments{DIR_CHARACTER}"
        # tasks.yml holds per-task answers/hints; tasks.md holds the prose.
        test_build = YamlInfo(
            f"{input_dir}{DIR_CHARACTER}{file_name}{DIR_CHARACTER}tasks.yml",
            "none", "none").get()
        with open(
                f"{input_dir}{DIR_CHARACTER}{file_name}{DIR_CHARACTER}tasks.md",
                'r') as markdown_data_file:
            module_markdown_data = markdown_data_file.read()
        if "tasks" in module_config_yaml:
            for tasks in module_config_yaml["tasks"]:
                sub_task_list = []
                if isinstance(tasks, list):
                    # A list entry is a group of sub-tasks exported together.
                    for task in tasks:
                        task_id = str(uuid4())
                        task_id_string = f"task-{task_id}"
                        task_dict = test_build[task]
                        # Normalize vmKeys into the export's key/val shape.
                        if "vmKeys" not in task_dict:
                            task_dict["vmKeys"] = []
                        else:
                            vm_list = []
                            for vm in task_dict["vmKeys"]:
                                vm_list.append({
                                    'key': {
                                        'repetitionGroup':
                                            task_dict["vmKeys"][vm]['ID'],
                                        'index':
                                            task_dict["vmKeys"][vm]['index']
                                    },
                                    'val': vm
                                })
                            task_dict["vmKeys"] = vm_list
                        if "attachments" in task_dict:
                            del task_dict["attachments"]
                        if "answers" in task_dict:
                            # Convert the answers block into the export's
                            # "question" structure (choices/hints/points).
                            question_list = task_dict["answers"]
                            tasks_dict = {}
                            tasks_dict["questions"] = {}
                            choices = []
                            true_str = "true"
                            false_str = "false"
                            for correct in question_list["correct"]:
                                choices.append({
                                    'value': correct,
                                    'correct': True
                                })
                            if "incorrect" in question_list:
                                for wrong in question_list["incorrect"]:
                                    choices.append({
                                        'value': wrong,
                                        'correct': False
                                    })
                            hints = []
                            if "hints" in task_dict:
                                if task_dict["hints"]:
                                    for hint in task_dict["hints"]:
                                        hints.append({
                                            'text': task_dict["hints"][hint]
                                            ["message"],
                                            'pointsDeduction':
                                                task_dict["hints"][hint]["cost"]
                                        })
                                    del task_dict["hints"]
                            tasks_dict["questions"]["choices"] = choices
                            tasks_dict["questions"]["hints"] = hints
                            tasks_dict["questions"]["retryCount"] = task_dict[
                                "retrycount"]
                            tasks_dict["questions"]["type"] = task_dict["type"]
                            tasks_dict["questions"]["points"] = task_dict[
                                "pointtotal"]
                            tasks_dict["questions"]["extraData"] = {}
                            tasks_dict["questions"]["mappingTags"] = []
                            if "mappingtags" in task_dict:
                                tasks_dict["questions"][
                                    "mappingTags"] = task_dict['mappingtags']
                            else:
                                tasks_dict["questions"]["mappingTags"] = []
                            if "extradata" in task_dict:
                                tasks_dict["questions"]["extraData"] = {}
                                del task_dict["extradata"]
                            else:
                                tasks_dict["questions"]["extraData"] = {}
                            task_dict["question"] = tasks_dict["questions"]
                            # Raw yaml fields are folded into "question".
                            del task_dict["answers"]
                            del task_dict["type"]
                            del task_dict["pointtotal"]
                            del task_dict["retrycount"]
                        if "question" in task_dict:
                            custom_task_dict = {
                                "vmKeys": task_dict["vmKeys"],
                                "title": task,
                                "question": task_dict["question"]
                            }
                        else:
                            if "vmKeys" in task_dict:
                                custom_task_dict = {
                                    "vmKeys": task_dict["vmKeys"],
                                    "title": task
                                }
                            else:
                                custom_task_dict = {
                                    "vmKeys": [],
                                    "title": task
                                }
                        sub_task_list.append({
                            "key": task_id_string,
                            "val": custom_task_dict
                        })
                        task_task_exports[task_id_string] = custom_task_dict
                        task_info_dict[
                            task_id_string] = get_task_markdown_data(
                                module_markdown_data, task)
                        attachment_data_dict[task_id_string] = {}
                    task_task_list.append(sub_task_list)
                else:
                    # Single (non-grouped) task entry.
                    task_id = str(uuid4())
                    task_id_string = f"task-{task_id}"
                    task_dict = test_build[tasks]
                    if "vmKeys" not in task_dict:
                        task_dict["vmKeys"] = []
                    else:
                        vm_list = []
                        for vm in task_dict["vmKeys"]:
                            vm_list.append({
                                'key': {
                                    'repetitionGroup':
                                        task_dict["vmKeys"][vm]['ID'],
                                    'index': task_dict["vmKeys"][vm]['index']
                                },
                                'val': vm
                            })
                        task_dict["vmKeys"] = vm_list
                    if "answers" in task_dict:
                        question_list = task_dict["answers"]
                        tasks_dict = {}
                        tasks_dict["questions"] = {}
                        choices = []
                        true_str = "true"
                        false_str = "false"
                        for correct in question_list["correct"]:
                            choices.append({'value': correct, 'correct': True})
                        if "incorrect" in question_list:
                            for wrong in question_list["incorrect"]:
                                choices.append({
                                    'value': wrong,
                                    'correct': False
                                })
                        hints = []
                        if "hints" in task_dict:
                            if task_dict["hints"]:
                                for hint in task_dict["hints"]:
                                    hints.append({
                                        'text':
                                            task_dict["hints"][hint]["message"],
                                        'pointsDeduction':
                                            task_dict["hints"][hint]["cost"]
                                    })
                                del task_dict["hints"]
                        tasks_dict["questions"]["choices"] = choices
                        tasks_dict["questions"]["hints"] = hints
                        tasks_dict["questions"]["retryCount"] = task_dict[
                            "retrycount"]
                        tasks_dict["questions"]["type"] = task_dict["type"]
                        tasks_dict["questions"]["points"] = task_dict[
                            "pointtotal"]
                        tasks_dict["questions"]["extraData"] = {}
                        tasks_dict["questions"]["mappingTags"] = []
                        if "mappingtags" in task_dict:
                            tasks_dict["questions"]["mappingTags"] = task_dict[
                                'mappingtags']
                        else:
                            tasks_dict["questions"]["mappingTags"] = []
                        if "extradata" in task_dict:
                            tasks_dict["questions"]["extraData"] = {}
                            del task_dict["extradata"]
                        else:
                            tasks_dict["questions"]["extraData"] = {}
                        task_dict["question"] = tasks_dict["questions"]
                        del task_dict["answers"]
                        del task_dict["type"]
                        del task_dict["pointtotal"]
                        del task_dict["retrycount"]
                    if "mappingtags" in task_dict:
                        del task_dict["mappingtags"]
                    if "extradata" in task_dict:
                        del task_dict["extradata"]
                    attachment_list = 0
                    if "question" in task_dict:
                        custom_task_dict_sub = {
                            "vmKeys": task_dict["vmKeys"],
                            "title": tasks,
                            "question": task_dict["question"]
                        }
                    else:
                        custom_task_dict_sub = {
                            "vmKeys": task_dict["vmKeys"],
                            "title": tasks
                        }
                    task_task_exports[task_id_string] = custom_task_dict_sub
                    if "attachments" in task_dict:
                        # Copy each attachment file into the module's
                        # attachments directory under a fresh uuid.
                        attachment_list = len(task_dict["attachments"])
                        attachment_data_dict[task_id_string] = {}
                        path_builder(content_module_path, False)
                        path_builder(module_path, False)
                        path_builder(attachment_build_path, False)
                        attachment_data_dict[task_id_string] = {}
                        if attachment_list > 0:
                            for attachment in task_dict["attachments"]:
                                attachment_uuid = str(uuid4())
                                # TODO update this to support more than just pdf files
                                attachment_data_dict[task_id_string][attachment_uuid] = \
                                    {"attachmentMetaContentType": "application/pdf",
                                     "attachmentMetaKey": attachment_uuid,
                                     "attachmentMetaName": attachment
                                     }
                                file_safe_attachment = attachment.replace(
                                    " ", "%20")
                                try:
                                    read_file = \
                                        f"{input_dir}{DIR_CHARACTER}attachments{DIR_CHARACTER}{file_safe_attachment}"
                                    with open(read_file, 'rb') \
                                            as module_file:
                                        module_data = module_file.read()
                                    attachment_path = f"{attachment_build_path}{attachment_uuid}"
                                    path_builder(attachment_path, False)
                                    with open(f'{attachment_path}{DIR_CHARACTER}{file_safe_attachment}', 'wb') \
                                            as module_file_data:
                                        module_file_data.write(module_data)
                                except FileNotFoundError:
                                    # Missing attachment is logged, not fatal.
                                    error(
                                        "Attachment data misconfig, attachment not found"
                                    )
                    else:
                        attachment_data_dict[task_id_string] = {}
                    if "question" in task_dict:
                        custom_task_dict_main = {
                            "vmKeys": task_dict["vmKeys"],
                            "title": tasks,
                            "question": task_dict["question"]
                        }
                    else:
                        custom_task_dict_main = {
                            "vmKeys": task_dict["vmKeys"],
                            "title": tasks
                        }
                    task_task_list.append([{
                        "key": task_id_string,
                        "val": custom_task_dict_main
                    }])
                    task_info_dict[task_id_string] = get_task_markdown_data(
                        module_markdown_data, tasks)
        module_dict_value = f"module-{module_id_string}"
        package_config_yaml[CONTENT_MODS].append(module_id_string)
        # NOTE(review): this re-prefixes every existing CONTENT_MODS entry on
        # each module iteration ("module-module-..." from the 2nd module on);
        # looks like a latent bug — verify with a multi-module package.
        module_list = []
        for module in package_config_yaml[CONTENT_MODS]:
            module_list.append(f"module-{module}")
        package_config_yaml[CONTENT_MODS] = module_list
        build_json[CONTENT_MOD_STRING][module_dict_value] = {}
        build_json[CONTENT_MOD_STRING][module_dict_value][
            EXPORT_MOD_STRING] = module_config_yaml
        build_json[CONTENT_MOD_STRING][module_dict_value][
            CONTENT_MOD_EXPORT_MAPPING_TAGS] = {}
        build_json[CONTENT_MOD_STRING][module_dict_value][EXPORT_MOD_STRING][
            TASKS] = task_task_list
        build_json[CONTENT_MOD_STRING][module_dict_value][QUESTION_DESC] = {}
        build_json[CONTENT_MOD_STRING][module_dict_value][TASK_DESC] = {}
        # Render each task's markdown into the export's node structure.
        for node_data in task_info_dict:
            task_list_items = []
            code_list = []
            star_list = []
            for line_data in task_info_dict[node_data].splitlines():
                # Dispatch on the first four characters of the line.
                line_chunk = line_data[:4]
                if line_chunk[:-2] == "# ":
                    info("Processing task Title ")
                elif line_chunk == "### ":
                    task_list_items.append(
                        build_node_package([build_text_line(line_data[4:])],
                                           "heading-two"))
                elif line_chunk == " -":
                    # NOTE(review): line_chunk is always 4 chars; comparing it
                    # to a shorter literal here (and in the " *"/" " branches)
                    # may be whitespace lost in transit — confirm the literals.
                    star_list.append(
                        build_node_package([
                            build_node_package(
                                [build_text_line(line_data[5:])],
                                "list-item-child")
                        ], "list-item"))
                elif line_chunk == " *":
                    star_list.append(
                        build_node_package([
                            build_node_package(
                                [build_text_line(line_data[5:])],
                                "list-item-child")
                        ], "list-item"))
                elif line_chunk == " ":
                    code_list.append(
                        build_node_package(
                            [build_text_line(f"{line_data[4:]}\r")],
                            "code-line"))
                elif line_chunk[:-1] == "## ":
                    task_list_items.append(
                        build_node_package([build_text_line(line_data[3:])],
                                           "heading-one"))
                elif line_chunk[:-2] == "**":
                    task_list_items.append(
                        build_node_package([build_text_line(line_data[2:-2])],
                                           "bold"))
                elif line_chunk[:-2] == "![":
                    # Markdown image: inline the file as a base64 data URI.
                    image_data = build_node_package([build_text_line("")],
                                                    "image-block")
                    picture_path = f"{input_dir}{DIR_CHARACTER}media{line_data[12:-1]}"
                    with open(picture_path, 'rb') as picture_file:
                        raw_picture = b64encode(
                            picture_file.read()).decode('utf-8')
                    image_data["data"] = {
                        "imageData": f"data:image/png;base64,{raw_picture}"
                    }
                    task_list_items.append(image_data)
                elif len(code_list) > 0:
                    # A non-matching line closes any open code block.
                    task_list_items.append(
                        build_node_package(code_list, "code-block"))
                    code_list = []
                elif len(star_list) > 0:
                    # ... or any open bullet list.
                    task_list_items.append(
                        build_node_package(star_list, "unordered-list"))
                    star_list = []
                elif len(line_data) > 0:
                    task_list_items, line_data, star_list, code_list = render_link_chunk(
                        task_list_items, line_data, star_list, code_list)
            # Flush a trailing code block or bullet list at end of input.
            if len(code_list) > 0:
                task_list_items.append(
                    build_node_package(code_list, "code-block"))
            elif len(star_list) > 0:
                task_list_items.append(
                    build_node_package(star_list, "unordered-list"))
            question_data[node_data] = {
                "data": {
                    "document": {
                        "data": {},
                        "object": "document",
                        "nodes": task_list_items
                    },
                    "object": "value"
                },
                "version": 2
            }
        build_json[CONTENT_MOD_STRING][module_dict_value][EXPORT_TASKS] = {}
        build_json[CONTENT_MOD_STRING][module_dict_value][
            EXPORT_TASKS] = task_task_exports
        build_json[CONTENT_MOD_STRING][module_dict_value][
            TASK_DESC] = question_data
        # Question descriptions only for tasks that actually have a question.
        for tasks in task_task_exports:
            if "question" in task_task_exports[tasks]:
                questions_descriptions[tasks] = question_data[tasks]
        build_json[CONTENT_MOD_STRING][module_dict_value][
            QUESTION_DESC] = questions_descriptions
        build_json[CONTENT_MOD_STRING][module_dict_value][
            CONTENT_MOD_EXPORT_TASK_ATTACHMENTS] = attachment_data_dict
    with open(f'{package_path}{DIR_CHARACTER}export.version', 'w') as export_version_file:
        export_version_file.write("9")
    with open(f'{package_path}{DIR_CHARACTER}package_export{JS_EXT}', 'w') as package_json:
        package_json.write(dumps(build_json))
    compile_export_package(f"{package_path}", input_package_export_name)
# coding=utf-8
from sqlalchemy.exc import ProgrammingError
from yaml_info.yamlinfo import YamlInfo
from aucr_app import db, create_app
from aucr_app.plugins.unum.models import Classification

app = create_app()
db.init_app(app)
# Module-level choices list consumed as (index, name) pairs, e.g. by forms.
CLASSIFICATION_AVAILABLE_CHOICES = None
with app.app_context():
    items_available_choices_list = []
    try:
        classification_data = Classification.query.all()
    # FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. Exception still covers the DB errors raised when
    # the table does not exist yet (e.g. first run before migrations).
    except Exception:
        classification_data = YamlInfo("aucr_app/plugins/unum/classification.yml",
                                       "none", "none").get()
    for count, items in enumerate(classification_data, start=1):
        items_available_choices_list.append((str(count), items))
    CLASSIFICATION_AVAILABLE_CHOICES = items_available_choices_list
def print_version(ctx, param, value):
    """Click option callback: echo project info, then terminate the CLI.

    Does nothing during resilient parsing or when the flag is absent.
    NOTE(review): the YamlInfo instance is echoed directly (no .get()) —
    presumably it has a useful string form; confirm.
    """
    if not value or ctx.resilient_parsing:
        return
    project_info = YamlInfo("projectinfo.yml", "projectinfo", "LICENSE")
    click.echo(project_info)
    ctx.exit()
# coding=utf-8
from yaml_info.yamlinfo import YamlInfo

# Load detection methods once at import time and expose them as
# (index-string, name) pairs for choice fields.
run = YamlInfo("aucr_app/plugins/Horatio/detection_methods.yml", "none", "none")
detection_data = run.get()
items_available_choices_list = [
    (str(position), entry)
    for position, entry in enumerate(detection_data, start=1)
]
AVAILABLE_CHOICES = items_available_choices_list