def run(self, silent=False):
    if custom_help:
        self.client.remove_command('help')
    cog_count = 0
    warnings = []
    cogs = extensions()
    for extension in cogs:
        try:
            self.client.load_extension(extension)
            cog_count += 1
        except Exception as e:
            if not silent:
                warnings.append(f'Failed to load extension {extension}\n{e}')
    if not silent:
        if not cogs:
            log.warning('No extensions were found.')
        else:
            for x in warnings:
                y = x.split('\n')
                log.warning(f'> {y[0]}')
                log.error(f'> {y[1]}')
            if len(warnings) > 0:
                # if saved() < enums.LogLevel.error.value:
                if len(warnings) == 1:
                    log.error(f'> Failed to load {trace.yellow.s}{len(warnings)}{trace.cyan} extension.')
                else:
                    log.error(f'> Failed to load {trace.yellow.s}{len(warnings)}{trace.cyan} extensions.')
            log.info(f'{trace.cyan}> Loaded {trace.yellow.s}{cog_count}{trace.cyan} extensions!')

def latest(cls): try: # Late for latest, curr for current. base = f"https://api.github.com/repos/Rapptz/discord.py/tags" info = requests.get(base).json()[0]['name'] info = info.replace('v', '') # info = '1.2.9' late = info curr = discord.__version__.split('.') late = late.split('.') for x in range(len(curr)): curr[x] = int(curr[x]) for x in range(len(late)): late[x] = int(late[x]) new = version.parse(late, curr) if new: log.warning( f'{trace.alert}> {trace.white}Discord{trace.green.s}Py {trace.cyan}v{trace.cyan.s}' f'{info}{trace.green.s} is {trace.yellow.s}available{trace.cyan}.' ) log.warning( f'{trace.alert}> {trace.yellow.s}Please update to {trace.cyan}v{trace.cyan.s}{info}{trace.cyan}.' ) except Exception: pass
def latest(cls): try: # Late for latest, curr for current. base = f"https://api.github.com/repos/ytdl-org/youtube-dl/tags" info = requests.get(base).json()[0]['name'] # info = '2038.01.19' late = info import youtube_dl # curr = youtube_dl.options.__version__.split('.') curr = youtube_dl.options.__version__ show = curr curr = curr.split('.') late = late.split('.') for x in range(len(curr)): curr[x] = int(curr[x]) for x in range(len(late)): late[x] = int(late[x]) new = version.parse(late, curr) if new: log.warning( f'{trace.alert}> {trace.cyan}Running on {trace.white.b}{trace.black}You{trace.red.b.s}' f'{trace.white.s}Tube{trace.reset}-{trace.red.s}DL {trace.cyan}v{trace.cyan.s}{show}{trace.cyan}.' ) log.warning( f'{trace.alert}> {trace.yellow.s}Please update {trace.white.b}{trace.black}' f'You{trace.red.b.s}{trace.white.s}Tube{trace.reset}-{trace.red.s}DL ' f'{trace.yellow.s}to {trace.cyan}v{trace.cyan.s}{info}{trace.cyan}.' ) except Exception: pass
def init():
    """Prepare test environment"""
    log.debug("Initializing test environment")
    if not os.path.exists(config.downloaddir):
        log.debug("Creating download folder for downloaded files")
        os.makedirs(config.downloaddir)
    if not os.path.exists(".install_log.txt"):
        log.warning("install.sh script wasn't executed!")
    startflask()

def input_link(links):
    """
    Let the user input a choice.
    """
    while True:
        try:
            log.info('Choose your number:')
            the_chosen_one = int(input('> '))
            if 1 <= the_chosen_one <= len(links):
                return links[the_chosen_one - 1]
            elif the_chosen_one == 0:
                return None
            else:
                log.warning('Choose a valid number!')
        except ValueError:
            log.warning('Choose a valid number!')

def embed(music_file, meta_tags):
    """
    Embed metadata.
    """
    if meta_tags is None:
        log.warning('Could not find metadata')
        return None
    elif music_file.endswith('.m4a'):
        log.info('Applying metadata')
        return embed_m4a(music_file, meta_tags)
    elif music_file.endswith('.mp3'):
        log.info('Applying metadata')
        return embed_mp3(music_file, meta_tags)
    else:
        log.warning('Cannot embed metadata into given output extension')
        return False

def _process_file(filename,
                  data,
                  strip_useless=False,
                  entry_points=None,
                  time_limit=None,
                  display_int_iocs=False,
                  artifact_dir=None,
                  out_file_name=None,
                  do_jit=False):
    """Process a single file.

    @param filename (str) path and filename of file on disk, or within
    the container.

    @param data (bytes) content of the file if it is in a container,
    None if it is a file on disk.

    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param entry_points (list) A list of the names (str) of the VB
    functions from which to start emulation.

    @param time_limit (int) The emulation time limit, in minutes. If
    None there is no time limit.

    @param display_int_iocs (boolean) Flag turning on/off the reporting
    of intermediate IOCs (base64 strings and URLs) found during the
    emulation process.

    @param artifact_dir (str) The directory in which to save artifacts
    dropped by the sample under analysis. If None the artifact
    directory is derived from the location of the analyzed file.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @param do_jit (boolean) A flag turning on/off doing VB -> Python
    transpiling of loops to speed up loop emulation.

    @return (tuple) A 4 element tuple of (action strings, external
    function names, intermediate IOCs, shellcode bytes) if the file
    could be analyzed, and None if there was an error.
    """

    # Increase Python call depth.
    sys.setrecursionlimit(13000)

    # Set the emulation time limit.
    if (time_limit is not None):
        core.vba_object.max_emulation_time = datetime.now() + timedelta(minutes=time_limit)

    # Create the emulator.
    log.info("Starting emulation...")
    vm = core.ViperMonkey(filename, data, do_jit=do_jit)
    orig_filename = filename
    if (entry_points is not None):
        for entry_point in entry_points:
            vm.user_entry_points.append(entry_point)
    try:
        #TODO: handle olefile errors, when an OLE file is malformed
        if (isinstance(data, Exception)):
            data = None
        vba = None
        try:
            vba = _get_vba_parser(data)
        except FileOpenError as e:

            # Is this an unrecognized format?
            if ("Failed to open file is not a supported file type, cannot extract VBA Macros." not in safe_str_convert(e)):

                # No, it is some other problem. Pass on the exception.
                raise e

            # This may be VBScript with some null characters. Remove those and try again.
            data = data.replace("\x00", "")
            vba = _get_vba_parser(data)

        # Do we have analyzable VBA/VBScript? Do the analysis even
        # without VBA/VBScript if we are scraping for intermediate
        # IOCs.
        if (vba.detect_vba_macros() or display_int_iocs):

            # Read in document metadata.
            try:
                log.info("Reading document metadata...")
                ole = olefile.OleFileIO(data)
                vm.set_metadata(ole.get_metadata())
            except Exception as e:
                log.warning("Reading in metadata failed. Trying fallback. " + safe_str_convert(e))
                vm.set_metadata(get_metadata_exif(orig_filename))

            # If this is an Excel spreadsheet, read it in.
            vm.loaded_excel = excel.load_excel(data)

            # Set where to store directly dropped files if needed.
            if (artifact_dir is None):
                artifact_dir = "./"
                if ((filename is not None) and ("/" in filename)):
                    artifact_dir = filename[:filename.rindex("/")]
            only_filename = filename
            if ((filename is not None) and ("/" in filename)):
                only_filename = filename[filename.rindex("/")+1:]

            # Set the output directory in which to put dumped files generated by
            # the macros.
            out_dir = None
            if (only_filename is not None):
                out_dir = artifact_dir + "/" + only_filename + "_artifacts/"
                if os.path.exists(out_dir):
                    shutil.rmtree(out_dir)
            else:
                out_dir = "/tmp/tmp_file_" + safe_str_convert(random.randrange(0, 10000000000))
            log.info("Saving dropped analysis artifacts in " + out_dir)
            core.vba_context.out_dir = out_dir
            del filename  # We already have this in memory, we don't need to read it again.

            # Parse the VBA streams.
            log.info("Parsing VB...")
            comp_modules = parse_streams(vba, strip_useless)
            if (comp_modules is None):
                return None
            got_code = False
            for module_info in comp_modules:
                m = module_info[0]
                stream = module_info[1]
                if (m != "empty"):
                    vm.add_compiled_module(m, stream)
                    got_code = True
            if ((not got_code) and (not display_int_iocs)):
                log.info("No VBA or VBScript found. Exiting.")
                return ([], [], [], [])

            # Get the VBA code.
            vba_code = ""
            for (_, _, _, macro_code) in vba.extract_macros():
                if (macro_code is not None):
                    vba_code += macro_code

            # Do not analyze the file if the VBA looks like garbage.
            if (read_ole_fields.is_garbage_vba(vba_code)):
                raise ValueError("VBA looks corrupted. Not analyzing.")

            # Read in text values from all of the various places in
            # Office 97/2000+ that text values can be hidden. So many
            # places.
            read_ole_fields.read_payload_hiding_places(data, orig_filename, vm, vba_code, vba)

            # Do Emulation.
            safe_print("")
            safe_print('-'*79)
            safe_print('TRACING VBA CODE (entrypoint = Auto*):')
            if (entry_points is not None):
                log.info("Starting emulation from function(s) " + safe_str_convert(entry_points))
            pyparsing.ParserElement.resetCache()
            vm.vba = vba
            vm.trace()

            # Done with emulation.

            # Report the results.
            str_actions, tmp_iocs, shellcode_bytes = _report_analysis_results(vm, data, display_int_iocs, orig_filename, out_file_name)

            # Save any embedded files as artifacts.
            _save_embedded_files(out_dir, vm)

            # Return the results.
            return (str_actions, vm.external_funcs, tmp_iocs, shellcode_bytes)

        # No VBA/VBScript found?
        else:
            safe_print('Finished analyzing ' + safe_str_convert(orig_filename) + " .\n")
            safe_print('No VBA macros found.')
            safe_print('')
            return ([], [], [], [])

    # Handle uncaught exceptions triggered during analysis.
    except Exception as e:

        # Print error info.
        if (("SystemExit" not in safe_str_convert(e)) and (". Aborting analysis." not in safe_str_convert(e))):
            traceback.print_exc()
            log.error(safe_str_convert(e))

        # If this is an out of memory error terminate the process with an
        # error code indicating that there are memory problems. This is so
        # that higher level systems using ViperMonkey can see that there is a
        # memory issue and handle it accordingly.
        if isinstance(e, MemoryError):
            log.error("Exiting ViperMonkey with error code 137 (out of memory)")
            sys.exit(137)

        # Done. Analysis failed.
        return None

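# Illustrative only: a hypothetical top-level call into _process_file() above.
# Per its docstring, data is None when the sample is a file on disk; the
# "sample.docm", "./artifacts" and "sample_report.json" names are made up.
results = _process_file("sample.docm",
                        None,
                        strip_useless=True,
                        time_limit=10,
                        display_int_iocs=True,
                        artifact_dir="./artifacts",
                        out_file_name="sample_report.json")
if results is not None:
    str_actions, external_funcs, int_iocs, shellcode_bytes = results
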
def parse_stream(subfilename,
                 stream_path=None,
                 vba_filename=None,
                 vba_code=None,
                 strip_useless=False,
                 local_funcs=None):
    """Parse the macros from a single OLE stream.

    @param subfilename (str) The name of the file containing the
    macros.

    @param stream_path (??) ??

    @param vba_filename (??) ??

    @param vba_code (str) The macro code to parse.

    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param local_funcs (list) A list of the names of already declared
    local VBA functions.

    @return (Module object) A parsed module object.
    """

    # Set local func list if needed.
    if (local_funcs is None):
        local_funcs = []

    # Check for timeouts.
    core.vba_object.limits_exceeded(throw_error=True)

    # Are the arguments all in a single tuple?
    if (stream_path is None):
        subfilename, stream_path, vba_filename, vba_code = subfilename

    # Skip old-style XLM macros.
    if (repr(stream_path).strip() == "'xlm_macro'"):
        log.warning("Skipping XLM macro stream...")
        return "empty"

    # Collapse long lines.
    vba_code = core.vba_collapse_long_lines(vba_code)

    # Filter cruft from the VBA.
    vba_code = filter_vba(vba_code)

    # Pull out Visual Basic from .hta contents (if we are looking at a
    # .hta file).
    vba_code = get_vb_contents_from_hta(vba_code)

    # Do not analyze the file if the VBA looks like garbage characters.
    if (read_ole_fields.is_garbage_vba(vba_code, no_html=True)):
        log.warning("Failed to extract VBScript from HTA. Skipping.")
        return "empty"

    # Skip some XML that olevba gives for some 2007+ streams.
    if (vba_code.strip().startswith("<?xml")):
        log.warning("Skipping XML stream.")
        return "empty"

    # Strip out code that does not affect the end result of the program.
    if (strip_useless):
        vba_code = core.strip_lines.strip_useless_code(vba_code, local_funcs)
    safe_print('-'*79)
    safe_print('VBA MACRO %s ' % vba_filename)
    safe_print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path)))
    safe_print('- '*39)

    # Parse the macro.
    m = None
    if vba_code.strip() == '':
        safe_print('(empty macro)')
        m = "empty"
    else:
        safe_print('-'*79)
        safe_print('VBA CODE (with long lines collapsed):')
        safe_print(vba_code)
        safe_print('-'*79)
        #sys.exit(0)
        safe_print('PARSING VBA CODE:')
        try:
            m = core.module.parseString(vba_code + "\n", parseAll=True)[0]
            pyparsing.ParserElement.resetCache()
            m.code = vba_code
        except pyparsing.ParseException as err:
            safe_print(err.line)
            safe_print(" "*(err.column-1) + "^")
            safe_print(err)
            log.error("Parse Error. Processing Aborted.")
            return None

    # Check for timeouts.
    core.vba_object.limits_exceeded(throw_error=True)

    # Return the parsed macro.
    return m

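# Illustrative only: parse_stream() above accepts either four positional
# arguments or a single 4-tuple packed into the first argument (detected by
# stream_path being None), matching the tuples yielded by olevba's
# extract_macros(). The stream and file names below are made-up placeholders.
macro_src = 'Sub AutoOpen()\n    MsgBox "hi"\nEnd Sub'
m1 = parse_stream('sample.doc', 'Macros/VBA/ThisDocument', 'ThisDocument.cls', macro_src)
m2 = parse_stream(('sample.doc', 'Macros/VBA/ThisDocument', 'ThisDocument.cls', macro_src))
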
def process_file_scanexpr(container, filename, data):
    """Process a single file.

    @param container (str) Path and filename of container if the file
    is within a zip archive, None otherwise.

    @param filename (str) path and filename of file on disk, or within
    the container.

    @param data (bytes) Content of the file if it is in a container,
    None if it is a file on disk.
    """
    #TODO: replace print by writing to a provided output file (sys.stdout by default)
    if container:
        display_filename = '%s in %s' % (filename, container)
    else:
        display_filename = filename
    safe_print('='*79)
    safe_print('FILE: ' + safe_str_convert(display_filename))
    all_code = ''
    try:
        #TODO: handle olefile errors, when an OLE file is malformed
        import oletools
        oletools.olevba.enable_logging()
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('opening %r' % filename)
        vba = VBA_Parser(filename, data, relaxed=True)
        if vba.detect_vba_macros():

            # Read in document metadata.
            vm = core.ViperMonkey(filename, data)
            ole = olefile.OleFileIO(filename)
            try:
                vm.set_metadata(ole.get_metadata())
            except Exception as e:
                log.warning("Reading in metadata failed. Trying fallback. " + safe_str_convert(e))
                vm.set_metadata(get_metadata_exif(filename))

            #print 'Contains VBA Macros:'
            for (subfilename, stream_path, vba_filename, vba_code) in vba.extract_macros():
                # hide attribute lines:
                #TODO: option to disable attribute filtering
                vba_code = filter_vba(vba_code)
                safe_print('-'*79)
                safe_print('VBA MACRO %s ' % vba_filename)
                safe_print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path)))
                safe_print('- '*39)
                # detect empty macros:
                if vba_code.strip() == '':
                    safe_print('(empty macro)')
                else:
                    # TODO: option to display code
                    safe_print(vba_code)
                    vba_code = core.vba_collapse_long_lines(vba_code)
                    all_code += '\n' + vba_code
            safe_print('-'*79)
            safe_print('EVALUATED VBA EXPRESSIONS:')
            t = prettytable.PrettyTable(('Obfuscated expression', 'Evaluated value'))
            t.align = 'l'
            t.max_width['Obfuscated expression'] = 36
            t.max_width['Evaluated value'] = 36
            for expression, expr_eval in core.scan_expressions(all_code):
                t.add_row((repr(expression), repr(expr_eval)))
            safe_print(t)
        else:
            safe_print('No VBA macros found.')
    except Exception as e:
        log.error("Caught exception. " + safe_str_convert(e))
        if (log.getEffectiveLevel() == logging.DEBUG):
            traceback.print_exc()
    safe_print('')

def _report_analysis_results(vm, data, display_int_iocs, orig_filename, out_file_name):
    """Report analysis results (screen and file) to the user. Results
    will be printed to stdout and saved in an output file as JSON if
    needed.

    @param vm (ViperMonkey object) The ViperMonkey emulation engine
    object that did the emulation.

    @param data (str) The read in Office file (data).

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param orig_filename (str) path and filename of file on disk, or
    within the container.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @return (tuple) A 3 element tuple where the 1st element is a list
    of reported actions all converted to strings, the 2nd element is a
    list of unique intermediate IOCs, and the 3rd element is a list of
    shell code bytes injected by the VB (empty list if no shell code).
    """

    # Print table of all recorded actions
    safe_print('\nRecorded Actions:')
    safe_print(vm.dump_actions())
    safe_print('')
    full_iocs = core.vba_context.intermediate_iocs
    raw_b64_iocs = read_ole_fields.pull_base64(data)
    for ioc in raw_b64_iocs:
        if (core.vba_context.num_b64_iocs > 200):
            log.warning("Found too many potential base64 IOCs. Skipping the rest.")
            break
        full_iocs.add(ioc)
        core.vba_context.num_b64_iocs += 1

    # Report intermediate IOCs.
    tmp_iocs = []
    if (len(full_iocs) > 0):
        tmp_iocs = _remove_duplicate_iocs(full_iocs)
        if (display_int_iocs):
            safe_print('Intermediate IOCs:')
            safe_print('')
            for ioc in tmp_iocs:
                safe_print("+---------------------------------------------------------+")
                safe_print(ioc)
                safe_print("+---------------------------------------------------------+")
            safe_print('')

    # Display injected shellcode.
    shellcode_bytes = core.vba_context.get_shellcode_data()
    if (len(shellcode_bytes) > 0):
        safe_print("+---------------------------------------------------------+")
        safe_print("Shell Code Bytes: " + str(shellcode_bytes))
        safe_print("+---------------------------------------------------------+")
        safe_print('')

    # See if we can directly pull any embedded PE files from the file.
    pull_embedded_pe_files(data, core.vba_context.out_dir)
    safe_print('VBA Builtins Called: ' + str(vm.external_funcs))
    safe_print('')
    safe_print('Finished analyzing ' + str(orig_filename) + " .\n")

    # Reporting results in JSON file?
    if out_file_name:

        # Create the results data structure.
        actions_data = []
        for action in vm.actions:
            actions_data.append({
                "action": str(action[0]),
                "parameters": str(action[1]),
                "description": str(action[2])
            })
        out_data = {
            "file_name": orig_filename,
            "potential_iocs": list(tmp_iocs),
            "shellcode": shellcode_bytes,
            "vba_builtins": vm.external_funcs,
            "actions": actions_data
        }

        # Write out the results as JSON.
        try:
            with open(out_file_name, 'w') as out_file:
                out_file.write("\n" + json.dumps(out_data, indent=4))
        except Exception as exc:
            log.error("Failed to output results to output file. " + str(exc))

    # Make sure all the action fields are strings before returning.
    str_actions = []
    for action in vm.actions:
        str_actions.append((safe_str_convert(action[0]),
                            safe_str_convert(action[1]),
                            safe_str_convert(action[2])))

    # Done.
    return (str_actions, tmp_iocs, shellcode_bytes)

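# Illustrative only: a minimal sketch of how a downstream script might consume
# the JSON report written by _report_analysis_results() above. The key names
# ("file_name", "potential_iocs", "vba_builtins", "actions") come from the
# out_data dict built above; the report path is a hypothetical example.
import json

def summarize_report(report_path="sample_report.json"):
    """Print a one-line summary of a saved ViperMonkey JSON report."""
    with open(report_path) as f:
        report = json.load(f)
    print("%s: %d actions, %d intermediate IOCs, %d VBA builtins called" % (
        report["file_name"],
        len(report["actions"]),
        len(report["potential_iocs"]),
        len(report["vba_builtins"])))
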
def _get_shapes_text_values_xml(fname):
    """
    Read in the text associated with Shape objects in a document
    saved as Flat OPC XML files.

    NOTE: This currently is a hack.
    """

    contents = None
    if fname.startswith("<?xml"):
        contents = fname
    else:

        # it's probably a filename, not a blob of data..

        # Read in the file contents.
        try:
            f = open(fname, "r")
            contents = f.read().strip()
            f.close()
        except:
            contents = fname

    # Is this an XML file?
    if ((not contents.startswith("<?xml")) or ("<w:txbxContent>" not in contents)):
        return []

    # It is an XML file.
    log.warning("Looking for Shapes() strings in Flat OPC XML file...")

    # Pull out the text surrounded by <w:txbxContent> ... </w:txbxContent>.
    # These big blocks hold the XML for each piece of Shapes() text.
    blocks = []
    start = contents.index("<w:txbxContent>") + len("<w:txbxContent>")
    end = contents.index("</w:txbxContent>")
    while (start is not None):
        blocks.append(contents[start:end])
        if ("<w:txbxContent>" in contents[end:]):
            start = end + contents[end:].index("<w:txbxContent>") + len("<w:txbxContent>")
            end = end + len("</w:txbxContent>") + contents[end + len("</w:txbxContent>"):].index("</w:txbxContent>")
        else:
            start = None
            end = None
            break
    cmd_strs = []
    for block in blocks:

        # Get all strings surrounded by <w:t> ... </w:t> tags in the block.
        pat = r"\<w\:t[^\>]*\>([^\<]+)\</w\:t\>"
        strs = re.findall(pat, block)

        # These could be broken up with many <w:t> ... </w:t> tags. See if we need to
        # reassemble strings.
        if (len(strs) > 1):

            # Reassemble command string.
            curr_str = ""
            for s in strs:

                # Save current part of command string.
                curr_str += s

            # Use this as the Shape() strings.
            strs = [curr_str]

        # Save the string from this block.
        cmd_strs.append(strs[0])

    # Hope that the Shape() object indexing follows the same order as the strings
    # we found.
    r = []
    pos = 1
    for shape_text in cmd_strs:

        # Skip strings that are too short.
        if (len(shape_text) < 100):
            continue

        # Access value with .TextFrame.TextRange.Text accessor.
        shape_text = shape_text.replace("&amp;", "&")
        var = "Shapes('" + str(pos) + "').TextFrame.TextRange.Text"
        r.append((var, shape_text))

        # Access value with .TextFrame.ContainingRange accessor.
        var = "Shapes('" + str(pos) + "').TextFrame.ContainingRange"
        r.append((var, shape_text))

        # Access value with .AlternativeText accessor.
        var = "Shapes('" + str(pos) + "').AlternativeText"
        r.append((var, shape_text))

        # Move to next shape.
        pos += 1
    return r

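# Illustrative only: a tiny, hand-made Flat OPC style fragment showing the
# structure that _get_shapes_text_values_xml() above looks for. The payload is
# a made-up placeholder; it is split across two <w:t> tags to show how the
# function reassembles them, and is padded past the 100 character minimum the
# function enforces.
sample_xml = (
    '<?xml version="1.0"?>' +
    '<pkg:package>' +
    '<w:txbxContent><w:p><w:r>' +
    '<w:t>first half of a hidden value ' + 'A' * 50 + '</w:t>' +
    '<w:t>second half of the same value ' + 'B' * 50 + '</w:t>' +
    '</w:r></w:p></w:txbxContent>' +
    '</pkg:package>'
)

# Each result is a (variable name, text) pair, e.g.
# ("Shapes('1').TextFrame.TextRange.Text", "first half ... second half ...").
for var, text in _get_shapes_text_values_xml(sample_xml):
    print(var, '=', text[:40] + '...')
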
from core.bot.time import Time
import traceback
import asyncio
import session
import sys

if __name__ == '__main__':
    log.info(f'>{trace.cyan} Starting at {Time.readable.at()}.')

    # Initialize database
    log.info(f'{trace.cyan}> Initializing {trace.black.s}dataset{trace.cyan} Database.')
    try:
        data()
        log.info(f'{trace.cyan}> Initialized {trace.black.s}dataset{trace.cyan}.')
    except Exception as err:
        log.warning(f'> Failed to load {trace.black.s}dataset{trace.warn}. Please restart!')
        log.error(f'> {short_traceback()}')
        log.critical(f'> {traceback.format_exc()}')

    # Start API
    import api
    api.Main()

    # Initialize extensions
    # Append cogs
    # append_cog('session.py')  # Load session
    append_cog('debug.py')  # Load debug things
    append_cog('main.py')   # Load essentials

    # Login
    from core.bot import time