Example 1
	def handle(self, *args, **options):
		subprocess.call( ['python', 'dependencies.py', '--upgrade'] )
		call_command( 'migrate' )
		safe_print()
		safe_print( u'**********************************************************' )
		safe_print( u'**** RaceDB updated at {}'.format(datetime.datetime.now()) )
		safe_print( u'**** start RaceDB with "python manage.py launch <options>"' )
Example 2
 def handle(self, *args, **options):
     with open(options['json_file'], 'rb') as fp:
         payload = json.load(fp)
     result = read_results_crossmgr(payload)
     if result['errors']:
         safe_print(u'Upload FAILED.  Errors.')
         for e in result['errors']:
             safe_print(u'    Error:', e)
     else:
         safe_print(u'Upload Succeeded.')
         for w in result['warnings']:
             safe_print(u'    Warning: ', w)
Example 3
def check_connection(host, port):
    safe_print(u'Checking web server connection {}:{}'.format(host, port))

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    try:
        s.bind((host, port))
        success = True
    except socket.error as e:
        safe_print(e)
        success = False

    s.close()

    if success:
        safe_print(u'Web Server connection check succeeded.')

    return success
Example 4
def _process_file (filename,
                   data,
                   strip_useless=False,
                   entry_points=None,
                   time_limit=None,
                   display_int_iocs=False,
                   artifact_dir=None,
                   out_file_name=None,
                   do_jit=False):
    """Process a single file.

    @param filename (str) path and filename of file on disk, or within
    the container.

    @param data (bytes) content of the file if it is in a container,
    None if it is a file on disk.

    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param entry_points (list) A list of the names (str) of the VB functions
    from which to start emulation.

    @param time_limit (int) The emulation time limit, in minutes. If
    None there is no time limit.

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param artifact_dir (str) The directory in which to save artifacts
    dropped by the sample under analysis. If None the artifact
    directory will be FNAME_artifacts/ where FNAME is the name of the
    file being analyzed.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @param do_jit (boolean) A flag turning on/off doing VB -> Python
    transpiling of loops to speed up loop emulation.

    @return (tuple) A 4 element tuple of (list of actions, list of VBA
    builtins called, list of intermediate IOCs, shellcode bytes) if the
    analysis ran, a tuple of empty lists if no VBA/VBScript was found,
    and None if there was an error.

    """

    # Increase Python call depth.
    sys.setrecursionlimit(13000)

    # Set the emulation time limit.
    if (time_limit is not None):
        core.vba_object.max_emulation_time = datetime.now() + timedelta(minutes=time_limit)

    # Create the emulator.
    log.info("Starting emulation...")
    vm = core.ViperMonkey(filename, data, do_jit=do_jit)
    orig_filename = filename
    if (entry_points is not None):
        for entry_point in entry_points:
            vm.user_entry_points.append(entry_point)
    try:
        #TODO: handle olefile errors, when an OLE file is malformed
        if (isinstance(data, Exception)):
            data = None
        vba = None
        try:
            vba = _get_vba_parser(data)
        except FileOpenError as e:

            # Is this an unrecognized format?
            if ("Failed to open file  is not a supported file type, cannot extract VBA Macros." not in safe_str_convert(e)):

                # No, it is some other problem. Pass on the exception.
                raise e

            # This may be VBScript with some null characters. Remove those and try again.
            data = data.replace("\x00", "")
            vba = _get_vba_parser(data)

        # Do we have analyzable VBA/VBScript? Do the analysis even
        # without VBA/VBScript if we are scraping for intermediate
        # IOCs.
        if (vba.detect_vba_macros() or display_int_iocs):

            # Read in document metadata.
            try:
                log.info("Reading document metadata...")
                ole = olefile.OleFileIO(data)
                vm.set_metadata(ole.get_metadata())
            except Exception as e:
                log.warning("Reading in metadata failed. Trying fallback. " + safe_str_convert(e))
                vm.set_metadata(get_metadata_exif(orig_filename))

            # If this is an Excel spreadsheet, read it in.
            vm.loaded_excel = excel.load_excel(data)

            # Set where to store directly dropped files if needed.
            if (artifact_dir is None):
                artifact_dir = "./"
                if ((filename is not None) and ("/" in filename)):
                    artifact_dir = filename[:filename.rindex("/")]
            only_filename = filename
            if ((filename is not None) and ("/" in filename)):
                only_filename = filename[filename.rindex("/")+1:]
            
            # Set the output directory in which to put dumped files generated by
            # the macros.
            out_dir = None
            if (only_filename is not None):
                out_dir = artifact_dir + "/" + only_filename + "_artifacts/"
                if os.path.exists(out_dir):
                    shutil.rmtree(out_dir)
            else:
                out_dir = "/tmp/tmp_file_" + safe_str_convert(random.randrange(0, 10000000000))
            log.info("Saving dropped analysis artifacts in " + out_dir)
            core.vba_context.out_dir = out_dir
            del filename # We already have this in memory, we don't need to read it again.
                
            # Parse the VBA streams.
            log.info("Parsing VB...")
            comp_modules = parse_streams(vba, strip_useless)
            if (comp_modules is None):
                return None
            got_code = False
            for module_info in comp_modules:
                m = module_info[0]
                stream = module_info[1]
                if (m != "empty"):
                    vm.add_compiled_module(m, stream)
                    got_code = True
            if ((not got_code) and (not display_int_iocs)):
                log.info("No VBA or VBScript found. Exiting.")
                return ([], [], [], [])

            # Get the VBA code.
            vba_code = ""
            for (_, _, _, macro_code) in vba.extract_macros():
                if (macro_code is not None):
                    vba_code += macro_code

            # Do not analyze the file if the VBA looks like garbage.
            if (read_ole_fields.is_garbage_vba(vba_code)):
                raise ValueError("VBA looks corrupted. Not analyzing.")

            # Read in text values from all of the various places in
            # Office 97/2000+ that text values can be hidden. So many
            # places.
            read_ole_fields.read_payload_hiding_places(data, orig_filename, vm, vba_code, vba)
            
            # Do Emulation.
            safe_print("")
            safe_print('-'*79)
            safe_print('TRACING VBA CODE (entrypoint = Auto*):')
            if (entry_points is not None):
                log.info("Starting emulation from function(s) " + safe_str_convert(entry_points))
            pyparsing.ParserElement.resetCache()
            vm.vba = vba
            vm.trace()

            # Done with emulation.

            # Report the results.
            str_actions, tmp_iocs, shellcode_bytes = _report_analysis_results(vm, data, display_int_iocs, orig_filename, out_file_name)

            # Save any embedded files as artifacts.
            _save_embedded_files(out_dir, vm)
            
            # Return the results.
            return (str_actions, vm.external_funcs, tmp_iocs, shellcode_bytes)

        # No VBA/VBScript found?
        else:
            safe_print('Finished analyzing ' + safe_str_convert(orig_filename) + " .\n")
            safe_print('No VBA macros found.')
            safe_print('')
            return ([], [], [], [])

    # Handle uncaught exceptions triggered during analysis.
    except Exception as e:

        # Print error info.
        if (("SystemExit" not in safe_str_convert(e)) and (". Aborting analysis." not in safe_str_convert(e))):
            traceback.print_exc()
        log.error(safe_str_convert(e))

        # If this is an out of memory error terminate the process with an
        # error code indicating that there are memory problems. This is so
        # that higher level systems using ViperMonkey can see that there is a
        # memory issue and handle it accordingly.
        if isinstance(e, MemoryError):
            log.error("Exiting ViperMonkey with error code 137 (out of memory)")
            sys.exit(137)

        # Done. Analysis failed.
        return None
Example 5
def _report_analysis_results(vm, data, display_int_iocs, orig_filename, out_file_name):
    """Report analysis results (screen and file) to the user. Results will
    be printed to stdout and saved in an output file as JSON if needed.

    @param vm (ViperMonkey object) The ViperMonkey emulation engine
    object that did the emulation.

    @param data (str) The contents of the Office file that was read in.

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param orig_filename (str) path and filename of file on disk, or
    within the container.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @return (tuple) A 3 element tuple where the 1st element is a list
    of reported actions all converted to strings, the 2nd element is a
    list of unique intermediate IOCs, and the 3rd element is a list of
    shell code bytes injected by the VB (empty list if no shell code).

    """

    # Limit the number of base64 IOCs.
    full_iocs = core.vba_context.intermediate_iocs
    tmp_b64_iocs = []
    for ioc in full_iocs:
        if ("http" not in ioc):
            tmp_b64_iocs.append(ioc)
    tmp_b64_iocs = tmp_b64_iocs + list(read_ole_fields.pull_base64(data))
    tmp_b64_iocs = sorted(tmp_b64_iocs, key=len)[::-1][:200]
    for ioc in tmp_b64_iocs:
        full_iocs.add(ioc)
        core.vba_context.num_b64_iocs += 1
    
    
    # Print table of all recorded actions
    safe_print('\nRecorded Actions:')
    safe_print(vm.dump_actions())
    safe_print('')

    # Report intermediate IOCs.
    tmp_iocs = []
    if (len(full_iocs) > 0):
        tmp_iocs = _remove_duplicate_iocs(full_iocs)
        if (display_int_iocs):
            safe_print('Intermediate IOCs:')
            safe_print('')
            for ioc in tmp_iocs:
                safe_print("+---------------------------------------------------------+")
                safe_print(ioc)
            safe_print("+---------------------------------------------------------+")
            safe_print('')

    # Display injected shellcode.
    shellcode_bytes = core.vba_context.get_shellcode_data()
    if (len(shellcode_bytes) > 0):
        safe_print("+---------------------------------------------------------+")
        safe_print("Shell Code Bytes: " + safe_str_convert(shellcode_bytes))
        safe_print("+---------------------------------------------------------+")
        safe_print('')

    # See if we can directly pull any embedded PE files from the file.
    pull_embedded_pe_files(data, core.vba_context.out_dir)

    # Report VBA builtin fingerprint.
    safe_print('VBA Builtins Called: ' + safe_str_convert(vm.external_funcs))
    safe_print('')

    # Report decoded strings.
    if (len(vm.decoded_strs) > 0):
        safe_print("Decoded Strings (" + str(len(vm.decoded_strs)) + "):")
        for s in vm.decoded_strs:
            safe_print("  " + s)
        safe_print('')

    # Done printing results.
    safe_print('Finished analyzing ' + safe_str_convert(orig_filename) + " .\n")

    # Reporting results in JSON file?
    if out_file_name:

        # Create the results data structure.
        actions_data = []
        for action in vm.actions:
            actions_data.append({
                "action": safe_str_convert(action[0]),
                "parameters": safe_str_convert(action[1]),
                "description": safe_str_convert(action[2])
            })

        out_data = {
            "file_name": orig_filename,
            "potential_iocs": list(tmp_iocs),
            "shellcode" : shellcode_bytes,
            "vba_builtins": vm.external_funcs,
            "actions": actions_data,
            "decoded_strs": list(vm.decoded_strs)
        }

        # Write out the results as JSON.
        try:
            with open(out_file_name, 'w') as out_file:
                out_file.write("\n" + json.dumps(out_data, indent=4))
        except Exception as exc:
            log.error("Failed to output results to output file. " + safe_str_convert(exc))

    # Make sure all the action fields are strings before returning.
    str_actions = []
    for action in vm.actions:
        str_actions.append((safe_str_convert(action[0]),
                            safe_str_convert(action[1]),
                            safe_str_convert(action[2])))    

    # Done.
    return (str_actions, tmp_iocs, shellcode_bytes)
Example 6
def process_file(container,
                 filename,
                 data,
                 strip_useless=False,
                 entry_points=None,
                 time_limit=None,
                 verbose=False,
                 display_int_iocs=False,
                 set_log=False,
                 tee_log=False,
                 tee_bytes=0,
                 artifact_dir=None,
                 out_file_name=None,
                 do_jit=False):
    """Process an Office file with VBA macros, a VBScript file, or
    VBScript HTA file with ViperMonkey. This is the main programmatic
    interface for ViperMonkey.

    @param container (str) Path and filename of container if the file is within
    a zip archive, None otherwise.

    @param filename (str) path and filename of file on disk, or
    within the container.

    @param data (bytes) content of the file if it is in a container,
    None if it is a file on disk.
 
    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param entry_points (list) A list of the names (str) of the VB functions
    from which to start emulation.
    
    @param time_limit (int) The emulation time limit, in minutes. If
    None there is no time limit.

    @param verbose (boolean) Flag turning debug logging on/off.

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param set_log (boolean) Flag turning on INFO level console logging
    when verbose is off.

    @param tee_log (boolean) A flag turning on/off saving all of
    ViperMonkey's output in a text log file. The log file will be
    FNAME.log, where FNAME is the name of the file being analyzed.

    @param tee_bytes (int) If tee_log is true, this gives the number
    of bytes at which to cap the saved log file.

    @param artifact_dir (str) The directory in which to save artifacts
    dropped by the sample under analysis. If None the artifact
    directory will be FNAME_artifacts/ where FNAME is the name of the
    file being analyzed.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @param do_jit (boolean) A flag turning on/off doing VB -> Python
    transpiling of loops to speed up loop emulation.

    @return (tuple) A 4 element tuple of (list of actions, list of VBA
    builtins called, list of intermediate IOCs, shellcode bytes) if the
    analysis ran, a tuple of empty lists if no VBA/VBScript was found,
    and None if there was an error.

    """
    
    # set logging level
    if verbose:
        colorlog.basicConfig(level=logging.DEBUG, format='%(log_color)s%(levelname)-8s %(message)s')
    elif set_log:
        colorlog.basicConfig(level=logging.INFO, format='%(log_color)s%(levelname)-8s %(message)s')

    # assume they want a tee'd file if they give bytes for it
    if tee_bytes > 0:
        tee_log = True

    # add handler for tee'd log file
    if tee_log:

        tee_filename = "./" + filename
        if ("/" in filename):
            tee_filename = "./" + filename[filename.rindex("/") + 1:]

        if tee_bytes > 0:
            capped_handler = CappedFileHandler(tee_filename + ".log", sizecap=tee_bytes)
            capped_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
            log.addHandler(capped_handler)
        else:
            file_handler = FileHandler(tee_filename + ".log", mode="w")
            file_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
            log.addHandler(file_handler)

    # Check for files that do not exist.
    if (isinstance(data, Exception)):
        log.error("Cannot open file '" + safe_str_convert(filename) + "'.")
        return None
    
    # Read in file contents if we have not already been provided data to analyze.
    if not data:
        # TODO: replace print by writing to a provided output file (sys.stdout by default)
        if container:
            display_filename = '%s in %s' % (filename, container)
        else:
            display_filename = filename
        safe_print('='*79)
        safe_print('FILE: ' + safe_str_convert(display_filename))
        # FIXME: the code below only works if the file is on disk and not in a zip archive
        # TODO: merge process_file and _process_file
        try:
            input_file = open(filename,'rb')
            data = input_file.read()
            input_file.close()
        except IOError as e:
            log.error("Cannot open file '" + safe_str_convert(filename) + "'. " + safe_str_convert(e))
            return None
    r = _process_file(filename,
                      data,
                      strip_useless=strip_useless,
                      entry_points=entry_points,
                      time_limit=time_limit,
                      display_int_iocs=display_int_iocs,
                      artifact_dir=artifact_dir,
                      out_file_name=out_file_name,
                      do_jit=do_jit)

    # Reset logging.
    colorlog.basicConfig(level=logging.ERROR, format='%(log_color)s%(levelname)-8s %(message)s')

    # Done.
    return r
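
Since the docstring above presents process_file() as ViperMonkey's main programmatic interface, a minimal calling sketch may be useful. The import path, the sample file name, and the output file name below are assumptions for illustration only; the result handling follows the 4 element tuple that _process_file() (Example 4) returns.

    # Minimal usage sketch -- the import path and file names are assumptions.
    from vipermonkey.vmonkey import process_file

    result = process_file(None,            # container: file is not inside a zip archive
                          'sample.docm',   # hypothetical file on disk
                          None,            # data: let process_file read the file itself
                          strip_useless=True,
                          display_int_iocs=True,
                          out_file_name='sample_report.json')
    if result is not None:
        actions, vba_builtins, iocs, shellcode_bytes = result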
Example 7
def parse_stream(subfilename,
                 stream_path=None,
                 vba_filename=None,
                 vba_code=None,
                 strip_useless=False,
                 local_funcs=None):
    """Parse the macros from a single OLE stream.

    @param subfilename (str) The name of the file containing the
    macros.

    @param stream_path (str) The path of the OLE stream containing the
    macros.

    @param vba_filename (str) The name of the VBA code file within the
    OLE stream.

    @param vba_code (str) The macro code to parse.

    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param local_funcs (list) A list of the names of already declared
    local VBA functions.

    @return (Module object) A parsed module object, the string "empty"
    if the stream contains no parseable VB code, or None if parsing
    failed.

    """
    
    # Set local func list if needed.
    if (local_funcs is None):
        local_funcs = []
    
    # Check for timeouts.
    core.vba_object.limits_exceeded(throw_error=True)
    
    # Are the arguments all in a single tuple?
    if (stream_path is None):
        subfilename, stream_path, vba_filename, vba_code = subfilename

    # Skip old-style XLM macros.
    if (repr(stream_path).strip() == "'xlm_macro'"):
        log.warning("Skipping XLM macro stream...")
        return "empty"
        
    # Collapse long lines.
    vba_code = core.vba_collapse_long_lines(vba_code)
        
    # Filter cruft from the VBA.
    vba_code = filter_vba(vba_code)

    # Pull out Visual Basic from .hta contents (if we are looking at a
    # .hta file).
    vba_code = get_vb_contents_from_hta(vba_code)

    # Do not analyze the file if the VBA looks like garbage characters.
    if (read_ole_fields.is_garbage_vba(vba_code, no_html=True)):
        log.warning("Failed to extract VBScript from HTA. Skipping.")
        return "empty"
        
    # Skip some XML that olevba gives for some 2007+ streams.
    if (vba_code.strip().startswith("<?xml")):
        log.warning("Skipping XML stream.")
        return "empty"
    
    # Strip out code that does not affect the end result of the program.
    if (strip_useless):
        vba_code = core.strip_lines.strip_useless_code(vba_code, local_funcs)
    safe_print('-'*79)
    safe_print('VBA MACRO %s ' % vba_filename)
    safe_print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path)))
    safe_print('- '*39)
    
    # Parse the macro.
    m = None
    if vba_code.strip() == '':
        safe_print('(empty macro)')
        m = "empty"
    else:
        safe_print('-'*79)
        safe_print('VBA CODE (with long lines collapsed):')
        safe_print(vba_code)
        safe_print('-'*79)
        #sys.exit(0)
        safe_print('PARSING VBA CODE:')
        try:
            m = core.module.parseString(vba_code + "\n", parseAll=True)[0]
            pyparsing.ParserElement.resetCache()
            m.code = vba_code
        except pyparsing.ParseException as err:
            safe_print(err.line)
            safe_print(" "*(err.column-1) + "^")
            safe_print(err)
            log.error("Parse Error. Processing Aborted.")
            return None

    # Check for timeouts.
    core.vba_object.limits_exceeded(throw_error=True)
        
    # Return the parsed macro.
    return m
Example 8
def main():
    """Main function, called when vipermonkey is run from the command
    line.

    """

    # Increase recursion stack depth.
    sys.setrecursionlimit(13000)
    
    # print banner with version
    # Generated with http://www.patorjk.com/software/taag/#p=display&f=Slant&t=ViperMonkey
    safe_print(''' _    ___                 __  ___            __             
| |  / (_)___  ___  _____/  |/  /___  ____  / /_____  __  __
| | / / / __ \/ _ \/ ___/ /|_/ / __ \/ __ \/ //_/ _ \/ / / /
| |/ / / /_/ /  __/ /  / /  / / /_/ / / / / ,< /  __/ /_/ / 
|___/_/ .___/\___/_/  /_/  /_/\____/_/ /_/_/|_|\___/\__, /  
     /_/                                           /____/   ''')
    safe_print('vmonkey %s - https://github.com/decalage2/ViperMonkey' % __version__)
    safe_print('THIS IS WORK IN PROGRESS - Check updates regularly!')
    safe_print('Please report any issue at https://github.com/decalage2/ViperMonkey/issues')
    safe_print('')

    DEFAULT_LOG_LEVEL = "info" # Default log level
    LOG_LEVELS = {
        'debug':    logging.DEBUG,
        'info':     logging.INFO,
        'warning':  logging.WARNING,
        'error':    logging.ERROR,
        'critical': logging.CRITICAL
        }

    usage = 'usage: %prog [options] <filename> [filename2 ...]'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-r", action="store_true", dest="recursive",
                      help='find files recursively in subdirectories.')
    parser.add_option("-z", "--zip", dest='zip_password', type='str', default=None,
                      help='if the file is a zip archive, open first file from it, using the '
                           'provided password (requires Python 2.6+)')
    parser.add_option("-f", "--zipfname", dest='zip_fname', type='str', default='*',
                      help='if the file is a zip archive, file(s) to be opened within the zip. '
                           'Wildcards * and ? are supported. (default:*)')
    parser.add_option("-e", action="store_true", dest="scan_expressions",
                      help='Extract and evaluate/deobfuscate constant expressions')
    parser.add_option('-l', '--loglevel', dest="loglevel", action="store", default=DEFAULT_LOG_LEVEL,
                      help="logging level debug/info/warning/error/critical (default=%default)")
    parser.add_option("-s", '--strip', action="store_true", dest="strip_useless_code",
                      help='Strip useless VB code from macros prior to parsing.')
    parser.add_option("-j", '--jit', action="store_true", dest="do_jit",
                      help='Speed up emulation by JIT compilation of VB loops to Python.')
    parser.add_option('-i', '--init', dest="entry_points", action="store", default=None,
                      help="Emulate starting at the given function name(s). Use comma separated "
                           "list for multiple entries.")
    parser.add_option('-t', '--time-limit', dest="time_limit", action="store", default=None,
                      type='int', help="Time limit (in minutes) for emulation.")
    parser.add_option("-c", '--iocs', action="store_true", dest="display_int_iocs",
                      help='Display potential IOCs stored in intermediate VBA variables '
                           'assigned during emulation (URLs and base64).')
    parser.add_option("-v", '--version', action="store_true", dest="print_version",
                      help='Print version information of packages used by ViperMonkey.')
    parser.add_option("-o", "--out-file", action="store", default=None, type="str",
                      help="JSON output file containing resulting IOCs, builtins, and actions")
    parser.add_option("-p", "--tee-log", action="store_true", default=False,
                      help="output also to a file in addition to standard out")
    parser.add_option("-b", "--tee-bytes", action="store", default=0, type="int",
                      help="number of bytes to limit the tee'd log to")

    (options, args) = parser.parse_args()

    # Print version information and exit?
    if (options.print_version):
        print_version()
        sys.exit(0)
    
    # Print help if no arguments are passed
    if len(args) == 0:
        safe_print(__doc__)
        parser.print_help()
        sys.exit(0)

    # setup logging to the console
    # logging.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(levelname)-8s %(message)s')
    colorlog.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(log_color)s%(levelname)-8s %(message)s')

    json_results = []

    for container, filename, data in xglob.iter_files(args,
                                                      recursive=options.recursive,
                                                      zip_password=options.zip_password,
                                                      zip_fname=options.zip_fname):

        # ignore directory names stored in zip files:
        if container and filename.endswith('/'):
            continue
        if options.scan_expressions:
            process_file_scanexpr(container, filename, data)
        else:
            entry_points = None
            if (options.entry_points is not None):
                entry_points = options.entry_points.split(",")
            process_file(container,
                         filename,
                         data,
                         strip_useless=options.strip_useless_code,
                         entry_points=entry_points,
                         time_limit=options.time_limit,
                         display_int_iocs=options.display_int_iocs,
                         tee_log=options.tee_log,
                         tee_bytes=options.tee_bytes,
                         out_file_name=options.out_file,
                         do_jit=options.do_jit)

            # add json results to list
            if (options.out_file):
                with open(options.out_file, 'r') as json_file:
                    try:
                        json_results.append(json.loads(json_file.read()))
                    except ValueError:
                        pass

    if (options.out_file):
        with open(options.out_file, 'w') as json_file:
            if (len(json_results) > 1):
                json_file.write(json.dumps(json_results, indent=2))
            elif json_results:
                json_file.write(json.dumps(json_results[0], indent=2))

        log.info("Saved results JSON to output file " + options.out_file)
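
For reference, a hypothetical invocation built only from the options registered above (the vmonkey.py script name and the sample file name are assumptions):

    python vmonkey.py -s -c -t 20 -o results.json sample.docm

This would strip useless VB code (-s), report intermediate IOCs (-c), cap emulation at 20 minutes (-t), and write the JSON results to results.json (-o).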
Example 9
def print_version():
    """Print ViperMonkey version information.

    """

    safe_print("Version Information:\n")
    safe_print("ViperMonkey:\t\t" + safe_str_convert(__version__))
    safe_print("Python:\t\t\t" + safe_str_convert(sys.version_info))
    safe_print("pyparsing:\t\t" + safe_str_convert(pyparsing.__version__))
    safe_print("olefile:\t\t" + safe_str_convert(olefile.__version__))
    import oletools.olevba
    safe_print("olevba:\t\t\t" + safe_str_convert(oletools.olevba.__version__))
Example 10
def process_file_scanexpr (container, filename, data):
    """Process a single file.

    @param container (str) Path and filename of container if the file is within
    a zip archive, None otherwise.

    @param filename (str) path and filename of file on disk, or within
    the container.

    @param data (bytes) Content of the file if it is in a container,
    None if it is a file on disk.

    """
    #TODO: replace print by writing to a provided output file (sys.stdout by default)
    if container:
        display_filename = '%s in %s' % (filename, container)
    else:
        display_filename = filename
    safe_print('='*79)
    safe_print('FILE: ' + safe_str_convert(display_filename))
    all_code = ''
    try:
        #TODO: handle olefile errors, when an OLE file is malformed
        import oletools.olevba
        oletools.olevba.enable_logging()
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('opening %r' % filename)
        vba = VBA_Parser(filename, data, relaxed=True)
        if vba.detect_vba_macros():

            # Read in document metadata.
            vm = core.ViperMonkey(filename, data)
            ole = olefile.OleFileIO(filename)
            try:
                vm.set_metadata(ole.get_metadata())
            except Exception as e:
                log.warning("Reading in metadata failed. Trying fallback. " + safe_str_convert(e))
                vm.set_metadata(get_metadata_exif(filename))
            
            #print 'Contains VBA Macros:'
            for (subfilename, stream_path, vba_filename, vba_code) in vba.extract_macros():
                # hide attribute lines:
                #TODO: option to disable attribute filtering
                vba_code = filter_vba(vba_code)
                safe_print('-'*79)
                safe_print('VBA MACRO %s ' % vba_filename)
                safe_print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path)))
                safe_print('- '*39)
                # detect empty macros:
                if vba_code.strip() == '':
                    safe_print('(empty macro)')
                else:
                    # TODO: option to display code
                    safe_print(vba_code)
                    vba_code = core.vba_collapse_long_lines(vba_code)
                    all_code += '\n' + vba_code
            safe_print('-'*79)
            safe_print('EVALUATED VBA EXPRESSIONS:')
            t = prettytable.PrettyTable(('Obfuscated expression', 'Evaluated value'))
            t.align = 'l'
            t.max_width['Obfuscated expression'] = 36
            t.max_width['Evaluated value'] = 36
            for expression, expr_eval in core.scan_expressions(all_code):
                t.add_row((repr(expression), repr(expr_eval)))
            safe_print(t)

        else:
            safe_print('No VBA macros found.')
    except Exception as e:
        log.error("Caught exception. " + safe_str_convert(e))
        if (log.getEffectiveLevel() == logging.DEBUG):
            traceback.print_exc()

    safe_print('')
Example 11
def launch_server(command, **options):

    # Migrate the database.
    cmd_args = {'no_input': True, 'verbosity': 3}
    if options['database']:
        cmd_args['database'] = options['database']
    management.call_command('migrate', **cmd_args)

    create_users()
    models_fix_data()
    try:
        reset_font_cache()
    except Exception:
        pass

    # Initialize the database with pre-seeded data if it was not done.
    init_data_if_necessary()

    # Read the config file and adjust any options.
    config_parser = ConfigParser()
    try:
        with open(options['config'], 'r') as fp:
            config_parser.readfp(fp, options['config'])
        safe_print(u'Read config file "{}".'.format(options['config']))
    except Exception as e:
        if options['config'] != 'RaceDB.cfg':
            safe_print(u'Could not parse config file "{}" - {}'.format(
                options['config'], e))
        config_parser = None

    if config_parser:
        kwargs = KWArgs()
        command.add_arguments(kwargs)
        for arg, value in list(options.items()):
            try:
                config_value = config_parser.get('launch', arg)
            except NoOptionError:
                continue

            config_value, error = kwargs.validate(arg, config_value)
            if error:
                safe_print(u'Error: {}={}: {}'.format(arg, config_value,
                                                      error))
                continue

            options[arg] = config_value
            safe_print(u'    {}={}'.format(arg, config_value))

    if options['hub']:
        set_hub_mode(True)
        safe_print(u'Hub mode.')

    # Start the rfid server.
    if not options['hub'] and any([
            options['rfid_reader'], options['rfid_reader_host'],
            options['rfid_transmit_power'] > 0,
            options['rfid_receiver_sensitivity'] > 0
    ]):
        kwargs = {
            'llrp_host': options['rfid_reader_host'],
        }
        if options['rfid_transmit_power'] > 0:
            kwargs['transmitPower'] = options['rfid_transmit_power']
        if options['rfid_receiver_sensitivity'] > 0:
            kwargs['receiverSensitivity'] = options[
                'rfid_receiver_sensitivity']
        safe_print(u'Launching RFID server thread...')
        for k, v in kwargs.items():
            safe_print(u'    {}={}'.format(
                k, v if isinstance(v, (int, float)) else '"{}"'.format(v)))
        thread = threading.Thread(target=runServer, kwargs=kwargs)
        thread.name = 'LLRPServer'
        thread.daemon = True
        thread.start()
        time.sleep(0.5)

    connection_good = check_connection(options['host'], options['port'])

    if not options['no_browser']:
        if not connection_good:
            safe_print(
                u'Attempting to launch browser connecting to an existing RaceDB server...'
            )

        # Schedule a web browser to launch a few seconds after starting the server.
        url = 'http://{}:{}/RaceDB/'.format(
            socket.gethostbyname(socket.gethostname()), options['port'])
        threading.Timer(3.0 if connection_good else 0.01,
                        webbrowser.open,
                        kwargs=dict(url=url, autoraise=True)).start()
        safe_print(
            u'A browser will be launched in a few moments at: {}'.format(url))

    if connection_good:
        safe_print(
            u'To stop the server, click in this window and press Ctrl-c.')

        # Add Cling to serve up static files efficiently.
        waitress.serve(Cling(RaceDB.wsgi.application),
                       host=options['host'],
                       port=options['port'],
                       threads=8,
                       clear_untrusted_proxy_headers=False)
    else:
        time.sleep(0.5)
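
launch_server() above overlays the command line options with values read from a config file, looking each option up in a 'launch' section via ConfigParser. A hypothetical RaceDB.cfg sketch follows; the section name and option names are the ones referenced in this snippet, while the values are purely illustrative and would still pass through kwargs.validate() before being applied.

    [launch]
    host=0.0.0.0
    port=8000
    hub=false
    no_browser=false
    rfid_reader_host=192.168.1.100
    rfid_transmit_power=0
    rfid_receiver_sensitivity=0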