Example #1
async def _login(client, token=None):
    if not token:
        try:
            log.info(f'{trace.cyan}> Attempting Login.')
            log.info(
                f'{trace.cyan}> Running on {trace.white}Discord{trace.green.s}Py '
                f'{trace.cyan}v{trace.cyan.s}{discord.__version__}{trace.cyan}.'
            )
            version.Discord.latest()
            version.YouTubeDL.latest()
            token = json.json.reader('token')  # Load the stored token; the check below depends on it.
            if token == enums.ReturnType.fail or token == enums.ReturnType.none:
                raise discord.errors.LoginFailure('No token')
            else:
                await client.start(crypt(token))
                # client.run(crypt(token))
                return
        except discord.errors.LoginFailure as e:
            if json.external.exists(json.default):
                try:
                    os.remove(json.default)
                except OSError:
                    pass
            log.critical(
                f'{type(e)} has occurred. Please check your login token')
            log.critical('SESSION HAS BEEN TERMINATED')
            log.critical(f'{e}')
        except Exception as err:  # This should never occur.
            log.error(f'> {short_traceback()}')
            log.error(f'> {traceback.format_exc()}')
    else:
        await client.start(token)
        # client.run(token)
        return
Example #2
def _read_form_strings(vba):
    """
    Read in the form strings in order as a lists of tuples like (stream name, form string).
    """

    try:
        r = []
        skip_strings = ["Tahoma", "Tahomaz"]
        for (subfilename, stream_path,
             form_string) in vba.extract_form_strings():

            # Skip default strings.
            if (form_string in skip_strings):
                continue
            # Skip unprintable strings.
            if (not all((ord(c) > 31 and ord(c) < 127) for c in form_string)):
                continue

            # Save the stream name.
            stream_name = stream_path.replace("Macros/", "")
            if ("/" in stream_name):
                stream_name = stream_name[:stream_name.index("/")]

            # Save the stream name and form string.
            r.append((stream_name, form_string))

        # Done.
        return r

    except Exception as e:
        log.error("Cannot read form strings. " + str(e))
        return []
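
A hedged usage sketch: it assumes vba is an oletools olevba.VBA_Parser instance (the object that provides extract_form_strings()), and the sample path is illustrative, not from the source.

from oletools.olevba import VBA_Parser

vba = VBA_Parser('sample.doc')  # hypothetical input file
for stream_name, form_string in _read_form_strings(vba):
    print(stream_name + ': ' + form_string)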
Example #3
def _save_embedded_files(out_dir, vm):
    """Save any extracted embedded files from the sample in the artifact
    directory.

    @param vm (ViperMonkey object) The ViperMonkey emulation engine
    object that did the emulation.

    @param out_dir (str) The artifact directory.
    """

    # Make the output directory if needed.
    out_dir = safe_str_convert(out_dir)
    if (not os.path.exists(out_dir)):
        log.info("Making dropped sample directory ...")
        os.mkdir(out_dir)
        
    # Save each file.
    for file_info in vm.embedded_files:
        short_name = safe_str_convert(file_info[0])
        long_name = safe_str_convert(file_info[1])
        contents = safe_str_convert(file_info[2])
        log.info("Saving embedded file " + long_name + " ...")
        try:
            with open(out_dir + "/" + short_name, "w") as f:
                f.write(contents)
        except IOError as e:
            log.error("Saving embedded file " + long_name + " failed. " + str(e))
Example #4
    def process_changes_in_remote(self, changes):
        """
        This method is used to process changes in Mega (synchronize).
        """

        log.debug("Processing changes in remote")

        remove_files = changes['removed_files']
        for file in remove_files:
            log.debug("Removing file %s" % file)
            status = self.uploader.remove(
                path='%s/%s' %
                (settings.get_config('remote', 'folder'), file.relative_path),
                filename=file.name)

            if not status:
                log.error("ERROR DELETING REMOTE FILE %s" % file)

        remove_folders = changes['removed_folders']
        for folder in remove_folders:
            log.debug("Removing folder %s" % folder)
            status = self.uploader.remove(path='%s/%s' % (settings.get_config(
                'remote', 'folder'), folder.relative_path),
                                          filename=folder.name)
            if not status:
                log.error("Folder not deleted correctly in remote %s" % folder)

        new_folders = changes['new_folders']
        for folder in new_folders:
            log.debug("Creating remote folder %s" % folder)
            remote_folder = '%s/%s/%s' % (settings.get_config(
                'remote', 'folder'), folder.relative_path, folder.name)
            rem_desc = self.uploader.mkdir(remote_folder)

        new_files = changes['new_files']
        for file in new_files:
            log.debug("New file %s" % file)
            remote_folder = '%s/%s' % (settings.get_config(
                'remote', 'folder'), file.relative_path)
            rem_desc = self.uploader.upload(remote_folder, file.path)

        to_download = changes['to_download']
        for file in to_download:
            log.debug("Download modified %s" % file)
            path = '%s/%s' % (settings.get_config(
                'remote', 'folder'), file.relative_path)
            content = self.uploader.get_content_by_path(path=path,
                                                        filename=file.name)
            filesystem.create_file(path=os.path.join(self.backup_path,
                                                     file.relative_path),
                                   name=file.name,
                                   content=content)

        new_files = changes['to_upload']
        for file in new_files:
            log.debug("Uploading file %s" % file)
            remote_folder = '%s/%s' % (settings.get_config(
                'remote', 'folder'), file.relative_path)
            rem_desc = self.uploader.upload(remote_folder, file.path)
Example #5
    async def on_error(self, event, *args, **kwargs):  # GENERAL ERROR HANDLER
        exc = sys.exc_info()
        random.seed(traceback.format_exc())
        number = random.randint(10000, 99999)
        log.error(
            f'> {trace.red}{event.capitalize()}{trace.alert} encountered {trace.red}'
            f'{exc[0].__name__}{trace.alert} with message {trace.red}{exc[1]}')
        log.exception(f'Code #{number}', exc_info=exc)
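
Seeding random with the traceback makes the 5-digit code a deterministic function of the stack trace, so identical failures always report the same code. A standalone sketch of that idea:

import random

def error_code(tb_text):
    random.seed(tb_text)                 # Same traceback -> same seed.
    return random.randint(10000, 99999)  # -> same reproducible 5-digit code.

assert error_code('Traceback: x') == error_code('Traceback: x')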
Example #6
def get_request(url):
    req = Request(url)
    try:
        response = urlopen(req, timeout=DEFAULT_LOAD_TIMEOUT)
        return response.read()
    except HTTPError as e:
        log.error('%s - %d %s' % (url, e.code, e.reason))
    except URLError as e:
        log.error('Server connection failed: %s' % e.reason)
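
Note that get_request returns None when either handler fires. A hedged usage sketch (the URL is illustrative, not from the source):

body = get_request('https://example.com/feed.json')  # hypothetical endpoint
if body is None:
    # The error was already logged inside get_request; degrade gracefully.
    body = b''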
Example #7
def init_args_parser():
    parser = libs.argparse.ArgumentParser()
    parser.add_argument("--env", help="set environment for app")
    args = parser.parse_args()

    if args.env is None:
        log.error("Error: Please enter your ENV")
        sys.exit(1)
    return args
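
A hedged usage sketch; the script name and environment value are illustrative:

# $ python app.py --env production
args = init_args_parser()
print(args.env)  # -> 'production'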
Example #8
def post_request(url, values):
    data = urlencode(values)
    data = data.encode('utf-8')
    req = Request(url, data)
    try:
        response = urlopen(req)
        return response.getcode()
    except HTTPError as e:
        log.error('%s - %d %s' % (url, e.code, e.reason))
    except URLError as e:
        log.error('Server connection failed: %s' % e.reason)
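
post_request returns the HTTP status code on success and None on failure (the failure is already logged). A hedged usage sketch with illustrative values:

status = post_request('https://example.com/api', {'name': 'value'})
if status is not None and status != 200:
    log.error('Unexpected status code: %d' % status)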
Example #9
def str_convert(arg):
    """
    Convert a VBA expression to an str, handling VBA NULL.
    """
    if (arg == "NULL"):
        return ''
    if (excel.is_cell_dict(arg)):
        arg = arg["value"]
    try:
        return str(arg)
    except Exception as e:
        if (isinstance(arg, unicode)):  # Python 2 unicode string.
            return ''.join(filter(lambda x: x in string.printable, arg))
        log.error("Cannot convert given argument to str. Defaulting to ''. " + str(e))
        return ''
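
A behavior sketch implied directly by the branches above:

assert str_convert('NULL') == ''   # VBA NULL becomes an empty string.
assert str_convert(42) == '42'     # Everything else goes through str().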
Example #10
    async def play(self, ctx, url=None):
        if await Player.can_connect(ctx, False):
            await Player.join(ctx.message.author)
        if Player.is_connected(ctx):
            try:
                if url is not None:
                    async with ctx.typing():
                        info, data = await Player.info(url, loop=self.bot.loop, ctx=ctx)
                        # log.debug(info)
                        if info is not None:
                            if len(info) > 1:
                                playlist = data['title']
                            else:
                                playlist = None
                            embed = tls.Embed(ctx)
                            for item in info:  # Append tracks to queue. Eventually it'll just be pfp and data.
                                try:
                                    item.update({'discord_mention': ctx.author.mention})
                                    extractor = Player.Extractor.fetch(data)
                                    queue[ctx.guild.id]['q'].append(item)
                                    # log.debug(item)
                                except KeyError as err:
                                    log.error(err)
                                    # break
                            if playlist is None:
                                item = await Player.process_picture(item, extractor[0])
                                try:
                                    # log.debug(queue[ctx.guild.id]['q'][-1]['extractor'])
                                    embed.set_author(name=item['uploader'], url=item['uploader_url'], icon_url=queue[ctx.guild.id]['q'][-1]['pfp'])
                                except KeyError as err:
                                    embed.set_author(name=item['uploader'], icon_url=queue[ctx.guild.id]['q'][-1]['pfp'])
                                embed.add_field(name=f"{item['title']}", value=f"has been added to the queue.", inline=False)
                            else:
                                embed.add_field(name=f"{len(info)} tracks added from", value=f" playlist **{playlist}**", inline=False)
                            await ctx.send(embed=embed)

                else:  # If no URL specified, try to resume
                    Player.resume(ctx)

                await Player.loop(self, ctx)

            except Exception as err:
                random.seed(traceback.format_exc())
                number = random.randint(10000, 99999)
                await ctx.send(f'Oops! Something went wrong! `(#{number})`')
                log.exception(f'Code #{number}', exc_info=err)
Example #11
    def form_valid(self, form, data):
        # Days that already exist in the database
        days_exists = list(
            Day.objects.filter(
                date__year=form.cleaned_data['year'], ).values_list('date',
                                                                    flat=True))

        # Days to be added
        days_raw = filter(
            lambda x: x['date'] not in days_exists,
            data,
        )

        log.info("Starting import CSV...")
        errors = False

        for day_raw in days_raw:
            # Add the days using the form
            day_form = DayForm(day_raw)
            if day_form.is_valid():
                day_form.save()
            else:
                errors = True
                log.error("{}: {} ".format(
                    model_to_dict(day_form.instance),
                    day_form.errors.as_data(),
                ))

        if errors:
            log.error("CSV import finished with errors")
            messages.error(
                self.request,
                _("CSV import finished with errors (see more in logs)"),
            )
        else:
            log.info("CSV import finished without errors")
            messages.success(
                self.request,
                _("CSV import finished without errors"),
            )

        return super().form_valid(form)
Example #12
    def run(self, silent=False):
        if custom_help:
            self.client.remove_command('help')
        cog_count = 0
        warnings = []
        cogs = extensions()
        for extension in cogs:
            try:
                self.client.load_extension(extension)
                cog_count += 1
            except Exception as e:
                if not silent:
                    warnings.append(f'Failed to load extension {extension}\n{e}')

        if not silent:
            if not cogs:
                log.warning('No extensions were found.')
            else:
                for x in warnings:
                    y = x.split('\n')
                    log.warning(f'> {y[0]}')
                    log.error(f'> {y[1]}')
                if len(warnings) > 0:
                    # if saved() < enums.LogLevel.error.value:
                    if len(warnings) == 1:
                        log.error(f'> Failed to load {trace.yellow.s}{len(warnings)}{trace.cyan} extension.')
                    else:
                        log.error(f'> Failed to load {trace.yellow.s}{len(warnings)}{trace.cyan} extensions.')
                log.info(f'{trace.cyan}> Loaded {trace.yellow.s}{cog_count}{trace.cyan} extensions!')
Example #13
async def live_loop(self):
    await reset(self)
    log.debug('CTV Loop Started')
    # if test:
    #     log.debug('CTV Loop Started')
    from cogs.core.system import lockdown
    global looping
    while not lockdown and do_loop:
        if test_debug:
            log.debug(f'{self.bot.user.name} is looping')
        from cogs.core.system import lockdown
        if lockdown:
            break
        looping = True
        try:
            info = await is_online()
            if info:
                # print(info)
                global past
                past = now.copy()
                now.clear()
                for x in info['streams']:  # Add userID to memory
                    now.append(x['channel']['_id'])
                # The past-history guard is disabled, so always compare.
                for x in now:  # Compare. If not in memory previously, it's new
                    if x not in past:
                        await on_live(self, x)
                for x in past:  # Compare. If not in memory anymore, went offline
                    if x not in now:
                        await on_offline(self, x)
        except Exception as err:
            log.exception(err)
        await asyncio.sleep(10)
    looping = False
    log.error('CTV Loop Stopped')
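
The heart of the loop is a two-way diff between the previous (past) and current (now) snapshots of live channel IDs. A standalone sketch of that comparison with fabricated IDs:

past = ['111', '222']
now = ['222', '333']
went_live = [x for x in now if x not in past]     # ['333'] -> on_live()
went_offline = [x for x in past if x not in now]  # ['111'] -> on_offline()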
Example #14
def int_convert(arg, leave_alone=False):
    """
    Convert a VBA expression to an int, handling VBA NULL.
    """

    # Easy case.
    if (isinstance(arg, int)):
        return arg

    # NULLs are 0.
    if (arg == "NULL"):
        return 0

    # Empty strings are NULL.
    if (arg == ""):
        return "NULL"

    # Leave the wildcard matching value alone.
    if (arg == "**MATCH ANY**"):
        return arg

    # Convert float to int?
    if (isinstance(arg, float)):
        arg = int(round(arg))

    # Convert hex to int?
    if (isinstance(arg, str) and (arg.strip().lower().startswith("&h"))):
        hex_str = "0x" + arg.strip()[2:]
        try:
            return int(hex_str, 16)
        except Exception as e:
            log.error("Cannot convert hex '" + str(arg) +
                      "' to int. Defaulting to 0. " + str(e))
            return 0

    arg_str = str(arg)
    if ("." in arg_str):
        arg_str = arg_str[:arg_str.index(".")]
    try:
        return int(arg_str)
    except Exception as e:
        if (not leave_alone):
            log.error("Cannot convert '" + str(arg_str) +
                      "' to int. Defaulting to 0. " + str(e))
            return 0
        log.error("Cannot convert '" + str(arg_str) +
                  "' to int. Leaving unchanged. " + str(e))
        return arg_str
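
A behavior sketch implied by the branches above:

assert int_convert(7) == 7
assert int_convert('NULL') == 0    # VBA NULL becomes 0.
assert int_convert('') == 'NULL'   # Empty strings propagate as NULL.
assert int_convert(2.6) == 3       # Floats are rounded.
assert int_convert('&h1A') == 26   # VBA hex literals are handled.
assert int_convert('12.9') == 12   # The decimal part is truncated, not rounded.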
Example #15
    async def on_server_remove(self, server):
        log.error(f'> Removed from {server.name}')
Example #16
def _report_analysis_results(vm, data, display_int_iocs, orig_filename, out_file_name):
    """Report analysis results (screen and file) to the user. Results will
    be printed to stdout and saved in an output file as JSON if needed.

    @param vm (ViperMonkey object) The ViperMonkey emulation engine
    object that did the emulation.

    @param data (str) The read in Office file (data).

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param orig_filename (str) path and filename of file on disk, or
    within the container.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @return (tuple) A 3 element tuple where the 1st element is a list
    of reported actions all converted to strings, the 2nd element is a
    list of unique intermediate IOCs, and the 3rd element is a list of
    shell code bytes injected by the VB (empty list if no shell code).

    """

    # Limit the number of base64 IOCs.
    full_iocs = core.vba_context.intermediate_iocs
    tmp_b64_iocs = []
    for ioc in full_iocs:
        if ("http" not in ioc):
            tmp_b64_iocs.append(ioc)
    tmp_b64_iocs = tmp_b64_iocs + list(read_ole_fields.pull_base64(data))
    tmp_b64_iocs = sorted(tmp_b64_iocs, key=len)[::-1][:200]
    for ioc in tmp_b64_iocs:
        full_iocs.add(ioc)
        core.vba_context.num_b64_iocs += 1
    
    
    # Print table of all recorded actions
    safe_print('\nRecorded Actions:')
    safe_print(vm.dump_actions())
    safe_print('')

    # Report intermediate IOCs.
    tmp_iocs = []
    if (len(full_iocs) > 0):
        tmp_iocs = _remove_duplicate_iocs(full_iocs)
        if (display_int_iocs):
            safe_print('Intermediate IOCs:')
            safe_print('')
            for ioc in tmp_iocs:
                safe_print("+---------------------------------------------------------+")
                safe_print(ioc)
            safe_print("+---------------------------------------------------------+")
            safe_print('')

    # Display injected shellcode.
    shellcode_bytes = core.vba_context.get_shellcode_data()
    if (len(shellcode_bytes) > 0):
        safe_print("+---------------------------------------------------------+")
        safe_print("Shell Code Bytes: " + safe_str_convert(shellcode_bytes))
        safe_print("+---------------------------------------------------------+")
        safe_print('')

    # See if we can directly pull any embedded PE files from the file.
    pull_embedded_pe_files(data, core.vba_context.out_dir)

    # Report VBA builtin fingerprint.
    safe_print('VBA Builtins Called: ' + safe_str_convert(vm.external_funcs))
    safe_print('')

    # Report decoded strings.
    if (len(vm.decoded_strs) > 0):
        safe_print("Decoded Strings (" + str(len(vm.decoded_strs)) + "):")
        for s in vm.decoded_strs:
            safe_print("  " + s)
        safe_print('')

    # Done printing results.
    safe_print('Finished analyzing ' + safe_str_convert(orig_filename) + " .\n")

    # Reporting results in JSON file?
    if out_file_name:

        # Create the results data structure.
        actions_data = []
        for action in vm.actions:
            actions_data.append({
                "action": safe_str_convert(action[0]),
                "parameters": safe_str_convert(action[1]),
                "description": safe_str_convert(action[2])
            })

        out_data = {
            "file_name": orig_filename,
            "potential_iocs": list(tmp_iocs),
            "shellcode" : shellcode_bytes,
            "vba_builtins": vm.external_funcs,
            "actions": actions_data,
            "decoded_strs": list(vm.decoded_strs)
        }

        # Write out the results as JSON.
        try:
            with open(out_file_name, 'w') as out_file:
                out_file.write("\n" + json.dumps(out_data, indent=4))
        except Exception as exc:
            log.error("Failed to output results to output file. " + safe_str_convert(exc))

    # Make sure all the action fields are strings before returning.
    str_actions = []
    for action in vm.actions:
        str_actions.append((safe_str_convert(action[0]),
                            safe_str_convert(action[1]),
                            safe_str_convert(action[2])))    

    # Done.
    return (str_actions, tmp_iocs, shellcode_bytes)
Example #17
def process_file(container,
                 filename,
                 data,
                 strip_useless=False,
                 entry_points=None,
                 time_limit=None,
                 verbose=False,
                 display_int_iocs=False,
                 set_log=False,
                 tee_log=False,
                 tee_bytes=0,
                 artifact_dir=None,
                 out_file_name=None,
                 do_jit=False):
    """Process an Office file with VBA macros, a VBScript file, or
    VBScript HTA file with ViperMonkey. This is the main programmatic
    interface for ViperMonkey.

    @param container (str) Path and filename of container if the file is within
    a zip archive, None otherwise.

    @param filename (str) Path and filename of file on disk, or
    within the container.

    @param data (bytes) content of the file if it is in a container,
    None if it is a file on disk.
 
    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param entry_points (list) A list of the names (str) of the VB functions
    from which to start emulation.
    
    @param time_limit (int) The emulation time limit, in minutes. If
    None there is no time limit.

    @param verbose (boolean) Flag turning debug logging on/off.

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param set_log (boolean) A flag turning INFO level logging on/off.

    @param tee_log (boolean) A flag turning on/off saving all of
    ViperMonkey's output in a text log file. The log file will be
    FNAME.log, where FNAME is the name of the file being analyzed.

    @param tee_bytes (int) If tee_log is true, this gives the number
    of bytes at which to cap the saved log file.

    @param artifact_dir (str) The directory in which to save artifacts
    dropped by the sample under analysis. If None the artifact
    directory will be FNAME_artifacts/ where FNAME is the name of the
    file being analyzed.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @param do_jit (boolean) A flag turning on/off doing VB -> Python
    transpiling of loops to speed up loop emulation.

    @return (tuple) A 4 element tuple of (actions, VBA builtins
    called, intermediate IOCs, shellcode bytes) if analysis succeeded,
    and None if there was an error.

    """
    
    # set logging level
    if verbose:
        colorlog.basicConfig(level=logging.DEBUG, format='%(log_color)s%(levelname)-8s %(message)s')
    elif set_log:
        colorlog.basicConfig(level=logging.INFO, format='%(log_color)s%(levelname)-8s %(message)s')

    # assume they want a tee'd file if they give bytes for it
    if tee_bytes > 0:
        tee_log = True

    # add handler for tee'd log file
    if tee_log:

        tee_filename = "./" + filename
        if ("/" in filename):
            tee_filename = "./" + filename[filename.rindex("/") + 1:]

        if tee_bytes > 0:
            capped_handler = CappedFileHandler(tee_filename + ".log", sizecap=tee_bytes)
            capped_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
            log.addHandler(capped_handler)
        else:
            file_handler = FileHandler(tee_filename + ".log", mode="w")
            file_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
            log.addHandler(file_handler)

    # Check for files that do not exist.
    if (isinstance(data, Exception)):
        log.error("Cannot open file '" + safe_str_convert(filename) + "'.")
        return None
    
    # Read in file contents if we have not already been provided data to analyze.
    if not data:
        # TODO: replace print by writing to a provided output file (sys.stdout by default)
        if container:
            display_filename = '%s in %s' % (filename, container)
        else:
            display_filename = filename
        safe_print('='*79)
        safe_print('FILE: ' + safe_str_convert(display_filename))
        # FIXME: the code below only works if the file is on disk and not in a zip archive
        # TODO: merge process_file and _process_file
        try:
            input_file = open(filename,'rb')
            data = input_file.read()
            input_file.close()
        except IOError as e:
            log.error("Cannot open file '" + safe_str_convert(filename) + "'. " + safe_str_convert(e))
            return None
    r = _process_file(filename,
                      data,
                      strip_useless=strip_useless,
                      entry_points=entry_points,
                      time_limit=time_limit,
                      display_int_iocs=display_int_iocs,
                      artifact_dir=artifact_dir,
                      out_file_name=out_file_name,
                      do_jit=do_jit)

    # Reset logging.
    colorlog.basicConfig(level=logging.ERROR, format='%(log_color)s%(levelname)-8s %(message)s')

    # Done.
    return r
Example #18
def parse_stream(subfilename,
                 stream_path=None,
                 vba_filename=None,
                 vba_code=None,
                 strip_useless=False,
                 local_funcs=None):
    """Parse the macros from a single OLE stream.

    @param subfilename (str) The name of the file containing the
    macros.

    @param stream_path (str) The path of the OLE stream containing
    the macros.

    @param vba_filename (str) The name of the VBA file.

    @param vba_code (str) The macro code to parse.

    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param local_funcs (list) A list of the names of already declared
    local VBA functions.

    @return (Module object) A parsed module object.

    """
    
    # Set local func list if needed.
    if (local_funcs is None):
        local_funcs = []
    
    # Check for timeouts.
    core.vba_object.limits_exceeded(throw_error=True)
    
    # Are the arguments all in a single tuple?
    if (stream_path is None):
        subfilename, stream_path, vba_filename, vba_code = subfilename

    # Skip old-style XLM macros.
    if (repr(stream_path).strip() == "'xlm_macro'"):
        log.warning("Skipping XLM macro stream...")
        return "empty"
        
    # Collapse long lines.
    vba_code = core.vba_collapse_long_lines(vba_code)
        
    # Filter cruft from the VBA.
    vba_code = filter_vba(vba_code)

    # Pull out Visual Basic from .hta contents (if we are looking at a
    # .hta file).
    vba_code = get_vb_contents_from_hta(vba_code)

    # Do not analyze the file if the VBA looks like garbage characters.
    if (read_ole_fields.is_garbage_vba(vba_code, no_html=True)):
        log.warning("Failed to extract VBScript from HTA. Skipping.")
        return "empty"
        
    # Skip some XML that olevba gives for some 2007+ streams.
    if (vba_code.strip().startswith("<?xml")):
        log.warning("Skipping XML stream.")
        return "empty"
    
    # Strip out code that does not affect the end result of the program.
    if (strip_useless):
        vba_code = core.strip_lines.strip_useless_code(vba_code, local_funcs)
    safe_print('-'*79)
    safe_print('VBA MACRO %s ' % vba_filename)
    safe_print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path)))
    safe_print('- '*39)
    
    # Parse the macro.
    m = None
    if vba_code.strip() == '':
        safe_print('(empty macro)')
        m = "empty"
    else:
        safe_print('-'*79)
        safe_print('VBA CODE (with long lines collapsed):')
        safe_print(vba_code)
        safe_print('-'*79)
        #sys.exit(0)
        safe_print('PARSING VBA CODE:')
        try:
            m = core.module.parseString(vba_code + "\n", parseAll=True)[0]
            pyparsing.ParserElement.resetCache()
            m.code = vba_code
        except pyparsing.ParseException as err:
            safe_print(err.line)
            safe_print(" "*(err.column-1) + "^")
            safe_print(err)
            log.error("Parse Error. Processing Aborted.")
            return None

    # Check for timeouts.
    core.vba_object.limits_exceeded(throw_error=True)
        
    # Return the parsed macro.
    return m
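
As the stream_path is None branch shows, the four arguments can also be passed packed into a single tuple, which matches the tuples olevba's extract_macros() yields. A hedged sketch, assuming vba is an olevba.VBA_Parser instance:

for macro_tuple in vba.extract_macros():
    # macro_tuple is (subfilename, stream_path, vba_filename, vba_code).
    m = parse_stream(macro_tuple, strip_useless=True)
    if m is None:
        break  # Parse error; processing was aborted.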
Example #19
def process_file_scanexpr(container, filename, data):
    """Process a single file.

    @param container (str) Path and filename of container if the file is within
    a zip archive, None otherwise.

    @param filename (str) path and filename of file on disk, or within
    the container.

    @param data (bytes) Content of the file if it is in a container,
    None if it is a file on disk.

    """
    #TODO: replace print by writing to a provided output file (sys.stdout by default)
    if container:
        display_filename = '%s in %s' % (filename, container)
    else:
        display_filename = filename
    safe_print('='*79)
    safe_print('FILE: ' + safe_str_convert(display_filename))
    all_code = ''
    try:
        #TODO: handle olefile errors, when an OLE file is malformed
        import oletools
        oletools.olevba.enable_logging()
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('opening %r' % filename)
        vba = VBA_Parser(filename, data, relaxed=True)
        if vba.detect_vba_macros():

            # Read in document metadata.
            vm = core.ViperMonkey(filename, data)
            ole = olefile.OleFileIO(filename)
            try:
                vm.set_metadata(ole.get_metadata())
            except Exception as e:
                log.warning("Reading in metadata failed. Trying fallback. " + safe_str_convert(e))
                vm.set_metadata(get_metadata_exif(filename))
            
            #print 'Contains VBA Macros:'
            for (subfilename, stream_path, vba_filename, vba_code) in vba.extract_macros():
                # hide attribute lines:
                #TODO: option to disable attribute filtering
                vba_code = filter_vba(vba_code)
                safe_print('-'*79)
                safe_print('VBA MACRO %s ' % vba_filename)
                safe_print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path)))
                safe_print('- '*39)
                # detect empty macros:
                if vba_code.strip() == '':
                    safe_print('(empty macro)')
                else:
                    # TODO: option to display code
                    safe_print(vba_code)
                    vba_code = core.vba_collapse_long_lines(vba_code)
                    all_code += '\n' + vba_code
            safe_print('-'*79)
            safe_print('EVALUATED VBA EXPRESSIONS:')
            t = prettytable.PrettyTable(('Obfuscated expression', 'Evaluated value'))
            t.align = 'l'
            t.max_width['Obfuscated expression'] = 36
            t.max_width['Evaluated value'] = 36
            for expression, expr_eval in core.scan_expressions(all_code):
                t.add_row((repr(expression), repr(expr_eval)))
                safe_print(t)

        else:
            safe_print('No VBA macros found.')
    except Exception as e:
        log.error("Caught exception. " + safe_str_convert(e))
        if (log.getEffectiveLevel() == logging.DEBUG):
            traceback.print_exc()

    safe_print('')
Example #20
    async def on_command_error(self, ctx, exc):  # COMMAND ERROR HANDLER
        if hasattr(ctx.command, 'on_error'):
            return

        elif isinstance(exc, core.exceptions.GlobanBanExcpetion):
            return await ctx.send(
                f'You are globally banned from using this bot {ctx.message.author.mention}.'
            )

        if isinstance(exc, commands.CommandNotFound):
            delete_invalid_command = True
            if delete_invalid_command:
                com_name = list(ctx.message.content)
                if len(com_name) > 0:
                    if com_name[0] == self.bot.command_prefix and len(
                            com_name) > 1:
                        import re
                        if re.match(r'[a-zA-Z]',
                                    com_name[1]):  # IS IN COMMAND FORMAT?
                            try:
                                await ctx.message.delete()
                            except Exception:
                                pass
                            # Implement custom command support. This is per guild.
                            try:
                                ccom = ast.literal_eval(
                                    data.base['ccom'].find_one(
                                        server=ctx.guild.id)['commands'])
                            except Exception:  # Whatever the exception, move on. People who use this base might not have cComs.
                                ccom = {}
                            cname = funcs.split_string(
                                ctx.message.content,
                                f'{self.bot.command_prefix}').split(' ')[0]
                            if cname not in ccom:
                                return await ctx.send(
                                    f'Sorry {ctx.message.author.mention}, but the given command does not currently exist!'
                                )

        elif isinstance(exc, commands.MissingRequiredArgument):
            return await funcs.respond(
                ctx,
                exc,
                content=
                f'Sorry {ctx.message.author.mention}, but you\'re missing an important argument!'
            )

        elif isinstance(exc, commands.BadArgument):
            return await funcs.respond(
                ctx,
                exc,
                content=
                f'Sorry {ctx.message.author.mention}, but an argument is incorrect!'
            )

        elif isinstance(exc, commands.DisabledCommand):
            return await ctx.send(
                f'Sorry {ctx.message.author.mention}, but the given command is currently disabled!'
            )

        elif isinstance(exc, commands.MissingPermissions):
            return await ctx.send(
                f'Sorry {ctx.message.author.mention}, you do not have permission to run that command!'
            )

        elif isinstance(exc, commands.NotOwner):
            return await ctx.send(
                f'Sorry {ctx.message.author.mention}, you do not have permission to run that command!'
            )

        elif isinstance(exc, commands.CheckFailure):
            if ctx.guild is not None:
                return await ctx.send(
                    f'Sorry {ctx.message.author.mention}, you do not have permission to run that command!'
                )

        else:
            log.error(
                f'> {trace.red}{ctx.cog.qualified_name.capitalize()}{trace.alert} encountered {trace.red}{type(exc).__name__}'
                f'{trace.alert} while running {trace.red}{ctx.prefix}{ctx.command}{trace.alert} with error {trace.red}{exc}'
            )
Example #21
def _process_file (filename,
                   data,
                   strip_useless=False,
                   entry_points=None,
                   time_limit=None,
                   display_int_iocs=False,
                   artifact_dir=None,
                   out_file_name=None,
                   do_jit=False):
    """Process a single file.

    @param filename (str) path and filename of file on disk, or within
    the container.

    @param data (bytes) content of the file if it is in a container,
    None if it is a file on disk.

    @param strip_useless (boolean) Flag turning on/off modification of
    VB code prior to parsing.

    @param entry_points (list) A list of the names (str) of the VB functions
    from which to start emulation.

    @param time_limit (int) The emulation time limit, in minutes. If
    None there is no time limit.

    @param display_int_iocs (boolean) Flag turning on/off the
    reporting of intermediate IOCs (base64 strings and URLs) found
    during the emulation process.

    @param artifact_dir (str) The directory in which to save artifacts
    dropped by the sample under analysis. If None the artifact
    directory will be FNAME_artifacts/ where FNAME is the name of the
    file being analyzed.

    @param out_file_name (str) The name of the file in which to store
    the ViperMonkey analysis results as JSON. If None no JSON results
    will be saved.

    @param do_jit (boolean) A flag turning on/off doing VB -> Python
    transpiling of loops to speed up loop emulation.

    @return (tuple) A 4 element tuple of (actions, VBA builtins
    called, intermediate IOCs, shellcode bytes) if analysis succeeded,
    an empty 4 element tuple if no VBA/VBScript was found, and None if
    there was an error.

    """

    # Increase Python call depth.
    sys.setrecursionlimit(13000)

    # Set the emulation time limit.
    if (time_limit is not None):
        core.vba_object.max_emulation_time = datetime.now() + timedelta(minutes=time_limit)

    # Create the emulator.
    log.info("Starting emulation...")
    vm = core.ViperMonkey(filename, data, do_jit=do_jit)
    orig_filename = filename
    if (entry_points is not None):
        for entry_point in entry_points:
            vm.user_entry_points.append(entry_point)
    try:
        #TODO: handle olefile errors, when an OLE file is malformed
        if (isinstance(data, Exception)):
            data = None
        vba = None
        try:
            vba = _get_vba_parser(data)
        except FileOpenError as e:

            # Is this an unrecognized format?
            if ("Failed to open file  is not a supported file type, cannot extract VBA Macros." not in safe_str_convert(e)):

                # No, it is some other problem. Pass on the exception.
                raise e

            # This may be VBScript with some null characters. Remove those and try again.
            data = data.replace("\x00", "")
            vba = _get_vba_parser(data)

        # Do we have analyzable VBA/VBScript? Do the analysis even
        # without VBA/VBScript if we are scraping for intermediate
        # IOCs.
        if (vba.detect_vba_macros() or display_int_iocs):

            # Read in document metadata.
            try:
                log.info("Reading document metadata...")
                ole = olefile.OleFileIO(data)
                vm.set_metadata(ole.get_metadata())
            except Exception as e:
                log.warning("Reading in metadata failed. Trying fallback. " + safe_str_convert(e))
                vm.set_metadata(get_metadata_exif(orig_filename))

            # If this is an Excel spreadsheet, read it in.
            vm.loaded_excel = excel.load_excel(data)

            # Set where to store directly dropped files if needed.
            if (artifact_dir is None):
                artifact_dir = "./"
                if ((filename is not None) and ("/" in filename)):
                    artifact_dir = filename[:filename.rindex("/")]
            only_filename = filename
            if ((filename is not None) and ("/" in filename)):
                only_filename = filename[filename.rindex("/")+1:]
            
            # Set the output directory in which to put dumped files generated by
            # the macros.
            out_dir = None
            if (only_filename is not None):
                out_dir = artifact_dir + "/" + only_filename + "_artifacts/"
                if os.path.exists(out_dir):
                    shutil.rmtree(out_dir)
            else:
                out_dir = "/tmp/tmp_file_" + safe_str_convert(random.randrange(0, 10000000000))
            log.info("Saving dropped analysis artifacts in " + out_dir)
            core.vba_context.out_dir = out_dir
            del filename # We already have this in memory, we don't need to read it again.
                
            # Parse the VBA streams.
            log.info("Parsing VB...")
            comp_modules = parse_streams(vba, strip_useless)
            if (comp_modules is None):
                return None
            got_code = False
            for module_info in comp_modules:
                m = module_info[0]
                stream = module_info[1]
                if (m != "empty"):
                    vm.add_compiled_module(m, stream)
                    got_code = True
            if ((not got_code) and (not display_int_iocs)):
                log.info("No VBA or VBScript found. Exiting.")
                return ([], [], [], [])

            # Get the VBA code.
            vba_code = ""
            for (_, _, _, macro_code) in vba.extract_macros():
                if (macro_code is not None):
                    vba_code += macro_code

            # Do not analyze the file if the VBA looks like garbage.
            if (read_ole_fields.is_garbage_vba(vba_code)):
                raise ValueError("VBA looks corrupted. Not analyzing.")

            # Read in text values from all of the various places in
            # Office 97/2000+ that text values can be hidden. So many
            # places.
            read_ole_fields.read_payload_hiding_places(data, orig_filename, vm, vba_code, vba)
            
            # Do Emulation.
            safe_print("")
            safe_print('-'*79)
            safe_print('TRACING VBA CODE (entrypoint = Auto*):')
            if (entry_points is not None):
                log.info("Starting emulation from function(s) " + safe_str_convert(entry_points))
            pyparsing.ParserElement.resetCache()
            vm.vba = vba
            vm.trace()

            # Done with emulation.

            # Report the results.
            str_actions, tmp_iocs, shellcode_bytes = _report_analysis_results(vm, data, display_int_iocs, orig_filename, out_file_name)

            # Save any embedded files as artifacts.
            _save_embedded_files(out_dir, vm)
            
            # Return the results.
            return (str_actions, vm.external_funcs, tmp_iocs, shellcode_bytes)

        # No VBA/VBScript found?
        else:
            safe_print('Finished analyzing ' + safe_str_convert(orig_filename) + " .\n")
            safe_print('No VBA macros found.')
            safe_print('')
            return ([], [], [], [])

    # Handle uncaught exceptions triggered during analysis.
    except Exception as e:

        # Print error info.
        if (("SystemExit" not in safe_str_convert(e)) and (". Aborting analysis." not in safe_str_convert(e))):
            traceback.print_exc()
        log.error(safe_str_convert(e))

        # If this is an out of memory error terminate the process with an
        # error code indicating that there are memory problems. This is so
        # that higher level systems using ViperMonkey can see that there is a
        # memory issue and handle it accordingly.
        if isinstance(e, MemoryError):
            log.error("Exiting ViperMonkey with error code 137 (out of memory)")
            sys.exit(137)

        # Done. Analysis failed.
        return None
Example #22
    async def reload(self, ctx, arg='silent'):
        warnings = []
        loading = []
        unloaded = []
        import time
        before = time.monotonic()
        log.info(f'{trace.red.s}> Reloading Extensions')
        loaded = []
        for x in self.bot.extensions:
            loaded.append(x)
        cogs = extensions()
        for x in cogs:
            if x in loaded:
                try:
                    self.bot.reload_extension(x)
                    if not arg == 'silent':
                        log.info(f'{trace.cyan}> Reloaded {trace.yellow.s}{x}')
                    loaded.remove(x)
                except Exception as e:
                    warnings.append(f'Failed to reload extension {x}.\n{e}')
            elif x not in loaded:
                try:
                    self.bot.load_extension(x)
                    loading.append(x)
                except Exception as e:
                    warnings.append(f'Failed to load extension {x}.\n{e}')

        for x in list(loaded):  # Iterate over a copy; 'loaded' is mutated below.
            try:
                self.bot.unload_extension(x)
                unloaded.append(x)
                loaded.remove(x)
            except Exception as e:
                warnings.append(f'Failed to unload extension {x}.\n{e}')

        if not cogs:
            log.warning('No extensions were found.')
        else:
            for x in loading:
                log.warning(f'> Loaded {x}')
            for x in unloaded:
                log.error(f'> Unloaded {x}')
            for x in warnings:
                y = x.split('\n')
                log.warning(f'> {y[0]}')
                log.error(f'> {y[1]}')
            ping = round((time.monotonic() - before) * 1000)
            log.info(
                f'{trace.cyan}> Reloaded {trace.yellow.s}{len(self.bot.extensions)} extensions {trace.cyan}in {trace.yellow.s}{ping}ms{trace.cyan}.'
            )
            global lockdown
            lockdown = False
            await ctx.send(
                f'Extensions reloaded. ({len(self.bot.extensions)}) (`{ping}ms`)'
            )
            from core import json
            json.json()  # Reload memory
            from core.bot.login import version
            version.Discord.latest()  # Check for updates for Discord.py
            version.YouTubeDL.latest()  # Check for updates for YouTubeDL
            await tls.Activity.preset(self.bot)  # Update activity
Example #23
import traceback
import asyncio
import session
import sys


if __name__ == '__main__':
    log.info(f'>{trace.cyan} Starting at {Time.readable.at()}.')
    # Initialize database
    log.info(f'{trace.cyan}> Initializing {trace.black.s}dataset{trace.cyan} Database.')
    try:
        data()
        log.info(f'{trace.cyan}> Initialized {trace.black.s}dataset{trace.cyan}.')
    except Exception as err:
        log.warning(f'> Failed to load {trace.black.s}dataset{trace.warn}. Please restart!')
        log.error(f'> {short_traceback()}')
        log.critical(f'> {traceback.format_exc()}')

    # Start API
    import api
    api.Main()

    # Initialize extensions
    # Append cCogs
    # append_cog('session.py')  # Load session
    append_cog('debug.py')  # Load debug things
    append_cog('main.py')  # Load essentials

    # Login
    from core.bot import time
    time.uptime = datetime.datetime.utcnow()
Example #24
def _get_shapes_text_values(fname, stream):
    """
    Read in the text associated with Shape objects in the document.
    NOTE: This currently is a hack.
    """

    # Maybe 2007+ file?
    r = _get_shapes_text_values_2007(fname)
    if (len(r) > 0):
        return r

    r = []
    try:
        # Read the WordDocument stream.
        ole = olefile.OleFileIO(fname, write_mode=False)
        if (not ole.exists(stream)):
            return []
        data = ole.openstream(stream).read()

        # It looks like maybe(?) the shapes text appears as ASCII blocks bounded by
        # 0x0D bytes. We will look for that.
        pat = r"\x0d[\x20-\x7e]{100,}\x0d"
        strs = re.findall(pat, data)
        #print "STREAM: " + str(stream)
        #print data
        #print "^^^^^^^^^^^"
        #print strs

        # Hope that the Shape() object indexing follows the same order as the strings
        # we found.
        pos = 1
        for shape_text in strs:

            # Access value with .TextFrame.TextRange.Text accessor.
            shape_text = shape_text[1:-1]
            var = "Shapes('" + str(pos) + "').TextFrame.TextRange.Text"
            r.append((var, shape_text))

            # Access value with .TextFrame.ContainingRange accessor.
            var = "Shapes('" + str(pos) + "').TextFrame.ContainingRange"
            r.append((var, shape_text))

            # Access value with .AlternativeText accessor.
            var = "Shapes('" + str(pos) + "').AlternativeText"
            r.append((var, shape_text))

            # Move to next shape.
            pos += 1

        # It looks like maybe(?) the shapes text appears as wide char blocks bounded by
        # 0x0D bytes. We will look for that.
        #pat = r"\x0d(?:\x00[\x20-\x7e]){10,}\x00?\x0d"
        pat = r"(?:\x00[\x20-\x7e]){100,}"
        strs = re.findall(pat, data)

        # Hope that the Shape() object indexing follows the same order as the strings
        # we found.
        pos = 1
        for shape_text in strs:

            # Access value with .TextFrame.TextRange.Text accessor.
            shape_text = shape_text[1:-1].replace("\x00", "")
            var = "Shapes('" + str(pos) + "').TextFrame.TextRange.Text"
            r.append((var, shape_text))

            # Access value with .TextFrame.ContainingRange accessor.
            var = "Shapes('" + str(pos) + "').TextFrame.ContainingRange"
            r.append((var, shape_text))

            # Access value with .AlternativeText accessor.
            var = "Shapes('" + str(pos) + "').AlternativeText"
            r.append((var, shape_text))

            # Move to next shape.
            pos += 1

    except Exception as e:

        # Report the error.
        log.error("Cannot read associated Shapes text. " + str(e))

        # See if we can read Shapes() info from an XML file.
        if ("not an OLE2 structured storage file" in str(e)):
            r = _get_shapes_text_values_xml(fname)

    return r
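
A standalone sketch of the first pattern above, which looks for runs of 100+ printable ASCII characters bounded by 0x0D bytes (the data here is fabricated):

import re

data = '\x0d' + ('A' * 120) + '\x0d'
matches = re.findall(r'\x0d[\x20-\x7e]{100,}\x0d', data)
assert matches and matches[0][1:-1] == 'A' * 120  # [1:-1] strips the 0x0D bounds.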
Example #25
    def process_changes_in_remote(self, changes):
        """
        This method is used to process changes in Mega (synchronize).
        """

        log.debug("Processing changes in remote")
        
        remove_files = changes['removed_files']
        for file in remove_files:
            log.debug("Removing file %s" % file)
            status = self.uploader.remove(
                path='%s/%s' % (settings.get_config('remote', 'folder'),
                                file.relative_path),
                filename=file.name)

            if not status:
                log.error("ERROR DELETING REMOTE FILE %s" % file)


        remove_folders = changes['removed_folders']
        for folder in remove_folders:
            log.debug("Removing folder %s" % folder)
            status = self.uploader.remove(
                path='%s/%s' % (settings.get_config('remote', 'folder'),
                                folder.relative_path),
                filename=folder.name)
            if not status:
                log.error("Folder not deleted correctly in remote %s" % folder)

        new_folders = changes['new_folders']
        for folder in new_folders:
            log.debug("Creating remote folder %s" % folder)
            remote_folder = '%s/%s/%s' % (settings.get_config('remote', 'folder'),
                                          folder.relative_path,
                                          folder.name)
            rem_desc = self.uploader.mkdir(remote_folder)
        
        new_files = changes['new_files']
        for file in new_files:
            log.debug("New file %s" % file)
            remote_folder = '%s/%s' % (settings.get_config('remote', 'folder'), 
                                       file.relative_path)
            rem_desc = self.uploader.upload(remote_folder, file.path)


        to_download = changes['to_download']
        for file in to_download:
            log.debug("Download modified %s" % file)
            path = '%s/%s' % (settings.get_config('remote', 'folder'),
                              file.relative_path)
            content = self.uploader.get_content_by_path(path=path,
                                                        filename=file.name)
            filesystem.create_file(
                path=os.path.join(self.backup_path, file.relative_path),
                name=file.name,
                content=content)
        
        new_files = changes['to_upload']
        for file in new_files:
            log.debug("Uploading file %s" % file)
            remote_folder = '%s/%s' % (settings.get_config('remote', 'folder'),
                                       file.relative_path)
            rem_desc = self.uploader.upload(remote_folder, file.path)
Example #26
    def error(self, msg):
        log.error(msg)