Example #1
0
    def __init__(self, config_file=None):
        """Set up logging, inspection depth limits, and configuration state.

        Args:
            config_file: optional path to an INI-style configuration file;
                when supplied and it names an existing file, it is loaded
                into ``self.config``.
        """
        # NOTE(review): 'logging.conf' is resolved relative to the current
        # working directory — confirm this is the intended location.
        logging.config.fileConfig('logging.conf')
        self.logger = logging.getLogger(__name__)

        # max bytes (within respective buffers) to inspect
        self.MAX_INSPECT_UDP_DEPTH = 8192
        self.MAX_INSPECT_CTS_DEPTH = 8192
        self.MAX_INSPECT_STC_DEPTH = 8192

        self.conf = {}

        # Fix: os.path.abspath(None) raises TypeError, so only resolve the
        # path when a config_file was actually supplied (default is None).
        self.config_file = os.path.abspath(config_file) if config_file else None
        self.config = ConfigParser.SafeConfigParser()

        if config_file and utils.is_file(config_file):
            self.config.read(self.config_file)
Example #2
0
  def __init__(self, config_file=None):
    """Set up logging, inspection depth limits, and configuration state.

    Args:
        config_file: optional path to an INI-style configuration file;
            when supplied and it names an existing file, it is loaded
            into ``self.config``.
    """
    # NOTE(review): 'logging.conf' is resolved relative to the current
    # working directory — confirm this is the intended location.
    logging.config.fileConfig('logging.conf')
    self.logger = logging.getLogger(__name__)

    # max bytes (within respective buffers) to inspect
    self.MAX_INSPECT_UDP_DEPTH = 8192
    self.MAX_INSPECT_CTS_DEPTH = 8192
    self.MAX_INSPECT_STC_DEPTH = 8192

    self.conf = {}

    # Fix: os.path.abspath(None) raises TypeError, so only resolve the
    # path when a config_file was actually supplied (default is None).
    self.config_file = os.path.abspath(config_file) if config_file else None
    self.config = ConfigParser.SafeConfigParser()

    if config_file and utils.is_file(config_file):
      self.config.read(self.config_file)
Example #3
0
    def analyze(self, filename):
        """Analyze a capture file end-to-end and generate reports.

        Validates the input path, records first/last-seen timestamps,
        applies size-based feature gating, identifies the file type, and —
        for PCAP files with pcap analysis enabled — runs protocol decoding,
        inspection, sanitization, normalization, and report generation.

        Args:
            filename: path to the file to analyze. Non-files and
                non-PCAP inputs are logged/skipped and the method returns
                without producing a report.
        """
        # Guard clause: bail out early on non-files (the original used a
        # redundant else after this return).
        if not utils.is_file(filename):
            self.logger.error('%s is not a file.' % filename)
            return

        ## refrain scanning a file more than once
        ## include db checks and ensure config similarity
        ## or check if the report file already exists in reports directory

        self.logger.info('Starting analysis on file %s' % filename)

        self.session['report']['firstseen'] = utils.time_now_json(
            self.session['config']['timezone'])
        self.session['report']['lastseen'] = utils.time_now_json(
            self.session['config']['timezone'])

        # A limit of 0 means "no limit"; files over the limit have the
        # expensive stats/visualization features disabled.
        filesize = utils.file_size(filename)
        sizelimit = self.session['config']['stats_filesize_limit']
        if sizelimit != 0 and filesize > sizelimit:
            # Fix: logger.warn is deprecated in favor of logger.warning.
            self.logger.warning(
                'Disabling entropy compression stats calculation and file visualization (filesize: %d, stats_filesize_limit: %d)'
                % (filesize, sizelimit))
            self.session['config']['enable_entropy_compression_stats'] = False
            self.session['config']['enable_bytefreq_histogram'] = False
            self.session['config']['enable_file_visualization'] = False

        if not self.session['config']['enable_entropy_compression_stats']:
            # if stats are not computed
            # histogram can't be shown, so disable it explicitly
            self.session['config']['enable_bytefreq_histogram'] = False

        if not self.session['config']['enable_geoloc']:
            # if geodata lookup is disabled
            # map cannot be shown, so disable it explicitly
            self.session['config']['enable_google_maps'] = False

        # identify filetype and populate reports
        self.logger.info(
            'Invoking fileid module for type identification and metadata collection'
        )
        fileid = FileID(self.session['config'])
        fileidreport = fileid.identify(filename)

        # Guard clause: only PCAP capture files are handled.
        if not (fileidreport and fileidreport['filecategory'] == 'CAP'
                and fileidreport['filetype'] == 'PCAP'):
            return

        if self.session['config']['enable_pcap']:
            self.logger.info(
                'Invoking pcapanalysis module for host identification, dns/http/ftp/smtp/pop3/imap probing and flow inspection'
            )
            pcapid = PCAPAnalysis(self.session['config'])
            pcapreport = pcapid.analyze(filename)
        else:
            pcapreport = None

        # Guard clause: nothing to report when pcap analysis is disabled
        # or returned None.
        if not pcapreport:
            return

        # for all http sessions,
        # identify and split transactions
        pcapreport = ProtoDecode(
            self.session['config']).http_transactions(pcapreport)

        if self.session['config']['enable_proto_decode']:
            self.logger.info(
                'Invoking HTTP/SMTP/IMAP/POP3 protocol decode module')
            pcapreport = ProtoDecode(self.session['config']).decode(
                pcapreport, fileidreport['filetype'])

        self.logger.info('Invoking inspection module')
        pcapreport = Inspect(self.session['config']).inspect(
            pcapreport, fileidreport['filetype'])

        # populate rudra reports dict with appropriate sections
        self.session['report']['filestats'] = fileidreport
        self.session['report']['pcap'] = pcapreport

        ## populate results into db

        # include some meta info in the report
        ## add scantime
        self.session['metainfo'] = {
            'datetime': utils.get_current_datetime(),
            'rudraversion': "rudra v%s" % (get_version_string())
        }

        # encode all unsafe dict values to base64 and append _b64 to respective keynames
        self.logger.info(
            'Invoking report dict sanitization module: adds _b64 keys')
        self.session['report'] = self.report_sanitize(
            self.session['report'])

        # normalize dict to have a consistent representation of empty/uninitialized values
        self.logger.info(
            'Invoking report dict normalization module: cleans empty key:value pairs'
        )
        self.session['report'] = utils.dict_normalize(
            self.session['report'])

        # write reports in supported formats to reports directory
        self.logger.info(
            'Invoking reporting module to generate reports in requested formats'
        )
        Report().generate_report(self.session)
        self.logger.info(
            'Completed report generation: reports/%s.*' %
            self.session['report']['filestats']['hashes']['sha256'])
Example #4
0
  def analyze(self, filename):
    """Analyze a capture file end-to-end and generate reports.

    Validates the input path, records first/last-seen timestamps, applies
    size-based feature gating, identifies the file type, and — for PCAP
    files with pcap analysis enabled — runs protocol decoding, inspection,
    sanitization, normalization, and report generation.

    Args:
        filename: path to the file to analyze. Non-files and non-PCAP
            inputs are logged/skipped and the method returns without
            producing a report.
    """
    # Guard clause: bail out early on non-files (the original used a
    # redundant else after this return).
    if not utils.is_file(filename):
      self.logger.error('%s is not a file.' % filename)
      return

    ## refrain scanning a file more than once
    ## include db checks and ensure config similarity
    ## or check if the report file already exists in reports directory

    self.logger.info('Starting analysis on file %s' % filename)

    self.session['report']['firstseen'] = utils.time_now_json(self.session['config']['timezone'])
    self.session['report']['lastseen'] = utils.time_now_json(self.session['config']['timezone'])

    # A limit of 0 means "no limit"; files over the limit have the
    # expensive stats/visualization features disabled.
    filesize = utils.file_size(filename)
    sizelimit = self.session['config']['stats_filesize_limit']
    if sizelimit != 0 and filesize > sizelimit:
      # Fix: logger.warn is deprecated in favor of logger.warning.
      self.logger.warning('Disabling entropy compression stats calculation and file visualization (filesize: %d, stats_filesize_limit: %d)' % (filesize, sizelimit))
      self.session['config']['enable_entropy_compression_stats'] = False
      self.session['config']['enable_bytefreq_histogram'] = False
      self.session['config']['enable_file_visualization'] = False

    if not self.session['config']['enable_entropy_compression_stats']:
      # if stats are not computed
      # histogram can't be shown, so disable it explicitly
      self.session['config']['enable_bytefreq_histogram'] = False

    if not self.session['config']['enable_geoloc']:
      # if geodata lookup is disabled
      # map cannot be shown, so disable it explicitly
      self.session['config']['enable_google_maps'] = False

    # identify filetype and populate reports
    self.logger.info('Invoking fileid module for type identification and metadata collection')
    fileid = FileID(self.session['config'])
    fileidreport = fileid.identify(filename)

    # Guard clause: only PCAP capture files are handled.
    if not (fileidreport and fileidreport['filecategory'] == 'CAP' and fileidreport['filetype'] == 'PCAP'):
      return

    if self.session['config']['enable_pcap']:
      self.logger.info('Invoking pcapanalysis module for host identification, dns/http/ftp/smtp/pop3/imap probing and flow inspection')
      pcapid = PCAPAnalysis(self.session['config'])
      pcapreport = pcapid.analyze(filename)
    else:
      pcapreport = None

    # Guard clause: nothing to report when pcap analysis is disabled or
    # returned None.
    if not pcapreport:
      return

    # for all http sessions,
    # identify and split transactions
    pcapreport = ProtoDecode(self.session['config']).http_transactions(pcapreport)

    if self.session['config']['enable_proto_decode']:
      self.logger.info('Invoking HTTP/SMTP/IMAP/POP3 protocol decode module')
      pcapreport = ProtoDecode(self.session['config']).decode(pcapreport, fileidreport['filetype'])

    self.logger.info('Invoking inspection module')
    pcapreport = Inspect(self.session['config']).inspect(pcapreport, fileidreport['filetype'])

    # populate rudra reports dict with appropriate sections
    self.session['report']['filestats'] = fileidreport
    self.session['report']['pcap'] = pcapreport

    ## populate results into db

    # include some meta info in the report
    ## add scantime
    self.session['metainfo'] = {
      'datetime': utils.get_current_datetime(),
      'rudraversion': "rudra v%s" % (get_version_string())
    }

    # encode all unsafe dict values to base64 and append _b64 to respective keynames
    self.logger.info('Invoking report dict sanitization module: adds _b64 keys')
    self.session['report'] = self.report_sanitize(self.session['report'])

    # normalize dict to have a consistent representation of empty/uninitialized values
    self.logger.info('Invoking report dict normalization module: cleans empty key:value pairs')
    self.session['report'] = utils.dict_normalize(self.session['report'])

    # write reports in supported formats to reports directory
    self.logger.info('Invoking reporting module to generate reports in requested formats')
    Report().generate_report(self.session)
    self.logger.info('Completed report generation: reports/%s.*' % self.session['report']['filestats']['hashes']['sha256'])