Code Example #1
File: core.py  Project: triplekill/rudra-pcaponly
    def __init__(self, session={}):
        starttime = time.time()

        logging.config.fileConfig('logging.conf')
        self.logger = logging.getLogger(__name__)

        self.session = session

        self.session['banner'] = utils.to_base64("""
                    .___
  _______  __ __   __| _/_______ _____
  \_  __ \|  |  \ / __ | \_  __ \\\\__  \\
   |  | \/|  |  // /_/ |  |  | \/ / __ \_
   |__|   |____/ \____ |  |__|   (____  / v%s
                      \/              \/ (%s)
    """ % (get_version_string(), get_author()))
        print utils.from_base64(self.session['banner'])

        self.session['report'] = {}

        if self.session['config']['enable_interactive']:
            print ' Use the "self" object to analyze files'
            self.interactive()

        elif self.session['config']['input_files'] and len(
                self.session['config']['input_files']) > 0:
            for f in self.session['config']['input_files']:
                self.analyze(f)

        else:
            self.logger.error(
                'Please use -f to specify a file or use -i for interactive mode'
            )

        endtime = time.time()
        self.session['report']['starttime'] = starttime
        self.session['report']['endtime'] = endtime
        del starttime, endtime

        self.session['report']['elapsedtime'] = self.session['report'][
            'endtime'] - self.session['report']['starttime']
        print 'Total scan time: %s' % (utils.hms_string(
            self.session['report']['elapsedtime']))
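
A hedged usage sketch (not taken from either project) may help show how this constructor is driven: the config keys below mirror exactly what __init__ reads from session['config'], while the class name "Rudra" and the capture filename are assumptions.

# Sketch only: the config keys are the ones __init__ reads; the class name
# "Rudra" and 'capture.pcap' are assumptions, not from the projects above.
session = {
    'config': {
        'enable_interactive': False,        # skip the interactive() branch
        'input_files': ['capture.pcap'],    # analyze() is called once per entry
    },
}

# Construction alone drives the whole run: banner print, per-file analysis,
# and the start/end/elapsed-time bookkeeping written into session['report'].
# e.g.  Rudra(session=session)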
Code Example #2
File: core.py  Project: xujun10110/rudra
  def __init__(self, session={}):
    starttime = time.time()

    logging.config.fileConfig('logging.conf')
    self.logger = logging.getLogger(__name__)

    self.session = session

    self.session['banner'] = utils.to_base64("""
                    .___
  _______  __ __   __| _/_______ _____
  \_  __ \|  |  \ / __ | \_  __ \\\\__  \\
   |  | \/|  |  // /_/ |  |  | \/ / __ \_
   |__|   |____/ \____ |  |__|   (____  / v%s
                      \/              \/ (%s)
    """ % (get_version_string(), get_author()))
    print utils.from_base64(self.session['banner'])

    self.session['report'] = {}

    if self.session['config']['enable_interactive']:
      print ' Use the "self" object to analyze files'
      self.interactive()

    elif self.session['config']['input_files'] and len(self.session['config']['input_files']) > 0:
      for f in self.session['config']['input_files']:
        self.analyze(f)

    else:
      self.logger.error('Please use -f to specify a file or use -i for interactive mode')

    endtime = time.time()
    self.session['report']['starttime'] = starttime
    self.session['report']['endtime'] = endtime
    del starttime, endtime

    self.session['report']['elapsedtime'] = self.session['report']['endtime'] - self.session['report']['starttime']
    print 'Total scan time: %s' % (utils.hms_string(self.session['report']['elapsedtime']))
Code Example #3
File: core.py  Project: triplekill/rudra-pcaponly
    def analyze(self, filename):
        if not utils.is_file(filename):
            self.logger.error('%s is not a file.' % filename)
            return

        ## refrain from scanning a file more than once
        ## include db checks and ensure config similarity
        ## or check if the report file already exists in reports directory

        else:
            self.logger.info('Starting analysis on file %s' % filename)

        self.session['report']['firstseen'] = utils.time_now_json(
            self.session['config']['timezone'])
        self.session['report']['lastseen'] = utils.time_now_json(
            self.session['config']['timezone'])

        filesize = utils.file_size(filename)
        if self.session['config'][
                'stats_filesize_limit'] == 0 or filesize <= self.session[
                    'config']['stats_filesize_limit']:
            # limit is equal to 0
            # or
            # filesize is within the limit
            # all good, keep going
            pass
        else:
            self.logger.warn(
                'Disabling entropy compression stats calculation and file visualization (filesize: %d, stats_filesize_limit: %d)'
                % (filesize, self.session['config']['stats_filesize_limit']))
            self.session['config']['enable_entropy_compression_stats'] = False
            self.session['config']['enable_bytefreq_histogram'] = False
            self.session['config']['enable_file_visualization'] = False

        if not self.session['config']['enable_entropy_compression_stats']:
            # if stats are not computed
            # histogram can't be shown, so disable it explicitly
            self.session['config']['enable_bytefreq_histogram'] = False

        if not self.session['config']['enable_geoloc']:
            # if geodata lookup is disabled
            # map cannot be shown, so disable it explicitly
            self.session['config']['enable_google_maps'] = False

        # identify filetype and populate reports
        self.logger.info(
            'Invoking fileid module for type identification and metadata collection'
        )
        fileid = FileID(self.session['config'])
        fileidreport = fileid.identify(filename)

        # initialize fileid specific classes and call analysis methods
        if fileidreport and fileidreport[
                'filecategory'] == 'CAP' and fileidreport['filetype'] == 'PCAP':
            if self.session['config']['enable_pcap']:
                self.logger.info(
                    'Invoking pcapanalysis module for host identification, dns/http/ftp/smtp/pop3/imap probing and flow inspection'
                )
                pcapid = PCAPAnalysis(self.session['config'])
                pcapreport = pcapid.analyze(filename)

            else:
                pcapreport = None

            if pcapreport:
                # for all http sessions,
                # identify and split transactions
                pcapreport = ProtoDecode(
                    self.session['config']).http_transactions(pcapreport)

                if self.session['config']['enable_proto_decode']:
                    self.logger.info(
                        'Invoking HTTP/SMTP/IMAP/POP3 protocol decode module')
                    pcapreport = ProtoDecode(self.session['config']).decode(
                        pcapreport, fileidreport['filetype'])

                self.logger.info('Invoking inspection module')
                pcapreport = Inspect(self.session['config']).inspect(
                    pcapreport, fileidreport['filetype'])

                # populate rudra reports dict with appropriate sections
                self.session['report']['filestats'] = fileidreport
                self.session['report']['pcap'] = pcapreport

                ## populate results into db

                # include some meta info in the report
                ## add scantime
                self.session['metainfo'] = {
                    'datetime': utils.get_current_datetime(),
                    'rudraversion': "rudra v%s" % (get_version_string())
                }

                # encode all unsafe dict values to base64 and append _b64 to respective keynames
                self.logger.info(
                    'Invoking report dict sanitization module: adds _b64 keys')
                self.session['report'] = self.report_sanitize(
                    self.session['report'])

                # normalize dict to have a consistent representation of empty/uninitialized values
                self.logger.info(
                    'Invoking report dict normalization module: cleans empty key:value pairs'
                )
                self.session['report'] = utils.dict_normalize(
                    self.session['report'])

                # write reports in supported formats to reports directory
                self.logger.info(
                    'Invoking reporting module to generate reports in requested formats'
                )
                Report().generate_report(self.session)
                self.logger.info(
                    'Completed report generation: reports/%s.*' %
                    self.session['report']['filestats']['hashes']['sha256'])

            else:  # pcapanalysis returned none
                return

        else:  # not a pcap file
            return
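
The stats_filesize_limit gate near the top of analyze() can be read in isolation; here is a small standalone sketch of the same decision (the helper name and the sample sizes are illustrative, not from the project).

def should_compute_stats(filesize, stats_filesize_limit):
    # a limit of 0 means "no limit"; otherwise the file must fit within it
    return stats_filesize_limit == 0 or filesize <= stats_filesize_limit

assert should_compute_stats(4096, 0)                     # limit disabled
assert should_compute_stats(4096, 1024 * 1024)           # 4 KB file, 1 MB limit
assert not should_compute_stats(50 * 1024 * 1024, 1024)  # oversized: stats and visualization get disabled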
Code Example #4
File: core.py  Project: xujun10110/rudra
  def analyze(self, filename):
    if not utils.is_file(filename):
      self.logger.error('%s is not a file.' % filename)
      return

    ## refrain from scanning a file more than once
    ## include db checks and ensure config similarity
    ## or check if the report file already exists in reports directory

    else:
      self.logger.info('Starting analysis on file %s' % filename)

    self.session['report']['firstseen'] = utils.time_now_json(self.session['config']['timezone'])
    self.session['report']['lastseen'] = utils.time_now_json(self.session['config']['timezone'])

    filesize = utils.file_size(filename)
    if self.session['config']['stats_filesize_limit'] == 0 or filesize <= self.session['config']['stats_filesize_limit']:
      # limit is equal to 0
      # or
      # filesize is within the limit
      # all good, keep going
      pass
    else:
      self.logger.warn('Disabling entropy compression stats calculation and file visualization (filesize: %d, stats_filesize_limit: %d)' % (filesize, self.session['config']['stats_filesize_limit']))
      self.session['config']['enable_entropy_compression_stats'] = False
      self.session['config']['enable_bytefreq_histogram'] = False
      self.session['config']['enable_file_visualization'] = False

    if not self.session['config']['enable_entropy_compression_stats']:
      # if stats are not computed
      # histogram can't be shown, so disable it explicitly
      self.session['config']['enable_bytefreq_histogram'] = False

    if not self.session['config']['enable_geoloc']:
      # if geodata lookup is disabled
      # map cannot be shown, so disable it explicitly
      self.session['config']['enable_google_maps'] = False

    # identify filetype and populate reports
    self.logger.info('Invoking fileid module for type identification and metadata collection')
    fileid = FileID(self.session['config'])
    fileidreport = fileid.identify(filename)

    # initialize fileid specific classes and call analysis methods
    if fileidreport and fileidreport['filecategory'] == 'CAP' and fileidreport['filetype'] == 'PCAP':
      if self.session['config']['enable_pcap']:
        self.logger.info('Invoking pcapanalysis module for host identification, dns/http/ftp/smtp/pop3/imap probing and flow inspection')
        pcapid = PCAPAnalysis(self.session['config'])
        pcapreport = pcapid.analyze(filename)

      else:
        pcapreport = None

      if pcapreport:
        # for all http sessions,
        # identify and split transactions
        pcapreport = ProtoDecode(self.session['config']).http_transactions(pcapreport)

        if self.session['config']['enable_proto_decode']:
          self.logger.info('Invoking HTTP/SMTP/IMAP/POP3 protocol decode module')
          pcapreport = ProtoDecode(self.session['config']).decode(pcapreport, fileidreport['filetype'])

        self.logger.info('Invoking inspection module')
        pcapreport = Inspect(self.session['config']).inspect(pcapreport, fileidreport['filetype'])

        # populate rudra reports dict with appropriate sections
        self.session['report']['filestats'] = fileidreport
        self.session['report']['pcap'] = pcapreport

        ## populate results into db

        # include some meta info in the report
        ## add scantime
        self.session['metainfo'] = {
          'datetime': utils.get_current_datetime(),
          'rudraversion': "rudra v%s" % (get_version_string())
        }

        # encode all unsafe dict values to base64 and append _b64 to respective keynames
        self.logger.info('Invoking report dict sanitization module: adds _b64 keys')
        self.session['report'] = self.report_sanitize(self.session['report'])

        # normalize dict to have a consistent representation of empty/uninitialized values
        self.logger.info('Invoking report dict normalization module: cleans empty key:value pairs')
        self.session['report'] = utils.dict_normalize(self.session['report'])

        # write reports in supported formats to reports directory
        self.logger.info('Invoking reporting module to generate reports in requested formats')
        Report().generate_report(self.session)
        self.logger.info('Completed report generation: reports/%s.*' % self.session['report']['filestats']['hashes']['sha256'])

      else: # pcapanalysis returned none
        return

    else: # not a pcap file
      return
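
Piecing the two methods together, the session dict after a full run has roughly the shape sketched below. This is inferred from the assignments in the code above; the placeholder values and empty sub-dicts are illustrative, not real output.

# Sketch of the resulting session structure; values are placeholders.
session = {
    'banner': '<base64-encoded ASCII banner>',   # set in __init__
    'config': {},                                # caller-supplied settings
    'metainfo': {
        'datetime': '<current datetime>',
        'rudraversion': 'rudra v<version>',
    },
    'report': {
        'firstseen': '<json timestamp>',
        'lastseen': '<json timestamp>',
        'filestats': {                           # FileID output
            'filecategory': 'CAP',
            'filetype': 'PCAP',
            'hashes': {'sha256': '<digest>'},
        },
        'pcap': {},                              # PCAPAnalysis output
        # 'starttime', 'endtime', 'elapsedtime' are written by __init__ only
        # after analyze() returns, so they are not part of the generated report
    },
}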