Example #1
0
    def _run_analyze_filter(self):
        """Runs Analyze Filter on the OSXCollector output retrieved
        from an S3 bucket.

        Allocates fresh in-memory buffers for the JSON analysis output
        and the text/HTML summaries, runs the filter into them, and
        rewinds each buffer so callers can read from the start.
        """
        self._analysis_output = byte_buffer()
        self._text_analysis_summary = byte_buffer()
        self._html_analysis_summary = byte_buffer()

        _run_filter(
            AnalyzeFilter(
                monochrome=True,
                text_output_file=self._text_analysis_summary,
                html_output_file=self._html_analysis_summary,
            ),
            input_stream=self._osxcollector_output_json_file,
            output_stream=self._analysis_output,
        )

        # rewind every output buffer so subsequent reads start at offset 0
        for output_buffer in (
            self._analysis_output,
            self._text_analysis_summary,
            self._html_analysis_summary,
        ):
            output_buffer.seek(0)
Example #2
0
    def _extract_osxcollector_output_json_file(self):
        """Extracts JSON file containing the OSXCollector output from
        tar.gz archive. It will look in the archive contents for the
        file with the extension ".json". If no file with this extension
        is found in the archive or more than one JSON file is found, it
        will raise `OSXCollectorOutputExtractionError`.

        Sets ``self._osxcollector_output_json_file`` to a file-like
        object for the extracted member.
        """
        # create a file-like object based on the S3 object contents as string
        fileobj = byte_buffer(self._osxcollector_output)
        tar = tarfile.open(mode='r:gz', fileobj=fileobj)
        json_tarinfo = [t for t in tar if t.name.endswith('.json')]

        if len(json_tarinfo) != 1:
            raise OSXCollectorOutputExtractionError(
                'Expected 1 JSON file inside the OSXCollector output archive, '
                'but found {0} instead.'.format(len(json_tarinfo)),
            )

        tarinfo = json_tarinfo[0]
        # NOTE: the tar object is deliberately left open — the file-like
        # object returned by extractfile() reads through it lazily.
        self._osxcollector_output_json_file = tar.extractfile(tarinfo)
        # lazy %-style args: the message is only formatted if INFO is enabled
        logging.info(
            'Extracted OSXCollector output JSON file %s', tarinfo.name,
        )
Example #3
0
    def _upload_analysis_results(self, osxcollector_output_filename):
        """Uploads the original archive and the analysis artifacts.

        Empty artifacts are skipped; the remaining files are handed to
        every configured results uploader.
        """
        # drop the file extension (".tar.gz" — 7 characters)
        base_name = osxcollector_output_filename[:-7]

        results = [
            FileMetaInfo(
                osxcollector_output_filename,
                byte_buffer(self._osxcollector_output), 'application/gzip',
            ),
            FileMetaInfo(
                '{0}_analysis.json'.format(base_name),
                self._analysis_output, 'application/json',
            ),
            FileMetaInfo(
                '{0}_summary.txt'.format(base_name),
                self._text_analysis_summary, 'text/plain',
            ),
            FileMetaInfo(
                '{0}_summary.html'.format(base_name),
                self._html_analysis_summary, 'text/html; charset=UTF-8',
            ),
        ]

        # keep only the artifacts whose buffers actually hold data
        non_empty_results = [
            res for res in results
            if AMIRA._check_buffer_size(res.content) > 0
        ]

        for uploader in self._results_uploader:
            uploader.upload_results(non_empty_results)
Example #4
0
def loads(data, metadata=None, scoped_session=None, engine=None):
    """Deserialize *data* previously produced by :func:`dumps`.

    The optional *metadata*, *scoped_session* and *engine* give the
    deserializer the context needed to restore database-bound objects.
    """
    stream = byte_buffer(data)
    return Deserializer(stream, metadata, scoped_session, engine).load()
Example #5
0
def dumps(obj):
    """Serialize *obj* and return the resulting payload as bytes."""
    stream = byte_buffer()
    Serializer(stream).dump(obj)
    return stream.getvalue()
Example #6
0
def loads(data, metadata=None, scoped_session=None, engine=None):
    """Reconstruct the object serialized into *data* by :func:`dumps`.

    *metadata*, *scoped_session* and *engine* supply the context the
    deserializer may need while rebuilding the object graph.
    """
    deserializer = Deserializer(
        byte_buffer(data), metadata, scoped_session, engine,
    )
    return deserializer.load()
Example #7
0
def dumps(obj, protocol=0):
    """Serialize *obj* using pickle *protocol* and return the bytes."""
    out = byte_buffer()
    serializer = Serializer(out, protocol)
    serializer.dump(obj)
    return out.getvalue()