示例#1
0
    def crash_signature(self):
        """Create CrashSignature object from CrashInfo.

        Args:
            None

        Returns:
            CrashSignature: CrashSignature based on log data.
        """
        if self._signature is not None:
            return self._signature
        collector = Collector()
        # serialize sigcache access across processes via a shared lock file
        with InterProcessLock(str(Path(grz_tmp()) / "fm_sigcache.lock")):
            if collector.sigCacheDir:
                cached, _ = collector.search(self.crash_info)
                if cached:
                    LOG.debug("signature loaded from cache file %r", cached)
                    self._signature = CrashSignature.fromFile(cached)
        # if cache lookup failed generate a crash signature
        if self._signature is None:
            frame_limit = self.crash_signature_max_frames(self.crash_info)
            self._signature = self.crash_info.createCrashSignature(maxFrames=frame_limit)
        if self._signature is None:
            LOG.debug("failed to create FM signature")
        return self._signature
示例#2
0
def download_crash(crash_id):
    """Download testcase for the specified FuzzManager crash.

    Args:
        crash_id (int): ID of the requested crash on the server side

    Returns:
        str: Temporary filename of the testcase. Caller must remove when finished.
    """
    collector = Collector()

    LOG.debug("crash %d, downloading testcase...", crash_id)

    url = "%s://%s:%d/crashmanager/rest/crashes/%s/download/" % (
        collector.serverProtocol,
        collector.serverHost,
        collector.serverPort,
        crash_id,
    )
    response = collector.get(url)

    # the server advertises the original testcase filename in the
    # content-disposition header; anything else is a malformed reply
    match = re.match(r'^attachment; filename="(.*)"$',
                     response.headers.get("content-disposition", ""))
    if match is None:
        raise RuntimeError("Server sent malformed response: %r" % (response,))

    # name the temp file after the crash ID and keep the server's extension
    handle, filename = tempfile.mkstemp(
        prefix="crash.%d." % (crash_id,),
        suffix=os.path.splitext(match.group(1))[1],
    )
    with os.fdopen(handle, "wb") as out_fp:
        out_fp.write(response.content)

    return filename
示例#3
0
        def OnFault(self, run, test, variationCount, monitorData,
                    actionValues):
            """Peach fault callback: package crash data into a ZIP and submit it."""
            # Setup FuzzManager with information about target and platform data.
            program_configuration = ProgramConfiguration.fromBinary(
                self.target_binary)

            # Pull target and crash information gathered by the monitors.
            stdout = self._get_value_by_key(monitorData, "stdout.txt", "N/A")
            stderr = self._get_value_by_key(monitorData, "stderr.txt", "N/A")
            auxdat = self._get_value_by_key(monitorData, "auxdat.txt", "N/A")

            crash_info = CrashInfo.fromRawCrashData(stdout, stderr,
                                                    program_configuration,
                                                    auxdat)

            collector = Collector(tool="peach")

            # Bundle testcase content and any additional meta information into
            # an in-memory ZIP archive.
            archive_buffer = StringIO.StringIO()
            archive = zipfile.ZipFile(archive_buffer, 'w')

            # Collect |actionValues| crash information from Peach.
            for index, action in enumerate(actionValues):
                if len(action) <= 2:
                    # entry carries no data payload
                    continue
                archive.writestr(
                    "data_%d_%s_%s.txt" % (index, action[1], action[0]),
                    action[2])

                if len(action) > 3 and action[1] != 'output':
                    # non-'output' actions: store a repr() of the extra field
                    archive.writestr(
                        "data_%d_%s_%s_action.txt" % (index, action[1],
                                                      action[0]),
                        repr(action[3]))

                if len(action) > 3 and action[1] == 'output':
                    # 'output' actions: store the extra field verbatim
                    archive.writestr(
                        "data_%d_%s_%s_fileName.txt" % (index, action[1],
                                                        action[0]),
                        action[3])

            # Collect |monitorData| crash information from Peach.
            for name, content in monitorData.items():
                archive.writestr(name, content)

            archive.close()
            # delete=False: the file must outlive the handle so submit() can
            # read it back by name
            with tempfile.NamedTemporaryFile(delete=False,
                                             suffix='.zip') as testcase:
                archive_buffer.seek(0)
                testcase.write(archive_buffer.getvalue())
                testcase.close()
                # Submit crash report with testcase to FuzzManager.
                collector.submit(crash_info, testcase.name, metaData=None)
示例#4
0
        def add_fault(self):
            """Report the current fault bucket to FuzzManager."""
            # Setup FuzzManager with target information and platform data.
            program_configuration = ProgramConfiguration.fromBinary(self.binary)

            # Prepare FuzzManager with crash information.
            stdout = "N/A"  # Todo: There is no plain stdout logger yet.
            stderr = "N/A"  # Todo: There is no plain stderr logger yet.
            # fall back to an empty dict: the previous "N/A" default is a str,
            # and str has no .get(), so a missing "crashlog" key would raise
            # AttributeError instead of yielding auxdat == "N/A"
            auxdat = self.bucket.get("crashlog", {}).get("data", "N/A")
            metaData = None
            testcase = self.save_bucket_as_zip(self.bucket)
            crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)

            # Submit crash report with testcase to FuzzManager.
            collector = Collector(tool="dharma")
            collector.submit(crash_info, testcase, metaData)
示例#5
0
    def __init__(self, bucket_id):
        """Initialize a Bucket instance.

        Arguments:
            bucket_id (int): ID of the requested bucket on the server side
        """
        self._bucket_id = bucket_id
        self._sig_filename = None
        self._coll = Collector()
        # REST endpoint for this specific bucket
        coll = self._coll
        self._url = ("%s://%s:%d/crashmanager/rest/buckets/%d/"
                     % (coll.serverProtocol, coll.serverHost, coll.serverPort,
                        bucket_id))
        self._data = None
示例#6
0
    def __init__(self, crash_id):
        """Initialize CrashEntry.

        Arguments:
            crash_id (int): ID of the requested crash on the server side
        """
        self._crash_id = crash_id
        self._coll = Collector()
        # REST endpoint for this specific crash entry
        coll = self._coll
        self._url = ("%s://%s:%d/crashmanager/rest/crashes/%d/"
                     % (coll.serverProtocol, coll.serverHost, coll.serverPort,
                        crash_id))
        self._data = None
        self._tc_filename = None
示例#7
0
File: logger.py Project: KurSh/peach
        def OnFault(self, run, test, variationCount, monitorData, actionValues):
            """Peach fault callback: bundle crash artifacts into a ZIP and submit to FuzzManager."""
            # Setup FuzzManager with information about target and platform data.
            program_configuration = ProgramConfiguration.fromBinary(self.target_binary)

            # Prepare FuzzManager with target and crash information.
            stdout = self._get_value_by_key(monitorData, "stdout.txt", "N/A")
            stderr = self._get_value_by_key(monitorData, "stderr.txt", "N/A")
            auxdat = self._get_value_by_key(monitorData, "auxdat.txt", "N/A")

            crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)

            collector = Collector(tool="peach")

            # Write testcase content and any additional meta information to a temporary ZIP archive.
            buffer = StringIO.StringIO()
            zip_buffer = zipfile.ZipFile(buffer, 'w')

            # Collect |actionValues| crash information from Peach.
            for i in range(len(actionValues)):
                # entries with fewer than 3 fields carry no data payload
                if len(actionValues[i]) > 2:
                    data = actionValues[i][2]
                    fileName = "data_%d_%s_%s.txt" % (i, actionValues[i][1], actionValues[i][0])
                    zip_buffer.writestr(fileName, data)

                    # non-'output' actions: store a repr() of the extra field
                    if len(actionValues[i]) > 3 and actionValues[i][1] != 'output':
                        data = repr(actionValues[i][3])
                        fileName = "data_%d_%s_%s_action.txt" % (i, actionValues[i][1], actionValues[i][0])
                        zip_buffer.writestr(fileName, data)

                    # 'output' actions: store the extra field verbatim
                    if len(actionValues[i]) > 3 and actionValues[i][1] == 'output':
                        fileName = "data_%d_%s_%s_fileName.txt" % (i, actionValues[i][1], actionValues[i][0])
                        data = actionValues[i][3]
                        zip_buffer.writestr(fileName, data)

            # Collect |monitorData| crash information from Peach.
            for k, v in monitorData.items():
                zip_buffer.writestr(k, v)

            zip_buffer.close()
            # delete=False: the file must outlive this handle so submit() can read it by name
            with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as testcase:
                buffer.seek(0)
                testcase.write(buffer.getvalue())
                testcase.close()
                # Submit crash report with testcase to FuzzManager.
                collector.submit(crash_info, testcase.name, metaData=None)
示例#8
0
        def add_fault(self):
            """Report the current fault bucket to FuzzManager."""
            # Setup FuzzManager with target information and platform data.
            program_configuration = ProgramConfiguration.fromBinary(
                self.binary)

            # Prepare FuzzManager with crash information.
            stdout = "N/A"  # Todo: There is no plain stdout logger yet.
            stderr = "N/A"  # Todo: There is no plain stderr logger yet.
            # fall back to an empty dict: the previous "N/A" default is a str,
            # and str has no .get(), so a missing "crashlog" key would raise
            # AttributeError instead of yielding auxdat == "N/A"
            auxdat = self.bucket.get("crashlog", {}).get("data", "N/A")
            metaData = None
            testcase = self.save_bucket_as_zip(self.bucket)
            crash_info = CrashInfo.fromRawCrashData(stdout, stderr,
                                                    program_configuration,
                                                    auxdat)

            # Submit crash report with testcase to FuzzManager.
            collector = Collector(tool="dharma")
            collector.submit(crash_info, testcase, metaData)
示例#9
0
def make_collector():
    """Creates a jsfunfuzz collector specifying ~/sigcache as the signature cache dir

    Returns:
        Collector: jsfunfuzz collector object
    """
    cache_dir = Path.home() / "sigcache"
    # tolerate the directory already existing between runs
    cache_dir.mkdir(exist_ok=True)  # pylint: disable=no-member
    return Collector(tool="jsfunfuzz", sigCacheDir=str(cache_dir))
示例#10
0
def crashentry_data(crash_id, raw=False):
    """Get the CrashEntry data for the specified FuzzManager crash

    Args:
        crash_id (int): ID of the requested crash on the server side
        raw (bool): include rawCrashData, rawStderr, rawStdout in result

    Returns:
        dict: crash entry data (crashmanager.models.CrashEntry)
    """
    collector = Collector()

    LOG.debug("crash %d, downloading metadata...", crash_id)

    url = "%s://%s:%d/crashmanager/rest/crashes/%s/" % (
        collector.serverProtocol,
        collector.serverHost,
        collector.serverPort,
        crash_id,
    )
    # include_raw toggles the large raw log fields in the response
    params = {"include_raw": "1" if raw else "0"}
    return collector.get(url, params=params).json()
示例#11
0
def createCollector(tool):  # pylint: disable=invalid-name
    """Create a jsfunfuzz Collector backed by the ~/sigcache signature cache.

    Args:
        tool (str): tool name, must be "jsfunfuzz"

    Returns:
        Collector: collector configured for jsfunfuzz
    """
    assert tool == "jsfunfuzz"
    cache_dir = os.path.normpath(
        os.path.expanduser(os.path.join("~", "sigcache")))
    # exist_ok=True only tolerates the directory already existing; unlike the
    # previous blanket `except OSError: pass`, real failures (e.g. permission
    # errors) are reported instead of silently ignored
    os.makedirs(cache_dir, exist_ok=True)
    return Collector(sigCacheDir=cache_dir, tool=tool)
示例#12
0
def createCollector(tool):
    """Return a Collector that uses ~/sigcache as its signature cache dir."""
    assert tool in ("DOMFuzz", "jsfunfuzz")
    cacheDir = os.path.normpath(
        os.path.expanduser(os.path.join("~", "sigcache")))
    try:
        os.mkdir(cacheDir)
    except OSError:
        # cacheDir already exists
        pass
    return Collector(sigCacheDir=cacheDir, tool=tool)
示例#13
0
def get_signature(bucket_id):
    """
    Download the signature for the specified FuzzManager bucket.

    Args:
        bucket_id (int): ID of the requested bucket on the server side

    Returns:
        str: temp filename to the JSON signature. caller must remove filename when done
    """
    collector = Collector()

    url = "%s://%s:%d/crashmanager/rest/buckets/%d/" % (
        collector.serverProtocol,
        collector.serverHost,
        collector.serverPort,
        bucket_id,
    )
    bucket = collector.get(url).json()

    # persist the signature field to a temp file owned by the caller
    handle, filename = tempfile.mkstemp(suffix=".json")
    with os.fdopen(handle, "w") as sig_fp:
        sig_fp.write(bucket["signature"])

    return filename
示例#14
0
def change_quality(crash_id, quality):
    """Update a FuzzManager crash entry quality.

    Args:
        crash_id (int): Crash ID on FuzzManager server
        quality (int): Quality constant defined in FuzzManagerReporter.QUAL_*

    Returns:
        None
    """
    LOG.info("Updating crash %d to quality %s", crash_id, FuzzManagerReporter.quality_name(quality))
    coll = Collector()

    url = "%s://%s:%d/crashmanager/rest/crashes/%d/" \
        % (coll.serverProtocol, coll.serverHost, coll.serverPort, crash_id)
    try:
        # reuse the collector created above instead of instantiating a second one
        coll.patch(url, data={"testcase_quality": quality})
    except RuntimeError as exc:
        # let 404's go .. evidently the crash was deleted
        if str(exc) == "Unexpected HTTP response: 404":
            LOG.warning("Failed to update (404), does the crash still exist?")
        else:
            raise
示例#15
0
def test_collector_generate_search(tmp_path):
    '''Test sigcache generation and search'''
    # create a cache dir
    sig_dir = tmp_path / 'sigcache'
    sig_dir.mkdir()

    # create a collector
    collector = Collector(sigCacheDir=str(sig_dir))

    # generate a signature from the crash data
    config = ProgramConfiguration('mozilla-central',
                                  'x86-64',
                                  'linux',
                                  version='ba0bc4f26681')
    crash = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(),
                                       config)
    sig = collector.generate(crash, False, False, 8)
    # the new signature file should be the only entry in the cache dir
    assert {str(entry) for entry in sig_dir.iterdir()} == {sig}

    # search the sigcache and see that it matches the original
    match, meta = collector.search(crash)
    assert match == sig
    assert meta is None

    # write metadata and make sure that's returned if it exists
    base, _ = os.path.splitext(sig)
    with open(base + '.metadata', 'w') as meta_fp:
        meta_fp.write('{}')
    match, meta = collector.search(crash)
    assert match == sig
    assert meta == {}

    # make sure another crash doesn't match
    crash = CrashInfo.fromRawCrashData([], [], config)
    match, meta = collector.search(crash)
    assert match is None
    assert meta is None

    # returns None if sig generation fails
    assert collector.generate(crash, True, True, 8) is None
示例#16
0
def test_collector_generate_search(tmpdir):
    '''Test sigcache generation and search'''
    # create a cache dir
    cache_dir = tmpdir.mkdir('sigcache').strpath

    # create a collector
    collector = Collector(sigCacheDir=cache_dir)

    # generate a signature from the crash data
    config = ProgramConfiguration('mozilla-central', 'x86-64', 'linux', version='ba0bc4f26681')
    crashInfo = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), config)
    sig = collector.generate(crashInfo, False, False, 8)
    # the generated signature file should be the only entry in the cache dir
    assert {f.strpath for f in tmpdir.join('sigcache').listdir()} == {sig}

    # search the sigcache and see that it matches the original
    sigMatch, meta = collector.search(crashInfo)
    assert sigMatch == sig
    # no .metadata file exists yet, so no metadata is returned
    assert meta is None

    # write metadata and make sure that's returned if it exists
    sigBase, _ = os.path.splitext(sig)
    with open(sigBase + '.metadata', 'w') as f:
        f.write('{}')
    sigMatch, meta = collector.search(crashInfo)
    assert sigMatch == sig
    assert meta == {}

    # make sure another crash doesn't match
    crashInfo = CrashInfo.fromRawCrashData([], [], config)
    sigMatch, meta = collector.search(crashInfo)
    assert sigMatch is None
    assert meta is None

    # returns None if sig generation fails
    result = collector.generate(crashInfo, True, True, 8)
    assert result is None
示例#17
0
class Bucket(object):
    """Get Bucket data for a specified CrashManager bucket."""

    def __init__(self, bucket_id):
        """Initialize a Bucket instance.

        Arguments:
            bucket_id (int): ID of the requested bucket on the server side
        """
        self._bucket_id = bucket_id
        # local path of the downloaded signature (set lazily by signature_path)
        self._sig_filename = None
        self._coll = Collector()
        self._url = "%s://%s:%d/crashmanager/rest/buckets/%d/" % (
            self._coll.serverProtocol,
            self._coll.serverHost,
            self._coll.serverPort,
            bucket_id,
        )
        # cached JSON payload for this bucket; fetched on first attribute access
        self._data = None

    @property
    def bucket_id(self):
        """int: ID of the bucket on the server side."""
        return self._bucket_id

    def __getattr__(self, name):
        # lazily fetch the bucket data from the server on first access and
        # expose each field of the JSON payload as an attribute
        if self._data is None:
            self._data = self._coll.get(self._url).json()
        if name not in self._data:
            raise AttributeError(
                "'%s' object has no attribute '%s' (has: %s)"
                % (type(self).__name__, name, list(self._data))
            )
        return self._data[name]

    def __setattr__(self, name, value):
        # only private (underscore-prefixed) attributes may be set locally;
        # server-side fields are read-only
        if name.startswith("_"):
            super().__setattr__(name, value)
            return
        raise AttributeError("can't set attribute")

    def cleanup(self):
        """Cleanup any resources held by this instance.

        Arguments:
            None

        Returns:
            None
        """
        # remove the temp directory created by signature_path (if any)
        if self._sig_filename is not None:
            rmtree(str(self._sig_filename.parent))

    def iter_crashes(self, quality_filter=None):
        """Fetch all crash IDs for this FuzzManager bucket.
        Only crashes with testcases are returned.

        Arguments:
            quality_filter (int): Filter crashes by quality value (None for all)

        Returns:
            generator: generator of CrashEntry
        """

        def _get_results(endpoint, params=None):
            """
            Function to get paginated results from FuzzManager

            Args:
                endpoint (str): FuzzManager REST API to query (eg. "crashes").
                params (dict): Params to pass through to requests.get

            Returns:
                generator: objects returned by FuzzManager (as dicts)
            """
            LOG.debug("first request to /%s/", endpoint)

            url = "%s://%s:%d/crashmanager/rest/%s/" \
                % (self._coll.serverProtocol, self._coll.serverHost,
                   self._coll.serverPort, endpoint)

            response = self._coll.get(url, params=params).json()

            # follow the server's "next" links until the result set is exhausted
            while True:
                LOG.debug("got %d/%d %s", len(response["results"]), response["count"], endpoint)
                while response["results"]:
                    yield response["results"].pop()

                if response["next"] is None:
                    break

                LOG.debug("next request to /%s/", endpoint)
                response = self._coll.get(response["next"]).json()

        # Get all crashes for bucket
        query_args = [
            ("op", "AND"),
            ("bucket", self.bucket_id),
        ]
        if quality_filter is not None:
            query_args.append(("testcase__quality", quality_filter))
        query = json.dumps(dict(query_args))

        n_yielded = 0
        for crash in _get_results("crashes", params={"query": query, "include_raw": "0"}):

            if not crash["testcase"]:
                LOG.warning("crash %d has no testcase, skipping", crash["id"])
                continue

            n_yielded += 1
            LOG.debug("yielding crash #%d", n_yielded)
            result = CrashEntry(crash["id"])
            # pre-populate the entry's cache with the data we already fetched
            result._data = crash  # pylint: disable=protected-access
            yield result

    def signature_path(self):
        """Download the bucket data from CrashManager.

        Arguments:
            None

        Returns:
            Path: Path on disk where signature exists.
        """
        if self._sig_filename is not None:
            return self._sig_filename

        # temp dir is cleaned up by cleanup(), or immediately on error below
        tmpd = Path(mkdtemp(prefix="bucket-%d-" % (self._bucket_id,),
                            dir=grz_tmp("fuzzmanager")))
        try:
            sig_basename = "%d.signature" % (self._bucket_id,)
            sig_filename = tmpd / sig_basename
            sig_filename.write_text(self.signature)
            # write bucket stats alongside the signature as .metadata
            sigmeta_filename = sig_filename.with_suffix(".metadata")
            sigmeta_filename.write_text(
                json.dumps(
                    {
                        "size": self.size,
                        "frequent": self.frequent,
                        "shortDescription": self.shortDescription,
                        "testcase__quality": self.best_quality,
                    }
                )
            )
        except:  # noqa pragma: no cover pylint: disable=bare-except
            rmtree(str(tmpd))
            raise

        self._sig_filename = sig_filename
        return self._sig_filename
示例#18
0
def test_collector_submit(live_server, tmpdir, fm_user, monkeypatch):
    '''Test crash submission'''
    monkeypatch.setattr(os.path, 'expanduser', lambda path: tmpdir.strpath)  # ensure fuzzmanager config is not used
    monkeypatch.setattr(time, 'sleep', lambda t: None)

    # create a collector pointed at the live test server
    url = urlsplit(live_server.url)
    collector = Collector(sigCacheDir=tmpdir.mkdir('sigcache').strpath,
                          serverHost=url.hostname,
                          serverPort=url.port,
                          serverProtocol=url.scheme,
                          serverAuthToken=fm_user.token,
                          clientId='test-fuzzer1',
                          tool='test-tool')
    testcase_path = tmpdir.mkdir('testcase').join('testcase.js').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(exampleTestCase)
    config = ProgramConfiguration('mozilla-central', 'x86-64', 'linux', version='ba0bc4f26681')
    crashInfo = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), config)

    # submit a crash to test server using collector
    result = collector.submit(crashInfo, testcase_path)

    # see that the issue was created in the server
    entry = CrashEntry.objects.get(pk=result['id'])
    assert entry.rawStdout == ''
    assert entry.rawStderr == asanTraceCrash
    assert entry.rawCrashData == ''
    assert entry.tool.name == 'test-tool'
    assert entry.client.name == 'test-fuzzer1'
    assert entry.product.name == config.product
    assert entry.product.version == config.version
    assert entry.platform.name == config.platform
    assert entry.os.name == config.os
    assert entry.testcase.quality == 0
    assert not entry.testcase.isBinary
    assert entry.testcase.size == len(exampleTestCase)
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == exampleTestCase
    assert entry.metadata == ''
    assert entry.env == ''
    assert entry.args == ''

    # create a test config (read via the patched expanduser above)
    with open(tmpdir.join('.fuzzmanagerconf').strpath, 'w') as fp:
        fp.write('[Main]\n')
        fp.write('serverhost = %s\n' % url.hostname)
        fp.write('serverport = %d\n' % url.port)
        fp.write('serverproto = %s\n' % url.scheme)
        fp.write('serverauthtoken = %s\n' % fm_user.token)

    # try a binary testcase via cmd line
    testcase_path = tmpdir.join('testcase.bin').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(b'\0')
    stdout = tmpdir.join('stdout.txt').strpath
    with open(stdout, 'w') as fp:
        fp.write('stdout data')
    stderr = tmpdir.join('stderr.txt').strpath
    with open(stderr, 'w') as fp:
        fp.write('stderr data')
    crashdata = tmpdir.join('crashdata.txt').strpath
    with open(crashdata, 'w') as fp:
        fp.write(asanTraceCrash)
    result = main([
        '--submit',
        '--tool', 'tool2',
        '--product', 'mozilla-inbound',
        '--productversion', '12345',
        '--os', 'minix',
        '--platform', 'pdp11',
        '--env', 'PATH=/home/ken', 'LD_PRELOAD=hack.so',
        '--metadata', 'var1=val1', 'var2=val2',
        '--args', './myprog',
        '--testcase', testcase_path,
        '--testcasequality', '5',
        '--stdout', stdout,
        '--stderr', stderr,
        '--crashdata', crashdata,
    ])
    assert result == 0
    entry = CrashEntry.objects.get(pk__gt=entry.id)  # newer than the last result, will fail if the test db is active
    assert entry.rawStdout == 'stdout data'
    assert entry.rawStderr == 'stderr data'
    assert entry.rawCrashData == asanTraceCrash
    assert entry.tool.name == 'tool2'
    assert entry.client.name == platform.node()
    assert entry.product.name == 'mozilla-inbound'
    assert entry.product.version == '12345'
    assert entry.platform.name == 'pdp11'
    assert entry.os.name == 'minix'
    assert entry.testcase.quality == 5
    assert entry.testcase.isBinary
    assert entry.testcase.size == 1
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == b'\0'
    assert json.loads(entry.metadata) == {'var1': 'val1', 'var2': 'val2'}
    assert json.loads(entry.env) == {'PATH': '/home/ken', 'LD_PRELOAD': 'hack.so'}
    assert json.loads(entry.args) == ['./myprog']

    # simulate a persistent server error; sleep is patched so any retries are instant
    class response_t(object):
        status_code = 500
        text = "Error"

    def mypost(_session, _url, _data, headers=None):
        return response_t()
    monkeypatch.setattr(time, 'sleep', lambda t: None)
    monkeypatch.setattr(requests.Session, 'post', mypost)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.submit(crashInfo, testcase_path)
示例#19
0
def test_collector_refresh(tmpdir, monkeypatch, capsys):
    '''Test signature downloads'''
    # create a test signature zip
    test2 = tmpdir.join('test2.signature').strpath
    with open(test2, 'w') as fp:
        fp.write('test2')
    with zipfile.ZipFile(tmpdir.join('out.zip').strpath, 'w') as zf:
        zf.write(test2, 'test2.signature')

    # create an old signature
    tmpdir.mkdir('sigs')
    with open(tmpdir.join('sigs', 'test1.signature').strpath, 'w'):
        pass
    with open(tmpdir.join('sigs', 'other.txt').strpath, 'w'):
        pass
    assert {f.basename for f in tmpdir.join('sigs').listdir()} == {'test1.signature', 'other.txt'}

    with open(tmpdir.join('out.zip').strpath, 'rb') as fp:
        class response_t(object):
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        # this asserts the expected arguments and returns the open handle to out.zip as 'raw' which is read by refresh()
        def myget(_session, url, stream=None, headers=None):
            assert url == 'gopher://aol.com:70/crashmanager/rest/signatures/download/'
            assert stream is True
            assert headers == {'Authorization': 'Token token'}
            return response_t()
        monkeypatch.setattr(requests.Session, 'get', myget)

        # create Collector
        collector = Collector(sigCacheDir=tmpdir.join('sigs').strpath,
                              serverHost='aol.com',
                              serverPort=70,
                              serverProtocol='gopher',
                              serverAuthToken='token',
                              clientId='test-fuzzer1',
                              tool='test-tool')

        # call refresh
        collector.refresh()

    # check that it worked: refresh() replaced the old *.signature file
    # but left the unrelated other.txt alone
    assert {f.basename for f in tmpdir.join('sigs').listdir()} == {'test2.signature', 'other.txt'}
    with open(tmpdir.join('sigs', 'test2.signature').strpath) as fp:
        assert fp.read() == 'test2'
    assert 'other.txt' in capsys.readouterr()[1]  # should have had a warning about unrecognized file

    # check that 404 raises
    monkeypatch.undo()

    class response_t(object):  # noqa
        status_code = requests.codes["not found"]
        text = "Not found"

    def myget(_session, _url, stream=None, headers=None):
        return response_t()
    monkeypatch.setattr(requests.Session, 'get', myget)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.refresh()

    # check that bad zips raise errors
    # first: a response body that is not a zip at all
    monkeypatch.undo()
    with open(tmpdir.join('sigs', 'other.txt').strpath, 'rb') as fp:
        class response_t(object):  # noqa
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        def myget(_session, _url, stream=None, headers=None):
            return response_t()
        monkeypatch.setattr(requests.Session, 'get', myget)
        with pytest.raises(zipfile.BadZipfile, match='not a zip file'):
            collector.refresh()
    # second: a structurally valid zip with a corrupted member checksum
    monkeypatch.undo()
    with open(tmpdir.join('out.zip').strpath, 'r+b') as fp:
        # corrupt the CRC field for the signature file in the zip
        fp.seek(0x42)
        fp.write(b'\xFF')
    with open(tmpdir.join('out.zip').strpath, 'rb') as fp:
        class response_t(object):  # noqa
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        def myget(_session, _url, stream=None, headers=None):
            return response_t()
        monkeypatch.setattr(requests.Session, 'get', myget)
        with pytest.raises(RuntimeError, match='Bad CRC'):
            collector.refresh()
示例#20
0
    def _submit_report(self, report, test_cases):
        """Submit |report| (with |test_cases|) to the FuzzManager server."""
        # search for a cached signature match and if the signature
        # is already in the cache and marked as frequent, don't bother submitting
        # (the lock file under grz_tmp() serializes sigcache access)
        with InterProcessLock(pathjoin(grz_tmp(), "fm_sigcache.lock")):
            collector = Collector()
            cache_sig_file, cache_metadata = collector.search(report.crash_info)
            if cache_metadata is not None:
                if cache_metadata["frequent"]:
                    LOG.info("Frequent crash matched existing signature: %s",
                             cache_metadata["shortDescription"])
                    if not self.force_report:
                        return
                elif "bug__id" in cache_metadata:
                    LOG.info("Crash matched existing signature (bug %s): %s",
                             cache_metadata["bug__id"],
                             cache_metadata["shortDescription"])
                    # we will still report this one, but no more
                    cache_metadata["frequent"] = True
                # there is already a signature, initialize count
                cache_metadata.setdefault("_grizzly_seen_count", 0)
            else:
                # there is no signature, create one locally so we can count
                # the number of times we've seen it
                max_frames = report.crash_signature_max_frames(report.crash_info)
                cache_sig_file = collector.generate(report.crash_info, numFrames=max_frames)
                cache_metadata = {
                    "_grizzly_seen_count": 0,
                    "frequent": False,
                    "shortDescription": report.crash_info.createShortSignature()}
            if cache_sig_file is None:
                # no signature was found or created for this report
                if self._ignored(report):
                    LOG.info("Report is unsupported and is in ignore list")
                    return
                LOG.warning("Report is unsupported by FM, saved to %r", report.path)
                # TODO: we should check if stackhasher failed too
                raise RuntimeError("Failed to create FM signature")
            # limit the number of times we report per cycle
            cache_metadata["_grizzly_seen_count"] += 1
            if cache_metadata["_grizzly_seen_count"] >= self.MAX_REPORTS:
                # we will still report this one, but no more
                cache_metadata["frequent"] = True
            # persist the updated metadata next to the signature file
            metadata_file = cache_sig_file.replace(".signature", ".metadata")
            with open(metadata_file, "w") as meta_fp:
                dump(cache_metadata, meta_fp)

        # dump test cases and the contained files to working directory
        test_case_meta = []
        for test_number, test_case in enumerate(test_cases):
            test_case_meta.append([test_case.adapter_name, test_case.input_fname])
            dump_path = pathjoin(report.path, "%s-%d" % (report.prefix, test_number))
            if not isdir(dump_path):
                mkdir(dump_path)
            test_case.dump(dump_path, include_details=True)
        report.crash_info.configuration.addMetadata({"grizzly_input": repr(test_case_meta)})
        if test_cases:
            environ_string = " ".join("=".join(kv) for kv in test_cases[0].env_vars.items())
            report.crash_info.configuration.addMetadata({"recorded_envvars": environ_string})
        else:
            # no test case available; flag the report quality accordingly
            self.quality = self.QUAL_NO_TESTCASE
        report.crash_info.configuration.addMetadata(self._extra_metadata)

        # grab screen log (used in automation)
        if getenv("WINDOW") is not None:
            screen_log = pathjoin(getcwd(), ".".join(["screenlog", getenv("WINDOW")]))
            if isfile(screen_log):
                target_log = pathjoin(report.path, "screenlog.txt")
                copyfile(screen_log, target_log)
                Report.tail(target_log, 10240)  # limit to last 10K

        # add results to a zip file
        zip_name = "%s.zip" % (report.prefix,)
        with ZipFile(zip_name, mode="w", compression=ZIP_DEFLATED) as zip_fp:
            # add test files
            for dir_name, _, dir_files in walk(report.path):
                arc_path = relpath(dir_name, report.path)
                for file_name in dir_files:
                    zip_fp.write(
                        pathjoin(dir_name, file_name),
                        arcname=pathjoin(arc_path, file_name))

        # override tool name if specified
        if self.tool is not None:
            collector.tool = self.tool

        # announce shortDescription if crash is not in a bucket
        if cache_metadata["_grizzly_seen_count"] == 1 and not cache_metadata["frequent"]:
            LOG.info("Submitting new crash %r", cache_metadata["shortDescription"])
        # submit results to the FuzzManager server
        new_entry = collector.submit(report.crash_info, testCase=zip_name, testCaseQuality=self.quality)
        LOG.info("Logged %d with quality %d", new_entry["id"], self.quality)

        # remove zipfile
        if isfile(zip_name):
            unlink(zip_name)
示例#21
0
def bucket_crashes(bucket_id, quality_filter):
    """Fetch all crash IDs for the specified FuzzManager bucket.
    Only crashes with testcases are returned.

    Args:
        bucket_id (int): ID of the requested bucket on the server side
        quality_filter (int): Filter crashes by quality value (None for all)

    Returns:
        generator: generator of crash ID (int)
    """
    coll = Collector()

    def _iter_pages(endpoint, params=None):
        """Yield objects from a paginated FuzzManager REST endpoint.

        Args:
            endpoint (str): FuzzManager REST API to query (eg. "crashes").
            params (dict): Params to pass through to requests.get

        Returns:
            generator: objects returned by FuzzManager (as dicts)
        """
        LOG.debug("first request to /%s/", endpoint)
        url = "%s://%s:%d/crashmanager/rest/%s/" % (
            coll.serverProtocol, coll.serverHost, coll.serverPort, endpoint)
        page = coll.get(url, params=params).json()

        while True:
            LOG.debug("got %d/%d %s", len(page["results"]), page["count"], endpoint)
            # drain the current page before following the "next" link
            while page["results"]:
                yield page["results"].pop()

            if page["next"] is None:
                break

            LOG.debug("next request to /%s/", endpoint)
            page = coll.get(page["next"]).json()

    # build the bucket query, optionally restricted by testcase quality
    query_args = [("op", "AND"), ("bucket", bucket_id)]
    if quality_filter is not None:
        query_args.append(("testcase__quality", quality_filter))
    query = json.dumps(collections.OrderedDict(query_args))

    n_yielded = 0
    for crash in _iter_pages("crashes", params={"query": query, "include_raw": "0"}):

        # skip entries that cannot be reproduced locally
        if not crash["testcase"]:
            LOG.warning("crash %d has no testcase, skipping", crash["id"])
            continue

        n_yielded += 1
        LOG.debug("yielding crash #%d", n_yielded)
        yield crash["id"]
示例#22
0
def test_collector_download(tmpdir, monkeypatch):
    '''Test testcase downloads'''
    # fake REST response for the crash-entry query (carries the testcase path)
    class response1_t(object):
        status_code = requests.codes["ok"]
        text = 'OK'

        def json(self):
            return {'id': 123, 'testcase': 'path/to/testcase.txt'}

    # fake HTTP response for the testcase download itself
    class response2_t(object):
        status_code = requests.codes["ok"]
        headers = {'content-disposition': 'foo'}
        text = 'OK'
        content = b'testcase\xFF'

    # myget1 mocks requests.get to return the rest response to the crashentry get
    # NOTE: it returns response1_t(), which is looked up in this function's scope
    # at call time, so the class redefinitions below take effect for later calls.
    def myget1(_session, url, headers=None):
        assert url == 'gopher://aol.com:70/crashmanager/rest/crashes/123/'
        assert headers == {'Authorization': 'Token token'}

        # after the first GET, swap the mock so the follow-up download GET
        # is served by myget2
        monkeypatch.undo()
        monkeypatch.chdir(tmpdir)  # download writes to cwd, so make that tmpdir
        monkeypatch.setattr(requests.Session, 'get', myget2)
        return response1_t()

    # myget2 mocks requests.get to return the testcase data specified in myget1
    def myget2(_session, url, headers=None):
        assert url == 'gopher://aol.com:70/crashmanager/rest/crashes/123/download/'
        assert headers == {'Authorization': 'Token token'}
        return response2_t()
    monkeypatch.setattr(requests.Session, 'get', myget1)

    # create Collector
    collector = Collector(serverHost='aol.com',
                          serverPort=70,
                          serverProtocol='gopher',
                          serverAuthToken='token',
                          tool='test-tool')

    # call download, which queries the crash entry then fetches the testcase
    collector.download(123)

    # check that it worked: file written to cwd (tmpdir) with the raw content
    assert {f.basename for f in tmpdir.listdir()} == {'123.txt'}
    with open('123.txt', 'rb') as fp:
        assert fp.read() == response2_t.content

    # testcase GET returns http error
    class response2_t(object):
        status_code = 404
        text = 'Not found'
    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.download(123)

    # download with no testcase
    class response1_t(object):  # noqa
        status_code = requests.codes["ok"]
        text = 'OK'

        def json(self):
            return {'testcase': ''}
    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    result = collector.download(123)
    assert result is None

    # invalid REST response
    class response1_t(object):  # noqa
        status_code = requests.codes["ok"]
        text = 'OK'

        def json(self):
            return []
    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    with pytest.raises(RuntimeError, match='malformed JSON'):
        collector.download(123)

    # REST query returns http error
    class response1_t(object):  # noqa
        status_code = 404
        text = 'Not found'
    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.download(123)
示例#23
0
def scan_crashes(base_dir,
                 cmdline_path=None,
                 env_path=None,
                 tool_name=None,
                 firefox=None,
                 firefox_prefs=None,
                 firefox_extensions=None,
                 firefox_testpath=None,
                 test_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    # NOTE: test_path was documented and used below but missing from the
    # signature, causing a NameError; it is now an optional parameter
    # (appended last to keep positional callers working).
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        base_env = {}
        test_in_env = None
        if env_path:
            with open(env_path, 'r') as env_file:
                for line in env_file:
                    (name, val) = line.rstrip('\n').split("=", 1)
                    base_env[name] = val

                    # '@@' in a value means the test file name is passed
                    # through this environment variable
                    if '@@' in val:
                        test_in_env = name

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            for idx, line in enumerate(cmdline_file):
                # '@@' marks the argument that receives the test file name
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print(
                "Error: Creating program configuration from binary failed. Check your binary configuration file.",
                file=sys.stderr)
            return 2

        collector = Collector(tool=tool_name)

        if firefox:
            (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], firefox_prefs,
                                                    firefox_extensions,
                                                    firefox_testpath)
            cmdline = ffCmd
            base_env.update(ffEnv)

        for crash_file in crash_files:
            stdin = None
            env = None

            if base_env:
                env = dict(base_env)

            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            elif test_in_env is not None:
                env[test_in_env] = env[test_in_env].replace('@@', crash_file)
            elif test_path is not None:
                shutil.copy(crash_file, test_path)
            else:
                # no way to pass the test by name, feed it via stdin instead
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            print("Processing crash file %s" % crash_file, file=sys.stderr)

            runner = AutoRunner.fromBinaryArgs(cmdline[0],
                                               cmdline[1:],
                                               env=env,
                                               stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # leave a marker so this file is skipped on the next scan
                open(crash_file + ".submitted", 'a').close()
                print("Success: Submitted crash to server.", file=sys.stderr)
            else:
                open(crash_file + ".failed", 'a').close()
                print(
                    "Error: Failed to reproduce the given crash, cannot submit.",
                    file=sys.stderr)

        if firefox:
            ffpInst.clean_up()
示例#24
0
def scan_crashes(base_dir, cmdline_path=None, env_path=None, tool_name=None, test_path=None,
                 firefox=None, firefox_prefs=None, firefox_extensions=None, firefox_testpath=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")

    # Collect unprocessed crash results, skipping our own marker files.
    candidates = []
    for entry_name in os.listdir(crash_dir):
        if not entry_name.startswith("id:"):
            # not a crash result
            continue
        entry = os.path.join(crash_dir, entry_name)
        if entry.endswith((".submitted", ".failed")):
            # one of our own status files
            continue
        if os.path.exists(entry + ".submitted") or os.path.exists(entry + ".failed"):
            # already processed on a previous scan
            continue
        candidates.append(entry)

    if not candidates:
        return

    # Read information required to reproduce the crashes.
    base_env = {}
    test_in_env = None
    if env_path:
        with open(env_path, 'r') as env_file:
            for raw_line in env_file:
                (key, value) = raw_line.rstrip('\n').split("=", 1)
                base_env[key] = value

                # '@@' means the test file name is passed via this variable
                if '@@' in value:
                    test_in_env = key

    if not cmdline_path:
        cmdline_path = os.path.join(base_dir, "cmdline")

    test_idx, cmdline = command_file_to_list(cmdline_path)
    orig_test_arg = cmdline[test_idx] if test_idx is not None else None

    configuration = ProgramConfiguration.fromBinary(cmdline[0])
    if not configuration:
        print("Error: Creating program configuration from binary failed. Check your binary configuration file.", file=sys.stderr)
        return 2

    collector = Collector(tool=tool_name)

    if firefox:
        (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], firefox_prefs, firefox_extensions, firefox_testpath)
        cmdline = ffCmd
        base_env.update(ffEnv)

    for crash_file in candidates:
        stdin = None
        env = dict(base_env) if base_env else None

        # Decide how the test reaches the target: argv, env, file copy or stdin.
        if test_idx is not None:
            cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
        elif test_in_env is not None:
            env[test_in_env] = env[test_in_env].replace('@@', crash_file)
        elif test_path is not None:
            shutil.copy(crash_file, test_path)
        else:
            with open(crash_file, 'r') as crash_fd:
                stdin = crash_fd.read()

        print("Processing crash file %s" % crash_file, file=sys.stderr)

        runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], env=env, stdin=stdin)
        if runner.run():
            crash_info = runner.getCrashInfo(configuration)
            collector.submit(crash_info, crash_file)
            open(crash_file + ".submitted", 'a').close()
            print("Success: Submitted crash to server.", file=sys.stderr)
        else:
            open(crash_file + ".failed", 'a').close()
            print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)

    if firefox:
        ffpInst.clean_up()
示例#25
0
def main(argv=None):
    '''Command line options.

    Parses the command line, builds a ProgramConfiguration for the target
    binary, then repeatedly runs the target under LibFuzzerMonitor,
    submitting new crashes to FuzzManager until the same signature repeats
    too often.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = "%s" % __updated__

    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(usage='%s [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main arguments", description=None)
    fmGroup = parser.add_argument_group(title="FuzzManager specific options",
                              description="""Values for the options listed here are typically
                                          provided through FuzzManager configuration files,
                                          but can be overwritten using these options:""")

    mainGroup.add_argument('--version', action='version', version=program_version_string)
    mainGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run")
    mainGroup.add_argument('--env', dest='env', nargs='+', type=str, help="List of environment variables in the form 'KEY=VALUE'")

    # Settings
    fmGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")
    fmGroup.add_argument("--serverhost", dest="serverhost", help="Server hostname for remote signature management", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile", help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str, help="List of metadata variables in the form 'KEY=VALUE'")

    parser.add_argument('rargs', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)

    if len(argv) == 0:
        parser.print_help()
        return 2

    # process options
    opts = parser.parse_args(argv)

    if not opts.rargs:
        print("Error: No arguments specified", file=sys.stderr)
        return 2

    binary = opts.rargs[0]
    if not os.path.exists(binary):
        print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
        return 2

    configuration = ProgramConfiguration.fromBinary(binary)
    if configuration is None:
        # No binary configuration file: fall back to a manually specified
        # configuration, which requires at least --platform, --product and --os.
        # (This fallback was previously unreachable dead code after a premature
        # "return 2".)
        if opts.platform is None or opts.product is None or opts.os is None:
            print("Error: Must use binary configuration file or specify/configure at least --platform, --product and --os", file=sys.stderr)
            return 2

        configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version)

    # Copy the system environment variables by default and overwrite them
    # if they are specified through env. (A duplicate env-processing block
    # that registered the variables with the configuration twice was removed.)
    env = dict(os.environ)
    if opts.env:
        oenv = dict(kv.split('=', 1) for kv in opts.env)
        configuration.addEnvironmentVariables(oenv)
        for envkey in oenv:
            env[envkey] = oenv[envkey]

    args = opts.rargs[1:]
    if args:
        configuration.addProgramArguments(args)

    metadata = {}
    if opts.metadata:
        metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
        configuration.addMetadata(metadata)

    # Set LD_LIBRARY_PATH for convenience
    if 'LD_LIBRARY_PATH' not in env:
        env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto, serverauthtoken, opts.clientid, opts.tool)

    signature_repeat_count = 0
    last_signature = None

    while True:
        # run the target; LibFuzzer output is consumed from stderr
        process = subprocess.Popen(
                 opts.rargs,
                 # stdout=None,
                 stderr=subprocess.PIPE,
                 env=env,
                 universal_newlines=True
                )

        monitor = LibFuzzerMonitor(process.stderr)
        monitor.start()
        monitor.join()

        print("Process terminated, processing results...", file=sys.stderr)

        trace = monitor.getASanTrace()
        testcase = monitor.getTestcase()

        crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

        # check the local signature cache to avoid duplicate submissions
        (sigfile, sigmeta) = collector.search(crashInfo)

        if sigfile is not None:
            if last_signature == sigfile:
                signature_repeat_count += 1
            else:
                last_signature = sigfile
                signature_repeat_count = 0

            print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr)
        else:
            # new crash: create a local signature, then submit with testcase
            collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8)
            collector.submit(crashInfo, testcase)
            print("Successfully submitted crash.", file=sys.stderr)

        if signature_repeat_count >= 10:
            print("Too many crashes with the same signature, exiting...", file=sys.stderr)
            break
示例#26
0
def test_collector_refresh(capsys, tmp_path):
    '''Test signature downloads'''
    # create a test signature zip (what the server would serve for refresh)
    test2_path = tmp_path / 'test2.signature'
    with test2_path.open('w') as fp:
        fp.write('test2')
    outzip_path = tmp_path / "out.zip"
    with zipfile.ZipFile(str(outzip_path), 'w') as zf:
        zf.write(str(test2_path), 'test2.signature')

    # create an old signature (plus a non-signature file that must survive)
    sigs_path = tmp_path / 'sigs'
    sigs_path.mkdir()
    (sigs_path / 'test1.signature').touch()
    (sigs_path / 'other.txt').touch()
    assert {f.name
            for f in sigs_path.iterdir()} == {'test1.signature', 'other.txt'}

    with outzip_path.open('rb') as fp:

        class response_t(object):
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        # this asserts the expected arguments and returns the open handle to out.zip as 'raw' which is read by refresh()
        def myget(url, stream=None, headers=None):
            assert url == 'gopher://aol.com:70/crashmanager/rest/signatures/download/'
            assert stream is True
            assert headers == {'Authorization': 'Token token'}
            return response_t()

        # create Collector
        collector = Collector(sigCacheDir=str(sigs_path),
                              serverHost='aol.com',
                              serverPort=70,
                              serverProtocol='gopher',
                              serverAuthToken='token',
                              clientId='test-fuzzer1',
                              tool='test-tool')
        collector._session.get = myget

        # call refresh
        collector.refresh()

    # check that it worked: old signature replaced, non-signature file kept
    assert {f.name
            for f in sigs_path.iterdir()} == {'test2.signature', 'other.txt'}
    assert (sigs_path / 'test2.signature').read_text() == 'test2'
    assert 'other.txt' in capsys.readouterr()[
        1]  # should have had a warning about unrecognized file

    # check that 404 raises

    class response_t(object):  # noqa
        status_code = requests.codes["not found"]
        text = "Not found"

    collector._session.get = lambda *_, **__: response_t()

    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.refresh()

    # check that bad zips raise errors (other.txt is empty, not a zip)
    with (sigs_path / 'other.txt').open('rb') as fp:

        class response_t(object):  # noqa
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        collector._session.get = lambda *_, **__: response_t()

        with pytest.raises(zipfile.BadZipfile, match='not a zip file'):
            collector.refresh()

    with outzip_path.open('r+b') as fp:
        # corrupt the CRC field for the signature file in the zip
        fp.seek(0x42)
        fp.write(b'\xFF')
    with outzip_path.open('rb') as fp:

        class response_t(object):  # noqa
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        collector._session.get = lambda *_, **__: response_t()

        with pytest.raises(RuntimeError, match='Bad CRC'):
            collector.refresh()
示例#27
0
class CrashEntry(object):
    """Get the CrashEntry data for the specified CrashManager crash.

    Attributes:
        crash_id (int): the server ID for the crash
        see crashmanager.serializers.CrashEntrySerializer
    """
    # Fields only returned by the server when "include_raw=1" is requested.
    RAW_FIELDS = frozenset({"rawCrashData", "rawStderr", "rawStdout"})

    def __init__(self, crash_id):
        """Initialize CrashEntry.

        Arguments:
            crash_id (int): ID of the requested crash on the server side
        """
        self._crash_id = crash_id
        self._coll = Collector()
        # REST endpoint for this specific crash entry
        self._url = "%s://%s:%d/crashmanager/rest/crashes/%d/" % (
            self._coll.serverProtocol,
            self._coll.serverHost,
            self._coll.serverPort,
            crash_id,
        )
        self._data = None  # lazily fetched entry data (see __getattr__)
        self._tc_filename = None  # local testcase path once downloaded

    @property
    def crash_id(self):
        """int: server-side ID of this crash (read-only)."""
        return self._crash_id

    def __getattr__(self, name):
        """Proxy attribute access to the server-side crash entry fields.

        The entry is fetched lazily on first access; if a raw field is
        requested but missing from the cached data, the entry is re-fetched
        with include_raw=1.

        Raises:
            AttributeError: the requested field does not exist on the entry.
        """
        if self._data is None or (name in self.RAW_FIELDS and name not in self._data):
            need_raw = "1" if name in self.RAW_FIELDS else "0"
            self._data = self._coll.get(self._url, params={"include_raw": need_raw}).json()
        if name not in self._data:
            raise AttributeError(
                "'%s' object has no attribute '%s' (has: %s)"
                % (type(self).__name__, name, list(self._data))
            )
        return self._data[name]

    def __setattr__(self, name, value):
        """Set an attribute; only "testcase_quality" is pushed to the server.

        Underscore-prefixed attributes are stored locally as normal; any
        other name raises AttributeError.
        """
        if name.startswith("_"):
            super().__setattr__(name, value)
            return
        if name != "testcase_quality":
            raise AttributeError("can't set attribute")
        self._coll.patch(self._url, data={name: value})
        # keep the cached copy in sync with the server
        if self._data:
            self._data[name] = value

    def cleanup(self):
        """Cleanup any resources held by this instance.

        Arguments:
            None

        Returns:
            None
        """
        if self._tc_filename is not None:
            self._tc_filename.unlink()

    def testcase_path(self):
        """Download the testcase data from CrashManager.

        The download only happens once; subsequent calls return the cached
        local path.

        Arguments:
            None

        Returns:
            Path: Path on disk where testcase exists.
        """
        if self._tc_filename is not None:
            return self._tc_filename

        dlurl = self._url + "download/"
        response = self._coll.get(dlurl)

        if "content-disposition" not in response.headers:
            raise RuntimeError("Server sent malformed response: %r" % (response,))  # pragma: no cover

        handle, filename = mkstemp(dir=grz_tmp("fuzzmanager"),
            prefix="crash-%d-" % (self.crash_id,), suffix=Path(self.testcase).suffix
        )
        try:
            with open(handle, "wb") as output:
                output.write(response.content)
        except:  # noqa pragma: no cover pylint: disable=bare-except
            # don't leave a partial file behind on failure
            unlink(filename)
            raise
        self._tc_filename = Path(filename)
        return self._tc_filename
示例#28
0
def scan_crashes(base_dir, cmdline_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            for idx, line in enumerate(cmdline_file):
                # '@@' marks the argument that receives the test file name
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        # NOTE: a leftover debug "print(cmdline)" to stdout was removed here

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print(
                "Error: Creating program configuration from binary failed. Check your binary configuration file.",
                file=sys.stderr)
            return 2

        collector = Collector()

        for crash_file in crash_files:
            stdin = None

            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            else:
                # no '@@' placeholder: feed the test via stdin instead
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            runner = AutoRunner.fromBinaryArgs(cmdline[0],
                                               cmdline[1:],
                                               stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # leave a marker so this file is skipped on the next scan
                open(crash_file + ".submitted", 'a').close()
            else:
                open(crash_file + ".failed", 'a').close()
                print(
                    "Error: Failed to reproduce the given crash, cannot submit.",
                    file=sys.stderr)
示例#29
0
    stdout

# NOTE(review): the repeated __main__ guards below look like converted
# notebook cells; names such as stdout, stderr, configuration and gui_driver
# are presumably defined in earlier cells — confirm before running standalone.
if __name__ == '__main__':
    crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
    print(crashInfo)

### Collector

if __name__ == '__main__':
    print('\n### Collector')

if __name__ == '__main__':
    from Collector.Collector import Collector  # type: ignore

if __name__ == '__main__':
    collector = Collector()

# submit the crash built above to the configured FuzzManager server
if __name__ == '__main__':
    collector.submit(crashInfo)

### Inspecting Crashes

if __name__ == '__main__':
    print('\n### Inspecting Crashes')

if __name__ == '__main__':
    gui_driver.refresh()

if __name__ == '__main__':
    Image(gui_driver.get_screenshot_as_png())
示例#30
0
    def _submit_report(self, report, test_cases):
        """Submit a report and its test cases to a FuzzManager server.

        Arguments:
            report: report to submit (provides crash_info, path, prefix,
                is_hang).
            test_cases: iterable of test cases related to the report.

        Returns:
            int or None: server ID of the new crash entry, or None if the
                report was skipped (frequent signature or ignore list).
        """
        collector = Collector()

        if not self.force_report:
            # search for a cached signature match
            with InterProcessLock(str(Path(grz_tmp()) / "fm_sigcache.lock")):
                _, cache_metadata = collector.search(report.crash_info)

            # check if signature has been marked as frequent in FM
            if cache_metadata is not None and cache_metadata["frequent"]:
                LOG.info(
                    "Frequent crash matched existing signature: %s",
                    cache_metadata["shortDescription"],
                )
                return None

        if self._ignored(report):
            LOG.info("Report is in ignore list")
            return None

        if report.is_hang:
            self.add_extra_metadata("is_hang", True)

        # dump test cases and the contained files to working directory
        test_case_meta = []
        for test_number, test_case in enumerate(test_cases):
            test_case_meta.append([test_case.adapter_name, test_case.input_fname])
            dump_path = report.path / ("%s-%d" % (report.prefix, test_number))
            dump_path.mkdir(exist_ok=True)
            test_case.dump(dump_path, include_details=True)
        report.crash_info.configuration.addMetadata(
            {"grizzly_input": repr(test_case_meta)}
        )
        if test_cases:
            # record the environment of the first test case with the report
            environ_string = " ".join(
                "=".join(kv) for kv in test_cases[0].env_vars.items()
            )
            report.crash_info.configuration.addMetadata(
                {"recorded_envvars": environ_string}
            )
        else:
            # no test case available; flag reduced quality accordingly
            self.quality = Quality.NO_TESTCASE
        report.crash_info.configuration.addMetadata(self._extra_metadata)

        # TODO: this should likely move to ffpuppet
        # grab screen log (used in automation)
        if getenv("WINDOW") is not None:
            screen_log = Path.cwd() / ("screenlog.%s" % (getenv("WINDOW"),))
            if screen_log.is_file():
                target_log = report.path / "screenlog.txt"
                copyfile(str(screen_log), str(target_log))
                Report.tail(target_log, 10240)  # limit to last 10K

        with TemporaryDirectory(prefix="fm-zip", dir=grz_tmp()) as tmp_dir:
            # add results to a zip file
            zip_name = Path(tmp_dir) / ("%s.zip" % (report.prefix,))
            with ZipFile(zip_name, mode="w", compression=ZIP_DEFLATED) as zip_fp:
                # add test files
                for entry in report.path.rglob("*"):
                    if entry.is_file():
                        zip_fp.write(
                            str(entry), arcname=str(entry.relative_to(report.path))
                        )
            # override tool name if specified
            if self.tool is not None:
                collector.tool = self.tool

            # submit results to the FuzzManager server
            new_entry = collector.submit(
                report.crash_info, testCase=zip_name, testCaseQuality=self.quality.value
            )
        LOG.info("Logged %d (%s)", new_entry["id"], self.quality.name)

        return new_entry["id"]
示例#31
0
def main(argv=None):
    """Command line driver for AFL/libFuzzer management.

    Args:
        argv (list): Argument list to parse; defaults to sys.argv[1:].

    Returns:
        int: Exit code — 0 on success, 2 on usage or configuration errors.
    """
    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(usage='%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main Options", description=None)
    aflGroup = parser.add_argument_group(title="AFL Options", description="Use these arguments in AFL mode")
    libfGroup = parser.add_argument_group(title="Libfuzzer Options", description="Use these arguments in Libfuzzer mode")
    fmGroup = parser.add_argument_group(title="FuzzManager Options", description="Use these to specify FuzzManager parameters")

    mainGroup.add_argument("--libfuzzer", dest="libfuzzer", action='store_true', help="Enable LibFuzzer mode")
    mainGroup.add_argument("--aflfuzz", dest="aflfuzz", action='store_true', help="Enable AFL mode")
    mainGroup.add_argument("--fuzzmanager", dest="fuzzmanager", action='store_true', help="Use FuzzManager to submit crash results")

    libfGroup.add_argument('--env', dest='env', nargs='+', type=str, help="List of environment variables in the form 'KEY=VALUE'")
    libfGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run")
    libfGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")

    fmGroup.add_argument("--fuzzmanager-toolname", dest="fuzzmanager_toolname", help="Override FuzzManager tool name (for submitting crash results)")
    fmGroup.add_argument("--custom-cmdline-file", dest="custom_cmdline_file", help="Path to custom cmdline file", metavar="FILE")
    fmGroup.add_argument("--env-file", dest="env_file", help="Path to a file with additional environment variables", metavar="FILE")
    fmGroup.add_argument("--serverhost", help="Server hostname for remote signature management.", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile", help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str, help="List of metadata variables in the form 'KEY=VALUE'")

    aflGroup.add_argument("--s3-queue-upload", dest="s3_queue_upload", action='store_true', help="Use S3 to synchronize queues")
    aflGroup.add_argument("--s3-queue-cleanup", dest="s3_queue_cleanup", action='store_true', help="Cleanup S3 queue entries older than specified refresh interval")
    aflGroup.add_argument("--s3-queue-status", dest="s3_queue_status", action='store_true', help="Display S3 queue status")
    aflGroup.add_argument("--s3-build-download", dest="s3_build_download", help="Use S3 to download the build for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-build-upload", dest="s3_build_upload", help="Use S3 to upload a new build for the specified project", metavar="FILE")
    aflGroup.add_argument("--s3-corpus-download", dest="s3_corpus_download", help="Use S3 to download the test corpus for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-download-size", dest="s3_corpus_download_size", help="When downloading the corpus, select only SIZE files randomly", metavar="SIZE")
    aflGroup.add_argument("--s3-corpus-upload", dest="s3_corpus_upload", help="Use S3 to upload a test corpus for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-replace", dest="s3_corpus_replace", action='store_true', help="In conjunction with --s3-corpus-upload, deletes all other remote test files")
    aflGroup.add_argument("--s3-corpus-refresh", dest="s3_corpus_refresh", help="Download queues and corpus from S3, combine and minimize, then re-upload.", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-status", dest="s3_corpus_status", action='store_true', help="Display S3 corpus status")
    aflGroup.add_argument("--test-file", dest="test_file", help="Optional path to copy the test file to before reproducing", metavar="FILE")
    aflGroup.add_argument("--afl-timeout", dest="afl_timeout", type=int, default=1000, help="Timeout per test to pass to AFL for corpus refreshing", metavar="MSECS")
    aflGroup.add_argument("--firefox", dest="firefox", action='store_true', help="Test Program is Firefox (requires FFPuppet installed)")
    aflGroup.add_argument("--firefox-prefs", dest="firefox_prefs", help="Path to prefs.js file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-extensions", nargs='+', type=str, dest="firefox_extensions", help="Path extension file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-testpath", dest="firefox_testpath", help="Path to file to open with Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-start-afl", dest="firefox_start_afl", metavar="FILE", help="Start AFL with the given Firefox binary, remaining arguments being passed to AFL")
    aflGroup.add_argument("--s3-refresh-interval", dest="s3_refresh_interval", type=int, default=86400, help="How often the s3 corpus is refreshed (affects queue cleaning)", metavar="SECS")
    aflGroup.add_argument("--afl-output-dir", dest="afloutdir", help="Path to the AFL output directory to manage", metavar="DIR")
    aflGroup.add_argument("--afl-binary-dir", dest="aflbindir", help="Path to the AFL binary directory to use", metavar="DIR")
    aflGroup.add_argument("--afl-stats", dest="aflstats", help="Collect aggregated statistics while scanning output directories", metavar="FILE")
    aflGroup.add_argument("--s3-bucket", dest="s3_bucket", help="Name of the S3 bucket to use", metavar="NAME")
    aflGroup.add_argument("--project", dest="project", help="Name of the subfolder/project inside the S3 bucket", metavar="NAME")
    aflGroup.add_argument('rargs', nargs=argparse.REMAINDER)

    if not argv:
        parser.print_help()
        return 2

    opts = parser.parse_args(argv)

    # AFL mode is the default when neither mode is requested explicitly.
    # BUGFIX: these four statements were tab-indented (TabError on Python 3).
    if not opts.libfuzzer and not opts.aflfuzz:
        opts.aflfuzz = True

    if opts.cmd and opts.aflfuzz:
        if not opts.firefox:
            print("Error: Use --cmd either with libfuzzer or with afl in firefox mode", file=sys.stderr)
            return 2

    if opts.libfuzzer:
        if not opts.rargs:
            print("Error: No arguments specified", file=sys.stderr)
            return 2

        # First positional argument is the target binary; the rest are its args.
        binary = opts.rargs[0]
        if not os.path.exists(binary):
            print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
            return 2

        configuration = ProgramConfiguration.fromBinary(binary)
        if configuration is None:
            print("Error: Failed to load program configuration based on binary", file=sys.stderr)
            return 2

        # Copy the system environment variables by default and overwrite them
        # if they are specified through --env.
        # BUGFIX: a redundant block previously built a throwaway env dict and
        # registered the same variables with the configuration a second time.
        env = dict(os.environ)
        if opts.env:
            oenv = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(oenv)
            for envkey in oenv:
                env[envkey] = oenv[envkey]

        args = opts.rargs[1:]
        if args:
            configuration.addProgramArguments(args)

        metadata = {}
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
            configuration.addMetadata(metadata)

        # Set LD_LIBRARY_PATH for convenience.
        # BUGFIX: this was previously nested under "if opts.metadata:" so the
        # library path was only set when --metadata was supplied.
        if 'LD_LIBRARY_PATH' not in env:
            env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

        collector = Collector(opts.sigdir, opts.fuzzmanager_toolname)

        # Track how often the same signature is hit back-to-back so we can
        # bail out instead of flooding the server with duplicates.
        signature_repeat_count = 0
        last_signature = None

        while True:
            process = subprocess.Popen(
                opts.rargs,
                # stdout=None,
                stderr=subprocess.PIPE,
                env=env,
                universal_newlines=True
            )

            # LibFuzzerMonitor consumes stderr on a thread; join() waits for
            # the target process to terminate.
            monitor = LibFuzzerMonitor(process.stderr)
            monitor.start()
            monitor.join()

            print("Process terminated, processing results...", file=sys.stderr)

            trace = monitor.getASanTrace()
            testcase = monitor.getTestcase()

            crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

            # NOTE: this rebinds the local "metadata" name to the signature
            # metadata returned by the collector (the configuration metadata
            # was already registered above).
            (sigfile, metadata) = collector.search(crashInfo)

            if sigfile is not None:
                if last_signature == sigfile:
                    signature_repeat_count += 1
                else:
                    last_signature = sigfile
                    signature_repeat_count = 0

                print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr)
            else:
                # Unknown crash: create a local signature and submit.
                collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8)
                collector.submit(crashInfo, testcase)
                print("Successfully submitted crash.", file=sys.stderr)

            if signature_repeat_count >= 10:
                print("Too many crashes with the same signature, exiting...", file=sys.stderr)
                break

    if opts.aflfuzz:
        if opts.firefox or opts.firefox_start_afl:
            if not haveFFPuppet:
                print("Error: --firefox and --firefox-start-afl require FFPuppet to be installed", file=sys.stderr)
                return 2

            if opts.custom_cmdline_file:
                print("Error: --custom-cmdline-file is incompatible with firefox options", file=sys.stderr)
                return 2

            if not opts.firefox_prefs or not opts.firefox_testpath:
                print("Error: --firefox and --firefox-start-afl require --firefox-prefs and --firefox-testpath to be specified", file=sys.stderr)
                return 2

        if opts.firefox_start_afl:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for starting AFL with firefox", file=sys.stderr)
                return 2

            (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl, opts.firefox_prefs, opts.firefox_extensions, opts.firefox_testpath)

            afl_cmd = [os.path.join(opts.aflbindir, "afl-fuzz")]

            # Everything after "--" is passed through to AFL.
            opts.rargs.remove("--")

            afl_cmd.extend(opts.rargs)
            afl_cmd.extend(cmd)

            try:
                subprocess.call(afl_cmd, env=env)
            # BUGFIX: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit; the intent is best-effort cleanup,
            # so catch Exception and report, then always clean up FFPuppet.
            except Exception:
                traceback.print_exc()

            ffp.clean_up()
            return 0

        afl_out_dirs = []
        if opts.afloutdir:
            if not os.path.exists(os.path.join(opts.afloutdir, "crashes")):
                # The specified directory doesn't have a "crashes" sub directory.
                # Either the wrong directory was specified, or this is an AFL multi-process
                # sychronization directory. Try to figure this out here.
                sync_dirs = os.listdir(opts.afloutdir)

                for sync_dir in sync_dirs:
                    if os.path.exists(os.path.join(opts.afloutdir, sync_dir, "crashes")):
                        afl_out_dirs.append(os.path.join(opts.afloutdir, sync_dir))

                if not afl_out_dirs:
                    print("Error: Directory %s does not appear to be a valid AFL output/sync directory" % opts.afloutdir, file=sys.stderr)
                    return 2
            else:
                afl_out_dirs.append(opts.afloutdir)

        # Upload and FuzzManager modes require specifying the AFL directory
        if opts.s3_queue_upload or opts.fuzzmanager:
            if not opts.afloutdir:
                print("Error: Must specify AFL output directory using --afl-output-dir", file=sys.stderr)
                return 2

        # All S3 actions need both the bucket and the project name.
        if (opts.s3_queue_upload
            or opts.s3_corpus_refresh
            or opts.s3_build_download
            or opts.s3_build_upload
            or opts.s3_corpus_download
            or opts.s3_corpus_upload
            or opts.s3_queue_status):
            if not opts.s3_bucket or not opts.project:
                print("Error: Must specify both --s3-bucket and --project for S3 actions", file=sys.stderr)
                return 2

        if opts.s3_queue_status:
            status_data = get_queue_status(opts.s3_bucket, opts.project)
            total_queue_files = 0

            for queue_name in status_data:
                print("Queue %s: %s" % (queue_name, status_data[queue_name]))
                total_queue_files += status_data[queue_name]
            print("Total queue files: %s" % total_queue_files)

            return 0

        if opts.s3_corpus_status:
            status_data = get_corpus_status(opts.s3_bucket, opts.project)
            total_corpus_files = 0

            for (status_dt, status_cnt) in sorted(status_data.items()):
                print("Added %s: %s" % (status_dt, status_cnt))
                total_corpus_files += status_cnt
            print("Total corpus files: %s" % total_corpus_files)

            return 0

        if opts.s3_queue_cleanup:
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project, opts.s3_refresh_interval)
            return 0

        if opts.s3_build_download:
            download_build(opts.s3_build_download, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_build_upload:
            upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_corpus_download:
            if opts.s3_corpus_download_size is not None:
                opts.s3_corpus_download_size = int(opts.s3_corpus_download_size)

            download_corpus(opts.s3_corpus_download, opts.s3_bucket, opts.project, opts.s3_corpus_download_size)
            return 0

        if opts.s3_corpus_upload:
            upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project, opts.s3_corpus_replace)
            return 0

        if opts.s3_corpus_refresh:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for refreshing the test corpus", file=sys.stderr)
                return 2

            if not os.path.exists(opts.s3_corpus_refresh):
                os.makedirs(opts.s3_corpus_refresh)

            queues_dir = os.path.join(opts.s3_corpus_refresh, "queues")

            print("Cleaning old AFL queues from s3://%s/%s/queues/" % (opts.s3_bucket, opts.project))
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project, opts.s3_refresh_interval)

            print("Downloading AFL queues from s3://%s/%s/queues/ to %s" % (opts.s3_bucket, opts.project, queues_dir))
            download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project)

            cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline")
            if not os.path.exists(cmdline_file):
                print("Error: Failed to download a cmdline file from queue directories.", file=sys.stderr)
                return 2

            print("Downloading build")
            download_build(os.path.join(opts.s3_corpus_refresh, "build"), opts.s3_bucket, opts.project)

            # BUGFIX: the open file handle previously shadowed the
            # cmdline_file path variable.
            with open(cmdline_file, 'r') as cmdline_fp:
                cmdline = cmdline_fp.read().splitlines()

            # Assume cmdline[0] is the name of the binary
            binary_name = os.path.basename(cmdline[0])

            # Try locating our binary in the build we just unpacked:
            # walk the build tree and keep executable files matching the name.
            binary_search_result = [os.path.join(dirpath, filename)
                for dirpath, dirnames, filenames in os.walk(os.path.join(opts.s3_corpus_refresh, "build"))
                    for filename in filenames
                        if (filename == binary_name and (stat.S_IXUSR & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))]

            if not binary_search_result:
                print("Error: Failed to locate binary %s in unpacked build." % binary_name, file=sys.stderr)
                return 2

            if len(binary_search_result) > 1:
                print("Error: Binary name %s is ambiguous in unpacked build." % binary_name, file=sys.stderr)
                return 2

            cmdline[0] = binary_search_result[0]

            # Download our current corpus into the queues directory as well
            print("Downloading corpus from s3://%s/%s/corpus/ to %s" % (opts.s3_bucket, opts.project, queues_dir))
            download_corpus(queues_dir, opts.s3_bucket, opts.project)

            # Ensure the directory for our new tests is empty
            updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests")
            if os.path.exists(updated_tests_dir):
                shutil.rmtree(updated_tests_dir)
            os.mkdir(updated_tests_dir)

            # Run afl-cmin
            afl_cmin = os.path.join(opts.aflbindir, "afl-cmin")
            if not os.path.exists(afl_cmin):
                print("Error: Unable to locate afl-cmin binary.", file=sys.stderr)
                return 2

            if opts.firefox:
                (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs, opts.firefox_extensions, opts.firefox_testpath)
                cmdline = ffCmd

            afl_cmdline = [afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir, '-t', str(opts.afl_timeout), '-m', 'none']

            if opts.test_file:
                afl_cmdline.extend(['-f', opts.test_file])

            afl_cmdline.extend(cmdline)

            print("Running afl-cmin")
            with open(os.devnull, 'w') as devnull:
                env = os.environ.copy()
                env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0])

                if opts.firefox:
                    env.update(ffEnv)

                subprocess.check_call(afl_cmdline, stdout=devnull, env=env)

            if opts.firefox:
                ffpInst.clean_up()

            # replace existing corpus with reduced corpus
            print("Uploading reduced corpus to s3://%s/%s/corpus/" % (opts.s3_bucket, opts.project))
            upload_corpus(updated_tests_dir, opts.s3_bucket, opts.project, corpus_delete=True)

            # Prune the queues directory once we successfully uploaded the new
            # test corpus, but leave everything that's part of our new corpus
            # so we don't have to download those files again.
            # NOTE: renamed loop variables to stop shadowing the "file" builtin.
            test_files = [filename for filename in os.listdir(updated_tests_dir)
                          if os.path.isfile(os.path.join(updated_tests_dir, filename))]
            obsolete_queue_files = [filename for filename in os.listdir(queues_dir)
                                    if os.path.isfile(os.path.join(queues_dir, filename))
                                    and filename not in test_files]

            for filename in obsolete_queue_files:
                os.remove(os.path.join(queues_dir, filename))

        if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
            last_queue_upload = 0
            while True:
                if opts.fuzzmanager:
                    for afl_out_dir in afl_out_dirs:
                        scan_crashes(afl_out_dir, opts.custom_cmdline_file, opts.env_file, opts.fuzzmanager_toolname, opts.test_file)

                # Only upload queue files every 20 minutes
                if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200:
                    for afl_out_dir in afl_out_dirs:
                        upload_queue_dir(afl_out_dir, opts.s3_bucket, opts.project, new_cov_only=True)
                    last_queue_upload = int(time.time())

                if opts.aflstats:
                    write_aggregated_stats(afl_out_dirs, opts.aflstats, cmdline_path=opts.custom_cmdline_file)

                time.sleep(10)
示例#32
0
def test_collector_download(tmpdir, monkeypatch):
    '''Test testcase downloads.

    Exercises Collector.download() against mocked requests.Session.get:
    the happy path (two GETs: crash entry JSON, then the testcase body),
    a 404 on the testcase GET, a crash entry with no testcase, a malformed
    JSON response, and a 404 on the crash entry GET.

    NOTE(review): order matters throughout. The response classes are
    re-declared several times below and the mock closures (myget1/myget2)
    resolve them by name at call time, so each re-declaration changes what
    the already-installed mocks return.
    '''
    # First REST response: crash entry pointing at a testcase path.
    class response1_t(object):
        status_code = requests.codes["ok"]
        text = 'OK'

        def json(self):
            return {'id': 123, 'testcase': 'path/to/testcase.txt'}

    # Second response: raw testcase bytes (deliberately non-UTF8).
    class response2_t(object):
        status_code = requests.codes["ok"]
        text = 'OK'
        content = b'testcase\xFF'

    # myget1 mocks requests.get to return the rest response to the crashentry get
    def myget1(_session, url, headers=None):
        assert url == 'gopher://aol.com:70/crashmanager/rest/crashes/123/'
        assert headers == {'Authorization': 'Token token'}

        # Swap the mock mid-flight so the follow-up GET hits myget2.
        monkeypatch.undo()
        monkeypatch.chdir(
            tmpdir)  # download writes to cwd, so make that tmpdir
        monkeypatch.setattr(requests.Session, 'get', myget2)
        return response1_t()

    # myget2 mocks requests.get to return the testcase data specified in myget1
    def myget2(_session, url, auth=None):
        assert url == 'gopher://aol.com:70/crashmanager/path/to/testcase.txt'
        assert len(auth) == 2
        assert auth[0] == 'fuzzmanager'
        assert auth[1] == 'token'
        return response2_t()

    monkeypatch.setattr(requests.Session, 'get', myget1)

    # create Collector
    collector = Collector(serverHost='aol.com',
                          serverPort=70,
                          serverProtocol='gopher',
                          serverAuthToken='token',
                          tool='test-tool')

    # call refresh
    collector.download(123)

    # check that it worked: testcase written to cwd as <id>.<ext>
    assert {f.basename for f in tmpdir.listdir()} == {'123.txt'}
    with open('123.txt', 'rb') as fp:
        assert fp.read() == response2_t.content

    # testcase GET returns http error
    # (re-declaring response2_t makes the still-referenced myget2 return a 404)
    class response2_t(object):
        status_code = 404
        text = 'Not found'

    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.download(123)

    # download with no testcase
    class response1_t(object):  # noqa
        status_code = requests.codes["ok"]
        text = 'OK'

        def json(self):
            return {'testcase': ''}

    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    result = collector.download(123)
    assert result is None

    # invalid REST response: json() returning a non-dict must be rejected
    class response1_t(object):  # noqa
        status_code = requests.codes["ok"]
        text = 'OK'

        def json(self):
            return []

    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    with pytest.raises(RuntimeError, match='malformed JSON'):
        collector.download(123)

    # REST query returns http error
    class response1_t(object):  # noqa
        status_code = 404
        text = 'Not found'

    monkeypatch.undo()
    monkeypatch.setattr(requests.Session, 'get', myget1)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.download(123)
示例#33
0
def test_collector_refresh(tmpdir, monkeypatch, capsys):
    '''Test signature downloads.

    Exercises Collector.refresh(): the server's signatures.zip replaces the
    local sigCacheDir contents (old *.signature files removed, non-signature
    files left alone with a warning), and error paths for an HTTP error
    response, a non-zip payload, and a zip with a corrupted CRC.

    NOTE(review): each mocked response serves the open file handle as 'raw',
    so the requests must happen inside the corresponding 'with open(...)'
    block. The response classes are re-declared per scenario and picked up
    by name at call time.
    '''
    # create a test signature zip
    test2 = tmpdir.join('test2.signature').strpath
    with open(test2, 'w') as fp:
        fp.write('test2')
    with zipfile.ZipFile(tmpdir.join('out.zip').strpath, 'w') as zf:
        zf.write(test2, 'test2.signature')

    # create an old signature (plus a non-signature file refresh must ignore)
    tmpdir.mkdir('sigs')
    with open(tmpdir.join('sigs', 'test1.signature').strpath, 'w'):
        pass
    with open(tmpdir.join('sigs', 'other.txt').strpath, 'w'):
        pass
    assert {f.basename
            for f in tmpdir.join('sigs').listdir()
            } == {'test1.signature', 'other.txt'}

    with open(tmpdir.join('out.zip').strpath, 'rb') as fp:

        class response_t(object):
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        # this asserts the expected arguments and returns the open handle to out.zip as 'raw' which is read by refresh()
        def myget(_session, url, stream=None, auth=None):
            assert url == 'gopher://aol.com:70/crashmanager/files/signatures.zip'
            assert stream is True
            assert len(auth) == 2
            assert auth[0] == 'fuzzmanager'
            assert auth[1] == 'token'
            return response_t()

        monkeypatch.setattr(requests.Session, 'get', myget)

        # create Collector
        collector = Collector(sigCacheDir=tmpdir.join('sigs').strpath,
                              serverHost='aol.com',
                              serverPort=70,
                              serverProtocol='gopher',
                              serverAuthToken='token',
                              clientId='test-fuzzer1',
                              tool='test-tool')

        # call refresh
        collector.refresh()

    # check that it worked: old signature replaced, non-signature file kept
    assert {f.basename
            for f in tmpdir.join('sigs').listdir()
            } == {'test2.signature', 'other.txt'}
    with open(tmpdir.join('sigs', 'test2.signature').strpath) as fp:
        assert fp.read() == 'test2'
    assert 'other.txt' in capsys.readouterr()[
        1]  # should have had a warning about unrecognized file

    # check that 404 raises
    monkeypatch.undo()

    class response_t(object):  # noqa
        status_code = requests.codes["not found"]
        text = "Not found"

    def myget(_session, _url, stream=None, auth=None):
        return response_t()

    monkeypatch.setattr(requests.Session, 'get', myget)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.refresh()

    # check that bad zips raise errors
    # (serve a plain text file where a zip archive is expected)
    monkeypatch.undo()
    with open(tmpdir.join('sigs', 'other.txt').strpath, 'rb') as fp:

        class response_t(object):  # noqa
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        def myget(_session, _url, stream=None, auth=None):
            return response_t()

        monkeypatch.setattr(requests.Session, 'get', myget)
        with pytest.raises(zipfile.BadZipfile, match='not a zip file'):
            collector.refresh()
    monkeypatch.undo()
    with open(tmpdir.join('out.zip').strpath, 'r+b') as fp:
        # corrupt the CRC field for the signature file in the zip
        # (0x42 is the CRC offset for this fixed archive layout)
        fp.seek(0x42)
        fp.write(b'\xFF')
    with open(tmpdir.join('out.zip').strpath, 'rb') as fp:

        class response_t(object):  # noqa
            status_code = requests.codes["ok"]
            text = "OK"
            raw = fp

        def myget(_session, _url, stream=None, auth=None):
            return response_t()

        monkeypatch.setattr(requests.Session, 'get', myget)
        with pytest.raises(RuntimeError, match='Bad CRC'):
            collector.refresh()
示例#34
0
def main(argv=None):
    """Command line entry point for the fuzzing management daemon.

    Drives either libFuzzer mode (run target in a loop, triage crashes with
    FuzzManager) or AFL mode (S3 queue/corpus management, crash scanning,
    statistics aggregation).

    Args:
        argv (list(str) or None): Command line arguments excluding the program
            name. Defaults to ``sys.argv[1:]`` when ``None``.

    Returns:
        int: Process exit code — ``0`` on success, ``2`` on usage or
        configuration errors. May also loop indefinitely in monitoring modes.
    """

    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(
        usage=
        '%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' %
        program_name)

    mainGroup = parser.add_argument_group(title="Main Options",
                                          description=None)
    aflGroup = parser.add_argument_group(
        title="AFL Options", description="Use these arguments in AFL mode")
    libfGroup = parser.add_argument_group(
        title="Libfuzzer Options",
        description="Use these arguments in Libfuzzer mode")
    fmGroup = parser.add_argument_group(
        title="FuzzManager Options",
        description="Use these to specify FuzzManager parameters")

    mainGroup.add_argument("--libfuzzer",
                           dest="libfuzzer",
                           action='store_true',
                           help="Enable LibFuzzer mode")
    mainGroup.add_argument("--aflfuzz",
                           dest="aflfuzz",
                           action='store_true',
                           help="Enable AFL mode")
    mainGroup.add_argument("--fuzzmanager",
                           dest="fuzzmanager",
                           action='store_true',
                           help="Use FuzzManager to submit crash results")

    libfGroup.add_argument(
        '--env',
        dest='env',
        nargs='+',
        type=str,
        help="List of environment variables in the form 'KEY=VALUE'")
    libfGroup.add_argument('--cmd',
                           dest='cmd',
                           action='store_true',
                           help="Command with parameters to run")
    libfGroup.add_argument("--sigdir",
                           dest="sigdir",
                           help="Signature cache directory",
                           metavar="DIR")

    fmGroup.add_argument(
        "--fuzzmanager-toolname",
        dest="fuzzmanager_toolname",
        help="Override FuzzManager tool name (for submitting crash results)")
    fmGroup.add_argument("--custom-cmdline-file",
                         dest="custom_cmdline_file",
                         help="Path to custom cmdline file",
                         metavar="FILE")
    fmGroup.add_argument(
        "--env-file",
        dest="env_file",
        help="Path to a file with additional environment variables",
        metavar="FILE")
    fmGroup.add_argument(
        "--serverhost",
        help="Server hostname for remote signature management.",
        metavar="HOST")
    fmGroup.add_argument("--serverport",
                         dest="serverport",
                         type=int,
                         help="Server port to use",
                         metavar="PORT")
    fmGroup.add_argument("--serverproto",
                         dest="serverproto",
                         help="Server protocol to use (default is https)",
                         metavar="PROTO")
    fmGroup.add_argument(
        "--serverauthtokenfile",
        dest="serverauthtokenfile",
        help="File containing the server authentication token",
        metavar="FILE")
    fmGroup.add_argument("--clientid",
                         dest="clientid",
                         help="Client ID to use when submitting issues",
                         metavar="ID")
    fmGroup.add_argument("--platform",
                         dest="platform",
                         help="Platform this crash appeared on",
                         metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product",
                         dest="product",
                         help="Product this crash appeared on",
                         metavar="PRODUCT")
    fmGroup.add_argument("--productversion",
                         dest="product_version",
                         help="Product version this crash appeared on",
                         metavar="VERSION")
    fmGroup.add_argument("--os",
                         dest="os",
                         help="OS this crash appeared on",
                         metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool",
                         dest="tool",
                         help="Name of the tool that found this issue",
                         metavar="NAME")
    fmGroup.add_argument(
        '--metadata',
        dest='metadata',
        nargs='+',
        type=str,
        help="List of metadata variables in the form 'KEY=VALUE'")

    aflGroup.add_argument("--s3-queue-upload",
                          dest="s3_queue_upload",
                          action='store_true',
                          help="Use S3 to synchronize queues")
    aflGroup.add_argument(
        "--s3-queue-cleanup",
        dest="s3_queue_cleanup",
        action='store_true',
        help="Cleanup S3 queue entries older than specified refresh interval")
    aflGroup.add_argument("--s3-queue-status",
                          dest="s3_queue_status",
                          action='store_true',
                          help="Display S3 queue status")
    aflGroup.add_argument(
        "--s3-build-download",
        dest="s3_build_download",
        help="Use S3 to download the build for the specified project",
        metavar="DIR")
    aflGroup.add_argument(
        "--s3-build-upload",
        dest="s3_build_upload",
        help="Use S3 to upload a new build for the specified project",
        metavar="FILE")
    aflGroup.add_argument(
        "--s3-corpus-download",
        dest="s3_corpus_download",
        help="Use S3 to download the test corpus for the specified project",
        metavar="DIR")
    aflGroup.add_argument(
        "--s3-corpus-download-size",
        dest="s3_corpus_download_size",
        help="When downloading the corpus, select only SIZE files randomly",
        metavar="SIZE")
    aflGroup.add_argument(
        "--s3-corpus-upload",
        dest="s3_corpus_upload",
        help="Use S3 to upload a test corpus for the specified project",
        metavar="DIR")
    aflGroup.add_argument(
        "--s3-corpus-replace",
        dest="s3_corpus_replace",
        action='store_true',
        help=
        "In conjunction with --s3-corpus-upload, deletes all other remote test files"
    )
    aflGroup.add_argument(
        "--s3-corpus-refresh",
        dest="s3_corpus_refresh",
        help=
        "Download queues and corpus from S3, combine and minimize, then re-upload.",
        metavar="DIR")
    aflGroup.add_argument("--s3-corpus-status",
                          dest="s3_corpus_status",
                          action='store_true',
                          help="Display S3 corpus status")
    aflGroup.add_argument(
        "--test-file",
        dest="test_file",
        help="Optional path to copy the test file to before reproducing",
        metavar="FILE")
    aflGroup.add_argument(
        "--afl-timeout",
        dest="afl_timeout",
        type=int,
        default=1000,
        help="Timeout per test to pass to AFL for corpus refreshing",
        metavar="MSECS")
    aflGroup.add_argument(
        "--firefox",
        dest="firefox",
        action='store_true',
        help="Test Program is Firefox (requires FFPuppet installed)")
    aflGroup.add_argument("--firefox-prefs",
                          dest="firefox_prefs",
                          help="Path to prefs.js file for Firefox",
                          metavar="FILE")
    aflGroup.add_argument("--firefox-extensions",
                          nargs='+',
                          type=str,
                          dest="firefox_extensions",
                          help="Path extension file for Firefox",
                          metavar="FILE")
    aflGroup.add_argument("--firefox-testpath",
                          dest="firefox_testpath",
                          help="Path to file to open with Firefox",
                          metavar="FILE")
    aflGroup.add_argument(
        "--firefox-start-afl",
        dest="firefox_start_afl",
        metavar="FILE",
        help=
        "Start AFL with the given Firefox binary, remaining arguments being passed to AFL"
    )
    aflGroup.add_argument(
        "--s3-refresh-interval",
        dest="s3_refresh_interval",
        type=int,
        default=86400,
        help="How often the s3 corpus is refreshed (affects queue cleaning)",
        metavar="SECS")
    aflGroup.add_argument("--afl-output-dir",
                          dest="afloutdir",
                          help="Path to the AFL output directory to manage",
                          metavar="DIR")
    aflGroup.add_argument("--afl-binary-dir",
                          dest="aflbindir",
                          help="Path to the AFL binary directory to use",
                          metavar="DIR")
    aflGroup.add_argument(
        "--afl-stats",
        dest="aflstats",
        help="Collect aggregated statistics while scanning output directories",
        metavar="FILE")
    aflGroup.add_argument("--s3-bucket",
                          dest="s3_bucket",
                          help="Name of the S3 bucket to use",
                          metavar="NAME")
    aflGroup.add_argument(
        "--project",
        dest="project",
        help="Name of the subfolder/project inside the S3 bucket",
        metavar="NAME")
    # Everything after the recognized options is the target command line.
    aflGroup.add_argument('rargs', nargs=argparse.REMAINDER)

    if len(argv) == 0:
        parser.print_help()
        return 2

    opts = parser.parse_args(argv)

    # AFL mode is the default when neither mode is requested explicitly.
    if not opts.libfuzzer and not opts.aflfuzz:
        opts.aflfuzz = True

    if opts.cmd and opts.aflfuzz:
        if not opts.firefox:
            # BUGFIX: was `file=sys.sdderr`, which raised AttributeError
            # instead of printing the usage error.
            print(
                "Error: Use --cmd either with libfuzzer or with afl in firefox mode",
                file=sys.stderr)
            return 2

    if opts.libfuzzer:
        if not opts.rargs:
            print("Error: No arguments specified", file=sys.stderr)
            return 2

        binary = opts.rargs[0]
        if not os.path.exists(binary):
            print("Error: Specified binary does not exist: %s" % binary,
                  file=sys.stderr)
            return 2

        configuration = ProgramConfiguration.fromBinary(binary)
        if configuration is None:
            print(
                "Error: Failed to load program configuration based on binary",
                file=sys.stderr)
            return 2

        # Copy the system environment variables by default and overwrite them
        # if they are specified through --env.
        # BUGFIX: the user-supplied variables were previously parsed and added
        # to the configuration twice; do it only once here.
        env = dict(os.environ)
        if opts.env:
            oenv = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(oenv)
            env.update(oenv)

        args = opts.rargs[1:]
        if args:
            configuration.addProgramArguments(args)

        metadata = {}
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
            configuration.addMetadata(metadata)

        # Set LD_LIBRARY_PATH for convenience.
        # BUGFIX: this was nested under `if opts.metadata:` so the default was
        # only applied when --metadata was given; apply it unconditionally.
        if 'LD_LIBRARY_PATH' not in env:
            env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

        collector = Collector(opts.sigdir, opts.fuzzmanager_toolname)

        # Track back-to-back occurrences of the same signature so we can bail
        # out of a pathological crash loop.
        signature_repeat_count = 0
        last_signature = None

        while True:
            process = subprocess.Popen(
                opts.rargs,
                # stdout=None,
                stderr=subprocess.PIPE,
                env=env,
                universal_newlines=True)

            monitor = LibFuzzerMonitor(process.stderr)
            monitor.start()
            monitor.join()

            print("Process terminated, processing results...", file=sys.stderr)

            trace = monitor.getASanTrace()
            testcase = monitor.getTestcase()

            crashInfo = CrashInfo.fromRawCrashData([], [],
                                                   configuration,
                                                   auxCrashData=trace)

            (sigfile, metadata) = collector.search(crashInfo)

            if sigfile is not None:
                if last_signature == sigfile:
                    signature_repeat_count += 1
                else:
                    last_signature = sigfile
                    signature_repeat_count = 0

                print("Crash matches signature %s, not submitting..." %
                      sigfile,
                      file=sys.stderr)
            else:
                # Unknown crash: create a signature locally and submit it.
                collector.generate(crashInfo,
                                   forceCrashAddress=True,
                                   forceCrashInstruction=False,
                                   numFrames=8)
                collector.submit(crashInfo, testcase)
                print("Successfully submitted crash.", file=sys.stderr)

            if signature_repeat_count >= 10:
                print("Too many crashes with the same signature, exiting...",
                      file=sys.stderr)
                break

    if opts.aflfuzz:
        if opts.firefox or opts.firefox_start_afl:
            if not haveFFPuppet:
                print(
                    "Error: --firefox and --firefox-start-afl require FFPuppet to be installed",
                    file=sys.stderr)
                return 2

            if opts.custom_cmdline_file:
                print(
                    "Error: --custom-cmdline-file is incompatible with firefox options",
                    file=sys.stderr)
                return 2

            if not opts.firefox_prefs or not opts.firefox_testpath:
                print(
                    "Error: --firefox and --firefox-start-afl require --firefox-prefs and --firefox-testpath to be specified",
                    file=sys.stderr)
                return 2

        if opts.firefox_start_afl:
            if not opts.aflbindir:
                print(
                    "Error: Must specify --afl-binary-dir for starting AFL with firefox",
                    file=sys.stderr)
                return 2

            (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl,
                                            opts.firefox_prefs,
                                            opts.firefox_extensions,
                                            opts.firefox_testpath)

            afl_cmd = [os.path.join(opts.aflbindir, "afl-fuzz")]

            opts.rargs.remove("--")

            afl_cmd.extend(opts.rargs)
            afl_cmd.extend(cmd)

            try:
                subprocess.call(afl_cmd, env=env)
            except Exception:
                # Best-effort: report the failure but still clean up FFPuppet.
                traceback.print_exc()

            ffp.clean_up()
            return 0

        afl_out_dirs = []
        if opts.afloutdir:
            if not os.path.exists(os.path.join(opts.afloutdir, "crashes")):
                # The specified directory doesn't have a "crashes" sub directory.
                # Either the wrong directory was specified, or this is an AFL multi-process
                # synchronization directory. Try to figure this out here.
                sync_dirs = os.listdir(opts.afloutdir)

                for sync_dir in sync_dirs:
                    if os.path.exists(
                            os.path.join(opts.afloutdir, sync_dir, "crashes")):
                        afl_out_dirs.append(
                            os.path.join(opts.afloutdir, sync_dir))

                if not afl_out_dirs:
                    print(
                        "Error: Directory %s does not appear to be a valid AFL output/sync directory"
                        % opts.afloutdir,
                        file=sys.stderr)
                    return 2
            else:
                afl_out_dirs.append(opts.afloutdir)

        # Upload and FuzzManager modes require specifying the AFL directory
        if opts.s3_queue_upload or opts.fuzzmanager:
            if not opts.afloutdir:
                print(
                    "Error: Must specify AFL output directory using --afl-output-dir",
                    file=sys.stderr)
                return 2

        if (opts.s3_queue_upload or opts.s3_corpus_refresh
                or opts.s3_build_download or opts.s3_build_upload
                or opts.s3_corpus_download or opts.s3_corpus_upload
                or opts.s3_queue_status):
            if not opts.s3_bucket or not opts.project:
                print(
                    "Error: Must specify both --s3-bucket and --project for S3 actions",
                    file=sys.stderr)
                return 2

        if opts.s3_queue_status:
            status_data = get_queue_status(opts.s3_bucket, opts.project)
            total_queue_files = 0

            for queue_name in status_data:
                print("Queue %s: %s" % (queue_name, status_data[queue_name]))
                total_queue_files += status_data[queue_name]
            print("Total queue files: %s" % total_queue_files)

            return 0

        if opts.s3_corpus_status:
            status_data = get_corpus_status(opts.s3_bucket, opts.project)
            total_corpus_files = 0

            for (status_dt, status_cnt) in sorted(status_data.items()):
                print("Added %s: %s" % (status_dt, status_cnt))
                total_corpus_files += status_cnt
            print("Total corpus files: %s" % total_corpus_files)

            return 0

        if opts.s3_queue_cleanup:
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket,
                             opts.project, opts.s3_refresh_interval)
            return 0

        if opts.s3_build_download:
            download_build(opts.s3_build_download, opts.s3_bucket,
                           opts.project)
            return 0

        if opts.s3_build_upload:
            upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_corpus_download:
            if opts.s3_corpus_download_size is not None:
                opts.s3_corpus_download_size = int(
                    opts.s3_corpus_download_size)

            download_corpus(opts.s3_corpus_download, opts.s3_bucket,
                            opts.project, opts.s3_corpus_download_size)
            return 0

        if opts.s3_corpus_upload:
            upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project,
                          opts.s3_corpus_replace)
            return 0

        if opts.s3_corpus_refresh:
            if not opts.aflbindir:
                print(
                    "Error: Must specify --afl-binary-dir for refreshing the test corpus",
                    file=sys.stderr)
                return 2

            if not os.path.exists(opts.s3_corpus_refresh):
                os.makedirs(opts.s3_corpus_refresh)

            queues_dir = os.path.join(opts.s3_corpus_refresh, "queues")

            print("Cleaning old AFL queues from s3://%s/%s/queues/" %
                  (opts.s3_bucket, opts.project))
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket,
                             opts.project, opts.s3_refresh_interval)

            print("Downloading AFL queues from s3://%s/%s/queues/ to %s" %
                  (opts.s3_bucket, opts.project, queues_dir))
            download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket,
                                opts.project)

            cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline")
            if not os.path.exists(cmdline_file):
                print(
                    "Error: Failed to download a cmdline file from queue directories.",
                    file=sys.stderr)
                return 2

            print("Downloading build")
            download_build(os.path.join(opts.s3_corpus_refresh, "build"),
                           opts.s3_bucket, opts.project)

            with open(os.path.join(opts.s3_corpus_refresh, "cmdline"),
                      'r') as cmdline_file:
                cmdline = cmdline_file.read().splitlines()

            # Assume cmdline[0] is the name of the binary
            binary_name = os.path.basename(cmdline[0])

            # Try locating our binary in the build we just unpacked: walk the
            # tree and keep executable files whose name matches.
            binary_search_result = [
                os.path.join(dirpath, filename)
                for dirpath, dirnames, filenames in os.walk(
                    os.path.join(opts.s3_corpus_refresh, "build"))
                for filename in filenames if (filename == binary_name and (
                    stat.S_IXUSR
                    & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))
            ]

            if not binary_search_result:
                print("Error: Failed to locate binary %s in unpacked build." %
                      binary_name,
                      file=sys.stderr)
                return 2

            if len(binary_search_result) > 1:
                print("Error: Binary name %s is ambiguous in unpacked build." %
                      binary_name,
                      file=sys.stderr)
                return 2

            cmdline[0] = binary_search_result[0]

            # Download our current corpus into the queues directory as well
            print("Downloading corpus from s3://%s/%s/corpus/ to %s" %
                  (opts.s3_bucket, opts.project, queues_dir))
            download_corpus(queues_dir, opts.s3_bucket, opts.project)

            # Ensure the directory for our new tests is empty
            updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests")
            if os.path.exists(updated_tests_dir):
                shutil.rmtree(updated_tests_dir)
            os.mkdir(updated_tests_dir)

            # Run afl-cmin
            afl_cmin = os.path.join(opts.aflbindir, "afl-cmin")
            if not os.path.exists(afl_cmin):
                print("Error: Unable to locate afl-cmin binary.",
                      file=sys.stderr)
                return 2

            if opts.firefox:
                (ffpInst, ffCmd,
                 ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs,
                                        opts.firefox_extensions,
                                        opts.firefox_testpath)
                cmdline = ffCmd

            afl_cmdline = [
                afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir,
                '-t',
                str(opts.afl_timeout), '-m', 'none'
            ]

            if opts.test_file:
                afl_cmdline.extend(['-f', opts.test_file])

            afl_cmdline.extend(cmdline)

            print("Running afl-cmin")
            with open(os.devnull, 'w') as devnull:
                env = os.environ.copy()
                env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0])

                if opts.firefox:
                    env.update(ffEnv)

                subprocess.check_call(afl_cmdline, stdout=devnull, env=env)

            if opts.firefox:
                ffpInst.clean_up()

            # replace existing corpus with reduced corpus
            print("Uploading reduced corpus to s3://%s/%s/corpus/" %
                  (opts.s3_bucket, opts.project))
            upload_corpus(updated_tests_dir,
                          opts.s3_bucket,
                          opts.project,
                          corpus_delete=True)

            # Prune the queues directory once we successfully uploaded the new
            # test corpus, but leave everything that's part of our new corpus
            # so we don't have to download those files again.
            test_files = [
                file for file in os.listdir(updated_tests_dir)
                if os.path.isfile(os.path.join(updated_tests_dir, file))
            ]
            obsolete_queue_files = [
                file for file in os.listdir(queues_dir)
                if os.path.isfile(os.path.join(queues_dir, file))
                and file not in test_files
            ]

            for file in obsolete_queue_files:
                os.remove(os.path.join(queues_dir, file))

        if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
            last_queue_upload = 0
            # Monitoring loop: scan for crashes, periodically sync queues to
            # S3 and write aggregated stats. Runs until interrupted.
            while True:
                if opts.fuzzmanager:
                    for afl_out_dir in afl_out_dirs:
                        scan_crashes(afl_out_dir, opts.custom_cmdline_file,
                                     opts.env_file, opts.fuzzmanager_toolname,
                                     opts.test_file)

                # Only upload queue files every 20 minutes
                if opts.s3_queue_upload and last_queue_upload < int(
                        time.time()) - 1200:
                    for afl_out_dir in afl_out_dirs:
                        upload_queue_dir(afl_out_dir,
                                         opts.s3_bucket,
                                         opts.project,
                                         new_cov_only=True)
                    last_queue_upload = int(time.time())

                if opts.aflstats:
                    write_aggregated_stats(afl_out_dirs, opts.aflstats)

                time.sleep(10)
示例#35
0
def test_collector_submit(live_server, tmpdir, fm_user, monkeypatch):
    '''Test crash submission, both via the Collector API and the CLI.

    Exercises three paths against a live test server:
      1. Collector.submit() with a text testcase, verifying the stored entry.
      2. The command-line `main(['--submit', ...])` path with a binary
         testcase, stdout/stderr/crashdata files, env/metadata/args.
      3. A 500 server response, which must raise RuntimeError.
    '''
    # Redirect expanduser so a real ~/.fuzzmanagerconf cannot leak into the
    # test; disable sleeping so submit retries are instantaneous.
    monkeypatch.setattr(
        os.path, 'expanduser',
        lambda path: tmpdir.strpath)  # ensure fuzzmanager config is not used
    monkeypatch.setattr(time, 'sleep', lambda t: None)

    # create a collector pointed at the live test server
    url = urlsplit(live_server.url)
    collector = Collector(sigCacheDir=tmpdir.mkdir('sigcache').strpath,
                          serverHost=url.hostname,
                          serverPort=url.port,
                          serverProtocol=url.scheme,
                          serverAuthToken=fm_user.token,
                          clientId='test-fuzzer1',
                          tool='test-tool')
    testcase_path = tmpdir.mkdir('testcase').join('testcase.js').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(exampleTestCase)
    config = ProgramConfiguration('mozilla-central',
                                  'x86-64',
                                  'linux',
                                  version='ba0bc4f26681')
    crashInfo = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(),
                                           config)

    # submit a crash to test server using collector
    result = collector.submit(crashInfo, testcase_path)

    # see that the issue was created in the server; every stored field must
    # round-trip from what the collector sent
    entry = CrashEntry.objects.get(pk=result['id'])
    assert entry.rawStdout == ''
    assert entry.rawStderr == asanTraceCrash
    assert entry.rawCrashData == ''
    assert entry.tool.name == 'test-tool'
    assert entry.client.name == 'test-fuzzer1'
    assert entry.product.name == config.product
    assert entry.product.version == config.version
    assert entry.platform.name == config.platform
    assert entry.os.name == config.os
    assert entry.testcase.quality == 0
    assert not entry.testcase.isBinary
    assert entry.testcase.size == len(exampleTestCase)
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == exampleTestCase
    assert entry.metadata == ''
    assert entry.env == ''
    assert entry.args == ''

    # create a test config (read via the patched expanduser above) so the
    # CLI path below can locate the live server
    with open(tmpdir.join('.fuzzmanagerconf').strpath, 'w') as fp:
        fp.write('[Main]\n')
        fp.write('serverhost = %s\n' % url.hostname)
        fp.write('serverport = %d\n' % url.port)
        fp.write('serverproto = %s\n' % url.scheme)
        fp.write('serverauthtoken = %s\n' % fm_user.token)

    # try a binary testcase via cmd line
    testcase_path = tmpdir.join('testcase.bin').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(b'\0')
    stdout = tmpdir.join('stdout.txt').strpath
    with open(stdout, 'w') as fp:
        fp.write('stdout data')
    stderr = tmpdir.join('stderr.txt').strpath
    with open(stderr, 'w') as fp:
        fp.write('stderr data')
    crashdata = tmpdir.join('crashdata.txt').strpath
    with open(crashdata, 'w') as fp:
        fp.write(asanTraceCrash)
    result = main([
        '--submit',
        '--tool',
        'tool2',
        '--product',
        'mozilla-inbound',
        '--productversion',
        '12345',
        '--os',
        'minix',
        '--platform',
        'pdp11',
        '--env',
        'PATH=/home/ken',
        'LD_PRELOAD=hack.so',
        '--metadata',
        'var1=val1',
        'var2=val2',
        '--args',
        './myprog',
        '--testcase',
        testcase_path,
        '--testcasequality',
        '5',
        '--stdout',
        stdout,
        '--stderr',
        stderr,
        '--crashdata',
        crashdata,
    ])
    assert result == 0
    entry = CrashEntry.objects.get(
        pk__gt=entry.id
    )  # newer than the last result, will fail if the test db is active
    assert entry.rawStdout == 'stdout data'
    assert entry.rawStderr == 'stderr data'
    assert entry.rawCrashData == asanTraceCrash
    assert entry.tool.name == 'tool2'
    assert entry.client.name == platform.node()
    assert entry.product.name == 'mozilla-inbound'
    assert entry.product.version == '12345'
    assert entry.platform.name == 'pdp11'
    assert entry.os.name == 'minix'
    assert entry.testcase.quality == 5
    assert entry.testcase.isBinary
    assert entry.testcase.size == 1
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == b'\0'
    assert json.loads(entry.metadata) == {'var1': 'val1', 'var2': 'val2'}
    assert json.loads(entry.env) == {
        'PATH': '/home/ken',
        'LD_PRELOAD': 'hack.so'
    }
    assert json.loads(entry.args) == ['./myprog']

    # finally, stub out the POST to return a server error and check that
    # submit() surfaces it as RuntimeError after its retries
    class response_t(object):
        status_code = 500
        text = "Error"

    def mypost(_session, _url, _data, headers=None):
        return response_t()

    monkeypatch.setattr(time, 'sleep', lambda t: None)
    monkeypatch.setattr(requests.Session, 'post', mypost)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.submit(crashInfo, testcase_path)
示例#36
0
文件: mgmt_daemon.py 项目: slox3r/afl
def scan_crashes(base_dir, cmdline_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    Crash files are AFL results named "id:*" inside <base_dir>/crashes.
    Each file is reproduced with AutoRunner and, on success, submitted via
    Collector. A "<crash>.submitted" or "<crash>.failed" marker file is
    created next to each processed crash so it is skipped on later scans.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        # Remember which argument (if any) is the AFL "@@" testcase
        # placeholder so it can be substituted per crash file below.
        with open(cmdline_path, 'r') as cmdline_file:
            for idx, line in enumerate(cmdline_file):
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        # Guard against an empty cmdline file; cmdline[0] below would
        # otherwise raise an IndexError instead of a usable error message.
        if not cmdline:
            print("Error: Command line file is empty. Check your binary configuration file.", file=sys.stderr)
            return 2

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print("Error: Creating program configuration from binary failed. Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector()

        for crash_file in crash_files:
            stdin = None

            if test_idx is not None:
                # Target reads the testcase from a file argument.
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            else:
                # Target reads the testcase from stdin.
                # NOTE(review): crash inputs are often binary; text mode may
                # raise UnicodeDecodeError here -- confirm whether 'rb' and a
                # bytes stdin are accepted by AutoRunner before changing.
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # Marker file: skip this crash on subsequent scans.
                open(crash_file + ".submitted", 'a').close()
            else:
                # Marker file: remember the failure so we don't retry forever.
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)