def _insert_crash_cfuzz(self, crash_data):
    # FIXME validate user provided data
    job = Job.objects.get(name=crash_data['job_name'])
    iteration = self.get_iteration_of_crash(job)
    if crash_data['crash']:
        with open(crash_data['filename'], 'rb') as f:
            data = f.read()
        logger.debug('Inserting crash: %s.' % str(crash_data))
        cfuzz_crash = Crash(job_id=job.id,
                            crash_signal=crash_data['signal'],
                            test_case=data,
                            verified=False,
                            iteration=iteration)
        cfuzz_crash.save()
        logger.debug('Crash stored')
    else:
        logger.debug('No crash, cleaning up')
    # remove the temporary test case file in either case
    try:
        os.remove(crash_data['filename'])
    except OSError as e:
        logger.error('Error: %s - %s.' % (e.filename, e.strerror))
    stats = {
        'fuzzer': 'cfuzz',
        'job_id': str(job.id),
        'job_name': job.name,
        'runtime': 0,
        'total_execs': '+1'  # delta, not an absolute count
    }
    self.wq.publish('stats', json.dumps(stats))
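# A hedged sketch of how a downstream stats consumer might interpret the
# '+N' delta convention in 'total_execs' above; _apply_execs_delta is a
# hypothetical helper, not part of this codebase.
def _apply_execs_delta(current_execs, total_execs_field):
    value = str(total_execs_field)
    if value.startswith('+'):
        # '+1' and friends are deltas added to the running counter
        return current_execs + int(value[1:])
    # plain numbers are absolute counts that replace the counter
    return int(value)

# e.g. _apply_execs_delta(41, '+1') == 42; _apply_execs_delta(41, '100') == 100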
def show_previous_crash(crash_id):
    job_id_of_crash = Crash.objects.get(id=crash_id)['job_id']
    all_job_crashes = list(Crash.objects(job_id=job_id_of_crash))
    for index, crash in enumerate(all_job_crashes):
        if str(crash['id']) == str(crash_id):
            # wrap around to the last crash when the first one is selected
            if index == 0:
                next_crash_index = len(all_job_crashes) - 1
            else:
                next_crash_index = index - 1
            break
    return show_crash(all_job_crashes[next_crash_index]['id'])
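# The wrap-around above can also be written with modular arithmetic, since
# Python's % maps -1 to len - 1. A hedged, behaviour-equivalent sketch;
# _previous_crash_id is a hypothetical name, not part of this codebase.
def _previous_crash_id(crash_id):
    job_id = Crash.objects.get(id=crash_id)['job_id']
    crashes = list(Crash.objects(job_id=job_id))
    ids = [str(c['id']) for c in crashes]
    prev_index = (ids.index(str(crash_id)) - 1) % len(crashes)
    return crashes[prev_index]['id']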
def jobs_download(job_id):
    if job_id is None:
        flask.flash('Invalid job ID')
        return flask.redirect('/jobs/show')
    job = Job.objects.get(id=job_id)
    if not can_do_stuff_with_job(current_user, job.owner):
        flask.flash('User is not allowed to download job.')
        return flask.redirect('/jobs/show')
    job_crashes = Crash.objects(job_id=job_id)
    if not job_crashes:
        # previously a FIXME: without this guard the view returned None
        # when a job had no crashes yet
        flask.flash('No crashes available for this job.')
        return flask.redirect('/jobs/show')
    imz = InMemoryZip()
    summary = {}
    for c in job_crashes:
        summary[str(c.id)] = _get_summary_for_crash(c)
        imz.append('%s' % str(c.id), c.test_case)
    imz.append('summary.json', json.dumps(summary, indent=4))
    filename = os.path.join('/tmp', '%s.zip' % job_id)
    if os.path.exists(filename):
        os.remove(filename)
    imz.writetofile(filename)
    return flask.send_file(filename, as_attachment=True)
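# InMemoryZip is not defined in this section; a minimal standard-library
# sketch of the two methods jobs_download relies on (append, writetofile).
# The project's actual implementation may differ.
import io
import zipfile

class InMemoryZip:
    def __init__(self):
        self._buffer = io.BytesIO()

    def append(self, filename, content):
        # mode 'a' preserves members added by earlier append() calls
        with zipfile.ZipFile(self._buffer, 'a', zipfile.ZIP_DEFLATED) as zf:
            zf.writestr(filename, content)
        return self

    def writetofile(self, filename):
        with open(filename, 'wb') as f:
            f.write(self._buffer.getvalue())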
def calculate_general_statistics(self):
    iteration, runtime, execs_per_sec = self.summarize_individual_job_statistics()
    return {
        'iteration': iteration,
        'runtime': runtime,
        'execs_per_sec': execs_per_sec,
        'number_of_job_names': Job.objects.count(),
        'number_of_crashes': Crash.objects().count(),
        'number_of_unique_crashes': self.calculate_number_of_unique_crashes(),
        'number_of_unique_exploitable_crashes': self.calculate_number_of_unique_and_exploitable_crashes(),
    }
def delete_job(job_id):
    if job_id is None:
        flask.abort(400, description='Invalid job ID')
    if flask.request.method == 'POST':
        # first() returns None for an unknown ID; get() would raise instead
        job = Job.objects(id=job_id).first()
        if job:
            job.delete()
            crashes = Crash.objects(job_id=job_id)
            crashes.delete()
        return flask.redirect('/jobs/show')
    else:
        return flask.render_template('jobs_delete.html', id=job_id)
def api_delete_job(job_id=None):
    if job_id is None:
        return json.dumps({'success': False, 'msg': 'no job ID provided'})
    # first() returns None for an unknown ID; get() would raise instead
    job = Job.objects(id=job_id).first()
    if job:
        job.delete()
        crashes = Crash.objects(job_id=job_id)
        crashes.delete()
        return json.dumps({'success': True})
    else:
        return json.dumps({'success': False})
def _insert_crash_afl(self, crash_data):
    logger.info("Inserting AFL crash: %s" % crash_data['filename'])
    crash_path = os.path.join(f3c_global_config.samples_path, "crashes")
    new_path = os.path.join(crash_path, crash_data['filename'])
    with open(new_path, 'wb') as fp:
        fp.write(base64.b64decode(crash_data['crash_data']))
    logger.debug("Inserting AFL crash with signal %i." % crash_data['signal'])
    if 'classification' in crash_data:
        # TODO ensure that verified is a boolean
        afl_crash = Crash(job_id=crash_data['job_id'],
                          crash_signal=crash_data['signal'],
                          crash_path=new_path,
                          verified=crash_data['verified'],
                          date=datetime.datetime.now(),
                          crash_hash=crash_data['hash'],
                          exploitability=crash_data['classification'],
                          additional=crash_data['description'])
    else:
        afl_crash = Crash(job_id=crash_data['job_id'],
                          crash_signal=crash_data['signal'],
                          crash_path=new_path,
                          date=datetime.datetime.now(),
                          verified=crash_data['verified'])
    afl_crash.save()
    logger.debug("Crash stored")
def _insert_crash_cfuzz(self, crash_data):
    crash_path = os.path.join(f3c_global_config.samples_path, "crashes")
    temp_file = crash_data['filename']
    if not os.path.exists(temp_file):
        logger.error("Test case file %s does not exist!" % temp_file)
        return False
    # read the test case and store it under its SHA-1 hash
    with open(temp_file, "rb") as f:
        buf = f.read()
    file_hash = hashlib.sha1(buf).hexdigest()
    new_path = os.path.join(crash_path, file_hash)
    logger.info("Saving test file %s" % new_path)
    shutil.move(temp_file, new_path)
    logger.debug("Inserting crash: %s." % str(crash_data))
    cfuzz_crash = Crash(job_id=crash_data['job_id'],
                        crash_signal=crash_data['signal'],
                        crash_path=new_path,
                        date=datetime.datetime.now(),
                        verified=False)
    cfuzz_crash.save()
    logger.debug("Crash stored")
def get_crash_stats_of_date(date):
    number_of_crashes = Crash.objects(date__gte=date,
                                      date__lt=date + timedelta(days=1)).count()
    unique_crashes = get_unique_crashes_of_date(date)
    unique_exploitable_crashes = get_unique_exploitable_crashes_of_date(date)
    return {
        "number_of_crashes": number_of_crashes,
        "number_of_unique_crashes": len(list(unique_crashes)),
        "number_of_unique_exploitable_crashes": len(list(unique_exploitable_crashes)),
    }
def delete_job(job_id):
    if job_id is None:
        flask.abort(400, description='Invalid job ID')
    if flask.request.method == 'POST':
        # first() returns None for an unknown ID; get() would raise instead
        job = Job.objects(id=job_id).first()
        if job:
            if not can_do_stuff_with_job(current_user, job.owner):
                logging.error('User %s can not delete job with id %s'
                              % (current_user.email, str(job.id)))
                flask.flash('You are not allowed to delete this job.')
            else:
                job.delete()
                crashes = Crash.objects(job_id=job_id)
                crashes.delete()
        return flask.redirect('/jobs/show')
    else:
        return flask.render_template('jobs_delete.html', id=job_id)
def on_message(self, channel, method_frame, header_frame, body):
    verified_crash = json.loads(body.decode('utf-8'))
    if verified_crash['verified']:
        logger.debug('[CrashVerification] Got verified crash with ID %s'
                     % verified_crash['crash_id'])
        crash = Crash.objects(id=verified_crash['crash_id']).limit(1)
        crash.update(verified=True,
                     exploitability=verified_crash['classification'],
                     additional=verified_crash['short_desc'],
                     crash_hash=verified_crash['crash_hash'])
        logger.debug('[CrashVerification] Updated crash in DB.')
    else:
        logger.debug('[CrashVerification] Could not verify crash.')
        # first() returns None for an unknown ID; get() would raise instead
        crash = Crash.objects(id=verified_crash['crash_id']).first()
        if crash:
            crash.delete()
            logger.debug('[CrashVerification] Deleted crash from database.')
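# A hedged example of the message shape on_message expects; the field names
# come from the handler above, the values are purely illustrative.
example_body = json.dumps({
    'verified': True,
    'crash_id': '5c4f9c2e1d41c81f1a000000',  # illustrative ObjectId string
    'classification': 'EXPLOITABLE',
    'short_desc': 'SIGSEGV near null write',
    'crash_hash': 'deadbeefcafebabe',
}).encode('utf-8')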
def search_crash(error=None):
    # the keys of an arbitrary crash document describe the collection's structure
    crash_database_structure = [item for item in Crash._get_collection().find()[0]]
    job_names = _get_job_names_of_user()
    if request.method == 'POST':
        try:
            crashes = process_search_query()
            if crashes:
                return show_crashes(crashes=crashes)
            else:
                error = 'No crashes match your search request.'
        except Exception as e:
            error = e
    return render_template("crashes_search.html",
                           database_structure=crash_database_structure,
                           job_names=job_names,
                           error=error)
def _insert_crash_afl(self, crash_data):
    logger.debug('Inserting AFL crash with signal %i.' % crash_data['signal'])
    job = Job.objects.get(name=crash_data['job_name'])
    iteration = self.get_iteration_of_crash(job)
    if 'classification' in crash_data:
        afl_crash = Crash(job_id=job.id,
                          crash_signal=crash_data['signal'],
                          test_case=crash_data['crash_data'].encode(),
                          verified=crash_data['verified'],
                          crash_hash=crash_data['hash'],
                          exploitability=crash_data['classification'],
                          additional=crash_data['description'],
                          iteration=iteration)
    else:
        # was job_id=crash_data['job_name'], which stored the job's name
        # instead of its ID; use job.id as in the branch above
        afl_crash = Crash(job_id=job.id,
                          crash_signal=crash_data['signal'],
                          test_case=crash_data['crash_data'].encode(),
                          verified=crash_data['verified'],
                          iteration=iteration)
    afl_crash.save()
    logger.debug('Crash stored')
def _insert_crash_syzkaller(self, crash_data):
    logger.debug('Inserting Syzkaller crash with signal {}.'.format(crash_data['signal']))
    job = Job.objects.get(name=crash_data['job_name'])
    iteration = 0
    if 'classification' in crash_data:
        syzkaller_crash = Crash(job_id=job.id,
                                crash_signal=crash_data['signal'],
                                test_case=crash_data['test_case'].encode(),
                                verified=crash_data['verified'],
                                crash_hash=crash_data['hash'],
                                exploitability=crash_data['classification'],
                                additional=crash_data['description'],
                                iteration=iteration)
    else:
        syzkaller_crash = Crash(job_id=job.id,
                                crash_signal=crash_data['signal'],
                                test_case=crash_data['test_case'].encode(),
                                verified=crash_data['verified'],
                                iteration=iteration)
    syzkaller_crash.save()
    logger.debug('Crash stored')
def calculate_last_24_hours_crashes_per_time_interval(self):
    last_24_hours_crashes = list(Crash.objects(date__gte=self.date_now - timedelta(days=1))
                                 .only("date", "iteration")
                                 .order_by("date"))
    self.crash_counter -= len(last_24_hours_crashes)
    return self.calculate_crashes_and_iterations_per_time_interval(last_24_hours_crashes)
def calculate_all_crashes_per_time_interval(self):
    all_crashes = Crash.objects().only("date", "iteration").order_by("date")
    return self.calculate_crashes_and_iterations_per_time_interval(list(all_crashes))
def calculate_different_crash_signals(self):
    # note: this issues one count query per distinct signal
    distinct_crash_signals = Crash.objects.distinct('crash_signal')
    distinct_crash_signals_with_quantity = {}
    for crash_signal in distinct_crash_signals:
        distinct_crash_signals_with_quantity[crash_signal] = \
            Crash.objects(crash_signal=crash_signal).count()
    return distinct_crash_signals_with_quantity
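# The loop above issues one count query per distinct signal. A hedged sketch
# of a single-pass alternative via the underlying pymongo collection's
# aggregation pipeline; an option, not what this codebase currently does.
def calculate_different_crash_signals_aggregated(self):
    pipeline = [{'$group': {'_id': '$crash_signal', 'count': {'$sum': 1}}}]
    return {doc['_id']: doc['count']
            for doc in Crash._get_collection().aggregate(pipeline)}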
def calculate_all_crashes_per_time_interval(self):
    all_crashes = Crash.objects().only('job_id', 'date', 'iteration').order_by('date')
    return self.calculate_crashes_and_iterations_per_time_interval_for_all_jobs(list(all_crashes))
def calculate_last_24_hours_crashes_per_time_interval(self):
    last_24_hours_crashes = list(Crash.objects(date__gte=self.date_now - timedelta(days=1))
                                 .only('job_id', 'date', 'iteration')
                                 .order_by('date'))
    self.crash_counter -= len(last_24_hours_crashes)
    return self.calculate_crashes_and_iterations_per_time_interval_for_all_jobs(last_24_hours_crashes)
def _get_non_verified_crashes(self):
    return Crash.objects(verified=False)