def job_success(result, file, probe):
    """Forward a finished probe job's result for *file* to the frontend.

    On any failure the task is retried up to 3 times, 5 seconds apart.
    """
    try:
        log.info("file:%s probe %s", file, probe)
        celery_frontend.scan_result(file, probe, result)
    except Exception as error:
        log.exception(error)
        raise job_success.retry(countdown=5, max_retries=3, exc=error)
def scan(file, probelist, frontend_scan):
    """Register a new scan for *file* and launch one job per requested probe.

    Probes that are no longer available are answered immediately with an
    error result so the frontend scan does not get stuck waiting.
    """
    try:
        with session_transaction() as session:
            log.debug("scan_id: %s fileweb_id: %s received %s",
                      frontend_scan, file, probelist)
            user = User.get_by_rmqvhost(session)
            scan = scan_ctrl.new(frontend_scan, user, session)
            # Now, create one subtask per file to scan per probe
            pending_jobs = []
            for probe in probelist:
                if probe not in probe_ctrl.available_probes:
                    # send an error message to not stuck the scan
                    # One probe asked is no more present
                    error_result = probe_ctrl.create_error_results(
                        probe, "missing probe", session)
                    celery_frontend.scan_result(file, probe, error_result)
                    continue
                job = Job(scan.id, file, probe)
                session.add(job)
                pending_jobs.append(job)
            session.commit()
            scan_ctrl.launch(scan, pending_jobs, session)
            log.info("scan_id %s: file %s received / %d active probe(s) / "
                     "%d job(s) launched",
                     scan.scan_id, file,
                     len(probe_ctrl.available_probes), len(scan.jobs))
    except Exception as error:
        log.exception(error)
        raise
def scan(file, probelist, frontend_scan):
    """Register a new scan for *file* and launch one job per requested probe.

    A probe that is not in the currently-available set gets an immediate
    error result sent back to the frontend instead of a job, so the scan
    can still complete.
    """
    try:
        with session_transaction() as session:
            log.debug("scan_id: %s fileweb_id: %s received %s",
                      frontend_scan, file, probelist)
            user = User.get_by_rmqvhost(session)
            scan = scan_ctrl.new(frontend_scan, user, session)
            # Now, create one subtask per file to scan per probe
            created_jobs = []
            for probe in probelist:
                if probe in probe_ctrl.available_probes:
                    new_job = Job(scan.id, file, probe)
                    session.add(new_job)
                    created_jobs.append(new_job)
                else:
                    # send an error message to not stuck the scan
                    # One probe asked is no more present
                    missing_res = probe_ctrl.create_error_results(
                        probe, "missing probe", session)
                    celery_frontend.scan_result(file, probe, missing_res)
            session.commit()
            scan_ctrl.launch(scan, created_jobs, session)
            log.info(
                "scan_id %s: file %s received / %d active probe(s) / "
                "%d job(s) launched",
                scan.scan_id, file,
                len(probe_ctrl.available_probes), len(scan.jobs))
    except Exception as exc:
        # log the exception class name along with its message
        log.exception(type(exc).__name__ + " : " + str(exc))
        raise
def test_scan_result(self, m_async_call):
    """scan_result must async-call the frontend's scan_result task."""
    fname = "filename"
    pname = "probe"
    res = "result"
    module.scan_result(fname, pname, res)
    m_async_call.assert_called_once_with(module.frontend_app,
                                         "frontend_app",
                                         "scan_result",
                                         args=[fname, pname, res])
def job_error(parent_taskid, file, probe):
    """Report a failed probe job on *file* to the frontend as an error result.

    On any failure the task is retried up to 3 times, 5 seconds apart.
    """
    try:
        log.info("file:%s probe %s", file, probe)
        with session_query() as session:
            error_result = probe_ctrl.create_error_results(
                probe, "job error", session)
            celery_frontend.scan_result(file, probe, error_result)
    except Exception as error:
        log.exception(error)
        raise job_error.retry(countdown=5, max_retries=3, exc=error)
def job_success(result, jobid):
    """Forward a finished job's result to the frontend and mark it done.

    Any failure is logged and swallowed so the worker still acknowledges
    the task (best-effort reporting, matching the original behavior).
    """
    try:
        (frontend_scanid, filename, probe) = job_ctrl.info(jobid)
        log.info("scanid %s jobid:%d probe %s",
                 frontend_scanid, jobid, probe)
        celery_frontend.scan_result(frontend_scanid, filename, probe, result)
        job_ctrl.success(jobid)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt — narrowed to Exception
        log.info("exception", exc_info=True)
        return
def test_scan_result(self, m_async_call):
    """scan_result must async-call the frontend and attach an error hook."""
    fname = "filename"
    pname = "probe"
    res = "result"
    module.scan_result(fname, pname, res)
    expected_hook = module.route(
        module.frontend_app.signature("frontend_app.scan_result_error",
                                      [fname, pname, res]))
    m_async_call.assert_called_once_with(module.frontend_app,
                                         "frontend_app",
                                         "scan_result",
                                         args=[fname, pname, res],
                                         link_error=expected_hook)
def test_scan_result(self, m_async_call):
    """scan_result must async-call the frontend with a scan_result_error
    signature wired up as the link_error hook."""
    args = ["filename", "probe", "result"]
    module.scan_result(*args)
    err_signature = module.frontend_app.signature(
        "frontend_app.scan_result_error", args)
    hook_error = module.route(err_signature)
    m_async_call.assert_called_once_with(module.frontend_app,
                                         "frontend_app",
                                         "scan_result",
                                         args=args,
                                         link_error=hook_error)
def job_error(parent_taskid, jobid):
    """Mark a brain job as failed and send an error result to the frontend.

    A synthetic result dict (status -1) is sent so the frontend scan can
    still complete. Failures are logged and swallowed (best-effort),
    matching the original behavior.
    """
    try:
        (frontend_scanid, filename, probe) = job_ctrl.info(jobid)
        log.info("scanid %s jobid:%d probe %s",
                 frontend_scanid, jobid, probe)
        job_ctrl.error(jobid)
        # Build the error result in one literal instead of piecemeal keys.
        result = {
            'status': -1,
            'name': probe,
            'error': "Brain job error",
            'duration': job_ctrl.duration(jobid),
        }
        celery_frontend.scan_result(frontend_scanid, filename, probe, result)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt — narrowed to Exception
        log.info("exception", exc_info=True)
        return
def job_error(parent_taskid, frontend_scan_id, filename, probename):
    """Send a synthetic error result for a failed probe job to the frontend.

    Looks up the probe to report its display name/category, then forwards
    a status -1 result. Retries up to 3 times, 5 seconds apart, on failure.
    """
    try:
        log.info("scanid %s: filename:%s probe %s",
                 frontend_scan_id, filename, probename)
        with session_query() as session:
            probe = Probe.get_by_name(probename, session)
            result = {
                'status': -1,
                'name': probe.display_name,
                'type': probe.category,
                'error': "job error",
                'duration': None,
            }
            # FIX: previously the Probe ORM object was passed as the probe
            # argument; every sibling caller passes the probe-name string,
            # and an ORM instance is not safely serializable in the message.
            celery_frontend.scan_result(frontend_scan_id, filename,
                                        probename, result)
    except Exception as e:
        log.exception(e)
        raise job_error.retry(countdown=5, max_retries=3, exc=e)
def scan(frontend_scanid, scan_request_dict):
    """Register a multi-file scan request and launch one job per
    (file, probe) pair.

    Probes no longer present in the active probe list are answered right
    away with an error result so the scan does not get stuck, then the
    frontend is notified that the scan was launched.
    """
    try:
        with session_transaction() as session:
            log.debug("scanid: %s received %s",
                      frontend_scanid, scan_request_dict)
            user = User.get_by_rmqvhost(session)
            scan_request = IrmaScanRequest(scan_request_dict)
            scan = scan_ctrl.new(frontend_scanid, user,
                                 scan_request.nb_files, session)
            available_probelist = probe_ctrl.get_list(session)
            # Now, create one subtask per file to scan per probe
            new_jobs = []
            for filehash in scan_request.filehashes():
                for probe in scan_request.get_probelist(filehash):
                    if probe not in available_probelist:
                        # send an error message to not stuck the scan
                        # One probe asked is no more present
                        res = probe_ctrl.create_error_results(
                            probe, "missing probe", session)
                        celery_frontend.scan_result(frontend_scanid,
                                                    filehash, probe, res)
                        continue
                    job = Job(scan.id, filehash, probe)
                    session.add(job)
                    new_jobs.append(job)
            session.commit()
            scan_ctrl.launch(scan, new_jobs, session)
            celery_frontend.scan_launched(scan.scan_id,
                                          scan_request.to_dict())
            log.info("scanid %s: %d file(s) received / %d active probe(s) / "
                     "%d job(s) launched",
                     scan.scan_id, scan.nb_files,
                     len(available_probelist), len(scan.jobs))
    except Exception as exc:
        log.exception(exc)
        raise