def test_session_scope_exc():
    """An exception raised inside the scope must roll back, never commit."""
    mock_session = MagicMock()

    with pytest.raises(Exception):
        with base.session_scope(backend=lambda: mock_session) as session:
            raise RuntimeError

    mock_session.commit.assert_not_called()
    mock_session.rollback.assert_called()
def unfinished_jobs():
    """
    Return every job whose status is not 'ended'.

    :return: list of job rows, each converted to a dict
    """
    query = "SELECT * FROM jobs where status != 'ended'"
    with session_scope(True) as session:
        rows = session.execute(query)
        jobs = [dict(row) for row in rows]
        rows.close()
    return jobs
def get_process_state(process_id):
    """
    Return the state of a process as a list of jobs {id, status}.

    :param process_id: id of the process to report on
    :return: list of {id, status} dicts, one per job
    """
    # Bound parameter keeps the query safe against SQL injection.
    query = text(
        "SELECT id, status FROM jobs WHERE process_id =:process_id"
    ).bindparams(process_id=process_id)
    with session_scope(True) as session:
        rows = session.execute(query)
        state = [dict(row) for row in rows.fetchall()]
        rows.close()
    return state
def remove_job(job_id):
    """
    Remove the job with the specified id and all of its dependent rows
    (tasks, logs, jobsteps). Jobs might be removed when for instance the
    status is rejected.

    :param job_id: id of the job to delete
    :return: the remaining unfinished jobs
    """
    # Bind job_id as a query parameter instead of interpolating it into the
    # SQL with an f-string (SQL injection risk); this matches the
    # text(...).bindparams(...) convention used by get_process_state.
    stmt = text("""
        DELETE FROM tasks WHERE jobid=:job_id;
        DELETE FROM logs WHERE jobid=:job_id;
        DELETE FROM jobsteps WHERE jobid=:job_id;
        DELETE FROM jobs WHERE id=:job_id;
        COMMIT;
    """).bindparams(job_id=job_id)
    with session_scope(True) as session:
        result = session.execute(stmt)
        result.close()
    return unfinished_jobs()
def _broadcasts(self):
    """Broadcast 'new_logs' events on any new log messages.

    Keeps polling the database while clients are connected; emits
    'new_logs' when a newer log id appears and 'update_services' when the
    service timestamp advances.

    :return: None
    """
    print("Start broadcast new logs", self._clients)
    while self._clients > 0:
        with session_scope(True) as session:
            newest_logid = get_last_logid(session)
            newest_timestamp = get_last_service_timestamp(session)
        if newest_logid != self._previous_last_logid:
            self._socketio.emit('new_logs', {'last_logid': newest_logid})
            self._previous_last_logid = newest_logid
        if newest_timestamp != self._previous_last_timestamp:
            self._socketio.emit('update_services', {'last_timestamp': newest_timestamp.isoformat()})
            self._previous_last_timestamp = newest_timestamp
        time.sleep(LogBroadcaster.CHECK_LOGS_INTERVAL)
    self._broadcaster = None
    print("End broadcast new logs", self._clients)
def test_session_scope_rw():
    """A scope that exits cleanly must commit and close the session."""
    mock_session = MagicMock()

    with base.session_scope(backend=lambda: mock_session) as session:
        assert not isinstance(mock_session.flush, types.LambdaType)

    mock_session.commit.assert_called()
    mock_session.close.assert_called()
def resolve_jobs(self, _, **kwargs):
    """
    Resolve the jobs query: one row per job with durations, age category,
    log counts and the status of its last step.

    :param _: GraphQL parent value (unused)
    :param kwargs: optional 'days_ago' (int, default 10), optional 'search'
        (substring match on log messages); any remaining keys are treated as
        column = value filters on the result set
    :return: list of Job objects, newest first (cached until a new log arrives)
    :raises ValueError: when a filter key is not a plain identifier
    """
    days_ago = 10
    if "days_ago" in kwargs:
        # int() both converts and blocks SQL injection via this value.
        days_ago = int(kwargs["days_ago"])
        del kwargs["days_ago"]

    search = "true"
    if "search" in kwargs:
        # Escape single quotes so the user-supplied term cannot break out
        # of the SQL string literal (SQL injection).
        term = kwargs["search"].lower().replace("'", "''")
        search = f"lower(log.msg) LIKE '%{term}%'"
        del kwargs["search"]

    # Remaining kwargs become column filters. Column names cannot be bound
    # as query parameters, so restrict them to plain identifiers and escape
    # the values to protect against SQL injection.
    conditions = []
    for key, value in kwargs.items():
        if not key.isidentifier():
            raise ValueError(f"Invalid filter column: {key}")
        escaped_value = str(value).replace("'", "''")
        conditions.append(f"{key} = '{escaped_value}'")
    where = " AND ".join(conditions) if conditions else "True"

    query = f"""
SELECT
    firstlog.process_id,
    job.id AS jobid,
    job.end - job.start AS bruto_duration,
    stepdurations.duration AS netto_duration,
    now() - job.start as time_ago,
    CASE
        WHEN now() - job.start <= '24 hours'::interval THEN ' 0 - 24 uur'
        WHEN now() - job.start <= '48 hours'::interval THEN '24 - 48 uur'
        WHEN now() - job.start <= '96 hours'::interval THEN '48 - 96 uur'
        ELSE 'Ouder'
    END AS age_category,
    date(job.start) AS day,
    firstlog.name AS name,
    firstlog.source AS source,
    firstlog.application AS application,
    firstlog.destination AS destination,
    firstlog.catalogue AS catalogue,
    firstlog.entity AS entity,
    job.start AS starttime,
    date_part('year', job.start) AS startyear,
    date_part('month', job.start) AS startmonth,
    job.end AS endtime,
    date_part('year', job.end) AS endyear,
    date_part('month', job.end) AS endmonth,
    step.name AS step,
    CASE WHEN job.status = 'ended' THEN job.status ELSE step.status END AS status,
    job.user AS user,
    job.attribute AS attribute,
    log.infos,
    log.warnings,
    log.errors,
    COALESCE(job.log_counts->>'data_info', '0')::int AS datainfos,
    COALESCE(job.log_counts->>'data_warning', '0')::int AS datawarnings,
    COALESCE(job.log_counts->>'data_error', '0')::int AS dataerrors
FROM (
    SELECT sum(case when log.level = 'INFO' then 1 end) as infos,
           sum(case when log.level = 'WARNING' then 1 end) as warnings,
           sum(case when log.level = 'ERROR' then 1 end) as errors,
           min(log.logid) as logid,
           jobid
    FROM logs log
    WHERE log.timestamp >= now() - '{days_ago} days'::interval AND {search}
    GROUP BY log.jobid
) log
join logs firstlog on firstlog.logid = log.logid
join jobs job ON job.id=log.jobid
left join (
    select jobid, max(id) as stepid from jobsteps group by jobid
) as laststep on laststep.jobid = log.jobid
left join jobsteps step on step.id = laststep.stepid
LEFT JOIN (
    SELECT jobid, SUM(jobsteps.end - jobsteps.start) AS duration
    FROM jobsteps
    GROUP BY jobid
) as stepdurations ON stepdurations.jobid = job.id
"""
    statement = f"""
SELECT *
FROM ({query}) AS result
WHERE {where}
ORDER BY starttime DESC
"""
    statement = text(statement)

    # Response will change when a new log has become available
    with session_scope(True) as session:
        last_logid = get_last_logid(session)

    # Response will also change when the statement changes
    return Query._resolve_cache.resolve(
        "resolve_jobs",
        last_logid,
        statement,
        lambda: [Job(**dict(result)) for result in engine.execute(statement)])