def wait_for_execution(t):
    """Note that a query started manually from an IDE will stay "executing" until you fetch all of its rows.""" \
        """ In such a case "Wait for session" can be helpful."""
    if not t.data:
        r = execute(t.target,
                    "select min(sql_exec_start) from v$sql_monitor"
                    " where sql_id = :sql_id and status = 'EXECUTING'",
                    t.parameters, 'one', False)
        if not r[0]:
            return t.abort(f"SQL {t.parameters['sql_id']} not found")
        t.data = r[0]
        return
    r = execute(t.target,
                "select nvl(sum(case when status = 'EXECUTING' then 1 else 0 end), 0) e"
                ", nvl(sum(case when status like 'DONE%' then 1 else 0 end), 0) d"
                ", max(status) s"
                " from v$sql_monitor where sql_id = :sql_id and sql_exec_start >= :start_time",
                {'sql_id': t.parameters['sql_id'], 'start_time': t.data},
                'one', False)
    if r[0] + r[1] == 0:
        return t.abort(f"SQL {t.parameters['sql_id']} not found")
    if r[0] > 0:
        return
    if t.reply_to_message_id:
        return t.finish(r[2].lower())
    return t.finish('{} on {} is {}.'.format(
        t_link(f"{t.target}/Q/{t.parameters['sql_id']}", t.parameters['sql_id']),
        t.target, r[2].lower()))
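
# Every task in this section relies on two helpers defined elsewhere in the
# project: execute(target, sql, params, fetch_mode, ...) and the task object
# "t". The sketch below only summarizes the contract these call sites assume;
# the class name, defaults, and method signatures are inferred, not the real
# definitions.
from typing import Any, Callable, Optional

class TaskSketch:
    """Assumed task interface, inferred from the call sites in this section."""
    target: str                # database alias, also passed to execute()
    parameters: dict           # required task parameters
    optional: dict             # optional parameters, read via t.optional.get()
    data: Any                  # per-task state kept between polls
    create_date: Any           # datetime of task creation
    period: str                # polling period such as '30m' (value + unit suffix)
    reply_to_message_id: Optional[int]
    user_name: str

    def abort(self, message: str) -> None: ...    # stop the task with an error
    def finish(self, message: str) -> None: ...   # stop the task with a result
    # rows + per-row formatter -> notification text; the trailing arguments
    # appear to be a header formatter, a threshold, and a threshold multiplier
    def get_message(self, rows, item: Callable, header: Optional[Callable] = None,
                    threshold=0, multiplier=1) -> Optional[str]: ...
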
def wait_for_session(t):
    """Be sure to choose the main session if your query started in parallel mode."""
    if not t.data:
        e = execute(t.target,
                    "select sid, status from v$session where sid = :sid",
                    t.parameters, 'one', False)
        if not e:
            return t.abort('Not found')
        t.data = {'sid': e[0]}
    r = execute(t.target,
                "select sid, status from v$session where sid = :sid",
                t.data, 'one', False)
    if not r:
        return t.finish(f"Session {t.data['sid']} is not found on {t.target}.")
    if r[1] != 'INACTIVE':
        return
    return t.finish(f'Session {r[0]} on {t.target} is {r[1].lower()}.')
def check_job_status(t):
    r = execute(t.target,
                "select job, log_user, nvl(failures, 0) fails from dba_jobs where broken = 'Y'",
                {}, 'many', False)
    return t.get_message(
        r, lambda o, i: f'Job {i[0]} ({i[1]}) on {o.target} is broken, {i[2]} failures.',
        None, 0)
def wait_for_sql_error(t):
    """This task reports SQL errors stored in the v$sql_monitor cache; errors displaced""" \
        """ from the cache are lost. A good approach is creating an "after servererror" trigger."""
    if not t.data:
        t.data = {'start_date': t.create_date}
    end_date = datetime.now()
    r = execute(
        t.target,
        "select username, sql_id, sid, error_message"
        " from v$sql_monitor"
        " where status = 'DONE (ERROR)'"
        " and error_number not in (1013, 28, 604, 24381)"  # cancel, kill, recursive, DML array
        " and last_refresh_time between :start_date and :end_date"
        " and (username not like :user_name or username is null)",
        {
            'start_date': t.data['start_date'],
            'end_date': end_date,
            'user_name': t.optional.get('ignore_user', '---')
        }, 'many', False)
    t.data['start_date'] = end_date
    return t.get_message(
        r, lambda o, i: '{} ({}, {}) on {} has failed ({}).'.format(
            t_link(f'{o.target}/Q/{i[1]}', i[1]), i[2], i[0], o.target,
            i[3].replace('\n', ' ')))
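
# The docstring above recommends an "after servererror" trigger as a durable
# alternative to polling the v$sql_monitor cache. A minimal sketch of such a
# trigger follows; the table and trigger names are hypothetical, and the DDL
# would be deployed by a DBA rather than by this application.
SERVERERROR_TRIGGER_SKETCH = """
create table sql_error_log (
    log_date    date default sysdate,
    username    varchar2(128),
    error_stack varchar2(4000),
    sql_text    clob
);

create or replace trigger trg_log_servererror
after servererror on database
declare
    pragma autonomous_transaction;
    v_sql  ora_name_list_t;
    v_stmt clob;
    n      pls_integer;
begin
    -- ora_sql_txt() fills the collection with the failing statement in pieces
    n := ora_sql_txt(v_sql);
    for i in 1 .. nvl(n, 0) loop
        v_stmt := v_stmt || v_sql(i);
    end loop;
    insert into sql_error_log (username, error_stack, sql_text)
    values (ora_login_user, dbms_utility.format_error_stack, v_stmt);
    commit;
end;
"""
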
def wait_for_heavy(t):
    r = execute(
        t.target,
        "select username, sql_id, exec_time_min, temp_usage_gb, exec_id, sid from"
        " (select s.username, m.sql_id, to_char(round(elapsed_time / 60000000)) exec_time_min, s.sid,"
        " m.sql_id || to_char(m.sql_exec_id) || to_char(m.sql_exec_start, 'yyyymmddhh24miss') exec_id,"
        " rtrim(to_char(((nvl(sum(u.blocks), 0) * min(p.value)) / 1024 / 1024 / 1024), 'fm999990d99')"
        ", to_char(0,'fmd')) temp_usage_gb"
        " from v$session s"
        " left join v$sort_usage u on s.saddr = u.session_addr"
        " join v$parameter p on p.name = 'db_block_size'"
        " join v$sql_monitor m on m.sid = s.sid and m.session_serial# = s.serial#"
        " where m.status = 'EXECUTING'{}{}"
        " group by s.username, m.sql_id, round(elapsed_time / 60000000), s.sid,"
        " m.sql_id || to_char(m.sql_exec_id) || to_char(m.sql_exec_start, 'yyyymmddhh24miss'))"
        " where exec_time_min >= :exec_time_min or temp_usage_gb >= :temp_usage_gb".format(
            ' and s.username like :user_name' if t.optional.get('user_name', None) else '',
            ' and s.username not like :ignore_user' if t.optional.get('ignore_user', None) else ''),
        {**t.parameters, **t.optional}, 'many', False)
    return t.get_message(
        r, lambda o, i: '{} ({}, {}) on {} is executing {} minutes and consumes {} Gb of temp space.'
        .format(t_link(f'{o.target}/Q/{i[1]}', i[1]), i[5], i[0], o.target, i[2], i[3]),
        None, 4)
def wait_for_ts(t):
    """The notification will be sent again only when the threshold is crossed anew."""
    p = {
        'pct_used': t.parameters['pct_used'],
        'tablespace_name': t.optional.get('tablespace_name')
    }
    if not p['tablespace_name']:
        p.pop('tablespace_name')
    r = execute(
        t.target,
        "select * from (select t.tablespace_name,"
        " round((max_files_size - (files.free_files_space + free.free_space)) / 1024 / 1024 / 1024) used_gb,"
        " round(files.max_files_size / 1024 / 1024 / 1024) allocated_gb,"
        " round(((max_files_size - (files.free_files_space + free.free_space))"
        " / max_files_size) * 100) pct_used"
        " from dba_tablespaces t"
        " left join (select tablespace_name,"
        " sum(nvl(bytes,0)) free_space"
        " from dba_free_space"
        " group by tablespace_name) free on free.tablespace_name = t.tablespace_name"
        " left join (select tablespace_name,"
        " sum(decode(maxbytes, 0, bytes, maxbytes)) max_files_size,"
        " sum(decode(maxbytes, 0, bytes, maxbytes)) - sum(bytes) free_files_space"
        " from dba_data_files group by tablespace_name) files on t.tablespace_name = files.tablespace_name)"
        " where pct_used >= :pct_used{}".format(
            ' and tablespace_name like :tablespace_name'
            if t.optional.get('tablespace_name') else ''),
        p, 'many', False)
    return t.get_message(
        r, lambda o, i: f'Tablespace {i[0]} on {o.target} is {i[3]}%'
        f' used ({i[1]} of {i[2]} Gb).', None, 0)
def get_row_count(target, owner, table):
    date_columns = execute(
        target,
        "select column_name from all_tab_columns"
        " where owner = :o and table_name = :t and data_type = 'DATE'"
        " order by column_name", {'o': owner, 't': table})
    if 'do' not in request.args:
        return render_template('row_count.html', date_columns=date_columns, data=None)
    check_for_column = execute(
        target,
        "select owner, table_name, column_name from all_tab_columns"
        " where owner = :o and table_name = :t and data_type = 'DATE'"
        " and column_name = :c",
        {'o': owner, 't': table, 'c': request.args.get('column_name', '')},
        fetch_mode='one')
    if not check_for_column:
        flash('No such column')
        return render_template('row_count.html', date_columns=date_columns, data=None)
    rr, required_values = parse_parameters(request.args, {'date_from': 'datetime'})
    if rr:
        flash(f'Incorrect value for required parameter: {rr}')
        return render_template('row_count.html', date_columns=date_columns, data=None)
    data = execute(
        target,
        f"select trunc({check_for_column[2]}) date_column, count({check_for_column[2]}) row_count"
        f" from {check_for_column[0]}.{check_for_column[1]}"
        f" where {check_for_column[2]} >= :date_from"
        f" group by trunc({check_for_column[2]})"
        f" order by trunc({check_for_column[2]})", required_values)
    if not data:
        flash('No rows found for this period')
    return render_template('row_count.html', date_columns=date_columns, data=data)
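
# parse_parameters() is used here and in the ASH / top-activity views but is
# defined outside this section. The sketch below documents the contract the
# call sites assume: (source dict, {name: type} spec, optional skip-empty flag)
# -> (bad_key_or_None, converted_values). The supported type names and the
# accepted date formats (including relative values such as '-1h') are inferred
# from the call sites and are assumptions, not the real implementation.
from datetime import datetime, timedelta

def parse_parameters_sketch(source, spec, skip_empty=False):
    """Validate and convert request arguments; return (error_key, values)."""
    values = {}
    for key, kind in spec.items():
        raw = source.get(key, '')
        if raw == '' and skip_empty:
            values[key] = ''
            continue
        try:
            if kind == 'int':
                values[key] = int(raw)
            elif kind == 'datetime':
                # relative form like '-1h' (assumed), otherwise an absolute date
                if raw.startswith('-') and raw.endswith('h'):
                    values[key] = datetime.now() - timedelta(hours=int(raw[1:-1]))
                else:
                    values[key] = datetime.strptime(raw, '%d.%m.%Y %H:%M:%S')
            else:
                values[key] = str(raw)
        except ValueError:
            return key, {}
    return None, values
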
def get_awr_report(target):
    s = execute(
        target,
        "select snap_id"
        ", to_char(begin_interval_time, 'dd.mm.yyyy hh24:mi:ss') begin_date"
        ", to_char(end_interval_time, 'dd.mm.yyyy hh24:mi:ss') end_date"
        " from dba_hist_snapshot"
        " order by snap_id")
    if 'do' not in request.args:
        return render_template('awr.html', snapshots=s, data=None)
    if not request.args.get('sql_id', ''):
        r = execute(target,
                    "select output"
                    " from table(dbms_workload_repository.awr_report_html("
                    "(select dbid from v$database)"
                    ", (select instance_number from v$instance)"
                    ", :bid, :eid, 8))",
                    {'bid': request.args['bid'], 'eid': request.args['eid']},
                    fetch_mode='many')
    else:
        r = execute(target,
                    "select output"
                    " from table(dbms_workload_repository.awr_sql_report_html("
                    "(select dbid from v$database)"
                    ", (select instance_number from v$instance)"
                    ", :bid, :eid, :sql_id))",
                    {'bid': request.args['bid'], 'eid': request.args['eid'],
                     'sql_id': request.args['sql_id']},
                    fetch_mode='many')
    if not r:
        flash('Not found')
        return render_template('awr.html', snapshots=s, data=None)
    root = ElementTree.fromstring(''.join(item[0] for item in r if item[0])).find('./body')
    root.find('./h1').clear()
    for item in root:
        item.attrib.pop('border', None)
        item.attrib.pop('class', None)
        item.attrib.pop('summary', None)
    return render_template('awr.html', snapshots=s, data=[
        ElementTree.tostring(item, method='html').decode('utf-8') for item in root
    ])
def check_src_structure(t):
    """This task compares destination and source column types for all existing tables in the specified schema."""
    if t.parameters['source_db'] not in app.config['USERS'][t.user_name][2]:
        return t.abort(
            f"Source target {t.parameters['source_db']} does not exist or is not allowed.")
    target_columns = execute(
        t.target,
        "select c.table_name || '.' || c.column_name, c.data_type, c.data_length"
        " from all_tab_columns c"
        " join all_tables t on t.owner = c.owner and t.table_name = c.table_name"
        " where c.owner = :destination_schema"
        " and c.table_name like :by_target_prefix and c.table_name like :by_target_postfix"
        " order by 1, 2",
        {
            'destination_schema': t.parameters['destination_schema'],
            'by_target_prefix': t.optional.get('by_target_prefix', '') + '%',
            'by_target_postfix': '%' + t.optional.get('by_target_postfix', '')
        }, 'many', False)
    src_columns = execute(
        t.parameters['source_db'],
        "select :prefix || c.table_name || :postfix || '.' || c.column_name,"
        " c.data_type, c.data_length from all_tab_columns c"
        " join all_tables t on t.owner = c.owner and t.table_name = c.table_name"
        " where c.owner = :source_schema order by 1, 2",
        {
            'source_schema': t.parameters['source_schema'],
            'prefix': t.optional.get('by_target_prefix', ''),
            'postfix': t.optional.get('by_target_postfix', '')
        }, 'many', False)
    comparison_result = []
    src_columns = {item[0]: (item[1], item[2]) for item in src_columns}
    for target_column in target_columns:
        c = src_columns.get(target_column[0])
        if not c:
            continue
        if target_column[1] != c[0] or target_column[2] != c[1]:
            comparison_result.append((
                f'{target_column[0]}'
                f'\n {target_column[1]}({target_column[2]}) → {c[0]}({c[1]})',))
    return t.get_message(
        comparison_result, lambda o, i: i[0],
        lambda o: f"Some source tables"
        f" for {o.target}.{o.parameters['destination_schema']} have been changed",
        0)
def wait_for_expiry(t):
    r = execute(t.target,
                "select username, to_char(expiry_date, 'dd.mm.yyyy hh24:mi:ss') exp"
                " from dba_users"
                " where expiry_date between sysdate and sysdate + :expires_in_days",
                t.parameters, 'many', False)
    return t.get_message(
        r, lambda o, i: f'User account {i[0]} on {o.target} expires at {i[1]}.',
        None, 0)
def get_insert_from_select(target, owner, table):
    params = {'owner': owner, 'p_table': table}
    r = execute(
        target,
        "select count(table_name) from all_tables"
        " where owner = :owner and table_name = :p_table", params, 'one')
    if r[0] != 1:
        abort(404)
    column_list = execute(
        target,
        "select column_name from all_tab_cols"
        " where owner = :owner and table_name = :p_table and virtual_column = 'NO'"
        " order by column_id", params)
    column_string_list = '\n , '.join([i[0] for i in column_list])
    return render_template(
        'layout.html',
        formatted_text=f"INSERT /*+ APPEND */ INTO {owner}.{table}\n"
        f" ({column_string_list})\n"
        f"SELECT {column_string_list}\n"
        f"FROM ???.{table};\n"
        f"COMMIT;")
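
# For a two-column table OWNER.T, the view above renders text shaped roughly
# like the following (illustrative output only; "???" is a deliberate
# placeholder the user replaces with the source schema):
#
#   INSERT /*+ APPEND */ INTO OWNER.T
#    (COL_A
#    , COL_B)
#   SELECT COL_A
#    , COL_B
#   FROM ???.T;
#   COMMIT;
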
def wait_for_temp(t):
    """The notification will be sent again only when the threshold is crossed anew."""
    r = execute(t.target,
                "select tablespace_name, to_char(round((used_blocks / total_blocks) * 100)) pct_used"
                " from v$sort_segment"
                " where round((used_blocks / total_blocks) * 100) >= :pct_used",
                t.parameters, 'many', False)
    return t.get_message(
        r, lambda o, i: f'Tablespace {i[0]} on {o.target} is {i[1]}% used.',
        None, 0)
def check_logs_deletion(t):
    """Each occurrence doubles the threshold."""
    r = execute(t.target,
                "select round(nvl(sum(blocks * block_size) / 1024 / 1024 / 1024, 0)) size_gb"
                " from v$archived_log where deleted = 'NO'",
                {}, 'one', False)
    return t.get_message(
        r, lambda o, i: f'{i} Gb of archived logs on {o.target} are waiting to be deleted.',
        None, t.parameters['size_gb'], 2)
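
# Several checks in this section pass a starting threshold plus a multiplier
# (2 or 1.5) to t.get_message(). The real escalation logic is not shown in this
# section; the sketch below only illustrates the assumed behaviour: once a
# notification fires, the effective threshold is multiplied, so the same
# condition re-alerts only after it has grown further.
def next_threshold_sketch(current_value, threshold, multiplier):
    """E.g. threshold=100 Gb, multiplier=2: alerts at 100, then 200, 400, ..."""
    if threshold <= 0 or multiplier <= 1:
        raise ValueError('threshold must be positive and multiplier > 1')
    while threshold <= current_value:
        threshold *= multiplier
    return threshold
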
def wait_for_recycled(t):
    r = execute(t.target,
                "select nvl(round(sum(r.space * p.value) / 1024 / 1024 / 1024), 0) space_gb"
                " from dba_recyclebin r join v$parameter p on p.name = 'db_block_size'"
                " where r.can_purge = 'YES' and nvl(r.space, 0) <> 0",
                {}, 'one', False)
    return t.get_message(
        r, lambda o, i: f'{i} Gb can be purged from recycle bin on {o.target}.',
        None, t.parameters['space_gb'], 2)
def check_concurrency(t):
    """Each occurrence increases the threshold by 1.5x."""
    r = execute(
        t.target,
        "select nvl(round((sum(concurrency_wait_time) / nullif(sum(elapsed_time), 0)) * 100), 0) ct"
        " from v$sql_monitor where status = 'EXECUTING'"
        " and sid in (select sid from v$session where status = 'ACTIVE')",
        {}, 'one', False)
    return t.get_message(
        r, lambda o, i: f'{o.target} has average concurrency rate = {i}%.',
        None, t.parameters['concurrency_pct'], 1.5)
def check_session_stats(t):
    """Please see "Activity -> Session monitor -> Session -> Session stats" to find all available statistic names."""
    r = execute(
        t.target,
        "select s.sid, n.name, s.value from v$sesstat s join v$statname n on s.statistic# = n.statistic#"
        " where n.name = :statistic_name and s.value >= :value order by s.value desc",
        {**t.parameters}, 'many', False)
    return t.get_message(
        r, lambda o, i: 'Session {} on {} has {} = {}.'.format(
            t_link(f'{o.target}/S/{str(i[0])}', str(i[0])), o.target, i[1],
            get_num_str(i[2])), None, 0)
def check_redo_switches(t):
    pt = t.period[-1:]  # period unit suffix, e.g. 'm' or 'h'
    pv = t.period[:-1]  # period value
    t.parameters['start_date'] = datetime.now() - get_offset(pv, pt)
    r = execute(t.target,
                "select count(1) switches_count from v$log_history"
                " where first_time > :start_date having count(1) >= :switches_per_interval",
                {'start_date': t.parameters['start_date'],
                 'switches_per_interval': t.parameters['switches_per_interval']},
                'one', False)
    if not r:
        return None
    return f'Redo logs on {t.target} have been switched {r[0]} times in the last {t.period}.'
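
# get_offset() turns the task period (a value plus a unit suffix, e.g. '30m'
# or '1h') into a timedelta; its definition is outside this section. A minimal
# sketch under that assumption, with the supported unit letters guessed from
# the usage here and in wait_for_queued():
from datetime import timedelta

def get_offset_sketch(value, unit):
    units = {'s': 'seconds', 'm': 'minutes', 'h': 'hours', 'd': 'days'}
    return timedelta(**{units[unit]: int(value)})
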
def check_resource_usage(t):
    r = execute(t.target,
                "select resource_name, to_char(current_utilization), trim(limit_value)"
                ", round((current_utilization / to_number(limit_value)) * 100)"
                " from v$resource_limit"
                " where trim(limit_value) not in ('0', 'UNLIMITED')"
                " and round((current_utilization / to_number(limit_value)) * 100) >= :pct_used",
                t.parameters, 'many', False)
    return '\n'.join(f'The resource {t.target}.{item[0]}'
                     f' is {item[3]}% used ({item[1]} of {item[2]}).' for item in r)
def check_frequent_sql(t):
    """This task is based on v$sqlarea, which accumulates statistics from the moment a SQL statement is cached."""
    r = execute(
        t.target,
        "select sql_id, parsing_schema_name, executions,"
        " to_char(to_date(first_load_time, 'yyyy-mm-dd/hh24:mi:ss'), 'dd.mm hh24:mi')"
        " from v$sqlarea where parsing_schema_name not like '%SYS%'"
        " and executions > :executions order by executions desc",
        {**t.parameters}, 'many', False)
    return t.get_message(
        r, lambda o, i: '{} ({}) executed {} times since {}.'.format(
            t_link(f'{o.target}/Q/{i[0]}', i[0]), i[1], get_num_str(i[2]), i[3]),
        lambda o: f'Frequent executions on {o.target}', 0)
def wait_for_zombie(t):
    """User sessions can stay active while waiting for an event that never comes.""" \
        """ They must be killed to free locked resources."""
    r = execute(t.target,
                "select sid, username from v$session where type = 'USER' and ("
                "(sysdate - last_call_et / 86400 < sysdate - :last_call_minutes * 1 / 24 / 60 and status = 'ACTIVE')"
                " or (event = 'SQL*Net break/reset to client' and status = 'INACTIVE'))",
                {**t.parameters}, 'many', False)
    return t.get_message(
        r, lambda o, i: 'Session {} ({}) on {} seems to be a zombie.'.format(
            t_link(f'{o.target}/S/{str(i[0])}', str(i[0])), i[1], o.target),
        None, 0)
def check_size(t):
    """Each occurrence doubles the threshold."""
    r = execute(t.target,
                "select round(nvl(sum(bytes) / 1024 / 1024, 0)) size_mb"
                " from dba_segments"
                " where owner = :owner and segment_name = :segment_name",
                {'owner': t.parameters['owner'],
                 'segment_name': t.parameters['segment_name']},
                'one', False)
    if not r:
        return t.abort(f'Segment {t.parameters["owner"]}.{t.parameters["segment_name"]} not found.')
    return t.get_message(
        r, lambda o, i: f'{o.parameters["owner"]}.{o.parameters["segment_name"]}'
        f' size reached {i} Mb on {o.target}.', None, t.parameters['size_mb'], 2)
def wait_for_uncommitted(t):
    r = execute(t.target,
                "select distinct s.osuser, s.machine, l.name"
                " from dba_dml_locks l"
                " inner join v$session s on s.sid = l.session_id"
                " where s.status != 'ACTIVE'"
                " and l.name not like :ignore_tables"
                " and round(last_call_et / 60) >= :idle_time_minutes",
                {'idle_time_minutes': t.parameters['idle_time_minutes'],
                 'ignore_tables': t.optional.get('ignore_tables', '-')},
                'many', False)
    return t.get_message(
        r, lambda o, i: f'It seems {i[0]} ({i[1]})'
        f' forgot to commit a transaction on {o.target} ({i[2]}).', None, 0)
def get_ash_report(target):
    if 'do' not in request.args:
        return render_template('ash.html', data=None)
    source = {}
    required = {'l_btime': 'datetime', 'l_etime': 'datetime'}
    source['l_btime'] = request.args.get('l_btime', '')
    source['l_etime'] = request.args.get('l_etime', '')
    if request.args.get('l_sid', ''):
        source['l_sid'] = request.args['l_sid']
        required['l_sid'] = 'int'
    if request.args.get('l_sql_id', ''):
        source['l_sql_id'] = request.args['l_sql_id']
        required['l_sql_id'] = 'str'
    error, required_values = parse_parameters(source, required)
    if error:
        flash(f'Incorrect value: {error}')
        return render_template('ash.html', data=None)
    r = execute(target,
                "select output"
                " from table(dbms_workload_repository.ash_report_html("
                "l_dbid => (select dbid from v$database)"
                ", l_inst_num => (select instance_number from v$instance)"
                ", " + ", ".join(k + ' => :' + k for k in required_values.keys()) + "))",
                required_values, fetch_mode='many')
    if not r:
        flash('Not found')
        return render_template('ash.html', data=None)
    # escape raw "<<" marks in the report so the output parses as XML
    root = ElementTree.fromstring(''.join(
        item[0].replace('<<', '&lt;&lt;') for item in r if item[0])).find('./body')
    root.find('./h1').clear()
    for item in root:
        item.attrib.pop('border', None)
        item.attrib.pop('class', None)
        item.attrib.pop('summary', None)
    return render_template('ash.html', data=[
        ElementTree.tostring(item, method='html').decode('utf-8') for item in root
    ])
def wait_for_queued(t):
    pt = t.period[-1:]  # period unit suffix
    pv = t.period[:-1]  # period value
    t.parameters['start_date'] = datetime.now() - get_offset(pv, pt)
    r = execute(t.target,
                "select nvl(sql_id, 'Unknown sql') || ' ' || event || ' ' || to_char(session_id),"
                " nvl(sql_id, 'Unknown sql'), event, session_id, machine, count(1) waits"
                " from v$active_session_history"
                " where event like 'enq:%' and sample_time > :start_date"
                " and event not like :ignore_event"
                " group by nvl(sql_id, 'Unknown sql') || ' ' || event || ' ' || to_char(session_id),"
                " sql_id, event, session_id, machine"
                " having count(1) > :queued_time_sec",
                {'start_date': t.parameters['start_date'],
                 'queued_time_sec': t.parameters['queued_time_sec'],
                 'ignore_event': t.optional.get('ignore_event', '---')},
                'many', False)
    return t.get_message(
        r, lambda o, i: '{} ({}, {}) on {} has been queued for {} seconds ({}).'.format(
            t_link(f'{o.target}/Q/{i[1]}', i[1]), i[4], i[3], o.target, i[5], i[2]),
        None, 0)
def get_top_activity(target):
    if 'do' not in request.args:
        return render_template('top_activity.html')
    required_source = {
        'start_date': request.args.get('start_date', '-1h'),
        'end_date': request.args.get('end_date', '-0h')
    }
    required = {'start_date': 'datetime', 'end_date': 'datetime'}
    error, required_values = parse_parameters(required_source, required)
    if error:
        flash(f'Incorrect value: {error}')
        return render_template('top_activity.html')
    optional_source = {
        'wait_class': request.args.get('wait_class', ''),
        'event': request.args.get('event', ''),
        'session_id': request.args.get('session_id', ''),
        'user_name': request.args.get('user_name', ''),
        'sql_id': request.args.get('sql_id', ''),
        'object_name': request.args.get('object_name', '')
    }
    _optional = {
        'wait_class': 'str',
        'event': 'str',
        'session_id': 'int',
        'user_name': 'str',
        'sql_id': 'str',
        'object_name': 'str'
    }
    error, optional_values = parse_parameters(optional_source, _optional, True)
    if error:
        flash(f'Incorrect value: {error}')
        return render_template('top_activity.html')
    optional_values = {k: v for k, v in optional_values.items() if v}
    r = execute(
        target,
        "with h as (select sample_id, sample_time,"
        " sql_id, o.object_name, event, event_id, user_id, session_id,"
        " to_char(session_id) || ':' || to_char(session_serial#) sess"
        ", nvl(wait_class, 'CPU') wait_class"
        ", nvl(wait_class_id, -1) wait_class_id"
        ", wait_time, time_waited from v$active_session_history ash"
        " left join dba_objects o on o.object_id = ash.current_obj#"
        " where sample_time >= trunc(:start_date, 'mi') and sample_time < trunc(:end_date, 'mi')"
        " and sample_time > trunc(sysdate){}{}{}{}{}{})"
        " select 1 t, to_char(sample_time, 'hh24:mi') s, wait_class v1, wait_class_id v2, count(1) c"
        " from h group by to_char(sample_time, 'hh24:mi'), wait_class, wait_class_id union all"
        " select 2 t, sql_id s, wait_class v1, wait_class_id v2, count(1) c from h"
        " where sql_id is not null and sql_id in (select sql_id"
        " from (select sql_id, row_number() over (order by tc desc) rn"
        " from (select sql_id, count(1) tc from h"
        " where sql_id is not null group by sql_id)) where rn <= 10)"
        " group by sql_id, wait_class, wait_class_id union all"
        " select 6 t, to_char(h.session_id) || ':' || nvl(u.username, '') s,"
        " wait_class v1, wait_class_id v2, count(1) c from h"
        " left join dba_users u on u.user_id = h.user_id"
        " where sess in (select sess"
        " from (select sess, row_number() over (order by tc desc) rn"
        " from (select sess, count(1) tc from h"
        " group by sess)) where rn <= 10)"
        " group by to_char(h.session_id) || ':' || nvl(u.username, ''), wait_class, wait_class_id union all"
        " select 3 t, object_name s, wait_class v1, wait_class_id v2, count(1) c from h"
        " where object_name is not null and object_name in (select object_name"
        " from (select object_name, row_number() over (order by tc desc) rn"
        " from (select object_name, count(1) tc from h"
        " where object_name is not null group by object_name))"
        " where rn <= 10) group by object_name, wait_class, wait_class_id union all"
        " select 4 t, null s, wait_class v1, wait_class_id v2, count(1) c"
        " from h group by wait_class, wait_class_id union all"
        " select 5 t, null s, event v1, event_id v2, count(1) c"
        " from h group by event, event_id union all"
        " select 7 t, to_char(sample_time, 'hh24:mi:ss') s, null v1, null v2, count(distinct session_id) c"
        " from h group by to_char(sample_time, 'hh24:mi:ss') union all"
        " select 8 t, null s, null v1, null v2, to_number(value) c"
        " from v$parameter where name = 'cpu_count' union all"
        " select 9 t, null s, null v1, null v2, to_number(value) c"
        " from v$parameter where name = 'sessions' order by 1, 4, 2".format(
            " and nvl(wait_class, 'CPU') like :wait_class" if optional_values.get('wait_class', '') else "",
            " and event like :event" if optional_values.get('event', '') else "",
            " and session_id = :session_id" if optional_values.get('session_id', '') else "",
            " and sql_id = :sql_id" if optional_values.get('sql_id', '') else "",
            " and object_name like :object_name" if optional_values.get('object_name', '') else "",
            " and user_id in (select user_id from dba_users"
            " where username like :user_name)" if optional_values.get('user_name', '') else ""),
        {**required_values, **optional_values})
    colors = {
        'Other': '#F06EAA',
        'Application': '#C02800',
        'Configuration': '#5C440B',
        'Administrative': '#717354',
        'Concurrency': '#8B1A00',
        'Commit': '#E46800',
        'Idle': '#FFFFFF',
        'Network': '#9F9371',
        'User I/O': '#004AE7',
        'System I/O': '#0094E7',
        'Scheduler': '#CCFFCC',
        'Queueing': '#C2B79B',
        'CPU': '#00CC00'
    }
    series = {
        k[1]: []
        for k in sorted(set((item[3], item[2]) for item in r if item[0] == 1),
                        key=lambda x: x[0])
    }
    p = deepcopy(app.config['CHART_CONFIG'])
    p['style'].colors = tuple(colors[wait_class] for wait_class in series.keys())
    p['height'] = 10 * 22
    session_count = max(tuple(item[4] for item in r if item[0] == 7) or (0,))
    session_limit = max(tuple(item[4] for item in r if item[0] == 9) or (0,))
    cpu_count = max(tuple(item[4] for item in r if item[0] == 8) or (0,))
    top_activity = StackedLine(**p, legend_at_bottom=True,
                               legend_at_bottom_columns=len(series.keys()),
                               title=f'sessions(max): {session_count}, '
                                     f'sessions(limit): {session_limit}, '
                                     f'cpu cores: {cpu_count};')
    top_activity.fill = True
    top_activity.x_labels = sorted(set(item[1] for item in r if item[0] == 1))
    top_activity.x_labels_major_every = max(-(-len(top_activity.x_labels) // 20), 1)
    top_activity.truncate_label = 5
    top_activity.show_minor_x_labels = False
    for label in top_activity.x_labels:
        for serie in series.keys():
            v = tuple(item[4] for item in r
                      if item[0] == 1 and item[1] == label and item[2] == serie)
            series[serie].append(v[0] if len(v) > 0 else 0)
    for serie in series.keys():
        top_activity.add(serie, series[serie], show_dots=False)
    top_sql = HorizontalStackedBar(**p)
    top_sql.show_legend = False
    top_sql.width = 400
    top_sql.show_x_labels = False
    top_sql.x_labels = sorted(
        set(item[1] for item in r if item[0] == 2),
        key=lambda x: sum(tuple(item[4] for item in r if item[0] == 2 and item[1] == x)))
    top_sql.height = len(top_sql.x_labels) * 22
    series = {
        k[1]: []
        for k in sorted(set((item[3], item[2]) for item in r if item[0] == 2),
                        key=lambda x: x[0])
    }
    for label in top_sql.x_labels:
        for serie in series.keys():
            v = tuple(item[4] for item in r
                      if item[0] == 2 and item[1] == label and item[2] == serie)
            series[serie].append(v[0] if len(v) > 0 else 0)
    for serie in series.keys():  # todo https://github.com/Kozea/pygal/issues/18
        top_sql.add(serie, [
            dict(value=item, color=colors[serie],
                 xlink=dict(href=url_for('get_query', target=target,
                                         query=top_sql.x_labels[i], _external=True),
                            target='_blank'))
            for i, item in enumerate(series[serie])
        ])
    top_objects = HorizontalStackedBar(**p)
    top_objects.show_legend = False
    top_objects.width = 400
    top_objects.show_x_labels = False
    top_objects.x_labels = sorted(
        set(item[1] for item in r if item[0] == 3),
        key=lambda x: sum(tuple(item[4] for item in r if item[0] == 3 and item[1] == x)))
    series = {
        k[1]: []
        for k in sorted(set((item[3], item[2]) for item in r if item[0] == 3),
                        key=lambda x: x[0])
    }
    top_objects.height = len(top_objects.x_labels) * 22
    for label in top_objects.x_labels:
        for serie in series.keys():
            v = tuple(item[4] for item in r
                      if item[0] == 3 and item[1] == label and item[2] == serie)
            series[serie].append(v[0] if len(v) > 0 else 0)
    for serie in series.keys():
        top_objects.add(serie, [
            dict(value=item, color=colors[serie]) for item in series[serie]
        ])
    pie_type = 5 if 'wait_class' in optional_values or 'event' in optional_values else 4
    top_waits = Pie(**p)
    top_waits.show_legend = False
    top_waits.width = 140
    top_waits.inner_radius = 0.5
    labels = tuple(
        k[1] for k in sorted(set((item[3], item[2]) for item in r if item[0] == pie_type),
                             key=lambda x: x[0] if isinstance(x[0], int) else 0))
    for label in labels:
        top_waits.add(label, tuple(
            item[4] for item in r if item[0] == pie_type and item[2] == label)[0])
    top_sessions = HorizontalStackedBar(**p)
    top_sessions.show_legend = False
    top_sessions.width = 300
    top_sessions.show_x_labels = False
    top_sessions.x_labels = sorted(
        set(item[1] for item in r if item[0] == 6),
        key=lambda x: sum(tuple(item[4] for item in r if item[0] == 6 and item[1] == x)))
    top_sessions.height = len(top_sessions.x_labels) * 22
    series = {
        k[1]: []
        for k in sorted(set((item[3], item[2]) for item in r if item[0] == 6),
                        key=lambda x: x[0])
    }
    for label in top_sessions.x_labels:
        for serie in series.keys():
            v = tuple(item[4] for item in r
                      if item[0] == 6 and item[1] == label and item[2] == serie)
            series[serie].append(v[0] if len(v) > 0 else 0)
    for serie in series.keys():
        top_sessions.add(serie, [
            dict(value=item, color=colors[serie],
                 xlink=dict(href=url_for('get_session', target=target,
                                         sid=top_sessions.x_labels[i].split(':')[0],
                                         _external=True),
                            target='_blank'))
            for i, item in enumerate(series[serie])
        ])
    return render_template(
        'top_activity.html',
        top_activity=top_activity.render_data_uri(),
        top_sql=top_sql.render_data_uri() if 'sql_id' not in optional_values else None,
        top_sessions=top_sessions.render_data_uri() if 'session_id' not in optional_values else None,
        top_objects=top_objects.render_data_uri(),
        top_waits=top_waits.render_data_uri() if labels else None)
def get_scan_speed(target, owner, table):
    r = execute(
        target,
        "select owner, table_name from all_tables"
        " where owner = :owner and table_name = :p_table",
        {'owner': owner, 'p_table': table}, 'one')
    if not r:
        abort(404)
    owner_name, table_name = r
    part_list = execute(
        target,
        "select partition_name from all_tab_partitions"
        " where table_owner = :owner and table_name = :p_table"
        " order by partition_name",
        {'owner': owner_name, 'p_table': table_name})
    start_table_scan_time = time()
    scan_results = []
    if part_list:
        r = execute(
            target,
            "select nvl(sp.partition_name, s.partition_name) partition_name"
            " , round(nvl(sum(bytes) / 1024 / 1024, 0)) size_mb"
            " from dba_segments s left join all_tab_subpartitions sp"
            " on sp.table_owner = s.owner and sp.table_name = s.segment_name"
            " and s.partition_name = sp.subpartition_name"
            " where s.owner = :owner and s.segment_name = :p_table"
            " group by nvl(sp.partition_name, s.partition_name) order by 1",
            {'owner': owner_name, 'p_table': table_name})
        part_size = {item[0]: item[1] for item in r}
        for partition in part_list:
            start_part_scan_time = time()
            r = execute(
                target,
                f"select /*+ no_index(t) */ count(*)"
                f" from {owner_name}.{table_name} partition ({partition[0]}) t",
                {}, 'one')
            finish_part_scan_time = time()
            scan_results.append(
                (partition[0], r[0],
                 round(finish_part_scan_time - start_part_scan_time),
                 part_size.get(partition[0], 0)))
    else:
        r = execute(
            target,
            f"select /*+ no_index(t) */ count(*) from {owner_name}.{table_name} t",
            {}, 'one')
        table_size = execute(
            target,
            "select round(nvl(sum(bytes) / 1024 / 1024, 0)) size_mb"
            " from dba_segments where owner = :owner and segment_name = :p_table",
            {'owner': owner_name, 'p_table': table_name}, 'one')
        scan_results.append(
            (table_name, r[0], round(time() - start_table_scan_time), table_size[0]))
    finish_table_scan_time = time()
    output_data = []
    for item in scan_results:
        output_data.append(
            (item[0], item[2], item[1],
             item[1] if item[2] == 0 else round(item[1] / item[2]),
             item[3],
             item[3] if item[2] == 0 else round(item[3] / item[2])))
    if part_list:
        total_row_count = sum(list(zip(*output_data))[2])
        total_scan_time = round(finish_table_scan_time - start_table_scan_time)
        total_size = sum(part_size.values())
        output_data.append(
            ('TOTAL:', total_scan_time, total_row_count,
             total_row_count if total_scan_time == 0 else round(total_row_count / total_scan_time),
             total_size,
             total_size if total_scan_time == 0 else round(total_size / total_scan_time)))
    return render_template(
        'static_list.html',
        text=f'Completed in {((finish_table_scan_time - start_table_scan_time) / 60):.1f} minutes.',
        data=output_data)
def wait_for_status(t):
    if not t.data:
        t.parameters = upper_values(t.parameters)
        t.parameters['status_values'] = dlm_str_to_list(t.parameters['status_values'])
        t.parameters['info_column'] = dlm_str_to_list(t.parameters['info_column'])
        table_columns = get_tab_columns(t.target, t.parameters['owner'], t.parameters['table'])
        for item in ([t.parameters['date_column'], t.parameters['status_column']]
                     + t.parameters['info_column']):
            if item not in table_columns.keys():
                return t.abort(f'{t.parameters["owner"]}.{t.parameters["table"]}.{item} not found.')
        if 'DATE' not in table_columns[t.parameters['date_column']]:
            return t.abort(f'{t.parameters["date_column"]} must be a date type.')
        status_type = table_columns[t.parameters['status_column']]
        if status_type != 'NUMBER' and 'CHAR' not in status_type:
            return t.abort(f'Unsupported type of {t.parameters["status_column"]} (neither number nor char).')
        if status_type == 'NUMBER':
            try:
                t.parameters['status_values'] = [int(v) for v in t.parameters['status_values']]
            except ValueError:
                return t.abort(f'All of the status values ({t.parameters["status_values"]}) must be numbers.')
        t.parameters['info_column'] = {k: table_columns[k] for k in t.parameters['info_column']}
        filter_column_type = ''
        if t.optional.get('filter_column', False):
            if t.optional['filter_column'] not in table_columns.keys():
                return t.abort(f'{t.parameters["owner"]}.{t.parameters["table"]}'
                               f'.{t.optional["filter_column"]} not found.')
            filter_column_type = table_columns[t.optional['filter_column']]
            if filter_column_type != 'NUMBER' and 'CHAR' not in filter_column_type:
                return t.abort(f'Unsupported type of {t.optional["filter_column"]} (neither number nor char).')
            if not t.optional.get('filter_value', False):
                return t.abort('Filter value is not set.')
            if filter_column_type == 'NUMBER':
                try:
                    t.optional['filter_value'] = int(t.optional['filter_value'])
                except ValueError:
                    return t.abort('Filter value must be a number.')
        t.data = {'status_values': t.parameters['status_values'],
                  'status_type': status_type,
                  'start_date': t.create_date,
                  'filter_column_type': filter_column_type}
    end_date = datetime.now()
    p = {str(k): v for k, v in enumerate(t.data['status_values'], start=1)}
    p['start_date'] = t.data['start_date']
    p['end_date'] = end_date
    p['filter_value'] = t.optional.get('filter_value', '1')
    info_column_list = []
    for c, ct in t.parameters['info_column'].items():
        if ct == 'CLOB':
            info_column_list.append(f"cast(dbms_lob.substr({c}, 255) as varchar2(255))")
        elif 'CHAR' in ct:
            info_column_list.append(f"substr(to_char({c}), 0, 255)")
        else:
            info_column_list.append(f"to_char({c})")
    info_column_sql_text = " || ' ' || ".join(info_column_list)
    filter_column = t.optional.get('filter_column', "'1'")
    r = execute(t.target,
                f"select to_char({t.parameters['date_column']}, 'hh24:mi:ss')"
                f", {info_column_sql_text}"
                f", {t.parameters['status_column']}"
                f" from {t.parameters['owner']}.{t.parameters['table']}"
                f" where {t.parameters['date_column']} >= :start_date"
                f" and {t.parameters['date_column']} < :end_date"
                f" and {'upper' if t.data['status_type'] != 'NUMBER' else ''}"
                f"({t.parameters['status_column']})"
                f" in ({':' + ', :'.join(str(i) for i in range(1, len(t.data['status_values']) + 1))})"
                f" and {filter_column} = :filter_value",
                p, 'many', False)
    t.data['start_date'] = end_date
    # escape angle brackets so the values render safely in the HTML message
    return t.get_message(
        r, lambda o, i: f'{i[0]} {i[1]}'.replace('<', '&lt;').replace('>', '&gt;'),
        lambda o: f'{o.parameters["table"]} ({o.target})')
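
# wait_for_status() builds its SQL dynamically from task parameters, so the
# expected shape of those parameters is easiest to see with an example. The
# values below are hypothetical, and the ';' delimiter is only a guess at what
# dlm_str_to_list() accepts; the key names match the code above.
EXAMPLE_WAIT_FOR_STATUS_PARAMETERS = {
    'owner': 'APP',                     # schema containing the polled table
    'table': 'BATCH_RUNS',              # table to watch
    'date_column': 'FINISH_DATE',       # must be a DATE column
    'status_column': 'STATUS',          # NUMBER or CHAR-typed column
    'status_values': 'FAILED;ABORTED',  # delimited list, compared case-insensitively
    'info_column': 'RUN_ID;STEP_NAME',  # delimited list of columns shown in the message
}
EXAMPLE_WAIT_FOR_STATUS_OPTIONAL = {
    'filter_column': 'RUN_TYPE',        # optional extra equality filter
    'filter_value': 'NIGHTLY',
}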