def common_query(conn_params, query_name, get_data=None):
    '''
    Run queries that need no dynamic generation.

    Queries here are already stored and only need to be executed on the
    database selected.

    conn_params: connection description dict; 'dialect' ('postgresql' or
        'mysql') picks the stored-query catalogue, and 'db' may be
        rewritten to the database named in get_data.
    query_name: key of a stored query (must be in the per-dialect
        *_redundant_queries tuple below to be run).
    get_data: a django QueryDict (or plain dict); only its optional 'db'
        key is consulted.

    Returns the sa.full_query() result for recognised dialect/query
    combinations; falls through (returns None) otherwise.
    '''
    if get_data is None:  # avoid a shared mutable default argument
        get_data = {}

    pgsql_redundant_queries = ('template_list', 'group_list', 'user_list',
        'db_list', 'schema_list', 'db_rpr', )
    mysql_redundant_queries = ('db_list', 'charset_list', 'supported_engines',
        'db_rpr', )

    if conn_params['dialect'] == 'postgresql' and query_name in pgsql_redundant_queries:
        # update connection db if it is different from login db
        conn_params['db'] = get_data.get('db') if get_data.get('db') else conn_params['db']
        # make query changes and mini translations
        if query_name == 'schema_list':
            if hasattr(settings, 'TT_SHOW_SYSTEM_CATALOGS'):
                query_name = 'full_schema_list' if settings.TT_SHOW_SYSTEM_CATALOGS == True else "user_schema_list"
            else:
                query_name = "user_schema_list"  # default
        r = sa.full_query(conn_params,
            sql.stored_query(query_name, conn_params['dialect']))
        return r
    elif conn_params['dialect'] == 'mysql' and query_name in mysql_redundant_queries:
        # this kind of queries require no special attention
        return sa.full_query(conn_params,
            sql.stored_query(query_name, conn_params['dialect']))
def common_query(conn_params, query_name, get_data=None):
    '''
    Run queries that need no dynamic generation, returning only the rows.

    Queries here are already stored and only need to be executed on the
    database selected.

    conn_params: connection description dict; 'dialect' picks the stored
        query catalogue and 'db' may be rewritten from get_data.
    query_name: key of a stored query (must appear in the per-dialect
        *_redundant_queries tuple below to be run).
    get_data: a django QueryDict (or plain dict); only its optional 'db'
        key is consulted.

    Returns result['rows'] of the executed query for recognised
    dialect/query combinations; falls through (returns None) otherwise.
    '''
    if get_data is None:  # avoid a shared mutable default argument
        get_data = {}

    pgsql_redundant_queries = ('template_list', 'group_list', 'user_list',
        'db_list', 'schema_list')
    mysql_redundant_queries = ('db_list', 'charset_list', 'supported_engines')

    if conn_params['dialect'] == 'postgresql' and query_name in pgsql_redundant_queries:
        # this kind of queries require no special attention
        if query_name == 'schema_list':
            if hasattr(settings, 'TT_SHOW_SYSTEM_CATALOGS'):
                query_name = 'full_schema_list' if settings.TT_SHOW_SYSTEM_CATALOGS == True else "user_schema_list"
            else:
                query_name = "user_schema_list"  # default
        # BUG FIX: the original used '==' here (a no-op comparison), so
        # the db requested in get_data was never actually selected.
        conn_params['db'] = get_data.get('db') if get_data.get('db') else conn_params['db']
        r = sql.full_query(conn_params,
            sql.stored_query(query_name, conn_params['dialect']))
        return r['rows']
    elif conn_params['dialect'] == 'mysql':
        if query_name in mysql_redundant_queries:
            # this kind of queries require no special attention
            return sql.full_query(conn_params,
                sql.stored_query(query_name, conn_params['dialect']))['rows']
def get_home_variables(request):
    '''
    Collect the server/session variables displayed on the home page.

    request: the django request object; only used to recover the stored
        connection parameters via fns.get_conn_params().

    Returns a dict containing 'user', 'host' and 'dialect', plus
    'version' (postgresql) or every server variable as key/value pairs
    (mysql).  A failed mysql query is routed through fns.http_500().
    '''
    p = fns.get_conn_params(request)
    variables = {'user': p['username'], 'host': p['host']}
    variables['dialect'] = 'PostgreSQL' if p['dialect'] == 'postgresql' else 'MySQL'
    result = sa.full_query(p, sql.stored_query('variables', p['dialect']))
    if p['dialect'] == 'postgresql':
        # NOTE(review): unlike the mysql branch, no error check is done
        # here — assumes the stored 'variables' query cannot fail; confirm.
        variables['version'] = result['rows'][0]
        return variables
    elif p['dialect'] == 'mysql':
        if isinstance(result, dict):
            # rows are (variable_name, value) pairs; fold them in directly
            # instead of the original index-based loop
            variables.update(dict((row[0], row[1]) for row in result['rows']))
            return variables
        else:
            return fns.http_500(result)
def rpr_query(conn_params, query_type, get_data=None, post_data=None):
    '''
    Run queries that have to be generated on the fly.

    Most queries depend on get_data, while some few depend on post_data.
    get_data and post_data are gotten from request.GET and request.POST
    or form.cleaned_data.

    some functions in this file should have been (and previously have
    been) in this function. but the contents of a function are not
    indexed by IDEs and that makes debugging difficult

    conn_params: connection description dict; 'dialect' and 'db' are
        read, and 'db' may be rewritten to the database in get_data.
    query_type: selects the query family handled by the branches below.

    Returns an HttpResponse (JSON success state) for the no-return
    queries, the sa.full_query() result for data queries, or
    fns.http_500(...) for unsupported combinations.
    '''
    # None defaults avoid the shared-mutable-default-argument pitfall
    if get_data is None:
        get_data = {}
    if post_data is None:
        post_data = {}

    # common queries that return success state as a dict only
    no_return_queries = ('create_user', 'drop_user', 'create_db', 'create_table',
        'drop_table', 'empty_table', 'delete_row', 'create_column', 'drop_column',
        'drop_db', 'drop_sequence', 'reset_sequence', 'drop_constraint', )
    # queries psycopg2 refuses to run inside a transaction block
    psycopg2_queries = ('drop_db', )

    if query_type in no_return_queries:
        conn_params['db'] = get_data['db'] if 'db' in get_data else conn_params['db']
        query_data = {}
        query_data.update(get_data, **post_data)
        queries = sql.generate_query(query_type, conn_params['dialect'], query_data)
        if conn_params['dialect'] == 'postgresql' and query_type in psycopg2_queries:
            # this queries needs to be run outside a transaction block;
            # SA execute functions run all their queries inside one
            result = sa.execute_outside_transaction(conn_params, queries)
        else:
            result = sa.short_query(conn_params, queries)
        return HttpResponse(json.dumps(result))

    # specific queries with implementations similar to both dialects
    elif query_type in ('indexes', 'primary_keys', 'foreign_key_relation'):
        if conn_params['dialect'] == 'postgresql' and query_type == 'indexes':
            return get_constraints(conn_params, query_type, get_data)
        r = sa.full_query(conn_params,
            sql.generate_query(query_type, conn_params['dialect'], get_data)[0])
        return r

    elif query_type in ('get_single_row',):
        sub_q_data = {'tbl': get_data['tbl'], 'db': get_data['db']}
        if 'schm' in get_data:
            sub_q_data['schm'] = get_data['schm']
        # generate the WHERE statement from the posted column=value pairs
        # (items() preserves the keys()/values() correspondence of the
        # original index-based loop)
        sub_q_data['where'] = ' AND '.join(
            key.strip() + '=' + fns.quote(val.strip())
            for key, val in post_data.items()
        )
        # retrieve and run queries
        conn_params['db'] = get_data['db']
        q = sql.generate_query(query_type, conn_params['dialect'], sub_q_data)
        r = sa.full_query(conn_params, q[0])
        return r

    elif query_type in ('table_rpr', 'table_structure', 'raw_table_structure', 'seqs_rpr'):
        sub_q_data = {'db': get_data['db'], }
        if 'tbl' in get_data:
            sub_q_data['tbl'] = get_data['tbl']
        if 'schm' in get_data:
            sub_q_data['schm'] = get_data['schm']
        # make query: pg 'raw_table_structure' reuses the plain
        # 'table_structure' stored query
        if conn_params['dialect'] == 'postgresql' and query_type == 'raw_table_structure':
            q = 'table_structure'
        else:
            q = query_type
        r = sa.full_query(conn_params,
            sql.generate_query(q, conn_params['dialect'], sub_q_data)[0])

        # further needed processing: assemble a printable column-type
        # declaration for every postgresql row
        if conn_params['dialect'] == 'postgresql' and 'table_structure' in query_type:
            rwz = []
            for tuple_row in r['rows']:
                row = list(tuple_row)
                data_type_str = row[1]
                _l = [data_type_str]
                datetime_precision = row[7]
                if data_type_str in ('bit', 'bit varying', 'character varying',
                        'character') and type(row[4]) is int:
                    _l.append('({0})'.format(row[4]))
                elif data_type_str in ('numeric', 'decimal'):
                    # NOTE(review): assumes precision/scale are never None
                    # for numeric/decimal columns — '%d' would raise otherwise
                    numeric_precision, numeric_scale = row[5], row[6]
                    _l.append('(%d,%d)' % (numeric_precision, numeric_scale))
                # interval types should need some revising
                elif data_type_str in ('interval', ):
                    _l.append('(%d)' % datetime_precision)
                # time and timestamps have a somewhat different declarative syntax
                # BUG FIX: was 'datetime_precision is int' (value compared
                # to the type object — always False), which made this
                # branch unreachable
                elif type(datetime_precision) is int and (
                        data_type_str.startswith('time')
                        or data_type_str.startswith('timestamp')):
                    # the length of 'timestamp' and 'time' respectively
                    _in = 9 if data_type_str.startswith('timestamp') else 4
                    _l = [data_type_str[:_in], '(%d)' % datetime_precision,
                        data_type_str[_in:]]
                # append the current row to rwz
                if query_type == 'table_structure':
                    rwz.append([row[0], "".join(_l), row[2], row[3]])
                elif query_type == 'raw_table_structure':
                    row.append("".join(_l))
                    rwz.append(row)
            # change r['rows'] and r['columns'] to match
            r['rows'] = rwz
            if query_type == 'table_structure':
                r['columns'] = r['columns'][:4]
            elif query_type == 'raw_table_structure':
                r['columns'].append('column_type')
        return r

    # queries with dissimilar implementations
    elif conn_params['dialect'] == 'mysql':
        if query_type == 'describe_databases':
            conn_params['db'] = 'INFORMATION_SCHEMA'
            query = sql.stored_query(query_type, conn_params['dialect'])
            return sa.full_query(conn_params, query)
        else:
            return fns.http_500('query not yet implemented!')
    else:
        return fns.http_500('dialect not supported!')
def rpr_query(conn_params, query_type, get_data=None, post_data=None):
    '''
    Run queries that have to be generated on the fly.

    Most queries depend on get_data, while some few depend on post_data.
    get_data and post_data are gotten from request.GET and request.POST
    or form.cleaned_data.

    conn_params: connection description dict; 'dialect' and 'db' are
        read, and 'db' may be rewritten to the database in get_data.
    query_type: selects the query family handled by the branches below.

    Returns an HttpResponse (JSON success state) for the no-return
    queries, the sql.full_query() result for data queries, or
    fns.http_500(...) for failures / unsupported combinations.
    '''
    # None defaults avoid the shared-mutable-default-argument pitfall
    if get_data is None:
        get_data = {}
    if post_data is None:
        post_data = {}

    # common queries that return success state as a dict only
    no_return_queries = ('create_user', 'drop_user', 'create_db', 'create_table',
        'drop_table', 'empty_table', 'delete_row', 'create_column',
        'delete_column', )

    if query_type in no_return_queries:
        conn_params['db'] = get_data['db'] if 'db' in get_data else conn_params['db']
        query_data = {}
        query_data.update(get_data, **post_data)
        q = sql.generate_query(query_type, conn_params['dialect'], query_data)
        result = sql.short_query(conn_params, q)
        return HttpResponse(json.dumps(result))

    # specific queries with implementations similar to both dialects
    elif query_type == 'user_rpr':
        if conn_params['dialect'] == 'mysql':
            conn_params['db'] = 'mysql'
        r = sql.full_query(conn_params,
            sql.stored_query(get_data['query'], conn_params['dialect']))
        if isinstance(r, dict):
            # BUG FIX: the original had a bare 'r' here (no 'return'),
            # so successful queries returned None
            return r
        else:
            return fns.http_500(r)

    elif query_type in ('indexes', 'primary_keys', 'foreign_key_relation'):
        if conn_params['dialect'] == 'postgresql':
            conn_params['db'] = get_data['db']
        r = sql.full_query(conn_params,
            sql.generate_query(query_type, conn_params['dialect'], get_data)[0])
        return r

    elif query_type in ('get_single_row',):
        sub_q_data = {'tbl': get_data['tbl'], 'db': get_data['db']}
        if 'schm' in get_data:
            sub_q_data['schm'] = get_data['schm']
        # generate the WHERE statement from the posted column=value pairs.
        # SECURITY/CONSISTENCY FIX: values come straight from request.POST
        # and were concatenated unescaped; quote them with fns.quote() the
        # same way the sibling implementation of this branch does.
        sub_q_data['where'] = ' AND '.join(
            key.strip() + '=' + fns.quote(val.strip())
            for key, val in post_data.items()
        )
        # retrieve and run queries
        conn_params['db'] = get_data['db']
        q = sql.generate_query(query_type, conn_params['dialect'], sub_q_data)
        r = sql.full_query(conn_params, q[0])
        return r

    elif query_type in ('table_rpr', 'table_structure', 'raw_table_structure'):
        conn_params['db'] = get_data['db']
        sub_q_data = {'db': get_data['db'], }
        if 'tbl' in get_data:
            sub_q_data['tbl'] = get_data['tbl']
        if 'schm' in get_data:
            sub_q_data['schm'] = get_data['schm']
        # make query: pg 'raw_table_structure' reuses the plain
        # 'table_structure' stored query
        if conn_params['dialect'] == 'postgresql' and query_type == 'raw_table_structure':
            q = 'table_structure'
        else:
            q = query_type
        r = sql.full_query(conn_params,
            sql.generate_query(q, conn_params['dialect'], sub_q_data)[0])

        # further needed processing: assemble a printable column-type
        # declaration for every postgresql row
        if conn_params['dialect'] == 'postgresql' and 'table_structure' in query_type:
            rwz = []
            for tuple_row in r['rows']:
                row = list(tuple_row)
                _l = [row[1]]
                if row[1] in ('bit', 'bit varying', 'character varying',
                        'character') and type(row[4]) is int:
                    _l.append('({0})'.format(row[4]))
                # BUG FIX: the original condition read
                # 'A and B or C' which, by precedence, fired for ANY type
                # whenever row[6] was an int; the parentheses restore the
                # intended 'A and (B or C)'
                elif row[1] in ('numeric', 'decimal') and (
                        type(row[5]) is int or type(row[6]) is int):
                    _l.append('({0},{1})'.format(row[5], row[6]))
                elif row[1] in ('interval', 'time with time zone',
                        'time without time zone', 'timestamp with time zone',
                        'timestamp without time zone') and type(row[7]) is int:
                    _l.append('({0})'.format(row[7]))
                # append the current row to rwz
                if query_type == 'table_structure':
                    rwz.append([row[0], "".join(_l), row[2], row[3]])
                elif query_type == 'raw_table_structure':
                    row.append("".join(_l))
                    rwz.append(row)
            # change r['rows'] and r['columns'] to match
            r['rows'] = rwz
            if query_type == 'table_structure':
                r['columns'] = r['columns'][:4]
            elif query_type == 'raw_table_structure':
                r['columns'].append('column_type')
        return r

    elif query_type == 'browse_table':
        # initializations
        sub_q_data = {'tbl': get_data['tbl'], 'db': get_data['db']}
        sub_q_data['offset'] = get_data['offset'] if 'offset' in get_data else 0
        sub_q_data['limit'] = (get_data['limit'] if 'limit' in get_data
            else getattr(settings, 'TT_MAX_ROW_COUNT', 100))
        for item in ['schm', 'sort_key', 'sort_dir']:
            if item in get_data:
                sub_q_data[item] = get_data[item]
        # retrieve and run queries (primary keys via a recursive call)
        conn_params['db'] = get_data['db']
        keys = rpr_query(conn_params, 'primary_keys', sub_q_data)
        count = sql.full_query(conn_params,
            sql.generate_query('count_rows', conn_params['dialect'], sub_q_data)[0],
        )['rows']
        r = sql.full_query(conn_params,
            sql.generate_query(query_type, conn_params['dialect'], sub_q_data)[0])
        # format and return data
        if isinstance(r, dict):
            r.update({'total_count': count[0][0], 'offset': sub_q_data['offset'],
                'limit': sub_q_data['limit'], 'keys': keys})
            return r
        else:
            return fns.http_500(r)

    # queries that just ask, format and return result
    elif query_type in ('existing_tables',):
        query_data = {'db': get_data['db'], }
        if 'tbl' in get_data:
            query_data['tbl'] = get_data['tbl']
        if conn_params['dialect'] == 'postgresql':
            # NOTE(review): 'schm' is assumed present for postgresql here
            query_data['schm'] = get_data['schm']
        conn_params['db'] = query_data['db']
        q = sql.generate_query(query_type, conn_params['dialect'], query_data)
        r = sql.full_query(conn_params, q[0])
        return r['rows']

    # queries with dissimilar implementations
    elif conn_params['dialect'] == 'postgresql':
        return fns.http_500('query ({query_type}) not implemented!'.format(query_type=query_type))
    elif conn_params['dialect'] == 'mysql':
        if query_type == 'describe_databases':
            conn_params['db'] = 'INFORMATION_SCHEMA'
            query = sql.stored_query(query_type, conn_params['dialect'])
            return sql.full_query(conn_params, query)
        else:
            return fns.http_500('query not yet implemented!')
    else:
        return fns.http_500('dialect not supported!')