def execute(query, headers=0, credentials=None):
    """Execute *query* locally by mirroring the sheet into in-memory SQLite.

    Fetches the full sheet (``SELECT *``), loads it into a temporary
    ``:memory:`` SQLite database, and runs the original query there.

    Args:
        query: the SQL statement to run; its ``FROM`` clause must contain
            the sheet URL (extracted via ``extract_url``).
        headers: number of header rows in the sheet, forwarded to
            ``get_url`` (default 0).
        credentials: optional credentials forwarded to ``run_query``.

    Returns:
        A ``(results, description)`` tuple: all fetched rows plus the
        DB-API cursor description.

    Raises:
        ProgrammingError: if no URL can be extracted from the query.
    """
    # fetch all the data
    from_ = extract_url(query)
    if not from_:
        raise ProgrammingError('Invalid query: {query}'.format(query=query))
    baseurl = get_url(from_, headers)
    payload = run_query(baseurl, 'SELECT *', credentials)

    # create table — close the connection when done (the original leaked it,
    # both on success and on any exception raised below)
    conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
    try:
        cursor = conn.cursor()
        create_table(cursor, from_, payload)
        insert_into(cursor, from_, payload)
        conn.commit()

        # run query in SQLite instead
        logger.info('SQLite query: {}'.format(query))
        results = cursor.execute(query).fetchall()
        description = cursor.description
    finally:
        conn.close()

    return results, description
def execute(query, headers=0, credentials=None):
    """Translate *query* into the Google Charts dialect and run it remotely.

    Args:
        query: the SQL statement to run; its ``FROM`` clause must contain
            a ``docs.google.com`` spreadsheet URL.
        headers: number of header rows in the sheet, forwarded to
            ``get_url`` (default 0).
        credentials: optional credentials forwarded to ``get_column_map``
            and ``run_query``.

    Returns:
        A ``(results, description)`` tuple of converted rows plus a
        DB-API-style description.

    Raises:
        ProgrammingError: on a SQL syntax error or a gsheet error response.
        InterfaceError: if the URL is not a Google spreadsheet.
    """
    # parse the SQL up front so syntax errors surface immediately
    try:
        tree = parse_sql(query)
    except pyparsing.ParseException as e:
        raise ProgrammingError(format_moz_error(query, e))

    # remember the user's column aliases; the translator strips them
    requested_aliases = extract_column_aliases(tree)

    # resolve the sheet URL from the `FROM` clause
    from_ = extract_url(query)
    baseurl = get_url(from_, headers)

    # only Google spreadsheets are supported
    if parse.urlparse(baseurl).netloc != 'docs.google.com':
        raise InterfaceError('Invalid URL, must be a docs.google.com URL!')

    # map between labels and ids, eg, `{ 'country': 'A' }`
    column_map = get_column_map(baseurl, credentials)

    # give every matching processor a chance to rewrite the parse tree
    active_processors = []
    for processor_cls in processors:
        if processor_cls.match(tree):
            instance = processor_cls()
            tree = instance.pre_process(tree, column_map)
            active_processors.append(instance)
    aliases_after_preprocess = extract_column_aliases(tree)

    # swap column labels for ids and drop the aliases
    translated_query = translate(tree, column_map)
    logger.info('Original query: {}'.format(query))
    logger.info('Translated query: {}'.format(translated_query))

    # execute remotely
    payload = run_query(baseurl, translated_query, credentials)
    if payload['status'] == 'error':
        raise ProgrammingError(
            format_gsheet_error(query, translated_query, payload['errors']))

    # let each processor undo its rewrite on the result payload
    for instance in active_processors:
        payload = instance.post_process(payload, aliases_after_preprocess)

    # restore the aliases the user originally asked for
    cols = payload['table']['cols']
    for alias, col in zip(requested_aliases, cols):
        if alias is not None:
            col['label'] = alias

    description = get_description_from_payload(payload)

    # convert rows to proper type (datetime, eg)
    results = convert_rows(cols, payload['table']['rows'])

    return results, description