Example #1
 def test_dataframe_timezone(self):
     tz = psycopg2.tz.FixedOffsetTimezone(offset=60, name=None)
     data = [
         (datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
         (datetime.datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=tz),),
     ]
     df = dataframe.SupersetDataFrame(
         pd.DataFrame(data=list(data), columns=['data']))
     data = df.data
     self.assertDictEqual(
         data[0],
         {
             'data': pd.Timestamp('2017-11-18 21:53:00.219225+0100', tz=tz),
         },
     )
     self.assertDictEqual(
         data[1],
         {
             'data': pd.Timestamp('2017-11-18 22:06:30.061810+0100', tz=tz),
         },
     )
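Note: FixedOffsetTimezone takes its offset in minutes, so offset=60 is UTC+01:00, which is why the expected Timestamps carry a +0100 suffix. For readers without psycopg2, a standard-library equivalent looks like this (an illustrative sketch, not part of the test):

import datetime

# Same fixed UTC+01:00 zone as psycopg2.tz.FixedOffsetTimezone(offset=60)
tz = datetime.timezone(datetime.timedelta(minutes=60))
dt = datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz)
print(dt.isoformat())  # 2017-11-18T21:53:00.219225+01:00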
Example #2
    def test_results_msgpack_deserialization(self):
        use_new_deserialization = True
        data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
        cursor_descr = (
            ("a", "string"),
            ("b", "int"),
            ("c", "float"),
            ("d", "datetime"),
        )
        db_engine_spec = BaseEngineSpec()
        cdf = dataframe.SupersetDataFrame(data, cursor_descr, db_engine_spec)
        query = {
            "database_id": 1,
            "sql": "SELECT * FROM birth_names LIMIT 100",
            "status": utils.QueryStatus.PENDING,
        }
        (
            serialized_data,
            selected_columns,
            all_columns,
            expanded_columns,
        ) = sql_lab._serialize_and_expand_data(
            cdf, db_engine_spec, use_new_deserialization
        )
        payload = {
            "query_id": 1,
            "status": utils.QueryStatus.SUCCESS,
            "state": utils.QueryStatus.SUCCESS,
            "data": serialized_data,
            "columns": all_columns,
            "selected_columns": selected_columns,
            "expanded_columns": expanded_columns,
            "query": query,
        }

        serialized_payload = sql_lab._serialize_payload(
            payload, use_new_deserialization
        )
        self.assertIsInstance(serialized_payload, bytes)

        with mock.patch.object(
            db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
        ) as expand_data:
            query_mock = mock.Mock()
            query_mock.database.db_engine_spec.expand_data = expand_data

            deserialized_payload = views._deserialize_results_payload(
                serialized_payload, query_mock, use_new_deserialization
            )
            payload["data"] = dataframe.SupersetDataFrame.format_data(cdf.raw_df)

            self.assertDictEqual(deserialized_payload, payload)
            expand_data.assert_called_once()
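With use_new_deserialization=True, the test asserts that _serialize_payload returns bytes. The msgpack round-trip it relies on reduces to the following sketch (simplified; Superset's actual helper also handles the pyarrow-encoded dataframe):

import msgpack

payload = {"query_id": 1, "status": "success"}
serialized = msgpack.packb(payload)  # bytes, as assertIsInstance(..., bytes) expects
assert isinstance(serialized, bytes)
assert msgpack.unpackb(serialized, raw=False) == payload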
Example #3
 def test_dataframe_timezone(self):
     tz = psycopg2.tz.FixedOffsetTimezone(offset=60, name=None)
     data = [
         (datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
         (datetime.datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=tz),),
     ]
     df = dataframe.SupersetDataFrame(list(data), [["data"]], BaseEngineSpec)
     data = df.data
     self.assertDictEqual(
         data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
     )
     self.assertDictEqual(
         data[1], {"data": pd.Timestamp("2017-11-18 22:06:30.061810+0100", tz=tz)}
     )
Example #4
 def test_mssql_engine_spec_odbc(self):
     # Test for case when pyodbc.Row is returned (msodbc driver)
     data = [Row((1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000))),
             Row((2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)))]
     df = dataframe.SupersetDataFrame(
         list(data),
         [['col1'], ['col2'], ['col3']],
         MssqlEngineSpec)
     data = df.data
     self.assertEqual(len(data), 2)
     self.assertEqual(data[0],
                      {'col1': 1,
                       'col2': 1,
                       'col3': pd.Timestamp('2017-10-19 23:39:16.660000')})
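Row here plays the part of pyodbc.Row; the snippet only needs it to be an iterable of column values. A minimal stand-in (hypothetical, the real test suite defines its own) could be:

class Row:
    """Bare-bones stand-in for pyodbc.Row: an iterable wrapper over a tuple."""

    def __init__(self, values):
        self.values = values

    def __iter__(self):
        return iter(self.values)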
Example #5
 def test_mssql_engine_spec_pymssql(self):
     # Test for case when tuple is returned (pymssql)
     data = [
         (1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
         (2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
     ]
     df = dataframe.SupersetDataFrame(
         list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
     )
     data = df.data
     self.assertEqual(len(data), 2)
     self.assertEqual(
         data[0],
         {"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
     )
Example #6
    def test_get_columns(self):
        main_db = self.get_main_database(db.session)
        df = main_db.get_df("SELECT * FROM multiformat_time_series", None)
        cdf = dataframe.SupersetDataFrame(df)

        # Key the columns by name so the comparison ignores ordering
        cols = self.dictify_list_of_dicts(cdf.columns, 'name')

        if main_db.sqlalchemy_uri.startswith('sqlite'):
            self.assertEqual(self.dictify_list_of_dicts([
                {'is_date': True, 'type': 'STRING', 'name': 'ds',
                    'is_dim': False},
                {'is_date': True, 'type': 'STRING', 'name': 'ds2',
                    'is_dim': False},
                {'agg': 'sum', 'is_date': False, 'type': 'INT',
                    'name': 'epoch_ms', 'is_dim': False},
                {'agg': 'sum', 'is_date': False, 'type': 'INT',
                    'name': 'epoch_s', 'is_dim': False},
                {'is_date': True, 'type': 'STRING', 'name': 'string0',
                    'is_dim': False},
                {'is_date': False, 'type': 'STRING',
                    'name': 'string1', 'is_dim': True},
                {'is_date': True, 'type': 'STRING', 'name': 'string2',
                    'is_dim': False},
                {'is_date': False, 'type': 'STRING',
                    'name': 'string3', 'is_dim': True}], 'name'),
                cols,
            )
        else:
            self.assertEqual(self.dictify_list_of_dicts([
                {'is_date': True, 'type': 'DATETIME', 'name': 'ds',
                    'is_dim': False},
                {'is_date': True, 'type': 'DATETIME',
                    'name': 'ds2', 'is_dim': False},
                {'agg': 'sum', 'is_date': False, 'type': 'INT',
                    'name': 'epoch_ms', 'is_dim': False},
                {'agg': 'sum', 'is_date': False, 'type': 'INT',
                    'name': 'epoch_s', 'is_dim': False},
                {'is_date': True, 'type': 'STRING', 'name': 'string0',
                    'is_dim': False},
                {'is_date': False, 'type': 'STRING',
                    'name': 'string1', 'is_dim': True},
                {'is_date': True, 'type': 'STRING', 'name': 'string2',
                    'is_dim': False},
                {'is_date': False, 'type': 'STRING',
                    'name': 'string3', 'is_dim': True}], 'name'),
                cols,
            )
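dictify_list_of_dicts keys each column dict by one of its fields so the expected and actual lists can be compared without depending on order. A plausible sketch of the helper (the real one lives in the test base class and may differ):

def dictify_list_of_dicts(rows, key):
    # [{'name': 'ds', ...}, ...] -> {'ds': {'name': 'ds', ...}, ...}
    return {row[key]: row for row in rows}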
Example #7
def convert_results_to_df(column_names, data):
    """Convert raw query results to a DataFrame."""
    column_names = dedup(column_names)

    # check whether the result set has any nested dict columns
    if data:
        first_row = data[0]
        has_dict_col = any(isinstance(c, dict) for c in first_row)
        df_data = list(data) if has_dict_col else np.array(data, dtype=object)
    else:
        df_data = []

    cdf = dataframe.SupersetDataFrame(
        pd.DataFrame(df_data, columns=column_names))

    return cdf
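A hypothetical call, with invented column names and rows, to show the expected input shapes:

rows = [(1, "alice"), (2, "bob")]
cdf = convert_results_to_df(["id", "name"], rows)
# cdf wraps a two-column pandas DataFrame; cdf.data yields one dict per row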
Example #8
    def test_results_default_deserialization(self):
        use_new_deserialization = False
        data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
        cursor_descr = (
            ("a", "string"),
            ("b", "int"),
            ("c", "float"),
            ("d", "datetime"),
        )
        db_engine_spec = BaseEngineSpec()
        cdf = dataframe.SupersetDataFrame(data, cursor_descr, db_engine_spec)
        query = {
            "database_id": 1,
            "sql": "SELECT * FROM birth_names LIMIT 100",
            "status": utils.QueryStatus.PENDING,
        }
        (
            serialized_data,
            selected_columns,
            all_columns,
            expanded_columns,
        ) = sql_lab._serialize_and_expand_data(
            cdf, db_engine_spec, use_new_deserialization
        )
        payload = {
            "query_id": 1,
            "status": utils.QueryStatus.SUCCESS,
            "state": utils.QueryStatus.SUCCESS,
            "data": serialized_data,
            "columns": all_columns,
            "selected_columns": selected_columns,
            "expanded_columns": expanded_columns,
            "query": query,
        }

        serialized_payload = sql_lab._serialize_payload(
            payload, use_new_deserialization
        )
        self.assertIsInstance(serialized_payload, str)

        query_mock = mock.Mock()
        deserialized_payload = views._deserialize_results_payload(
            serialized_payload, query_mock, use_new_deserialization
        )

        self.assertDictEqual(deserialized_payload, payload)
        query_mock.assert_not_called()
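With use_new_deserialization=False, the payload is serialized to a plain JSON string, so the round-trip reduces to the sketch below (simplified; the real helper uses Superset's JSON encoder to handle datetimes):

import json

payload = {"query_id": 1, "status": "success"}
serialized = json.dumps(payload)
assert isinstance(serialized, str)  # as the test asserts
assert json.loads(serialized) == payload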
Example #9
def get_sql_results(self, query_id, return_results=True, store_results=False):
    """Executes the sql query returns the results."""
    if not self.request.called_directly:
        engine = sqlalchemy.create_engine(
            app.config.get('SQLALCHEMY_DATABASE_URI'), poolclass=NullPool)
        session_class = sessionmaker()
        session_class.configure(bind=engine)
        session = session_class()
    else:
        session = db.session()
        session.commit()  # HACK
    try:
        query = session.query(models.Query).filter_by(id=query_id).one()
    except Exception as e:
        logging.error("Query with id `{}` could not be retrieved".format(query_id))
        logging.error("Sleeping for a sec and retrying...")
        # Nasty hack to get around a race condition where the worker
        # cannot find the query it's supposed to run
        sleep(1)
        query = session.query(models.Query).filter_by(id=query_id).one()

    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()

    def handle_error(msg):
        """Local method handling error while processing the SQL"""
        query.error_message = msg
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        session.commit()
        raise Exception(query.error_message)

    if store_results and not results_backend:
        handle_error("Results backend isn't configured.")

    # Limit enforced only for retrieving the data, not for the CTA queries.
    superset_query = SupersetQuery(query.sql)
    executed_sql = superset_query.stripped()
    if not superset_query.is_select() and not database.allow_dml:
        handle_error(
            "Only `SELECT` statements are allowed against this database")
    if query.select_as_cta:
        if not superset_query.is_select():
            handle_error(
                "Only `SELECT` statements can be used with the CREATE TABLE "
                "feature.")
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = 'tmp_{}_table_{}'.format(
                query.user_id,
                start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
        executed_sql = superset_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    elif (
            query.limit and superset_query.is_select() and
            db_engine_spec.limit_method == LimitMethod.WRAP_SQL):
        executed_sql = database.wrap_sql_limit(executed_sql, query.limit)
        query.limit_used = True
    try:
        template_processor = get_template_processor(
            database=database, query=query)
        executed_sql = template_processor.process_template(executed_sql)
        executed_sql = db_engine_spec.sql_preprocessor(executed_sql)
    except Exception as e:
        logging.exception(e)
        msg = "Template rendering failed: " + utils.error_msg_from_exception(e)
        handle_error(msg)

    query.executed_sql = executed_sql
    query.status = QueryStatus.RUNNING
    query.start_running_time = utils.now_as_float()
    session.merge(query)
    session.commit()
    logging.info("Set query to 'running'")

    engine = database.get_sqla_engine(schema=query.schema)
    conn = engine.raw_connection()
    cursor = conn.cursor()
    logging.info("Running query: \n{}".format(executed_sql))
    try:
        logging.info(query.executed_sql)
        cursor.execute(
            query.executed_sql, **db_engine_spec.cursor_execute_kwargs)
    except Exception as e:
        logging.exception(e)
        conn.close()
        handle_error(db_engine_spec.extract_error_message(e))

    try:
        logging.info("Handling cursor")
        db_engine_spec.handle_cursor(cursor, query, session)
        logging.info("Fetching data: {}".format(query.to_dict()))
        data = db_engine_spec.fetch_data(cursor, query.limit)
    except Exception as e:
        logging.exception(e)
        conn.close()
        handle_error(db_engine_spec.extract_error_message(e))

    conn.commit()
    conn.close()

    if query.status == utils.QueryStatus.STOPPED:
        return json.dumps({
            'query_id': query.id,
            'status': query.status,
            'query': query.to_dict(),
        }, default=utils.json_iso_dttm_ser)

    column_names = (
        [col[0] for col in cursor.description] if cursor.description else [])
    column_names = dedup(column_names)
    cdf = dataframe.SupersetDataFrame(pd.DataFrame(
        list(data), columns=column_names))

    query.rows = cdf.size
    query.progress = 100
    query.status = QueryStatus.SUCCESS
    if query.select_as_cta:
        query.select_sql = '{}'.format(database.select_star(
            query.tmp_table_name,
            limit=query.limit,
            schema=database.force_ctas_schema
        ))
    query.end_time = utils.now_as_float()
    session.merge(query)
    session.flush()

    payload = {
        'query_id': query.id,
        'status': query.status,
        'data': cdf.data if cdf.data else [],
        'columns': cdf.columns if cdf.columns else [],
        'query': query.to_dict(),
    }
    payload = json.dumps(payload, default=utils.json_iso_dttm_ser)

    if store_results:
        key = '{}'.format(uuid.uuid4())
        logging.info("Storing results in results backend, key: {}".format(key))
        results_backend.set(key, zlib.compress(payload))
        query.results_key = key

    session.merge(query)
    session.commit()

    if return_results:
        return payload
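dedup, applied to the cursor's column names above, makes duplicate labels unique so the pandas DataFrame keeps a distinct column per label. A sketch of the behaviour these snippets rely on (the real utility may use a different suffix scheme):

def dedup(labels, suffix="__"):
    """Rename repeats so labels stay unique: ['a', 'a', 'b'] -> ['a', 'a__1', 'b']."""
    seen = {}
    out = []
    for label in labels:
        if label in seen:
            seen[label] += 1
            label = "{}{}{}".format(label, suffix, seen[label])
        else:
            seen[label] = 0
        out.append(label)
    return out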
Example #10
def execute_sql(
    ctask, query_id, rendered_query, return_results=True, store_results=False,
    user_name=None, session=None, start_time=None,
):
    """Executes the sql query returns the results."""
    if store_results and start_time:
        # only asynchronous queries
        stats_logger.timing(
            'sqllab.query.time_pending', now_as_float() - start_time)
    query = get_query(query_id, session)
    payload = dict(query_id=query_id)

    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()

    def handle_error(msg):
        """Local method handling error while processing the SQL"""
        troubleshooting_link = config['TROUBLESHOOTING_LINK']
        query.error_message = msg
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        session.commit()
        payload.update({
            'status': query.status,
            'error': msg,
        })
        if troubleshooting_link:
            payload['link'] = troubleshooting_link
        return payload

    if store_results and not results_backend:
        return handle_error("Results backend isn't configured.")

    # Limit enforced only for retrieving the data, not for the CTA queries.
    superset_query = SupersetQuery(rendered_query)
    executed_sql = superset_query.stripped()
    SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')
    if not superset_query.is_readonly() and not database.allow_dml:
        return handle_error(
            'Only `SELECT` statements are allowed against this database')
    if query.select_as_cta:
        if not superset_query.is_select():
            return handle_error(
                'Only `SELECT` statements can be used with the CREATE TABLE '
                'feature.')
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = 'tmp_{}_table_{}'.format(
                query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
        executed_sql = superset_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    if (superset_query.is_select() and SQL_MAX_ROWS and
            (not query.limit or query.limit > SQL_MAX_ROWS)):
        query.limit = SQL_MAX_ROWS
        executed_sql = database.apply_limit_to_sql(executed_sql, query.limit)

    # Hook to allow environment-specific mutation (usually comments) to the SQL
    SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
    if SQL_QUERY_MUTATOR:
        executed_sql = SQL_QUERY_MUTATOR(
            executed_sql, user_name, security_manager, database)

    query.executed_sql = executed_sql
    query.status = QueryStatus.RUNNING
    query.start_running_time = now_as_float()
    session.merge(query)
    session.commit()
    logging.info("Set query to 'running'")
    conn = None
    try:
        engine = database.get_sqla_engine(
            schema=query.schema,
            nullpool=True,
            user_name=user_name,
        )
        conn = engine.raw_connection()
        cursor = conn.cursor()
        logging.info('Running query: \n{}'.format(executed_sql))
        logging.info(query.executed_sql)
        query_start_time = now_as_float()
        db_engine_spec.execute(cursor, query.executed_sql, async_=True)
        logging.info('Handling cursor')
        db_engine_spec.handle_cursor(cursor, query, session)
        logging.info('Fetching data: {}'.format(query.to_dict()))
        stats_logger.timing(
            'sqllab.query.time_executing_query',
            now_as_float() - query_start_time)
        fetching_start_time = now_as_float()
        data = db_engine_spec.fetch_data(cursor, query.limit)
        stats_logger.timing(
            'sqllab.query.time_fetching_results',
            now_as_float() - fetching_start_time)
    except SoftTimeLimitExceeded as e:
        logging.exception(e)
        if conn is not None:
            conn.close()
        return handle_error(
            "SQL Lab timeout. This environment's policy is to kill queries "
            'after {} seconds.'.format(SQLLAB_TIMEOUT))
    except Exception as e:
        logging.exception(e)
        if conn is not None:
            conn.close()
        return handle_error(db_engine_spec.extract_error_message(e))

    logging.info('Fetching cursor description')
    cursor_description = cursor.description
    if conn is not None:
        conn.commit()
        conn.close()

    if query.status == QueryStatus.STOPPED:
        return handle_error('The query has been stopped')

    cdf = dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)

    query.rows = cdf.size
    query.progress = 100
    query.status = QueryStatus.SUCCESS
    if query.select_as_cta:
        query.select_sql = '{}'.format(
            database.select_star(
                query.tmp_table_name,
                limit=query.limit,
                schema=database.force_ctas_schema,
                show_cols=False,
                latest_partition=False))
    query.end_time = now_as_float()
    session.merge(query)
    session.flush()

    payload.update({
        'status': query.status,
        'data': cdf.data if cdf.data else [],
        'columns': cdf.columns if cdf.columns else [],
        'query': query.to_dict(),
    })
    if store_results:
        key = '{}'.format(uuid.uuid4())
        logging.info('Storing results in results backend, key: {}'.format(key))
        write_to_results_backend_start = now_as_float()
        json_payload = json.dumps(
            payload, default=json_iso_dttm_ser, ignore_nan=True)
        cache_timeout = database.cache_timeout
        if cache_timeout is None:
            cache_timeout = config.get('CACHE_DEFAULT_TIMEOUT', 0)
        results_backend.set(key, zlib_compress(json_payload), cache_timeout)
        query.results_key = key
        stats_logger.timing(
            'sqllab.query.results_backend_write',
            now_as_float() - write_to_results_backend_start)
    session.merge(query)
    session.commit()

    if return_results:
        return payload
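zlib_compress is a thin helper around the standard library; an equivalent sketch, assuming it simply utf-8-encodes strings before deflating (the real utility may differ):

import zlib

def zlib_compress(data):
    # Accept str or bytes; return zlib-compressed bytes for the results backend
    if isinstance(data, str):
        data = data.encode("utf-8")
    return zlib.compress(data)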
Example #11
def get_sql_results(self, query_id, return_results=True, store_results=False):
    """Executes the sql query returns the results."""
    if not self.request.called_directly:
        engine = sqlalchemy.create_engine(
            app.config.get('SQLALCHEMY_DATABASE_URI'), poolclass=NullPool)
        session_class = sessionmaker()
        session_class.configure(bind=engine)
        session = session_class()
    else:
        session = db.session()
        session.commit()  # HACK
    query = session.query(models.Query).filter_by(id=query_id).one()
    database = query.database
    db_engine_spec = database.db_engine_spec

    def handle_error(msg):
        """Local method handling error while processing the SQL"""
        query.error_message = msg
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        session.commit()
        raise Exception(query.error_message)

    if store_results and not results_backend:
        handle_error("Results backend isn't configured.")

    # Limit enforced only for retrieving the data, not for the CTA queries.
    superset_query = SupersetQuery(query.sql)
    executed_sql = superset_query.stripped()
    if not superset_query.is_select() and not database.allow_dml:
        handle_error(
            "Only `SELECT` statements are allowed against this database")
    if query.select_as_cta:
        if not superset_query.is_select():
            handle_error(
                "Only `SELECT` statements can be used with the CREATE TABLE "
                "feature.")
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = 'tmp_{}_table_{}'.format(
                query.user_id,
                start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
        executed_sql = superset_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    elif (
            query.limit and superset_query.is_select() and
            db_engine_spec.limit_method == LimitMethod.WRAP_SQL):
        executed_sql = database.wrap_sql_limit(executed_sql, query.limit)
        query.limit_used = True
    engine = database.get_sqla_engine(schema=query.schema)
    try:
        template_processor = get_template_processor(
            database=database, query=query)
        executed_sql = template_processor.process_template(executed_sql)
        executed_sql = db_engine_spec.sql_preprocessor(executed_sql)
    except Exception as e:
        logging.exception(e)
        msg = "Template rendering failed: " + utils.error_msg_from_exception(e)
        handle_error(msg)

    query.executed_sql = executed_sql
    logging.info("Running query: \n{}".format(executed_sql))
    try:
        result_proxy = engine.execute(query.executed_sql, schema=query.schema)
    except Exception as e:
        logging.exception(e)
        handle_error(db_engine_spec.extract_error_message(e))

    cursor = result_proxy.cursor
    query.status = QueryStatus.RUNNING
    session.flush()
    db_engine_spec.handle_cursor(cursor, query, session)

    cdf = None
    if result_proxy.cursor:
        column_names = [col[0] for col in result_proxy.cursor.description]
        column_names = dedup(column_names)
        if db_engine_spec.limit_method == LimitMethod.FETCH_MANY:
            data = result_proxy.fetchmany(query.limit)
        else:
            data = result_proxy.fetchall()
        cdf = dataframe.SupersetDataFrame(
            pd.DataFrame(data, columns=column_names))

    query.rows = result_proxy.rowcount
    query.progress = 100
    query.status = QueryStatus.SUCCESS
    if query.rows == -1 and cdf:
        # Presto doesn't provide result_proxy.row_count
        query.rows = cdf.size
    if query.select_as_cta:
        query.select_sql = '{}'.format(database.select_star(
            query.tmp_table_name,
            limit=query.limit,
            schema=database.force_ctas_schema
        ))
    query.end_time = utils.now_as_float()
    session.flush()

    payload = {
        'query_id': query.id,
        'status': query.status,
        'data': [],
    }
    payload['data'] = cdf.data if cdf else []
    payload['columns'] = cdf.columns_dict if cdf else []
    payload['query'] = query.to_dict()
    payload = json.dumps(payload, default=utils.json_iso_dttm_ser)

    if store_results:
        key = '{}'.format(uuid.uuid4())
        logging.info("Storing results in results backend, key: {}".format(key))
        results_backend.set(key, zlib.compress(payload))
        query.results_key = key

    session.flush()
    session.commit()

    if return_results:
        return payload
Example #12
def execute_sql(ctask,
                query_id,
                return_results=True,
                store_results=False,
                user_name=None):
    """Executes the sql query returns the results."""
    session = get_session(not ctask.request.called_directly)

    query = get_query(query_id, session)
    payload = dict(query_id=query_id)

    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()

    def handle_error(msg):
        """Local method handling error while processing the SQL"""
        query.error_message = msg
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        session.commit()
        payload.update({
            'status': query.status,
            'error': msg,
        })
        return payload

    if store_results and not results_backend:
        return handle_error("Results backend isn't configured.")

    # Limit enforced only for retrieving the data, not for the CTA queries.
    superset_query = SupersetQuery(query.sql)
    executed_sql = superset_query.stripped()
    if not superset_query.is_select() and not database.allow_dml:
        return handle_error(
            "Only `SELECT` statements are allowed against this database")
    if query.select_as_cta:
        if not superset_query.is_select():
            return handle_error(
                "Only `SELECT` statements can be used with the CREATE TABLE "
                "feature.")
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = 'tmp_{}_table_{}'.format(
                query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
        executed_sql = superset_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    elif (query.limit and superset_query.is_select()
          and db_engine_spec.limit_method == LimitMethod.WRAP_SQL):
        executed_sql = database.wrap_sql_limit(executed_sql, query.limit)
        query.limit_used = True
    try:
        template_processor = get_template_processor(database=database,
                                                    query=query)
        executed_sql = template_processor.process_template(executed_sql)
    except Exception as e:
        logging.exception(e)
        msg = "Template rendering failed: " + utils.error_msg_from_exception(e)
        return handle_error(msg)

    query.executed_sql = executed_sql
    query.status = QueryStatus.RUNNING
    query.start_running_time = utils.now_as_float()
    session.merge(query)
    session.commit()
    logging.info("Set query to 'running'")

    conn = None  # ensure the name is bound for the error handlers below
    try:
        engine = database.get_sqla_engine(
            schema=query.schema,
            nullpool=not ctask.request.called_directly,
            user_name=user_name)
        conn = engine.raw_connection()
        cursor = conn.cursor()
        logging.info("Running query: \n{}".format(executed_sql))
        logging.info(query.executed_sql)
        cursor.execute(query.executed_sql,
                       **db_engine_spec.cursor_execute_kwargs)
        logging.info("Handling cursor")
        db_engine_spec.handle_cursor(cursor, query, session)
        logging.info("Fetching data: {}".format(query.to_dict()))
        data = db_engine_spec.fetch_data(cursor, query.limit)
    except SoftTimeLimitExceeded as e:
        logging.exception(e)
        if conn is not None:
            conn.close()
        return handle_error(
            "SQL Lab timeout. This environment's policy is to kill queries "
            "after {} seconds.".format(SQLLAB_TIMEOUT))
    except Exception as e:
        logging.exception(e)
        if conn is not None:
            conn.close()
        return handle_error(db_engine_spec.extract_error_message(e))

    logging.info("Fetching cursor description")
    cursor_description = cursor.description

    conn.commit()
    conn.close()

    if query.status == utils.QueryStatus.STOPPED:
        return json.dumps(
            {
                'query_id': query.id,
                'status': query.status,
                'query': query.to_dict(),
            },
            default=utils.json_iso_dttm_ser)

    column_names = ([col[0] for col in cursor_description]
                    if cursor_description else [])
    column_names = dedup(column_names)
    cdf = dataframe.SupersetDataFrame(
        pd.DataFrame(list(data), columns=column_names))

    query.rows = cdf.size
    query.progress = 100
    query.status = QueryStatus.SUCCESS
    if query.select_as_cta:
        query.select_sql = '{}'.format(
            database.select_star(
                query.tmp_table_name,
                limit=query.limit,
                schema=database.force_ctas_schema,
                show_cols=False,
                latest_partition=False,
            ))
    query.end_time = utils.now_as_float()
    session.merge(query)
    session.flush()

    payload.update({
        'status': query.status,
        'data': cdf.data if cdf.data else [],
        'columns': cdf.columns if cdf.columns else [],
        'query': query.to_dict(),
    })
    if store_results:
        key = '{}'.format(uuid.uuid4())
        logging.info("Storing results in results backend, key: {}".format(key))
        json_payload = json.dumps(payload, default=utils.json_iso_dttm_ser)
        results_backend.set(key, utils.zlib_compress(json_payload))
        query.results_key = key
        query.end_result_backend_time = utils.now_as_float()

    session.merge(query)
    session.commit()

    if return_results:
        return payload
Example #13
 def test_get_columns_dict(self):
     main_db = self.get_main_database(db.session)
     df = main_db.get_df("SELECT * FROM multiformat_time_series", None)
     cdf = dataframe.SupersetDataFrame(df)
     if main_db.sqlalchemy_uri.startswith('sqlite'):
         self.assertEqual([{
             'is_date': True,
             'type': 'datetime_string',
             'name': 'ds',
             'is_dim': False
         }, {
             'is_date': True,
             'type': 'datetime_string',
             'name': 'ds2',
             'is_dim': False
         }, {
             'agg': 'sum',
             'is_date': False,
             'type': 'int64',
             'name': 'epoch_ms',
             'is_dim': False
         }, {
             'agg': 'sum',
             'is_date': False,
             'type': 'int64',
             'name': 'epoch_s',
             'is_dim': False
         }, {
             'is_date': True,
             'type': 'datetime_string',
             'name': 'string0',
             'is_dim': False
         }, {
             'is_date': False,
             'type': 'object',
             'name': 'string1',
             'is_dim': True
         }, {
             'is_date': True,
             'type': 'datetime_string',
             'name': 'string2',
             'is_dim': False
         }, {
             'is_date': False,
             'type': 'object',
             'name': 'string3',
             'is_dim': True
         }], cdf.columns_dict)
     else:
         self.assertEqual([{
             'is_date': True,
             'type': 'datetime_string',
             'name': 'ds',
             'is_dim': False
         }, {
             'is_date': True,
             'type': 'datetime64[ns]',
             'name': 'ds2',
             'is_dim': False
         }, {
             'agg': 'sum',
             'is_date': False,
             'type': 'int64',
             'name': 'epoch_ms',
             'is_dim': False
         }, {
             'agg': 'sum',
             'is_date': False,
             'type': 'int64',
             'name': 'epoch_s',
             'is_dim': False
         }, {
             'is_date': True,
             'type': 'datetime_string',
             'name': 'string0',
             'is_dim': False
         }, {
             'is_date': False,
             'type': 'object',
             'name': 'string1',
             'is_dim': True
         }, {
             'is_date': True,
             'type': 'datetime_string',
             'name': 'string2',
             'is_dim': False
         }, {
             'is_date': False,
             'type': 'object',
             'name': 'string3',
             'is_dim': True
         }], cdf.columns_dict)
Example #14
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
    """Executes a single SQL statement"""
    database = query.database
    db_engine_spec = database.db_engine_spec
    parsed_query = ParsedQuery(sql_statement)
    sql = parsed_query.stripped()
    SQL_MAX_ROWS = app.config.get("SQL_MAX_ROW")

    if not parsed_query.is_readonly() and not database.allow_dml:
        raise SqlLabSecurityException(
            _("Only `SELECT` statements are allowed against this database")
        )
    if query.select_as_cta:
        if not parsed_query.is_select():
            raise SqlLabException(
                _(
                    "Only `SELECT` statements can be used with the CREATE TABLE "
                    "feature."
                )
            )
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = "tmp_{}_table_{}".format(
                query.user_id, start_dttm.strftime("%Y_%m_%d_%H_%M_%S")
            )
        sql = parsed_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    if parsed_query.is_select():
        if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
            query.limit = SQL_MAX_ROWS
        if query.limit:
            sql = database.apply_limit_to_sql(sql, query.limit)

    # Hook to allow environment-specific mutation (usually comments) to the SQL
    SQL_QUERY_MUTATOR = config.get("SQL_QUERY_MUTATOR")
    if SQL_QUERY_MUTATOR:
        sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)

    query.executed_sql = sql
    try:
        if log_query:
            log_query(
                query.database.sqlalchemy_uri,
                query.executed_sql,
                query.schema,
                user_name,
                __name__,
                security_manager,
            )
        with stats_timing("sqllab.query.time_executing_query", stats_logger):
            logging.info("Running query: \n{}".format(sql))
            db_engine_spec.execute(cursor, sql, async_=True)
            logging.info("Handling cursor")
            db_engine_spec.handle_cursor(cursor, query, session)

        with stats_timing("sqllab.query.time_fetching_results", stats_logger):
            logging.debug("Fetching data for query object: {}".format(query.to_dict()))
            data = db_engine_spec.fetch_data(cursor, query.limit)

    except SoftTimeLimitExceeded as e:
        logging.exception(e)
        raise SqlLabTimeoutException(
            "SQL Lab timeout. This environment's policy is to kill queries "
            "after {} seconds.".format(SQLLAB_TIMEOUT)
        )
    except Exception as e:
        logging.exception(e)
        raise SqlLabException(db_engine_spec.extract_error_message(e))

    logging.debug("Fetching cursor description")
    cursor_description = cursor.description
    return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)
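stats_timing, used twice above, times a block of work and reports the elapsed duration to the stats logger. A minimal sketch of such a context manager (the real Superset helper may differ in units and error handling):

import time
from contextlib import contextmanager

@contextmanager
def stats_timing(stats_key, stats_logger):
    start = time.time()
    try:
        yield
    finally:
        # Report elapsed seconds under the given key, even if the block raised
        stats_logger.timing(stats_key, time.time() - start)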
Example #15
def execute_norm(ctask,
                 query_id,
                 rendered_query,
                 return_results=True,
                 store_results=False,
                 user_name=None,
                 session=None):
    """ Executes the norm script and returns the results"""
    if 'select' in rendered_query.lower():
        return execute_sql(ctask, query_id, rendered_query, return_results,
                           store_results, user_name, session)

    query = get_query(query_id, session)
    payload = dict(query_id=query_id)

    def handle_error(msg):
        """Local method handling error while processing the SQL"""
        troubleshooting_link = config['TROUBLESHOOTING_LINK']
        query.error_message = msg
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        session.commit()
        payload.update({
            'status': query.status,
            'error': msg,
        })
        if troubleshooting_link:
            payload['link'] = troubleshooting_link
        return payload

    if store_results and not results_backend:
        return handle_error("Results backend isn't configured.")

    query.executed_sql = rendered_query
    query.status = QueryStatus.RUNNING
    query.start_running_time = utils.now_as_float()
    session.merge(query)
    session.commit()
    logging.info("Set query to 'running'")

    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()
    try:
        data = norm.execute(query.executed_sql, session, query.user)
    except SoftTimeLimitExceeded as e:
        logging.exception(e)
        return handle_error(
            "SQL Lab timeout. This environment's policy is to kill queries "
            'after {} seconds.'.format(SQLLAB_TIMEOUT))
    except Exception as e:
        logging.exception(e)
        return handle_error(db_engine_spec.extract_error_message(e))

    if query.status == utils.QueryStatus.STOPPED:
        return handle_error('The query has been stopped')

    cdf = dataframe.SupersetDataFrame(data)

    query.rows = cdf.size
    query.progress = 100
    query.status = QueryStatus.SUCCESS
    query.end_time = utils.now_as_float()
    session.merge(query)
    session.flush()

    payload.update({
        'status': query.status,
        'data': cdf.data if cdf.data else [],
        'columns': cdf.columns if cdf.columns else [],
        'query': query.to_dict(),
    })
    if store_results:
        key = '{}'.format(uuid.uuid4())
        logging.info('Storing results in results backend, key: {}'.format(key))
        json_payload = json.dumps(payload, default=utils.json_iso_dttm_ser)
        cache_timeout = config.get('CACHE_DEFAULT_TIMEOUT', 0)
        results_backend.set(key, utils.zlib_compress(json_payload),
                            cache_timeout)
        query.results_key = key
        query.end_result_backend_time = utils.now_as_float()

    session.merge(query)
    session.commit()

    if return_results:
        return payload