Example 1
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(
            SnowflakeTableLastUpdatedExtractor.DEFAULT_CONFIG)

        if conf.get_bool(SnowflakeTableLastUpdatedExtractor.
                         USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "t.table_catalog"
        else:
            cluster_source = "'{}'".format(
                conf.get_string(
                    SnowflakeTableLastUpdatedExtractor.CLUSTER_KEY))

        self._database = conf.get_string(
            SnowflakeTableLastUpdatedExtractor.DATABASE_KEY)
        self._snowflake_database = conf.get_string(
            SnowflakeTableLastUpdatedExtractor.SNOWFLAKE_DATABASE_KEY)

        self.sql_stmt = SnowflakeTableLastUpdatedExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                SnowflakeTableLastUpdatedExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=cluster_source,
            database=self._snowflake_database)

        LOGGER.info(
            'SQL for snowflake table last updated timestamp: {}'.format(
                self.sql_stmt))

        # Use a SQLAlchemyExtractor to execute the SQL
        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope()) \
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
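All of these init snippets share one wiring pattern: render the SQL from config, then feed it to a scoped SQLAlchemyExtractor. A minimal driver sketch for the extractor above, assuming the databuilder import paths shown and placeholder connection values (both assumptions for illustration):

from pyhocon import ConfigFactory
from databuilder import Scoped
from databuilder.extractor.snowflake_table_last_updated_extractor import SnowflakeTableLastUpdatedExtractor

extractor = SnowflakeTableLastUpdatedExtractor()
scope = extractor.get_scope()  # e.g. 'extractor.snowflake_table_last_updated'
conf = ConfigFactory.from_dict({
    # Key names taken from the class constants used above; the connection
    # string is a placeholder, not a working account.
    '{}.where_clause_suffix'.format(scope): "AND t.table_schema != 'INFORMATION_SCHEMA'",
    '{}.extractor.sqlalchemy.conn_string'.format(scope): 'snowflake://user:password@account',
})
extractor.init(Scoped.get_scoped_conf(conf=conf, scope=scope))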
Example 2
    def init(self, conf):
        # type: (ConfigTree) -> None
        self.conf = conf.with_fallback(PrestoTableMetadataExtractor.DEFAULT_CONFIG)
        self._database = "{}".format(
            self.conf.get_string(PrestoTableMetadataExtractor.DATABASE_KEY)
        )
        self._cluster = self.conf.get(PrestoTableMetadataExtractor.CLUSTER_KEY, None)
        LOGGER.info("Cluster name: {}".format(self._cluster))

        if self._cluster is not None:
            cluster_prefix = self._cluster + "."
        else:
            cluster_prefix = ""

        self.sql_stmt = PrestoTableMetadataExtractor.SQL_STATEMENT.format(
            cluster_prefix=cluster_prefix,
            where_clause_suffix=self.conf.get_string(
                PrestoTableMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY
            )
            or "",
        )

        LOGGER.info("SQL for presto: {}".format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(
            self.conf, self._alchemy_extractor.get_scope()
        ).with_fallback(
            ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt})
        )

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
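The cluster_prefix computed above just namespaces table references in the rendered statement. A toy illustration of the same substitution (the template below is illustrative, not the extractor's actual SQL_STATEMENT):

SQL_TEMPLATE = 'SELECT * FROM {cluster_prefix}information_schema.columns {where_clause_suffix}'

# With a cluster configured, references gain a catalog qualifier:
print(SQL_TEMPLATE.format(cluster_prefix='hive.', where_clause_suffix=''))
# SELECT * FROM hive.information_schema.columns

# Without one, the prefix collapses to nothing:
print(SQL_TEMPLATE.format(cluster_prefix='', where_clause_suffix="WHERE table_schema = 'demo'"))
# SELECT * FROM information_schema.columns WHERE table_schema = 'demo'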
Example 3
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(BasePostgresMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = conf.get_string(
            BasePostgresMetadataExtractor.CLUSTER_KEY)

        self._database = conf.get_string(
            BasePostgresMetadataExtractor.DATABASE_KEY, default='postgres')

        self.sql_stmt = self.get_sql_statement(
            use_catalog_as_cluster_name=conf.get_bool(
                BasePostgresMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME),
            where_clause_suffix=conf.get_string(
                BasePostgresMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
        )

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self.sql_stmt = sql_alch_conf.get_string(
            SQLAlchemyExtractor.EXTRACT_SQL)

        LOGGER.info('SQL for postgres metadata: %s', self.sql_stmt)

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
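The Scoped.get_scoped_conf(...).with_fallback(...) chain that recurs in every snippet is plain pyhocon composition: select the subtree for one scope, then merge defaults underneath it. A self-contained sketch of the same semantics using pyhocon directly:

from pyhocon import ConfigFactory

conf = ConfigFactory.from_dict({'extractor.sqlalchemy.conn_string': 'sqlite:///metadata.db'})

# Roughly what Scoped.get_scoped_conf returns: the subtree for one scope.
scoped = conf.get_config('extractor.sqlalchemy')

# with_fallback supplies defaults without overriding keys already present.
scoped = scoped.with_fallback(ConfigFactory.from_dict({'extract_sql': 'SELECT 1;'}))
print(scoped.get_string('conn_string'))  # sqlite:///metadata.db
print(scoped.get_string('extract_sql'))  # SELECT 1;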
Example 4
    def _get_non_partitioned_table_sql_alchemy_extractor(self):
        # type: () -> Extractor
        """
        Get a SQLAlchemy extractor that extracts the storage location of non-partitioned tables, which is then used
        to probe the last-updated timestamp.

        :return: SQLAlchemyExtractor
        """
        if HiveTableLastUpdatedExtractor.NON_PARTITIONED_TABLE_WHERE_CLAUSE_SUFFIX_KEY in self._conf:
            where_clause_suffix = """
            {}
            AND {}
            """.format(
                self._conf.get_string(
                    HiveTableLastUpdatedExtractor.
                    NON_PARTITIONED_TABLE_WHERE_CLAUSE_SUFFIX_KEY),
                HiveTableLastUpdatedExtractor.ADDTIONAL_WHERE_CLAUSE)
        else:
            where_clause_suffix = 'WHERE {}'.format(
                HiveTableLastUpdatedExtractor.ADDTIONAL_WHERE_CLAUSE)

        sql_stmt = HiveTableLastUpdatedExtractor.NON_PARTITIONED_TABLE_SQL_STATEMENT.format(
            where_clause_suffix=where_clause_suffix)

        LOGGER.info(
            'SQL for non-partitioned table against Hive metastore: {}'.format(
                sql_stmt))

        sql_alchemy_extractor = SQLAlchemyExtractor()
        sql_alchemy_conf = Scoped.get_scoped_conf(self._conf, sql_alchemy_extractor.get_scope()) \
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: sql_stmt}))
        sql_alchemy_extractor.init(sql_alchemy_conf)
        return sql_alchemy_extractor
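Callers drain the returned extractor one row at a time: extract() hands back a single record per call and None once the result set is exhausted (the same contract _get_raw_extract_iter relies on in the later examples). A small helper sketch of that consumption pattern:

from typing import Any, Dict, Iterator

def drain(extractor: Any) -> Iterator[Dict[str, Any]]:
    # Yield rows from an initialized extractor until extract() returns None.
    row = extractor.extract()
    while row:
        yield row
        row = extractor.extract()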
Example 5
    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(PostgresMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(
            conf.get_string(PostgresMetadataExtractor.CLUSTER_KEY))

        if conf.get_bool(
                PostgresMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "c.table_catalog"
        else:
            cluster_source = "'{}'".format(self._cluster)

        self._database = conf.get_string(
            PostgresMetadataExtractor.DATABASE_KEY,
            default='postgres').encode('utf-8', 'ignore')

        self.sql_stmt = PostgresMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                PostgresMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=cluster_source)

        LOGGER.info('SQL for postgres metadata: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
Example 6
    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(Db2MetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(
            conf.get_string(Db2MetadataExtractor.CLUSTER_KEY))

        cluster_source = "'{}'".format(self._cluster)

        database = conf.get_string(Db2MetadataExtractor.DATABASE_KEY,
                                   default='db2')
        if six.PY2 and isinstance(database, six.text_type):
            database = database.encode('utf-8', 'ignore')

        self._database = database

        self.sql_stmt = Db2MetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                Db2MetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=cluster_source)

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self.sql_stmt = sql_alch_conf.get_string(
            SQLAlchemyExtractor.EXTRACT_SQL)

        LOGGER.info('SQL for Db2 metadata: {}'.format(self.sql_stmt))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
Example 7
    def init(self, conf):
        # type: (ConfigTree) -> None
        self.conf = conf.with_fallback(
            PrestoTableMetadataExtractor.DEFAULT_CONFIG)
        self._database = '{}'.format(self.conf.get_string('database'))
        self._cluster = self.conf.get('cluster', None)
        self._default_cluster_name = self.conf.get_string(
            'default_cluster_name')
        LOGGER.info('Cluster name: {}'.format(self._cluster))

        if self._cluster is not None:
            cluster_prefix = self._cluster + '.'
        else:
            cluster_prefix = ''

        self.sql_stmt = PrestoTableMetadataExtractor.SQL_STATEMENT.format(
            cluster_prefix=cluster_prefix,
            where_clause_suffix=self.conf.get_string('where_clause_suffix'))

        LOGGER.info('SQL for presto: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(self.conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
Example 8
    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(OracleMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(
            conf.get_string(OracleMetadataExtractor.CLUSTER_KEY))

        # Set the cluster name based on config
        if conf.get_bool(OracleMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "b.cluster_name"
        else:
            cluster_source = "'{}'".format(self._cluster)

        database = conf.get_string(OracleMetadataExtractor.DATABASE_KEY,
                                   default='oracle')

        self._database = database

        self.sql_stmt = OracleMetadataExtractor.SQL_STATEMENT.format(
            cluster_source=cluster_source)
        LOGGER.info('SQL for oracle metadata: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
Example 9
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(MysqlMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = conf.get_string(MysqlMetadataExtractor.CLUSTER_KEY)

        if conf.get_bool(MysqlMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "c.table_catalog"
        else:
            cluster_source = f"'{self._cluster}'"

        self._database = conf.get_string(MysqlMetadataExtractor.DATABASE_KEY,
                                         default='mysql')

        self.sql_stmt = MysqlMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                MysqlMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=cluster_source)

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope()) \
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self.sql_stmt = sql_alch_conf.get_string(
            SQLAlchemyExtractor.EXTRACT_SQL)

        LOGGER.info('SQL for mysql metadata: %s', self.sql_stmt)

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
Example 10
    def init(self, conf):
        conf = conf.with_fallback(self.DEFAULT_CONFIG)

        self._cluster = "{}".format(conf.get_string(self.CLUSTER_KEY))

        self._database = conf.get_string(self.DATABASE_KEY)

        self.sql_stmt = self._get_sql_statement(
            use_catalog_as_cluster_name=conf.get_bool(
                self.USE_CATALOG_AS_CLUSTER_NAME),
            where_clause_suffix=conf.get_string(self.WHERE_CLAUSE_SUFFIX_KEY),
        )

        self._alchemy_extractor = SQLAlchemyExtractor()

        sql_alch_conf = Scoped.get_scoped_conf(
            conf, SQLALCHEMY_ENGINE_SCOPE).with_fallback(
                ConfigFactory.from_dict(
                    {SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self.sql_stmt = sql_alch_conf.get_string(
            SQLAlchemyExtractor.EXTRACT_SQL)

        LOGGER.info("SQL for postgres metadata: %s", self.sql_stmt)

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
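Reading the statement back via sql_alch_conf.get_string(SQLAlchemyExtractor.EXTRACT_SQL) is what makes a user-supplied extract_sql win over the generated default: with_fallback keeps existing keys on top and only fills gaps. A minimal pyhocon sketch of that precedence:

from pyhocon import ConfigFactory

generated = 'SELECT ...  -- generated default'
user_conf = ConfigFactory.from_dict({'extract_sql': 'SELECT 1  -- user override'})
merged = user_conf.with_fallback(ConfigFactory.from_dict({'extract_sql': generated}))
print(merged.get_string('extract_sql'))  # SELECT 1  -- user override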
Example 11
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(SqliteMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = conf.get_string(SqliteMetadataExtractor.CLUSTER_KEY)

        self._database = conf.get_string(SqliteMetadataExtractor.DATABASE_KEY,
                                         default="sqlite")

        self.sql_stmt = SqliteMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                SqliteMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=self._cluster,
        )

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(
            conf, self._alchemy_extractor.get_scope()).with_fallback(
                ConfigFactory.from_dict(
                    {SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self.sql_stmt = sql_alch_conf.get_string(
            SQLAlchemyExtractor.EXTRACT_SQL)

        LOGGER.info("SQL for sqlite metadata: %s", self.sql_stmt)

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
Example 12
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(DruidMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = conf.get_string(DruidMetadataExtractor.CLUSTER_KEY)

        self.sql_stmt = DruidMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                DruidMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY, default=''))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
Example 13
def create_table_wm_job(**kwargs):
    sql = textwrap.dedent("""
        SELECT From_unixtime(A0.create_time) as create_time,
               'hive'                        as `database`,
               C0.NAME                       as `schema`,
               B0.tbl_name as table_name,
               {func}(A0.part_name) as part_name,
               {watermark} as part_type
        FROM   PARTITIONS A0
               LEFT OUTER JOIN TBLS B0
                            ON A0.tbl_id = B0.tbl_id
               LEFT OUTER JOIN DBS C0
                            ON B0.db_id = C0.db_id
        WHERE  C0.NAME IN {schemas}
               AND B0.tbl_type IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
               AND A0.PART_NAME NOT LIKE '%%__HIVE_DEFAULT_PARTITION__%%'
        GROUP  BY C0.NAME, B0.tbl_name
        ORDER by create_time desc
    """).format(func=kwargs['templates_dict'].get('agg_func'),
                watermark=kwargs['templates_dict'].get('watermark_type'),
                schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)

    logging.info('SQL query: {}'.format(sql))
    tmp_folder = '/var/tmp/amundsen/table_{hwm}'.format(
        hwm=kwargs['templates_dict'].get('watermark_type').strip("\""))
    node_files_folder = '{tmp_folder}/nodes'.format(tmp_folder=tmp_folder)
    relationship_files_folder = '{tmp_folder}/relationships'.format(
        tmp_folder=tmp_folder)

    hwm_extractor = SQLAlchemyExtractor()
    csv_loader = FsNeo4jCSVLoader()

    task = DefaultTask(extractor=hwm_extractor,
                       loader=csv_loader,
                       transformer=NoopTransformer())

    job_config = ConfigFactory.from_dict({
        'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
        connection_string(),
        'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.EXTRACT_SQL):
        sql,
        'extractor.sqlalchemy.model_class':
        'databuilder.models.watermark.Watermark',
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
        node_files_folder,
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
        relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
        node_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
        relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
        neo4j_endpoint,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
        neo4j_user,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
        neo4j_password,
    })
    job = DefaultJob(conf=job_config, task=task, publisher=Neo4jCsvPublisher())
    job.launch()
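Inside an Airflow DAG this function would receive templates_dict from a PythonOperator; a hypothetical direct call, with argument values that are assumptions for illustration:

create_table_wm_job(templates_dict={
    'agg_func': 'max',                      # aggregate to the newest partition
    'watermark_type': '"high_watermark"',   # quoted so it lands in the SQL as a string literal
})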
Example 14
    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(HiveTableMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(conf.get_string(HiveTableMetadataExtractor.CLUSTER_KEY))

        self.sql_stmt = HiveTableMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(HiveTableMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY))

        LOGGER.info('SQL for hive metastore: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
Example 15
    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(MSSQLMetadataExtractor.DEFAULT_CONFIG)

        self._cluster = '{}'.format(
            conf.get_string(MSSQLMetadataExtractor.CLUSTER_KEY))

        if conf.get_bool(MSSQLMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "DB_NAME()"
        else:
            cluster_source = "'{}'".format(self._cluster)

        database = conf.get_string(MSSQLMetadataExtractor.DATABASE_KEY,
                                   default='mssql')
        if six.PY2 and isinstance(database, six.text_type):
            database = database.encode('utf-8', 'ignore')

        self._database = database

        config_where_clause = conf.get_string(
            MSSQLMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY)

        logging.info("Crawling for Schemas %s", config_where_clause)

        if len(config_where_clause) > 0:
            where_clause_suffix = MSSQLMetadataExtractor\
                .DEFAULT_WHERE_CLAUSE_VALUE\
                .format(schemas=config_where_clause)
        else:
            where_clause_suffix = ''

        self.sql_stmt = MSSQLMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=where_clause_suffix,
            cluster_source=cluster_source)

        LOGGER.info('SQL for MS SQL Metadata: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped\
            .get_scoped_conf(conf, self._alchemy_extractor.get_scope()) \
            .with_fallback(
                ConfigFactory.from_dict({
                    SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt})
            )

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]
Example 16
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(AthenaMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = conf.get_string(AthenaMetadataExtractor.CATALOG_KEY)

        self.sql_stmt = AthenaMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                AthenaMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            catalog_source=self._cluster)

        LOGGER.info('SQL for Athena metadata: %s', self.sql_stmt)

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
Example 17
    def _get_partitioned_table_sql_alchemy_extractor(self) -> Extractor:
        """
        Get a SQLAlchemy extractor that extracts the last-updated timestamp for partitioned tables.
        :return: SQLAlchemyExtractor
        """

        sql_stmt = HiveTableLastUpdatedExtractor.PARTITION_TABLE_SQL_STATEMENT.format(
            where_clause_suffix=self._conf.get_string(
                HiveTableLastUpdatedExtractor.PARTITIONED_TABLE_WHERE_CLAUSE_SUFFIX_KEY, ' '))

        LOGGER.info('SQL for partitioned table against Hive metastore: %s', sql_stmt)

        sql_alchemy_extractor = SQLAlchemyExtractor()
        sql_alchemy_conf = Scoped.get_scoped_conf(self._conf, sql_alchemy_extractor.get_scope()) \
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: sql_stmt}))
        sql_alchemy_extractor.init(sql_alchemy_conf)
        return sql_alchemy_extractor
Example 18
    def test_extraction_with_model_class(self: Any, mock_method: Any) -> None:
        """
        Test Extraction using model class
        """
        config_dict = {
            'extractor.sqlalchemy.conn_string':
            'TEST_CONNECTION',
            'extractor.sqlalchemy.extract_sql':
            'SELECT 1 FROM TEST_TABLE;',
            'extractor.sqlalchemy.model_class':
            'tests.unit.extractor.test_sql_alchemy_extractor.TableMetadataResult'
        }
        self.conf = ConfigFactory.from_dict(config_dict)

        extractor = SQLAlchemyExtractor()
        extractor.results = [
            dict(database='test_database',
                 schema='test_schema',
                 name='test_table',
                 description='test_description',
                 column_name='test_column_name',
                 column_type='test_column_type',
                 column_comment='test_column_comment',
                 owner='test_owner')
        ]

        extractor.init(
            Scoped.get_scoped_conf(conf=self.conf,
                                   scope=extractor.get_scope()))

        result = extractor.extract()

        self.assertIsInstance(result, TableMetadataResult)
        self.assertEqual(result.name, 'test_table')
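The mock_method parameter implies the test class patches query execution, so no database is touched and the pre-seeded extractor.results stands in for query rows. A sketch of the kind of decorator that injects it; the patch target below is an assumption for illustration:

from unittest.mock import patch

@patch('databuilder.extractor.sql_alchemy_extractor.SQLAlchemyExtractor._execute_query')
def test_extraction_with_model_class(self, mock_method):
    ...  # body as above; with _execute_query mocked out, extractor.results survives init()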
Example 19
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(HiveTableMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = conf.get_string(HiveTableMetadataExtractor.CLUSTER_KEY)

        self._alchemy_extractor = SQLAlchemyExtractor()

        sql_alch_conf = Scoped.get_scoped_conf(
            conf, self._alchemy_extractor.get_scope())
        default_sql = self._choose_default_sql_stm(sql_alch_conf).format(
            where_clause_suffix=conf.get_string(
                HiveTableMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY))

        self.sql_stmt = conf.get_string(HiveTableMetadataExtractor.EXTRACT_SQL,
                                        default=default_sql)

        LOGGER.info('SQL for hive metastore: %s', self.sql_stmt)

        sql_alch_conf = sql_alch_conf.with_fallback(
            ConfigFactory.from_dict(
                {SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))
        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
Example 20
    def test_extraction_with_single_query_result(self, mock_method):
        # type: (Any, Any) -> None
        """
        Test extraction of a single result from a query
        """
        extractor = SQLAlchemyExtractor()
        extractor.results = ['test_result']
        extractor.init(Scoped.get_scoped_conf(conf=self.conf,
                                              scope=extractor.get_scope()))
        results = extractor.extract()
        self.assertEqual(results, 'test_result')
Example 21
    def init(self, conf: ConfigTree) -> None:
        self.conf = conf.with_fallback(
            SnowflakeMetadataExtractor.DEFAULT_CONFIG)
        self._database = self.conf.get_string(
            SnowflakeMetadataExtractor.DATABASE_KEY)
        self._cluster = '{}'.format(
            self.conf.get_string(SnowflakeMetadataExtractor.CLUSTER_KEY))

        self.sql_stmt = SnowflakeMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=self.conf.get_string('where_clause_suffix'),
            cluster=self._cluster,
            database=self._database)

        LOGGER.info('SQL for snowflake: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alchemy_scope = self._alchemy_extractor.get_scope()
        sql_alchemy_conf = Scoped.get_scoped_conf(conf, sql_alchemy_scope)
        sql_alchemy_conf.put(SQLAlchemyExtractor.EXTRACT_SQL, self.sql_stmt)

        self._alchemy_extractor.init(sql_alchemy_conf)
        self._extract_iter = None
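Unlike the other snippets, this variant injects the SQL by mutating the scoped config with put rather than layering a with_fallback; the statement ends up readable under the same EXTRACT_SQL key either way. A tiny pyhocon illustration of put:

from pyhocon import ConfigFactory

tree = ConfigFactory.from_dict({'conn_string': 'sqlite:///metadata.db'})
tree.put('extract_sql', 'SELECT 1;')   # in-place mutation of the ConfigTree
print(tree.get_string('extract_sql'))  # SELECT 1;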
Example 22
    def init(self, conf: ConfigTree) -> None:
        conf = conf.with_fallback(MSSQLMetadataExtractor.DEFAULT_CONFIG)

        self._cluster = conf.get_string(MSSQLMetadataExtractor.CLUSTER_KEY)

        if conf.get_bool(MSSQLMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "DB_NAME()"
        else:
            cluster_source = f"'{self._cluster}'"

        self._database = conf.get_string(MSSQLMetadataExtractor.DATABASE_KEY,
                                         default='mssql')

        config_where_clause = conf.get_string(
            MSSQLMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY)

        LOGGER.info("Crawling for Schemas %s", config_where_clause)

        if config_where_clause:
            where_clause_suffix = MSSQLMetadataExtractor \
                .DEFAULT_WHERE_CLAUSE_VALUE \
                .format(schemas=config_where_clause)
        else:
            where_clause_suffix = ''

        self.sql_stmt = MSSQLMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=where_clause_suffix,
            cluster_source=cluster_source)

        LOGGER.info('SQL for MS SQL Metadata: %s', self.sql_stmt)

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped \
            .get_scoped_conf(conf, self._alchemy_extractor.get_scope()) \
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter: Union[None, Iterator] = None
Example 23
def create_sample_job(table_name, model_name):
    sql = textwrap.dedent("""
    select * from {table_name};
    """).format(table_name=table_name)

    tmp_folder = '/tmp/amundsen/{table_name}'.format(table_name=table_name)

    # tmp_folder = os.path.join(
    #     BASE_DIR, "amundsen", f"{table_name}".format(table_name=table_name)
    # )
    node_files_folder = "{tmp_folder}/nodes".format(tmp_folder=tmp_folder)
    relationship_files_folder = "{tmp_folder}/relationships".format(
        tmp_folder=tmp_folder)

    sql_extractor = SQLAlchemyExtractor()
    csv_loader = FsNeo4jCSVLoader()

    task = DefaultTask(extractor=sql_extractor,
                       loader=csv_loader,
                       transformer=NoopTransformer())

    job_config = ConfigFactory.from_dict({
        "extractor.sqlalchemy.{}".format(SQLAlchemyExtractor.CONN_STRING):
        SQLITE_CONN_STRING,
        "extractor.sqlalchemy.{}".format(SQLAlchemyExtractor.EXTRACT_SQL):
        sql,
        "extractor.sqlalchemy.model_class":
        model_name,
        "loader.filesystem_csv_neo4j.{}".format(FsNeo4jCSVLoader.NODE_DIR_PATH):
        node_files_folder,
        "loader.filesystem_csv_neo4j.{}".format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
        relationship_files_folder,
        "loader.filesystem_csv_neo4j.{}".format(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR):
        True,
        "publisher.neo4j.{}".format(neo4j_csv_publisher.NODE_FILES_DIR):
        node_files_folder,
        "publisher.neo4j.{}".format(neo4j_csv_publisher.RELATION_FILES_DIR):
        relationship_files_folder,
        "publisher.neo4j.{}".format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
        neo4j_endpoint,
        "publisher.neo4j.{}".format(neo4j_csv_publisher.NEO4J_USER):
        neo4j_user,
        "publisher.neo4j.{}".format(neo4j_csv_publisher.NEO4J_PASSWORD):
        neo4j_password,
        "publisher.neo4j.{}".format(neo4j_csv_publisher.JOB_PUBLISH_TAG):
        "unique_tag",  # should use unique tag here like {ds}
    })
    job = DefaultJob(conf=job_config, task=task, publisher=Neo4jCsvPublisher())
    return job
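The returned job is launched by the caller. A hedged usage sketch; the model class path is an assumption chosen to match the SELECTed columns:

job = create_sample_job('test_table',
                        'databuilder.models.table_metadata.TableMetadata')
job.launch()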
Example 24
    def test_extraction_with_empty_query_result(self: Any,
                                                mock_method: Any) -> None:
        """
        Test Extraction with empty result from query
        """
        extractor = SQLAlchemyExtractor()
        extractor.results = ['']
        extractor.init(
            Scoped.get_scoped_conf(conf=self.conf,
                                   scope=extractor.get_scope()))

        results = extractor.extract()
        self.assertEqual(results, '')
Example 25
    def test_extraction_with_multiple_query_result(self, mock_method):
        # type: (Any, Any) -> None
        """
        Test Extraction from list of results from query
        """
        extractor = SQLAlchemyExtractor()
        extractor.results = ['test_result', 'test_result2', 'test_result3']
        extractor.init(Scoped.get_scoped_conf(conf=self.conf,
                                              scope=extractor.get_scope()))
        result = [extractor.extract() for _ in range(3)]

        self.assertEqual(len(result), 3)
        self.assertEqual(result,
                         ['test_result', 'test_result2', 'test_result3'])
Example 26
def test_athena_extractor_iam(open_catalog_connection):
    catalog, conf = open_catalog_connection
    with catalog.managed_session:
        source = catalog.add_source(
            name="athena_iam",
            source_type="athena",
            region_name="us_east_1",
            s3_staging_dir="staging_dir",
        )

        extractor, conn_conf = DbScanner._create_athena_extractor(source)
        scoped = Scoped.get_scoped_conf(
            Scoped.get_scoped_conf(conn_conf, extractor.get_scope()),
            SQLAlchemyExtractor().get_scope(),
        )
        assert (
            scoped.get_string(SQLAlchemyExtractor.CONN_STRING) ==
            "awsathena+rest://:@athena.us_east_1.amazonaws.com:443/?s3_staging_dir=staging_dir"
        )
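The expected URL in the assertion is assembled from the source fields following PyAthena's SQLAlchemy URL layout; reproduced standalone:

region, staging = 'us_east_1', 'staging_dir'
url = f'awsathena+rest://:@athena.{region}.amazonaws.com:443/?s3_staging_dir={staging}'
print(url)  # awsathena+rest://:@athena.us_east_1.amazonaws.com:443/?s3_staging_dir=staging_dir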
Example 27
def create_sample_job(table_name, model_name):
    sql = textwrap.dedent("""
    select * from {table_name};
    """).format(table_name=table_name)

    tmp_folder = '/var/tmp/amundsen/{table_name}'.format(table_name=table_name)
    node_files_folder = '{tmp_folder}/nodes'.format(tmp_folder=tmp_folder)
    relationship_files_folder = '{tmp_folder}/relationships'.format(
        tmp_folder=tmp_folder)

    sql_extractor = SQLAlchemyExtractor()
    csv_loader = FsNeo4jCSVLoader()

    task = DefaultTask(extractor=sql_extractor,
                       loader=csv_loader,
                       transformer=NoopTransformer())

    job_config = ConfigFactory.from_dict({
        'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
        SQLITE_CONN_STRING,
        'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.EXTRACT_SQL):
        sql,
        'extractor.sqlalchemy.model_class':
        model_name,
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
        node_files_folder,
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
        relationship_files_folder,
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR):
        True,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
        node_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
        relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
        neo4j_endpoint,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
        neo4j_user,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
        neo4j_password,
    })
    job = DefaultJob(conf=job_config, task=task, publisher=Neo4jCsvPublisher())
    return job
Example 28
class PostgresMetadataExtractor(Extractor):
    """
    Extracts Postgres table and column metadata from underlying meta store database using SQLAlchemyExtractor
    """
    # SELECT statement from postgres information_schema to extract table and column metadata
    SQL_STATEMENT = """
    SELECT
      {cluster_source} as cluster, c.table_schema as schema_name, c.table_name as name, pgtd.description as description
      ,c.column_name as col_name, c.data_type as col_type
      , pgcd.description as col_description, ordinal_position as col_sort_order
    FROM INFORMATION_SCHEMA.COLUMNS c
    INNER JOIN
      pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname
    LEFT JOIN
      pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position
    LEFT JOIN
      pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0
    {where_clause_suffix}
    ORDER by cluster, schema_name, name, col_sort_order ;
    """

    # CONFIG KEYS
    WHERE_CLAUSE_SUFFIX_KEY = 'where_clause_suffix'
    CLUSTER_KEY = 'cluster_key'
    USE_CATALOG_AS_CLUSTER_NAME = 'use_catalog_as_cluster_name'
    DATABASE_KEY = 'database_key'

    # Default values
    DEFAULT_CLUSTER_NAME = 'master'

    DEFAULT_CONFIG = ConfigFactory.from_dict({
        WHERE_CLAUSE_SUFFIX_KEY: ' ',
        CLUSTER_KEY: DEFAULT_CLUSTER_NAME,
        USE_CATALOG_AS_CLUSTER_NAME: True
    })

    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(PostgresMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(
            conf.get_string(PostgresMetadataExtractor.CLUSTER_KEY))

        if conf.get_bool(
                PostgresMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "c.table_catalog"
        else:
            cluster_source = "'{}'".format(self._cluster)

        self._database = conf.get_string(
            PostgresMetadataExtractor.DATABASE_KEY,
            default='postgres').encode('utf-8', 'ignore')

        self.sql_stmt = PostgresMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                PostgresMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=cluster_source)

        LOGGER.info('SQL for postgres metadata: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]

    def extract(self):
        # type: () -> Union[TableMetadata, None]
        if not self._extract_iter:
            self._extract_iter = self._get_extract_iter()
        try:
            return next(self._extract_iter)
        except StopIteration:
            return None

    def get_scope(self):
        # type: () -> str
        return 'extractor.postgres_metadata'

    def _get_extract_iter(self):
        # type: () -> Iterator[TableMetadata]
        """
        Uses itertools.groupby over the raw row iterator to group rows by table and yield TableMetadata
        :return:
        """
        for key, group in groupby(self._get_raw_extract_iter(),
                                  self._get_table_key):
            columns = []

            for row in group:
                last_row = row
                columns.append(
                    ColumnMetadata(row['col_name'], row['col_description'],
                                   row['col_type'], row['col_sort_order']))

            yield TableMetadata(self._database, last_row['cluster'],
                                last_row['schema_name'], last_row['name'],
                                last_row['description'], columns)

    def _get_raw_extract_iter(self):
        # type: () -> Iterator[Dict[str, Any]]
        """
        Provides an iterator of result rows from the SQLAlchemy extractor
        :return:
        """
        row = self._alchemy_extractor.extract()
        while row:
            yield row
            row = self._alchemy_extractor.extract()

    def _get_table_key(self, row):
        # type: (Dict[str, Any]) -> Union[TableKey, None]
        """
        Table key consists of schema and table name
        :param row:
        :return:
        """
        if row:
            return TableKey(schema_name=row['schema_name'],
                            table_name=row['name'])

        return None
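The grouping in _get_extract_iter works only because the SQL orders rows by table; itertools.groupby merges consecutive rows with equal keys. A minimal standalone illustration:

from itertools import groupby

rows = [
    {'schema_name': 's', 'name': 't1', 'col_name': 'a'},
    {'schema_name': 's', 'name': 't1', 'col_name': 'b'},
    {'schema_name': 's', 'name': 't2', 'col_name': 'a'},
]
for key, group in groupby(rows, key=lambda r: (r['schema_name'], r['name'])):
    print(key, [r['col_name'] for r in group])
# ('s', 't1') ['a', 'b']
# ('s', 't2') ['a']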
Example 29
class PrestoViewMetadataExtractor(Extractor):
    """
    Extracts Presto View and column metadata from underlying meta store database using SQLAlchemyExtractor
    PrestoViewMetadataExtractor does not require a separate table model but just reuse the existing TableMetadata
    """
    # SQL statement to extract View metadata
    # {where_clause_suffix} could be used to filter schemas
    SQL_STATEMENT = """
    SELECT t.TBL_ID, d.NAME as `schema`, t.TBL_NAME name, t.TBL_TYPE, t.VIEW_ORIGINAL_TEXT as view_original_text
    FROM TBLS t
    JOIN DBS d ON t.DB_ID = d.DB_ID
    WHERE t.VIEW_EXPANDED_TEXT = '/* Presto View */'
    {where_clause_suffix}
    ORDER BY t.TBL_ID desc;
    """

    # Presto View data prefix and suffix definition:
    # https://github.com/prestodb/presto/blob/43bd519052ba4c56ff1f4fc807075637ab5f4f10/presto-hive/src/main/java/com/facebook/presto/hive/HiveUtil.java#L153-L154
    PRESTO_VIEW_PREFIX = '/* Presto View: '
    PRESTO_VIEW_SUFFIX = ' */'

    # CONFIG KEYS
    WHERE_CLAUSE_SUFFIX_KEY = 'where_clause_suffix'
    CLUSTER_KEY = 'cluster'

    DEFAULT_CONFIG = ConfigFactory.from_dict({
        WHERE_CLAUSE_SUFFIX_KEY: ' ',
        CLUSTER_KEY: 'gold'
    })

    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(PrestoViewMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(
            conf.get_string(PrestoViewMetadataExtractor.CLUSTER_KEY))

        self.sql_stmt = PrestoViewMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(
                PrestoViewMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY))

        LOGGER.info('SQL for hive metastore: {}'.format(self.sql_stmt))

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]

    def extract(self):
        # type: () -> Union[TableMetadata, None]
        if not self._extract_iter:
            self._extract_iter = self._get_extract_iter()
        try:
            return next(self._extract_iter)
        except StopIteration:
            return None

    def get_scope(self):
        # type: () -> str
        return 'extractor.presto_view_metadata'

    def _get_extract_iter(self):
        # type: () -> Iterator[TableMetadata]
        """
        Iterates over Presto view rows from the SQLAlchemy extractor and yields one TableMetadata per view
        :return:
        """
        row = self._alchemy_extractor.extract()
        while row:
            columns = self._get_column_metadata(row['view_original_text'])
            yield TableMetadata(database='presto',
                                cluster=self._cluster,
                                schema=row['schema'],
                                name=row['name'],
                                description=None,
                                columns=columns,
                                is_view=True)
            row = self._alchemy_extractor.extract()

    def _get_column_metadata(self, view_original_text):
        # type: (str) -> List[ColumnMetadata]
        """
        Get Column Metadata from VIEW_ORIGINAL_TEXT from TBLS table for Presto Views.
        Columns are sorted the same way as they appear in Presto Create View SQL.
        :param view_original_text:
        :return:
        """
        # remove encoded Presto View data prefix and suffix
        encoded_view_info = (view_original_text.split(
            PrestoViewMetadataExtractor.PRESTO_VIEW_PREFIX,
            1)[-1].rsplit(PrestoViewMetadataExtractor.PRESTO_VIEW_SUFFIX,
                          1)[0])

        # view_original_text is b64 encoded:
        # https://github.com/prestodb/presto/blob/43bd519052ba4c56ff1f4fc807075637ab5f4f10/presto-hive/src/main/java/com/facebook/presto/hive/HiveUtil.java#L602-L605
        decoded_view_info = base64.b64decode(encoded_view_info)
        columns = json.loads(decoded_view_info).get('columns')

        return [
            ColumnMetadata(name=column['name'],
                           description=None,
                           col_type=column['type'],
                           sort_order=i) for i, column in enumerate(columns)
        ]
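A round-trip illustration of the encoding _get_column_metadata undoes: Presto stores the view definition as base64-encoded JSON between the prefix and suffix markers above:

import base64
import json

view_def = {'columns': [{'name': 'id', 'type': 'bigint'}]}
encoded = ('/* Presto View: '
           + base64.b64encode(json.dumps(view_def).encode()).decode()
           + ' */')

# Same prefix/suffix stripping and decoding as _get_column_metadata:
payload = encoded.split('/* Presto View: ', 1)[-1].rsplit(' */', 1)[0]
print(json.loads(base64.b64decode(payload))['columns'])
# [{'name': 'id', 'type': 'bigint'}]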
Example 30
class MysqlMetadataExtractor(Extractor):
    """
    Extracts mysql table and column metadata from underlying meta store database using SQLAlchemyExtractor
    """
    # SELECT statement from mysql information_schema to extract table and column metadata
    SQL_STATEMENT = """
        SELECT
        lower(c.column_name) AS col_name,
        c.column_comment AS col_description,
        lower(c.data_type) AS col_type,
        lower(c.ordinal_position) AS col_sort_order,
        {cluster_source} AS cluster,
        lower(c.table_schema) AS "schema",
        lower(c.table_name) AS name,
        t.table_comment AS description,
        case when lower(t.table_type) = "view" then "true" else "false" end AS is_view
        FROM
        INFORMATION_SCHEMA.COLUMNS AS c
        LEFT JOIN
        INFORMATION_SCHEMA.TABLES t
            ON c.TABLE_NAME = t.TABLE_NAME
            AND c.TABLE_SCHEMA = t.TABLE_SCHEMA
        {where_clause_suffix}
        ORDER by cluster, "schema", name, col_sort_order ;
    """

    # CONFIG KEYS
    WHERE_CLAUSE_SUFFIX_KEY = 'where_clause_suffix'
    CLUSTER_KEY = 'cluster_key'
    USE_CATALOG_AS_CLUSTER_NAME = 'use_catalog_as_cluster_name'
    DATABASE_KEY = 'database_key'

    # Default values
    DEFAULT_CLUSTER_NAME = 'master'

    DEFAULT_CONFIG = ConfigFactory.from_dict(
        {WHERE_CLAUSE_SUFFIX_KEY: ' ', CLUSTER_KEY: DEFAULT_CLUSTER_NAME, USE_CATALOG_AS_CLUSTER_NAME: True}
    )

    def init(self, conf):
        # type: (ConfigTree) -> None
        conf = conf.with_fallback(MysqlMetadataExtractor.DEFAULT_CONFIG)
        self._cluster = '{}'.format(conf.get_string(MysqlMetadataExtractor.CLUSTER_KEY))

        if conf.get_bool(MysqlMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
            cluster_source = "c.table_catalog"
        else:
            cluster_source = "'{}'".format(self._cluster)

        database = conf.get_string(MysqlMetadataExtractor.DATABASE_KEY, default='mysql')
        if six.PY2 and isinstance(database, six.text_type):
            database = database.encode('utf-8', 'ignore')

        self._database = database

        self.sql_stmt = MysqlMetadataExtractor.SQL_STATEMENT.format(
            where_clause_suffix=conf.get_string(MysqlMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
            cluster_source=cluster_source
        )

        self._alchemy_extractor = SQLAlchemyExtractor()
        sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
            .with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))

        self.sql_stmt = sql_alch_conf.get_string(SQLAlchemyExtractor.EXTRACT_SQL)

        LOGGER.info('SQL for mysql metadata: {}'.format(self.sql_stmt))

        self._alchemy_extractor.init(sql_alch_conf)
        self._extract_iter = None  # type: Union[None, Iterator]

    def extract(self):
        # type: () -> Union[TableMetadata, None]
        if not self._extract_iter:
            self._extract_iter = self._get_extract_iter()
        try:
            return next(self._extract_iter)
        except StopIteration:
            return None

    def get_scope(self):
        # type: () -> str
        return 'extractor.mysql_metadata'

    def _get_extract_iter(self):
        # type: () -> Iterator[TableMetadata]
        """
        Uses itertools.groupby over the raw row iterator to group rows by table and yield TableMetadata
        :return:
        """
        for key, group in groupby(self._get_raw_extract_iter(), self._get_table_key):
            columns = []

            for row in group:
                last_row = row
                columns.append(ColumnMetadata(row['col_name'], row['col_description'],
                                              row['col_type'], row['col_sort_order']))

            yield TableMetadata(self._database, last_row['cluster'],
                                last_row['schema'],
                                last_row['name'],
                                last_row['description'],
                                columns,
                                is_view=last_row['is_view'])

    def _get_raw_extract_iter(self):
        # type: () -> Iterator[Dict[str, Any]]
        """
        Provides an iterator of result rows from the SQLAlchemy extractor
        :return:
        """
        row = self._alchemy_extractor.extract()
        while row:
            yield row
            row = self._alchemy_extractor.extract()

    def _get_table_key(self, row):
        # type: (Dict[str, Any]) -> Union[TableKey, None]
        """
        Table key consists of schema and table name
        :param row:
        :return:
        """
        if row:
            return TableKey(schema=row['schema'], table_name=row['name'])

        return None