Code Example #1
import agate
from dbt.exceptions import InternalException


def _expect_row_value(key: str, row: agate.Row):
    # Fail loudly with the available columns if the expected column is
    # missing, instead of letting a bare KeyError propagate.
    if key not in row.keys():
        raise InternalException(
            'Got a row without "{}" column, columns: {}'
            .format(key, row.keys())
        )
    return row[key]
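
A minimal usage sketch (not part of the original file), assuming agate is available; agate.Row takes values and keys, mirroring the rows a cursor hands back:

import agate

# Hypothetical row, for illustration only.
row = agate.Row(values=['col1', 'decimal(22,0)'],
                keys=['col_name', 'data_type'])

_expect_row_value('col_name', row)   # -> 'col1'
_expect_row_value('data_type', row)  # -> 'decimal(22,0)'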
Code Example #2
File: test_adapter.py  Project: tongqqiu/dbt-spark
    def test_parse_relation_with_statistics(self):
        self.maxDiff = None
        rel_type = 'table'

        relation = BaseRelation.create(database='default_database',
                                       schema='default_schema',
                                       identifier='mytable',
                                       type=rel_type)

        # Mimics the output of Spark with a DESCRIBE TABLE EXTENDED
        plain_rows = [
            ('col1', 'decimal(22,0)'),
            ('# Partition Information', 'data_type'),
            (None, None),
            ('# Detailed Table Information', None),
            ('Database', relation.database),
            ('Owner', 'root'),
            ('Created Time', 'Wed Feb 04 18:15:00 UTC 1815'),
            ('Last Access', 'Wed May 20 19:25:00 UTC 1925'),
            ('Statistics', '1109049927 bytes, 14093476 rows'),
            ('Type', 'MANAGED'),
            ('Provider', 'delta'),
            ('Location', '/mnt/vo'),
            ('Serde Library',
             'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'),
            ('InputFormat',
             'org.apache.hadoop.mapred.SequenceFileInputFormat'),
            ('OutputFormat',
             'org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'),
            ('Partition Provider', 'Catalog')
        ]

        input_cols = [
            Row(keys=['col_name', 'data_type'], values=r) for r in plain_rows
        ]

        config = self._get_target_http(self.project_cfg)
        rows = SparkAdapter(config).parse_describe_extended(
            relation, input_cols)
        self.assertEqual(len(rows), 1)
        self.assertEqual(
            rows[0].to_dict(), {
                'table_database': relation.database,
                'table_schema': relation.schema,
                'table_name': relation.name,
                'table_type': rel_type,
                'table_owner': 'root',
                'column': 'col1',
                'column_name': 'col1',
                'column_index': 0,
                'dtype': 'decimal(22,0)',
                'numeric_scale': None,
                'numeric_precision': None,
                'char_size': None,
                'stats:bytes:description': '',
                'stats:bytes:include': True,
                'stats:bytes:label': 'bytes',
                'stats:bytes:value': 1109049927,
                'stats:rows:description': '',
                'stats:rows:include': True,
                'stats:rows:label': 'rows',
                'stats:rows:value': 14093476,
            })
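
The stats:* keys asserted above are derived from the single 'Statistics' metadata row. A hedged sketch of that derivation; parse_statistics is a hypothetical name for illustration, not dbt-spark's actual helper:

def parse_statistics(raw: str) -> dict:
    # '1109049927 bytes, 14093476 rows' ->
    # {'stats:bytes:value': 1109049927, 'stats:rows:value': 14093476, ...}
    stats = {}
    for part in raw.split(','):
        value, label = part.split()
        stats[f'stats:{label}:label'] = label
        stats[f'stats:{label}:value'] = int(value)
        stats[f'stats:{label}:description'] = ''
        stats[f'stats:{label}:include'] = True
    return stats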
Code Example #3
File: test_adapter.py  Project: franloza/dbt-spark
    def test_parse_relation_with_integer_owner(self):
        self.maxDiff = None
        rel_type = SparkRelation.get_relation_type.Table

        relation = SparkRelation.create(
            schema='default_schema',
            identifier='mytable',
            type=rel_type
        )
        assert relation.database is None

        # Mimics the output of Spark with a DESCRIBE TABLE EXTENDED
        plain_rows = [
            ('col1', 'decimal(22,0)'),
            ('# Detailed Table Information', None),
            ('Owner', 1234)
        ]

        input_cols = [Row(keys=['col_name', 'data_type'], values=r)
                      for r in plain_rows]

        config = self._get_target_http(self.project_cfg)
        rows = SparkAdapter(config).parse_describe_extended(
            relation, input_cols)

        self.assertEqual(rows[0].to_column_dict().get('table_owner'), '1234')
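
The assertion implies the adapter coerces a non-string Owner (here the integer 1234) to a string. A one-line hedged sketch; normalize_owner is a hypothetical name:

def normalize_owner(raw_owner) -> str:
    # Spark can report the owner as an int (e.g. a uid); dbt expects str.
    return str(raw_owner)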
Code Example #4
File: test_adapter.py  Project: datasharkNL/dbt-spark
    def test_parse_relation(self):
        self.maxDiff = None
        rel_type = SparkRelation.get_relation_type.Table

        relation = SparkRelation.create(schema='default_schema',
                                        identifier='mytable',
                                        type=rel_type)
        assert relation.database is None

        # Mimics the output of Spark with a DESCRIBE TABLE EXTENDED
        plain_rows = [
            ('col1', 'decimal(22,0)'),
            ('col2', 'string'),
            ('dt', 'date'),
            ('# Partition Information', 'data_type'),
            ('# col_name', 'data_type'),
            ('dt', 'date'),
            (None, None),
            ('# Detailed Table Information', None),
            ('Database', None),
            ('Owner', 'root'),
            ('Created Time', 'Wed Feb 04 18:15:00 UTC 1815'),
            ('Last Access', 'Wed May 20 19:25:00 UTC 1925'),
            ('Type', 'MANAGED'),
            ('Provider', 'delta'),
            ('Location', '/mnt/vo'),
            ('Serde Library',
             'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'),
            ('InputFormat',
             'org.apache.hadoop.mapred.SequenceFileInputFormat'),
            ('OutputFormat',
             'org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'),
            ('Partition Provider', 'Catalog')
        ]

        input_cols = [
            Row(keys=['col_name', 'data_type'], values=r) for r in plain_rows
        ]

        config = self._get_target_http(self.project_cfg)
        rows = SparkAdapter(config).parse_describe_extended(
            relation, input_cols)
        self.assertEqual(len(rows), 3)
        self.assertEqual(
            rows[0].to_dict(omit_none=False), {
                'table_database': None,
                'table_schema': relation.schema,
                'table_name': relation.name,
                'table_type': rel_type,
                'table_owner': 'root',
                'column': 'col1',
                'column_index': 0,
                'dtype': 'decimal(22,0)',
                'numeric_scale': None,
                'numeric_precision': None,
                'char_size': None
            })

        self.assertEqual(
            rows[1].to_dict(omit_none=False), {
                'table_database': None,
                'table_schema': relation.schema,
                'table_name': relation.name,
                'table_type': rel_type,
                'table_owner': 'root',
                'column': 'col2',
                'column_index': 1,
                'dtype': 'string',
                'numeric_scale': None,
                'numeric_precision': None,
                'char_size': None
            })

        self.assertEqual(
            rows[2].to_dict(omit_none=False), {
                'table_database': None,
                'table_schema': relation.schema,
                'table_name': relation.name,
                'table_type': rel_type,
                'table_owner': 'root',
                'column': 'dt',
                'column_index': 2,
                'dtype': 'date',
                'numeric_scale': None,
                'numeric_precision': None,
                'char_size': None
            })
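
parse_describe_extended has to split the leading column rows from the partition listing and from the key/value pairs after '# Detailed Table Information'. A minimal sketch of that split, under the assumption that the first '#' or blank row ends the column listing (an illustration, not the adapter's actual code):

def split_describe_extended(rows):
    # rows: (col_name, data_type) pairs from DESCRIBE TABLE EXTENDED.
    columns, metadata = [], {}
    section = 'columns'
    for name, value in rows:
        if name == '# Detailed Table Information':
            section = 'metadata'
        elif not name or name.startswith('#'):
            # '# Partition Information' ends the column listing; the
            # repeated partition columns below it are skipped.
            if section == 'columns':
                section = 'skip'
        elif section == 'columns':
            columns.append((name, value))
        elif section == 'metadata':
            metadata[name] = value
    return columns, metadata

On the plain_rows above this yields the three columns (col1, col2, dt) and metadata with Owner 'root', matching the assertions.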
Code Example #5
File: impl.py  Project: silentsokolov/dbt-clickhouse
import agate
import dbt.exceptions


def _expect_row_value(key: str, row: agate.Row):
    # Same guard as the dbt-spark variant in Code Example #1, written with
    # an f-string and the fully qualified exception class.
    if key not in row.keys():
        raise dbt.exceptions.InternalException(
            f'Got a row without \'{key}\' column, columns: {row.keys()}')

    return row[key]
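
A hedged sketch of the failure branch, for illustration only:

import agate

row = agate.Row(values=['events'], keys=['name'])
_expect_row_value('name', row)  # -> 'events'
_expect_row_value('uuid', row)  # raises InternalException with the columns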