def test_nested_schema_flattening(self):
        """Loading nested JSON objects with flattening and not not flattening"""
        tap_lines = test_utils.get_test_tap_lines('messages-with-nested-schema.json')

        # Turning on data flattening
        self.config['data_flattening_max_level'] = 10

        # Load with flattening enabled (data_flattening_max_level=10)
        self.persist_lines_with_cache(tap_lines)

        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get('default_target_schema', '')
        flattened_table = snowflake.query(
            "SELECT * FROM {}.test_table_nested_schema ORDER BY c_pk".format(target_schema))

        # Should be flattened columns
        self.assertEqual(
            flattened_table,
            [{
                'C_PK': 1,
                'C_ARRAY': '[\n  1,\n  2,\n  3\n]',
                'C_OBJECT': None,
                # Cannot map RECORD to SCHEMA. The SCHEMA message doesn't have the properties required for flattening
                'C_OBJECT_WITH_PROPS__KEY_1': 'value_1',
                'C_NESTED_OBJECT__NESTED_PROP_1': 'nested_value_1',
                'C_NESTED_OBJECT__NESTED_PROP_2': 'nested_value_2',
                'C_NESTED_OBJECT__NESTED_PROP_3__MULTI_NESTED_PROP_1': 'multi_value_1',
                'C_NESTED_OBJECT__NESTED_PROP_3__MULTI_NESTED_PROP_2': 'multi_value_2',
            }])
    def test_nested_schema_unflattening(self):
        """Loading nested JSON objects into VARIANT columns without flattening"""
        tap_lines = test_utils.get_test_tap_lines('messages-with-nested-schema.json')

        # Load with default settings - Flattening disabled
        self.persist_lines_with_cache(tap_lines)

        # Get loaded rows from tables - Transform JSON to string at query time
        snowflake = DbSync(self.config)
        target_schema = self.config.get('default_target_schema', '')
        unflattened_table = snowflake.query("""
            SELECT c_pk
                  ,TO_CHAR(c_array) c_array
                  ,TO_CHAR(c_object) c_object
                  ,TO_CHAR(c_object_with_props) c_object_with_props
                  ,TO_CHAR(c_nested_object) c_nested_object
              FROM {}.test_table_nested_schema
             ORDER BY c_pk""".format(target_schema))

        # Should be valid nested JSON strings
        self.assertEqual(
            unflattened_table,
            [{
                'C_PK': 1,
                'C_ARRAY': '[1,2,3]',
                'C_OBJECT': '{"key_1":"value_1"}',
                'C_OBJECT_WITH_PROPS': '{"key_1":"value_1"}',
                'C_NESTED_OBJECT': '{"nested_prop_1":"nested_value_1","nested_prop_2":"nested_value_2","nested_prop_3":{"multi_nested_prop_1":"multi_value_1","multi_nested_prop_2":"multi_value_2"}}'
            }])
    def test_information_schema_cache_outdated(self):
        """If informations schema cache is not up to date then it should fail"""
        tap_lines_with_multi_streams = test_utils.get_test_tap_lines(
            "messages-with-three-streams.json")

        # 1) Simulate an out-of-date cache:
        # Table is in the cache but does not exist in the database
        snowflake = DbSync(self.config)
        target_schema = self.config.get("default_target_schema", "").upper()
        snowflake.query("""
            CREATE TABLE IF NOT EXISTS {}.columns (table_schema VARCHAR, table_name VARCHAR, column_name VARCHAR, data_type VARCHAR)
        """.format(snowflake.pipelinewise_schema))
        snowflake.query("""
            INSERT INTO {0}.columns (table_schema, table_name, column_name, data_type)
            SELECT '{1}', 'TEST_TABLE_ONE', 'DUMMY_COLUMN_1', 'TEXT' UNION
            SELECT '{1}', 'TEST_TABLE_ONE', 'DUMMY_COLUMN_2', 'TEXT' UNION
            SELECT '{1}', 'TEST_TABLE_TWO', 'DUMMY_COLUMN_3', 'TEXT'
        """.format(snowflake.pipelinewise_schema, target_schema))

        # Loading into an outdated information_schema cache should fail because the table does not exist
        with self.assertRaises(Exception):
            self.persist_lines_with_cache(tap_lines_with_multi_streams)

        # 2) Simulate an out-of-date cache:
        # Table is in the cache but its structure is not in sync with the actual table in the database
        snowflake.query("CREATE SCHEMA IF NOT EXISTS {}".format(target_schema))
        snowflake.query(
            "CREATE OR REPLACE TABLE {}.test_table_one (C_PK NUMBER, C_INT NUMBER, C_VARCHAR TEXT)"
            .format(target_schema))

        # Loading into an outdated information_schema cache should fail with a column-already-exists error:
        # it tries to add the new column based on the cached values, but the column already exists in the table
        with self.assertRaises(Exception):
            self.persist_lines_with_cache(tap_lines_with_multi_streams)
    def test_loading_unicode_characters(self):
        """Loading unicode encoded characters"""
        tap_lines = test_utils.get_test_tap_lines(
            "messages-with-unicode-characters.json")

        # Load with default settings
        self.persist_lines_with_cache(tap_lines)

        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get("default_target_schema", "")
        table_unicode = snowflake.query(
            "SELECT * FROM {}.test_table_unicode ORDER BY C_INT".format(
                target_schema))

        self.assertEqual(
            table_unicode,
            [
                {
                    "C_INT": 1,
                    "C_PK": 1,
                    "C_VARCHAR": "Hello world, Καλημέρα κόσμε, コンニチハ"
                },
                {
                    "C_INT": 2,
                    "C_PK": 2,
                    "C_VARCHAR": "Chinese: 和毛泽东 <<重上井冈山>>. 严永欣, 一九八八年."
                },
                {
                    "C_INT": 3,
                    "C_PK": 3,
                    "C_VARCHAR": "Russian: Зарегистрируйтесь сейчас на Десятую Международную Конференцию по",
                },
                {
                    "C_INT": 4,
                    "C_PK": 4,
                    "C_VARCHAR": "Thai: แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช"
                },
                {
                    "C_INT": 5,
                    "C_PK": 5,
                    "C_VARCHAR": "Arabic: لقد لعبت أنت وأصدقاؤك لمدة وحصلتم علي من إجمالي النقاط",
                },
                {
                    "C_INT": 6,
                    "C_PK": 6,
                    "C_VARCHAR": "Special Characters: [\",'!@£$%^&*()]"
                },
            ],
        )
    def test_column_name_change(self):
        """Tests correct renaming of snowflake columns after source change"""
        tap_lines_before_column_name_change = test_utils.get_test_tap_lines('messages-with-three-streams.json')
        tap_lines_after_column_name_change = test_utils.get_test_tap_lines(
            'messages-with-three-streams-modified-column.json')

        # Load with default settings
        self.persist_lines_with_cache(tap_lines_before_column_name_change)
        self.persist_lines_with_cache(tap_lines_after_column_name_change)

        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get('default_target_schema', '')
        table_one = snowflake.query("SELECT * FROM {}.test_table_one ORDER BY c_pk".format(target_schema))
        table_two = snowflake.query("SELECT * FROM {}.test_table_two ORDER BY c_pk".format(target_schema))
        table_three = snowflake.query("SELECT * FROM {}.test_table_three ORDER BY c_pk".format(target_schema))

        # Get the previous column name from information schema in test_table_two
        previous_column_name = snowflake.query("""
            SELECT column_name
              FROM information_schema.columns
             WHERE table_catalog = '{}'
               AND table_schema = '{}'
               AND table_name = 'TEST_TABLE_TWO'
               AND ordinal_position = 1
            """.format(
            self.config.get('dbname', '').upper(),
            target_schema.upper()))[0]["COLUMN_NAME"]

        # Table one should have no changes
        self.assertEqual(
            table_one,
            [{'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1'}])

        # Table two should have versioned column
        self.assertEqual(
            table_two,
            [
                {previous_column_name: datetime.datetime(2019, 2, 1, 15, 12, 45), 'C_INT': 1, 'C_PK': 1,
                 'C_VARCHAR': '1', 'C_DATE': None},
                {previous_column_name: datetime.datetime(2019, 2, 10, 2), 'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2',
                 'C_DATE': '2019-02-12 02:00:00'},
                {previous_column_name: None, 'C_INT': 3, 'C_PK': 3, 'C_VARCHAR': '2', 'C_DATE': '2019-02-15 02:00:00'}
            ]
        )

        # Table three should have renamed columns
        self.assertEqual(
            table_three,
            [
                {'C_INT': 1, 'C_PK': 1, 'C_TIME': datetime.time(4, 0), 'C_VARCHAR': '1', 'C_TIME_RENAMED': None},
                {'C_INT': 2, 'C_PK': 2, 'C_TIME': datetime.time(7, 15), 'C_VARCHAR': '2', 'C_TIME_RENAMED': None},
                {'C_INT': 3, 'C_PK': 3, 'C_TIME': datetime.time(23, 0, 3), 'C_VARCHAR': '3',
                 'C_TIME_RENAMED': datetime.time(8, 15)},
                {'C_INT': 4, 'C_PK': 4, 'C_TIME': None, 'C_VARCHAR': '4', 'C_TIME_RENAMED': datetime.time(23, 0, 3)}
            ])
Example #6
def load_table_cache(config):
    table_cache = []
    if not ('disable_table_cache' in config and config['disable_table_cache']):
        LOGGER.info("Getting catalog objects from table cache...")

        db = DbSync(config)
        table_cache = db.get_table_columns(
            table_schemas=get_schema_names_from_config(config))

    return table_cache
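
A minimal usage sketch (a hedged assumption, not part of the snippet above): `config` is the already-loaded configuration dict and `singer_messages` a hypothetical iterator of tap lines; the cache is built once and handed to the `persist_lines(config, lines, table_cache=None)` variant shown in a later example.

# Hedged usage sketch; 'config' and 'singer_messages' are assumed to exist already
table_cache = load_table_cache(config)
persist_lines(config, singer_messages, table_cache)
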
Example #7
def load_information_schema_cache(config):
    information_schema_cache = []
    if not ('disable_table_cache' in config and config['disable_table_cache']):
        logger.info("Getting catalog objects from information_schema cache table...")

        db = DbSync(config)
        information_schema_cache = db.get_table_columns(
            table_schemas=get_schema_names_from_config(config),
            from_information_schema_cache_table=True)

    return information_schema_cache
    def test_non_db_friendly_columns(self):
        """Loading non-db friendly columns like, camelcase, minus signs, etc."""
        tap_lines = test_utils.get_test_tap_lines(
            "messages-with-non-db-friendly-columns.json")

        # Load with default settings
        self.persist_lines_with_cache(tap_lines)

        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get("default_target_schema", "")
        table_non_db_friendly_columns = snowflake.query(
            "SELECT * FROM {}.test_table_non_db_friendly_columns ORDER BY c_pk"
            .format(target_schema))

        self.assertEqual(
            table_non_db_friendly_columns,
            [
                {
                    "C_PK": 1,
                    "CAMELCASECOLUMN": "Dummy row 1",
                    "MINUS-COLUMN": "Dummy row 1"
                },
                {
                    "C_PK": 2,
                    "CAMELCASECOLUMN": "Dummy row 2",
                    "MINUS-COLUMN": "Dummy row 2"
                },
                {
                    "C_PK": 3,
                    "CAMELCASECOLUMN": "Dummy row 3",
                    "MINUS-COLUMN": "Dummy row 3"
                },
                {
                    "C_PK": 4,
                    "CAMELCASECOLUMN": "Dummy row 4",
                    "MINUS-COLUMN": "Dummy row 4"
                },
                {
                    "C_PK": 5,
                    "CAMELCASECOLUMN": "Dummy row 5",
                    "MINUS-COLUMN": "Dummy row 5"
                },
            ],
        )
Example #9
    def test_loading_unicode_characters(self):
        """Loading unicode encoded characters"""
        tap_lines = test_utils.get_test_tap_lines(
            'messages-with-unicode-characters.json')

        # Load with default settings
        target_snowflake.persist_lines(self.config, tap_lines)

        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get('schema', '')
        table_unicode = snowflake.query(
            "SELECT * FROM {}.test_table_unicode".format(target_schema))

        self.assertEqual(table_unicode, [
            {
                'C_INT': 1,
                'C_PK': 1,
                'C_VARCHAR': 'Hello world, Καλημέρα κόσμε, コンニチハ'
            },
            {
                'C_INT': 2,
                'C_PK': 2,
                'C_VARCHAR': 'Chinese: 和毛泽东 <<重上井冈山>>. 严永欣, 一九八八年.'
            },
            {
                'C_INT': 3,
                'C_PK': 3,
                'C_VARCHAR': 'Russian: Зарегистрируйтесь сейчас на Десятую Международную Конференцию по'
            },
            {
                'C_INT': 4,
                'C_PK': 4,
                'C_VARCHAR': 'Thai: แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช'
            },
            {
                'C_INT': 5,
                'C_PK': 5,
                'C_VARCHAR': 'Arabic: لقد لعبت أنت وأصدقاؤك لمدة وحصلتم علي من إجمالي النقاط'
            },
            {
                'C_INT': 6,
                'C_PK': 6,
                'C_VARCHAR': 'Special Characters: [",\'!@£$%^&*()]'
            }
        ])
Example #10
def flush_records(stream: str,
                  records: List[Dict],
                  db_sync: DbSync,
                  temp_dir: str = None,
                  no_compression: bool = False) -> None:
    """
    Takes a list of record messages and loads them into the Snowflake target table

    Args:
        stream: Name of the stream
        records: List of dictionaries, each representing one CSV line. Dict keys are the column names, values are the
                 column values
        db_sync: A DbSync object
        temp_dir: Directory where intermediate temporary files will be created. (Default: OS-specific temp directory)
        no_compression: Do not use compressed files. (Default: False)

    Returns:
        None
    """
    # Generate file on disk in the required format
    filepath = db_sync.file_format.formatter.records_to_file(
        records,
        db_sync.flatten_schema,
        compression=not no_compression,
        dest_dir=temp_dir,
        data_flattening_max_level=db_sync.data_flattening_max_level)

    # Get file stats
    row_count = len(records)
    size_bytes = os.path.getsize(filepath)

    # Upload to s3 and load into Snowflake
    s3_key = db_sync.put_to_stage(filepath,
                                  stream,
                                  row_count,
                                  temp_dir=temp_dir)
    db_sync.load_file(s3_key, row_count, size_bytes)

    # Delete file from local disk and from s3
    os.remove(filepath)
    db_sync.delete_from_stage(stream, s3_key)
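
For context, a hedged sketch of a single call to flush_records; the stream name, the records and the DbSync construction (following the (config, schema_message) pattern used elsewhere in these examples) are illustrative placeholders.

# Hedged example call; 'my_stream', 'schema_message' and 'config' are hypothetical placeholders
records = [
    {'C_PK': 1, 'C_VARCHAR': 'row 1'},
    {'C_PK': 2, 'C_VARCHAR': 'row 2'},
]
db_sync = DbSync(config, schema_message)  # DbSync initialised from the stream's SCHEMA message
flush_records('my_stream', records, db_sync, temp_dir='/tmp', no_compression=False)
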
def get_snowflake_statics(config):
    """Retrieve common Snowflake items will be used multiple times

    Params:
        config: configuration dictionary

    Returns:
        tuple of retrieved items: table_cache, file_format_type
    """
    table_cache = []
    # Create DbSync unconditionally: file_format_type below needs it even when the table cache is disabled
    db = DbSync(config)  # pylint: disable=invalid-name
    if not ('disable_table_cache' in config and config['disable_table_cache']):
        LOGGER.info('Getting catalog objects from table cache...')

        table_cache = db.get_table_columns(
            table_schemas=stream_utils.get_schema_names_from_config(config))

    # The file format is detected at DbSync init time
    file_format_type = db.file_format.file_format_type

    return table_cache, file_format_type
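
A short usage sketch; only the shape of the returned tuple comes from the function above, the log line is an assumption.

# Hedged usage sketch
table_cache, file_format_type = get_snowflake_statics(config)
LOGGER.info('Detected file format type: %s', file_format_type)
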
    def test_non_db_friendly_columns(self):
        """Loading non-db friendly columns like, camelcase, minus signs, etc."""
        tap_lines = test_utils.get_test_tap_lines('messages-with-non-db-friendly-columns.json')

        # Load with default settings
        self.persist_lines_with_cache(tap_lines)

        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get('default_target_schema', '')
        table_non_db_friendly_columns = snowflake.query(
            "SELECT * FROM {}.test_table_non_db_friendly_columns ORDER BY c_pk".format(target_schema))

        self.assertEqual(
            table_non_db_friendly_columns,
            [
                {'C_PK': 1, 'CAMELCASECOLUMN': 'Dummy row 1', 'MINUS-COLUMN': 'Dummy row 1'},
                {'C_PK': 2, 'CAMELCASECOLUMN': 'Dummy row 2', 'MINUS-COLUMN': 'Dummy row 2'},
                {'C_PK': 3, 'CAMELCASECOLUMN': 'Dummy row 3', 'MINUS-COLUMN': 'Dummy row 3'},
                {'C_PK': 4, 'CAMELCASECOLUMN': 'Dummy row 4', 'MINUS-COLUMN': 'Dummy row 4'},
                {'C_PK': 5, 'CAMELCASECOLUMN': 'Dummy row 5', 'MINUS-COLUMN': 'Dummy row 5'},
            ])
    def setUp(self):
        self.config = test_utils.get_test_config()
        snowflake = DbSync(self.config)

        # Drop target schema
        if self.config['default_target_schema']:
            snowflake.query("DROP SCHEMA IF EXISTS {}".format(
                self.config['default_target_schema']))

        # Drop pipelinewise schema with information_schema cache
        if self.config['stage']:
            snowflake.query("DROP TABLE IF EXISTS {}.columns".format(
                snowflake.pipelinewise_schema))
    def setUp(self):
        self.config = test_utils.get_test_config()
        snowflake = DbSync(self.config)

        # Drop target schema
        if self.config['default_target_schema']:
            snowflake.query("DROP SCHEMA IF EXISTS {}".format(self.config['default_target_schema']))

        # Delete target schema entries from PIPELINEWISE.COLUMNS
        if self.config['stage']:
            snowflake.query("DELETE FROM {}.columns WHERE TABLE_SCHEMA ilike '{}'".format(snowflake.pipelinewise_schema, self.config['default_target_schema']))
Example #15
def persist_lines(config, lines, table_cache=None) -> None:
    state = None
    flushed_state = None
    schemas = {}
    key_properties = {}
    validators = {}
    records_to_load = {}
    row_count = {}
    stream_to_sync = {}
    total_row_count = {}
    batch_size_rows = config.get('batch_size_rows', DEFAULT_BATCH_SIZE_ROWS)

    # Loop over lines from stdin
    for line in lines:
        try:
            o = json.loads(line)
        except json.decoder.JSONDecodeError:
            LOGGER.error("Unable to parse:\n{}".format(line))
            raise

        if 'type' not in o:
            raise Exception(
                "Line is missing required key 'type': {}".format(line))

        t = o['type']

        if t == 'RECORD':
            if 'stream' not in o:
                raise Exception(
                    "Line is missing required key 'stream': {}".format(line))
            if o['stream'] not in schemas:
                raise Exception(
                    "A record for stream {} was encountered before a corresponding schema"
                    .format(o['stream']))

            # Get schema for this record's stream
            stream = o['stream']

            adjust_timestamps_in_record(o['record'], schemas[stream])

            # Validate record
            if config.get('validate_records'):
                try:
                    validators[stream].validate(float_to_decimal(o['record']))
                except Exception as ex:
                    if type(ex).__name__ == "InvalidOperation":
                        raise InvalidValidationOperationException(
                            f"Data validation failed and cannot load to destination. RECORD: {o['record']}\n"
                            "multipleOf validations that allows long precisions are not supported (i.e. with 15 digits"
                            "or more) Try removing 'multipleOf' methods from JSON schema."
                        )
                    raise RecordValidationException(
                        f"Record does not pass schema validation. RECORD: {o['record']}"
                    )

            primary_key_string = stream_to_sync[
                stream].record_primary_key_string(o['record'])
            if not primary_key_string:
                primary_key_string = 'RID-{}'.format(total_row_count[stream])

            if stream not in records_to_load:
                records_to_load[stream] = {}

            # increment row count only when a new PK is encountered in the current batch
            if primary_key_string not in records_to_load[stream]:
                row_count[stream] += 1
                total_row_count[stream] += 1

            # append record
            if config.get('add_metadata_columns') or config.get('hard_delete'):
                records_to_load[stream][
                    primary_key_string] = add_metadata_values_to_record(
                        o, stream_to_sync[stream])
            else:
                records_to_load[stream][primary_key_string] = o['record']

            if row_count[stream] >= batch_size_rows:
                # flush all streams, delete records if needed, reset counts and then emit current state
                if config.get('flush_all_streams'):
                    filter_streams = None
                else:
                    filter_streams = [stream]

                # Flush and return a new state dict with new positions only for the flushed streams
                flushed_state = flush_streams(records_to_load,
                                              row_count,
                                              stream_to_sync,
                                              config,
                                              state,
                                              flushed_state,
                                              filter_streams=filter_streams)

                # emit last encountered state
                emit_state(copy.deepcopy(flushed_state))

        elif t == 'SCHEMA':
            if 'stream' not in o:
                raise Exception(
                    "Line is missing required key 'stream': {}".format(line))

            stream = o['stream']
            new_schema = float_to_decimal(o['schema'])

            # Update and flush only if the schema is new or different than
            # the previously used version of the schema
            if stream not in schemas or schemas[stream] != new_schema:

                schemas[stream] = new_schema
                validators[stream] = Draft7Validator(
                    schemas[stream], format_checker=FormatChecker())

                # flush records from previous stream SCHEMA
                # if same stream has been encountered again, it means the schema might have been altered
                # so previous records need to be flushed
                if row_count.get(stream, 0) > 0:
                    flushed_state = flush_streams(records_to_load, row_count,
                                                  stream_to_sync, config,
                                                  state, flushed_state)

                    # emit latest encountered state
                    emit_state(flushed_state)

                # key_properties key must be available in the SCHEMA message.
                if 'key_properties' not in o:
                    raise Exception("key_properties field is required")

                # Log based and Incremental replications on tables with no Primary Key
                # cause duplicates when merging UPDATE events.
                # Stop loading data by default if no Primary Key.
                #
                # If you want to load tables with no Primary Key:
                #  1) Set ` 'primary_key_required': false ` in the target-snowflake config.json
                #  or
                #  2) Use fastsync [postgres-to-snowflake, mysql-to-snowflake, etc.]
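                #
                # Illustrative (hypothetical) config.json fragment for option 1:
                #   {"primary_key_required": false}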
                if config.get('primary_key_required', True) and len(
                        o['key_properties']) == 0:
                    LOGGER.critical(
                        "Primary key is set to mandatory but not defined in the [{}] stream"
                        .format(stream))
                    raise Exception("key_properties field is required")

                key_properties[stream] = o['key_properties']

                if config.get('add_metadata_columns') or config.get(
                        'hard_delete'):
                    stream_to_sync[stream] = DbSync(
                        config, add_metadata_columns_to_schema(o), table_cache)
                else:
                    stream_to_sync[stream] = DbSync(config, o, table_cache)

                stream_to_sync[stream].create_schema_if_not_exists()
                stream_to_sync[stream].sync_table()

                row_count[stream] = 0
                total_row_count[stream] = 0

        elif t == 'ACTIVATE_VERSION':
            LOGGER.debug('ACTIVATE_VERSION message')

        elif t == 'STATE':
            LOGGER.debug('Setting state to {}'.format(o['value']))
            state = o['value']

            # Initially set flushed state
            if not flushed_state:
                flushed_state = copy.deepcopy(state)

        else:
            raise Exception("Unknown message type {} in message {}".format(
                o['type'], o))

    # if some bucket has records that need to be flushed but haven't reached batch size
    # then flush all buckets.
    if sum(row_count.values()) > 0:
        # flush all streams one last time, delete records if needed, reset counts and then emit current state
        flushed_state = flush_streams(records_to_load, row_count,
                                      stream_to_sync, config, state,
                                      flushed_state)

    # emit latest state
    emit_state(copy.deepcopy(flushed_state))
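
A hedged end-to-end sketch of driving this persist_lines variant the way a Singer target is usually run; the argument parsing, the -c/--config flag and the main() wrapper are illustrative assumptions, only persist_lines and load_table_cache come from the examples above.

import argparse
import io
import json
import sys

def main():
    # Hedged sketch: read the config path from the CLI, build the optional table
    # cache, then stream singer messages from stdin into persist_lines
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', required=True, help='Path to config.json')
    args = parser.parse_args()

    with open(args.config, encoding='utf-8') as config_file:
        config = json.load(config_file)

    table_cache = load_table_cache(config)
    singer_messages = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    persist_lines(config, singer_messages, table_cache)

if __name__ == '__main__':
    main()
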
    def assert_three_streams_are_into_snowflake(
            self,
            should_metadata_columns_exist=False,
            should_hard_deleted_rows=False):
        """
        This is a helper assertion that checks that all data from the messages-with-three-streams.json
        file has been loaded into the Snowflake tables correctly.
        Useful to check different loading methods (unencrypted, Client-Side encryption, gzip, etc.)
        without duplicating assertions
        """
        snowflake = DbSync(self.config)
        default_target_schema = self.config.get("default_target_schema", "")
        schema_mapping = self.config.get("schema_mapping", {})

        # Identify target schema name
        target_schema = None
        if default_target_schema is not None and default_target_schema.strip():
            target_schema = default_target_schema
        elif schema_mapping:
            target_schema = "tap_mysql_test"

        # Get loaded rows from tables
        table_one = snowflake.query(
            "SELECT * FROM {}.test_table_one ORDER BY c_pk".format(
                target_schema))
        table_two = snowflake.query(
            "SELECT * FROM {}.test_table_two ORDER BY c_pk".format(
                target_schema))
        table_three = snowflake.query(
            "SELECT * FROM {}.test_table_three ORDER BY c_pk".format(
                target_schema))

        # ----------------------------------------------------------------------
        # Check rows in table_one
        # ----------------------------------------------------------------------
        expected_table_one = [{"C_INT": 1, "C_PK": 1, "C_VARCHAR": "1"}]

        self.assertEqual(self.remove_metadata_columns_from_rows(table_one),
                         expected_table_one)

        # ----------------------------------------------------------------------
        # Check rows in table_two
        # ----------------------------------------------------------------------
        expected_table_two = []
        if not should_hard_deleted_rows:
            expected_table_two = [
                {
                    "C_INT": 1,
                    "C_PK": 1,
                    "C_VARCHAR": "1",
                    "C_DATE": datetime.datetime(2019, 2, 1, 15, 12, 45),
                },
                {
                    "C_INT": 2,
                    "C_PK": 2,
                    "C_VARCHAR": "2",
                    "C_DATE": datetime.datetime(2019, 2, 10, 2, 0, 0),
                },
            ]
        else:
            expected_table_two = [{
                "C_INT": 2,
                "C_PK": 2,
                "C_VARCHAR": "2",
                "C_DATE": datetime.datetime(2019, 2, 10, 2, 0, 0),
            }]

        self.assertEqual(self.remove_metadata_columns_from_rows(table_two),
                         expected_table_two)

        # ----------------------------------------------------------------------
        # Check rows in table_three
        # ----------------------------------------------------------------------
        expected_table_three = []
        if not should_hard_deleted_rows:
            expected_table_three = [
                {
                    "C_INT": 1,
                    "C_PK": 1,
                    "C_VARCHAR": "1",
                    "C_TIME": datetime.time(4, 0, 0)
                },
                {
                    "C_INT": 2,
                    "C_PK": 2,
                    "C_VARCHAR": "2",
                    "C_TIME": datetime.time(7, 15, 0)
                },
                {
                    "C_INT": 3,
                    "C_PK": 3,
                    "C_VARCHAR": "3",
                    "C_TIME": datetime.time(23, 0, 3)
                },
            ]
        else:
            expected_table_three = [
                {
                    "C_INT": 1,
                    "C_PK": 1,
                    "C_VARCHAR": "1",
                    "C_TIME": datetime.time(4, 0, 0)
                },
                {
                    "C_INT": 2,
                    "C_PK": 2,
                    "C_VARCHAR": "2",
                    "C_TIME": datetime.time(7, 15, 0)
                },
            ]

        self.assertEqual(self.remove_metadata_columns_from_rows(table_three),
                         expected_table_three)

        # ----------------------------------------------------------------------
        # Check if metadata columns exist or not
        # ----------------------------------------------------------------------
        if should_metadata_columns_exist:
            self.assert_metadata_columns_exist(table_one)
            self.assert_metadata_columns_exist(table_two)
            self.assert_metadata_columns_exist(table_three)
        else:
            self.assert_metadata_columns_not_exist(table_one)
            self.assert_metadata_columns_not_exist(table_two)
            self.assert_metadata_columns_not_exist(table_three)
    def test_information_schema_cache_create_and_update(self):
        """Newly created and altered tables must be cached automatically for later use.

        The information_schema_columns cache is a copy of the Snowflake INFORMATION_SCHEMA.COLUMNS table, kept to avoid the error of
        'Information schema query returned too much data. Please repeat query with more selective predicates.'.
        """
        tap_lines_before_column_name_change = test_utils.get_test_tap_lines(
            "messages-with-three-streams.json")
        tap_lines_after_column_name_change = test_utils.get_test_tap_lines(
            "messages-with-three-streams-modified-column.json")

        # Load with default settings
        self.persist_lines_with_cache(tap_lines_before_column_name_change)
        self.persist_lines_with_cache(tap_lines_after_column_name_change)

        # Get data from the information_schema cache table
        snowflake = DbSync(self.config)
        target_schema = self.config.get("default_target_schema", "")
        information_schema_cache = snowflake.query(
            "SELECT * FROM {}.columns ORDER BY table_schema, table_name, column_name"
            .format(snowflake.pipelinewise_schema))

        # Get the previous column name from information schema in test_table_two
        previous_column_name = snowflake.query("""
            SELECT column_name
              FROM information_schema.columns
             WHERE table_catalog = '{}'
               AND table_schema = '{}'
               AND table_name = 'TEST_TABLE_TWO'
               AND ordinal_position = 1
            """.format(
            self.config.get("dbname", "").upper(),
            target_schema.upper()))[0]["COLUMN_NAME"]

        # Every column has to be in the cached information_schema with the latest versions
        self.assertEqual(
            information_schema_cache,
            [
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_ONE",
                    "COLUMN_NAME": "C_INT",
                    "DATA_TYPE": "NUMBER",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_ONE",
                    "COLUMN_NAME": "C_PK",
                    "DATA_TYPE": "NUMBER",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_ONE",
                    "COLUMN_NAME": "C_VARCHAR",
                    "DATA_TYPE": "TEXT",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_THREE",
                    "COLUMN_NAME": "C_INT",
                    "DATA_TYPE": "NUMBER",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_THREE",
                    "COLUMN_NAME": "C_PK",
                    "DATA_TYPE": "NUMBER",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_THREE",
                    "COLUMN_NAME": "C_TIME",
                    "DATA_TYPE": "TIME",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_THREE",
                    "COLUMN_NAME": "C_TIME_RENAMED",
                    "DATA_TYPE": "TIME",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_THREE",
                    "COLUMN_NAME": "C_VARCHAR",
                    "DATA_TYPE": "TEXT",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_TWO",
                    "COLUMN_NAME": "C_DATE",
                    "DATA_TYPE": "TEXT",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_TWO",
                    "COLUMN_NAME": previous_column_name,
                    "DATA_TYPE": "TIMESTAMP_NTZ",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_TWO",
                    "COLUMN_NAME": "C_INT",
                    "DATA_TYPE": "NUMBER",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_TWO",
                    "COLUMN_NAME": "C_PK",
                    "DATA_TYPE": "NUMBER",
                },
                {
                    "TABLE_SCHEMA": target_schema.upper(),
                    "TABLE_NAME": "TEST_TABLE_TWO",
                    "COLUMN_NAME": "C_VARCHAR",
                    "DATA_TYPE": "TEXT",
                },
            ],
        )
Example #18
def persist_lines(config, lines, information_schema_cache=None) -> None:
    state = None
    schemas = {}
    key_properties = {}
    validators = {}
    records_to_load = {}
    csv_files_to_load = {}
    row_count = {}
    stream_to_sync = {}
    total_row_count = {}
    batch_size_rows = config.get('batch_size_rows', 100000)

    # Loop over lines from stdin
    for line in lines:
        try:
            o = json.loads(line)
        except json.decoder.JSONDecodeError:
            logger.error("Unable to parse:\n{}".format(line))
            raise

        if 'type' not in o:
            raise Exception(
                "Line is missing required key 'type': {}".format(line))

        t = o['type']

        if t == 'RECORD':
            if 'stream' not in o:
                raise Exception(
                    "Line is missing required key 'stream': {}".format(line))
            if o['stream'] not in schemas:
                raise Exception(
                    "A record for stream {} was encountered before a corresponding schema"
                    .format(o['stream']))

            # Get schema for this record's stream
            stream = o['stream']

            # Validate record
            try:
                validators[stream].validate(float_to_decimal(o['record']))
            except Exception as ex:
                if type(ex).__name__ == "InvalidOperation":
                    logger.error(
                        "Data validation failed and cannot load to destination. RECORD: {}\n'multipleOf' validations that allows long precisions are not supported (i.e. with 15 digits or more). Try removing 'multipleOf' methods from JSON schema."
                        .format(o['record']))
                    raise ex

            primary_key_string = stream_to_sync[
                stream].record_primary_key_string(o['record'])
            if not primary_key_string:
                primary_key_string = 'RID-{}'.format(total_row_count[stream])

            if stream not in records_to_load:
                records_to_load[stream] = {}

            # increment row count with every record line
            row_count[stream] += 1
            total_row_count[stream] += 1

            # append record
            if config.get('add_metadata_columns') or config.get('hard_delete'):
                records_to_load[stream][
                    primary_key_string] = add_metadata_values_to_record(
                        o, stream_to_sync[stream])
            else:
                records_to_load[stream][primary_key_string] = o['record']

            if row_count[stream] >= batch_size_rows:
                # flush all streams, delete records if needed, reset counts and then emit current state
                flush_all_streams(records_to_load, row_count, stream_to_sync,
                                  config)
                # emit last encountered state
                emit_state(state)

        elif t == 'SCHEMA':
            if 'stream' not in o:
                raise Exception(
                    "Line is missing required key 'stream': {}".format(line))

            stream = o['stream']

            schemas[stream] = o
            schema = float_to_decimal(o['schema'])
            validators[stream] = Draft4Validator(
                schema, format_checker=FormatChecker())

            # flush records from previous stream SCHEMA
            # if same stream has been encountered again, it means the schema might have been altered
            # so previous records need to be flushed
            if row_count.get(stream, 0) > 0:
                flush_all_streams(records_to_load, row_count, stream_to_sync,
                                  config)
                # emit latest encountered state
                emit_state(state)

            # key_properties key must be available in the SCHEMA message.
            if 'key_properties' not in o:
                raise Exception("key_properties field is required")

            # Log based and Incremental replications on tables with no Primary Key
            # cause duplicates when merging UPDATE events.
            # Stop loading data by default if no Primary Key.
            #
            # If you want to load tables with no Primary Key:
            #  1) Set ` 'primary_key_required': false ` in the target-snowflake config.json
            #  or
            #  2) Use fastsync [postgres-to-snowflake, mysql-to-snowflake, etc.]
            if config.get('primary_key_required', True) and len(
                    o['key_properties']) == 0:
                logger.critical(
                    "Primary key is set to mandatory but not defined in the [{}] stream"
                    .format(stream))
                raise Exception("key_properties field is required")

            key_properties[stream] = o['key_properties']

            if config.get('add_metadata_columns') or config.get('hard_delete'):
                stream_to_sync[stream] = DbSync(
                    config, add_metadata_columns_to_schema(o),
                    information_schema_cache)
            else:
                stream_to_sync[stream] = DbSync(config, o,
                                                information_schema_cache)

            try:
                stream_to_sync[stream].create_schema_if_not_exists()
                stream_to_sync[stream].sync_table()
            except Exception as e:
                logger.error("""
                    Cannot sync table structure in Snowflake schema: {} .
                    Try to delete {}.COLUMNS table to reset information_schema cache. Maybe it's outdated.
                """.format(stream_to_sync[stream].schema_name,
                           stream_to_sync[stream].pipelinewise_schema.upper()))
                raise e

            row_count[stream] = 0
            total_row_count[stream] = 0
            csv_files_to_load[stream] = NamedTemporaryFile(mode='w+b')

        elif t == 'ACTIVATE_VERSION':
            logger.debug('ACTIVATE_VERSION message')

        elif t == 'STATE':
            logger.debug('Setting state to {}'.format(o['value']))
            state = o['value']

        else:
            raise Exception("Unknown message type {} in message {}".format(
                o['type'], o))

    # if some bucket has records that need to be flushed but haven't reached batch size
    # then flush all buckets.
    if sum(row_count.values()) > 0:
        # flush all streams one last time, delete records if needed, reset counts and then emit current state
        flush_all_streams(records_to_load, row_count, stream_to_sync, config)

    # emit latest state
    emit_state(state)
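
For completeness, a hedged sketch of wiring the older cache helper into this persist_lines variant; config and tap_lines are assumed to exist already.

# Hedged usage sketch for the older information_schema cache API
information_schema_cache = load_information_schema_cache(config)
persist_lines(config, tap_lines, information_schema_cache)
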
Example #19
    def assert_three_streams_are_into_snowflake(
            self,
            should_metadata_columns_exist=False,
            should_hard_deleted_rows=False):
        """
        This is a helper assertion that checks that all data from the messages-with-three-streams.json
        file has been loaded into the Snowflake tables correctly.
        Useful to check different loading methods (unencrypted, Client-Side encryption, gzip, etc.)
        without duplicating assertions
        """
        snowflake = DbSync(self.config)
        config_schema = self.config.get('schema', '')
        config_dynamic_schema_name = self.config.get('dynamic_schema_name', '')
        config_dynamic_schema_name_postfix = self.config.get(
            'dynamic_schema_name_postfix', '')

        # Identify target schema name
        target_schema = None
        if config_schema is not None and config_schema.strip():
            target_schema = config_schema
        elif config_dynamic_schema_name:
            target_schema = "tap_mysql_test"
            if config_dynamic_schema_name_postfix:
                target_schema = "{}{}".format(
                    target_schema, config_dynamic_schema_name_postfix)

        # Get loaded rows from tables
        table_one = snowflake.query(
            "SELECT * FROM {}.test_table_one".format(target_schema))
        table_two = snowflake.query(
            "SELECT * FROM {}.test_table_two".format(target_schema))
        table_three = snowflake.query(
            "SELECT * FROM {}.test_table_three".format(target_schema))

        # ----------------------------------------------------------------------
        # Check rows in table_one
        # ----------------------------------------------------------------------
        expected_table_one = [{'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1'}]

        self.assertEqual(self.remove_metadata_columns_from_rows(table_one),
                         expected_table_one)

        # ----------------------------------------------------------------------
        # Check rows in table_two
        # ----------------------------------------------------------------------
        expected_table_two = []
        if not should_hard_deleted_rows:
            expected_table_two = [{
                'C_INT': 1,
                'C_PK': 1,
                'C_VARCHAR': '1',
                'C_DATE': datetime.datetime(2019, 2, 1, 15, 12, 45)
            }, {
                'C_INT': 2,
                'C_PK': 2,
                'C_VARCHAR': '2',
                'C_DATE': datetime.datetime(2019, 2, 10, 2, 0, 0)
            }]
        else:
            expected_table_two = [{
                'C_INT': 2,
                'C_PK': 2,
                'C_VARCHAR': '2',
                'C_DATE': datetime.datetime(2019, 2, 10, 2, 0, 0)
            }]

        self.assertEqual(self.remove_metadata_columns_from_rows(table_two),
                         expected_table_two)

        # ----------------------------------------------------------------------
        # Check rows in table_three
        # ----------------------------------------------------------------------
        expected_table_three = []
        if not should_hard_deleted_rows:
            expected_table_three = [{
                'C_INT': 1,
                'C_PK': 1,
                'C_VARCHAR': '1'
            }, {
                'C_INT': 2,
                'C_PK': 2,
                'C_VARCHAR': '2'
            }, {
                'C_INT': 3,
                'C_PK': 3,
                'C_VARCHAR': '3'
            }]
        else:
            expected_table_three = [{
                'C_INT': 1,
                'C_PK': 1,
                'C_VARCHAR': '1'
            }, {
                'C_INT': 2,
                'C_PK': 2,
                'C_VARCHAR': '2'
            }]

        self.assertEqual(self.remove_metadata_columns_from_rows(table_three),
                         expected_table_three)

        # ----------------------------------------------------------------------
        # Check if metadata columns exist or not
        # ----------------------------------------------------------------------
        if should_metadata_columns_exist:
            self.assert_metadata_columns_exist(table_one)
            self.assert_metadata_columns_exist(table_two)
            self.assert_metadata_columns_exist(table_three)
        else:
            self.assert_metadata_columns_not_exist(table_one)
            self.assert_metadata_columns_not_exist(table_two)
            self.assert_metadata_columns_not_exist(table_three)
    def assert_logical_streams_are_in_snowflake(
            self, should_metadata_columns_exist=False):
        # Get loaded rows from tables
        snowflake = DbSync(self.config)
        target_schema = self.config.get("default_target_schema", "")
        table_one = snowflake.query(
            "SELECT * FROM {}.logical1_table1 ORDER BY CID".format(
                target_schema))
        table_two = snowflake.query(
            "SELECT * FROM {}.logical1_table2 ORDER BY CID".format(
                target_schema))
        table_three = snowflake.query(
            "SELECT * FROM {}.logical2_table1 ORDER BY CID".format(
                target_schema))
        table_four = snowflake.query(
            "SELECT CID, CTIMENTZ, CTIMETZ FROM {}.logical1_edgydata WHERE CID IN(1,2,3,4,5,6,8,9) ORDER BY CID"
            .format(target_schema))

        # ----------------------------------------------------------------------
        # Check rows in table_one
        # ----------------------------------------------------------------------
        expected_table_one = [
            {
                "CID": 1,
                "CVARCHAR": "inserted row",
                "CVARCHAR2": None
            },
            {
                "CID": 2,
                "CVARCHAR": "inserted row",
                "CVARCHAR2": "inserted row"
            },
            {
                "CID": 3,
                "CVARCHAR": "inserted row",
                "CVARCHAR2": "inserted row"
            },
            {
                "CID": 4,
                "CVARCHAR": "inserted row",
                "CVARCHAR2": "inserted row"
            },
        ]

        # ----------------------------------------------------------------------
        # Check rows in table_two
        # ----------------------------------------------------------------------
        expected_table_two = [
            {
                "CID": 1,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 2,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 3,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 5,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 7,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 8,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 9,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 10,
                "CVARCHAR": "updated row"
            },
        ]

        # ----------------------------------------------------------------------
        # Check rows in table_three
        # ----------------------------------------------------------------------
        expected_table_three = [
            {
                "CID": 1,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 2,
                "CVARCHAR": "updated row"
            },
            {
                "CID": 3,
                "CVARCHAR": "updated row"
            },
        ]

        # ----------------------------------------------------------------------
        # Check rows in table_four
        # ----------------------------------------------------------------------
        expected_table_four = [
            {
                "CID": 1,
                "CTIMENTZ": None,
                "CTIMETZ": None
            },
            {
                "CID": 2,
                "CTIMENTZ": datetime.time(23, 0, 15),
                "CTIMETZ": datetime.time(23, 0, 15)
            },
            {
                "CID": 3,
                "CTIMENTZ": datetime.time(12, 0, 15),
                "CTIMETZ": datetime.time(12, 0, 15)
            },
            {
                "CID": 4,
                "CTIMENTZ": datetime.time(12, 0, 15),
                "CTIMETZ": datetime.time(9, 0, 15)
            },
            {
                "CID": 5,
                "CTIMENTZ": datetime.time(12, 0, 15),
                "CTIMETZ": datetime.time(15, 0, 15)
            },
            {
                "CID": 6,
                "CTIMENTZ": datetime.time(0, 0),
                "CTIMETZ": datetime.time(0, 0)
            },
            {
                "CID": 8,
                "CTIMENTZ": datetime.time(0, 0),
                "CTIMETZ": datetime.time(1, 0)
            },
            {
                "CID": 9,
                "CTIMENTZ": datetime.time(0, 0),
                "CTIMETZ": datetime.time(0, 0)
            },
        ]

        if should_metadata_columns_exist:
            self.assertEqual(self.remove_metadata_columns_from_rows(table_one),
                             expected_table_one)
            self.assertEqual(self.remove_metadata_columns_from_rows(table_two),
                             expected_table_two)
            self.assertEqual(
                self.remove_metadata_columns_from_rows(table_three),
                expected_table_three)
            self.assertEqual(table_four, expected_table_four)
        else:
            self.assertEqual(table_one, expected_table_one)
            self.assertEqual(table_two, expected_table_two)
            self.assertEqual(table_three, expected_table_three)
            self.assertEqual(table_four, expected_table_four)
Example #21
    def setUp(self):
        self.config = test_utils.get_test_config()
        snowflake = DbSync(self.config)
        if self.config['schema']:
            snowflake.query("DROP SCHEMA IF EXISTS {}".format(
                self.config['schema']))
def persist_lines(config, lines, table_cache=None) -> None:
    # global variables
    state = None
    flushed_state = None
    schemas = {}
    validators = {}

    batch_size_rows = config.get('batch_size_rows', DEFAULT_BATCH_SIZE_ROWS)
    parallelism = config.get("parallelism", DEFAULT_PARALLELISM)
    max_parallelism = config.get("max_parallelism", DEFAULT_MAX_PARALLELISM)
    # parallelism of 0 means auto parallelism
    # - use as many threads as possible, up to max_parallelism
    if parallelism == 0:
        parallelism = max_parallelism
    elif parallelism == -1:
        raise NotImplementedError("-1 parallelism documented but not implemented.")

    # RECORD variables
    records_to_load = {}
    row_count = {}
    total_row_count = {}
    stream_to_sync = {}
    buckets_to_flush = []

    # BATCH variables
    total_batches = 0
    batch_queue_warm_up = 1
    batches_to_flush = []

    # SCHEMA variables
    key_properties = {}

    # Loop over lines from stdin
    for line in lines:
        o = load_line(line)

        if 'type' not in o:
            raise Exception("Line is missing required key 'type': {}".format(line))

        t = o['type']

        if t == 'STATE':
            LOGGER.debug('Setting state to {}'.format(o['value']))
            state = o['value']

            # Initially set flushed state
            if not flushed_state:
                flushed_state = copy.deepcopy(state)

        elif t == 'SCHEMA':

            utils.check_message_has_stream(line, o)

            stream = o['stream']
            new_schema = utils.float_to_decimal(o['schema'])

            # Update and flush only if the schema is new or different than
            # the previously used version of the schema
            if stream not in schemas or schemas[stream] != new_schema:

                # Save old schema and validator in case we need them to flush records
                old_schema = schemas.get(stream)
                old_validator = validators.get(stream)
                # Update schema and validator
                schemas[stream] = new_schema
                validators[stream] = Draft7Validator(schemas[stream], format_checker=FormatChecker())

                # flush records from previous stream SCHEMA
                # if same stream has been encountered again, it means the schema might have been altered
                # so previous records need to be flushed
                if row_count.get(stream, 0) > 0:
                    _, flushed_state = record_handler.flush_stream_buckets(
                        config=config, schema=old_schema, validator=old_validator,
                        buckets=records_to_load, streams_to_flush=[stream],
                        parallelism=parallelism, row_count=row_count, state=state,
                        flushed_state=flushed_state
                    )

                    # emit latest encountered state
                    emit_state(flushed_state)

                # key_properties key must be available in the SCHEMA message.
                if 'key_properties' not in o:
                    raise Exception("key_properties field is required")

                # Log based and Incremental replications on tables with no Primary Key
                # cause duplicates when merging UPDATE events.
                # Stop loading data by default if no Primary Key.
                #
                # If you want to load tables with no Primary Key:
                #  1) Set ` 'primary_key_required': false ` in the target-snowflake config.json
                #  or
                #  2) Use fastsync [postgres-to-snowflake, mysql-to-snowflake, etc.]
                if config.get('primary_key_required', True) and len(o['key_properties']) == 0:
                    LOGGER.critical("Primary key is set to mandatory but not defined in the [{}] stream".format(stream))
                    raise Exception("key_properties field is required")

                key_properties[stream] = o['key_properties']

                if config.get('add_metadata_columns') or config.get('hard_delete'):
                    stream_to_sync[stream] = DbSync(
                        config, utils.add_metadata_columns_to_schema(o),
                        table_cache
                    )
                else:
                    stream_to_sync[stream] = DbSync(config, o, table_cache)

                stream_to_sync[stream].create_schema_if_not_exists()
                stream_to_sync[stream].sync_table()

                row_count[stream] = 0
                total_row_count[stream] = 0

        elif t == 'RECORD':

            utils.check_message_has_stream(line, o)
            utils.check_message_has_schema(line, o, schemas)

            record_message = o
            record = record_message['record']
            stream = record_message['stream']

            # Get primary key for this record
            primary_key_string = stream_to_sync[stream].record_primary_key_string(record)
            if not primary_key_string:
                primary_key_string = 'RID-{}'.format(total_row_count[stream])

            # If no bucket for records in this stream exists, create one.
            if stream not in records_to_load:
                records_to_load[stream] = {}

            # increment row count only when a new PK is encountered in the current batch
            if primary_key_string not in records_to_load[stream]:
                row_count[stream] += 1
                total_row_count[stream] += 1

            # add record_message to relevant bucket (by PK)
            records_to_load[stream][primary_key_string] = record_message

            # track full buckets (each stream is tracked at most once)
            if row_count[stream] >= batch_size_rows and stream not in buckets_to_flush:
                buckets_to_flush.append(stream)

            # Do we have enough full buckets to begin flushing them?
            if (
                # we are replicating fewer streams than parallelism
                len(buckets_to_flush) == len(stream_to_sync.keys())
                # we have enough records in enough streams to flush them all in parallel
                or len(buckets_to_flush) >= parallelism
                # we have at least 1 full bucket and 'flush_all_streams' is configured
                or (len(buckets_to_flush) > 0 and config.get('flush_all_streams'))
            ):
                if config.get('flush_all_streams'):
                    streams = records_to_load.keys()
                else:  # only flush full buckets
                    streams = buckets_to_flush

                # Flush and return a new state dict with new positions only for the flushed streams
                buckets_to_flush, flushed_state = record_handler.flush_stream_buckets(
                    config=config, schemas=schemas, validators=validators,
                    buckets=records_to_load, streams_to_flush=streams,
                    parallelism=parallelism, row_count=row_count,
                    stream_to_sync=stream_to_sync, state=state,
                    flushed_state=flushed_state
                )
                # emit last encountered state
                emit_state(copy.deepcopy(flushed_state))
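            # Illustration (assumed parallelism=4): with only two replicated streams,
            # flushing starts once both buckets are full; with six streams it starts
            # as soon as any four buckets reach batch_size_rows; and with
            # 'flush_all_streams' enabled a single full bucket flushes every bucket.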

        elif t == 'BATCH':

            utils.check_message_has_stream(line, o)
            utils.check_message_has_schema(line, o, schemas)

            batch_message = o
            LOGGER.info(f"Received batch message for file: {o['filepath']}")
            total_batches += 1

            # batches are already bucketed by stream, so we can
            # just track batches
            batches_to_flush.append(batch_message)
            batch_queue_depth = len(batches_to_flush)
            # Do we have enough batches to begin flushing them?
            if (
                # don't wait for a full queue to start processing (avoid slow start)
                (batch_queue_warm_up == batch_queue_depth and batch_queue_warm_up <= parallelism)
                or (batch_queue_depth >= parallelism)
                or (batch_queue_depth > 0 and config.get('flush_all_streams'))
            ):
                LOGGER.info(f"Flushing {len(batches_to_flush)} batches.")
                # flush batches
                batches_to_flush, flushed_state = record_handler.flush_batches(
                    config=config, schemas=schemas, validators=validators,
                    batches=batches_to_flush, parallelism=parallelism,
                    stream_to_sync=stream_to_sync, state=state,
                    flushed_state=flushed_state
                )
                # emit last encountered state
                emit_state(copy.deepcopy(flushed_state))
                # Keep scaling the batch queue warm-up until greater than parallelism
                if batch_queue_warm_up <= parallelism:
                    batch_queue_warm_up = batch_queue_warm_up * 2
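                # Illustration (assumed parallelism=4): the warm-up threshold doubles
                # 1 -> 2 -> 4 -> 8, so the first batch is flushed immediately, the next
                # flush waits for 2 queued batches, then 4, after which the regular
                # `batch_queue_depth >= parallelism` condition takes over.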

        elif t == 'ACTIVATE_VERSION':
            LOGGER.debug('ACTIVATE_VERSION message')

        else:
            raise Exception(
                f"Unknown message type {o['type']} in message {o}"
            )

    # if some bucket has records that need to be flushed but haven't reached batch size
    # then flush all buckets.
    if sum(row_count.values()) > 0:
        # flush all streams one last time, delete records if needed, reset counts and then emit current state
        streams = list(records_to_load.keys())
        _, flushed_state = record_handler.flush_stream_buckets(
            config=config, schemas=schemas, validators=validators,
            buckets=records_to_load, streams_to_flush=streams,
            parallelism=parallelism, row_count=row_count,
            stream_to_sync=stream_to_sync, state=state,
            flushed_state=flushed_state
        )
        # emit latest state
        emit_state(copy.deepcopy(flushed_state))

    # if there are any remaining batches to flush, flush them
    if len(batches_to_flush) > 0:
        batches_to_flush, flushed_state = record_handler.flush_batches(
            config=config, schemas=schemas, validators=validators,
            batches=batches_to_flush, parallelism=parallelism,
            stream_to_sync=stream_to_sync, state=state,
            flushed_state=flushed_state
        )
        # emit latest state
        emit_state(copy.deepcopy(flushed_state))
def persist_lines(config, lines, table_cache=None, file_format_type: FileFormatTypes = None) -> None:
    """Main loop to read and consume singer messages from stdin

    Params:
        config: configuration dictionary
        lines: iterable of singer messages
        table_cache: Optional dictionary of Snowflake table structures. This is useful to run as few
                     INFORMATION_SCHEMA and SHOW queries as possible.
                     If not provided then an SQL query will be generated at runtime to
                     get all the required information from Snowflake
        file_format_type: Optional FileFormatTypes value that defines which supported file format to use
                          to load data into Snowflake.
                          If not provided then it will be detected automatically

    Returns:
        None
    """
    state = None
    flushed_state = None
    schemas = {}
    key_properties = {}
    validators = {}
    records_to_load = {}
    row_count = {}
    stream_to_sync = {}
    total_row_count = {}
    batch_size_rows = config.get('batch_size_rows', DEFAULT_BATCH_SIZE_ROWS)
    batch_wait_limit_seconds = config.get('batch_wait_limit_seconds', None)
    flush_timestamp = datetime.utcnow()
    archive_load_files = config.get('archive_load_files', False)
    archive_load_files_data = {}

    # Loop over lines from stdin
    for line in lines:
        try:
            o = json.loads(line)
        except json.decoder.JSONDecodeError:
            LOGGER.error('Unable to parse:\n%s', line)
            raise

        if 'type' not in o:
            raise Exception(f"Line is missing required key 'type': {line}")

        t = o['type']

        if t == 'RECORD':
            if 'stream' not in o:
                raise Exception(f"Line is missing required key 'stream': {line}")
            if o['stream'] not in schemas:
                raise Exception(
                    f"A record for stream {o['stream']} was encountered before a corresponding schema")

            # Get schema for this record's stream
            stream = o['stream']

            stream_utils.adjust_timestamps_in_record(o['record'], schemas[stream])

            # Validate record
            if config.get('validate_records'):
                try:
                    validators[stream].validate(stream_utils.float_to_decimal(o['record']))
                except Exception as ex:
                    if type(ex).__name__ == "InvalidOperation":
                        raise InvalidValidationOperationException(
                            f"Data validation failed and cannot load to destination. RECORD: {o['record']}\n"
                            "multipleOf validations that allows long precisions are not supported (i.e. with 15 digits"
                            "or more) Try removing 'multipleOf' methods from JSON schema.") from ex
                    raise RecordValidationException(f"Record does not pass schema validation. RECORD: {o['record']}") \
                        from ex

            primary_key_string = stream_to_sync[stream].record_primary_key_string(o['record'])
            if not primary_key_string:
                primary_key_string = 'RID-{}'.format(total_row_count[stream])

            if stream not in records_to_load:
                records_to_load[stream] = {}

            # increment row count only when a new PK is encountered in the current batch
            if primary_key_string not in records_to_load[stream]:
                row_count[stream] += 1
                total_row_count[stream] += 1

            # append record
            if config.get('add_metadata_columns') or config.get('hard_delete'):
                records_to_load[stream][primary_key_string] = stream_utils.add_metadata_values_to_record(o)
            else:
                records_to_load[stream][primary_key_string] = o['record']

            if archive_load_files and stream in archive_load_files_data:
                # Keep track of min and max of the designated column
                stream_archive_load_files_values = archive_load_files_data[stream]
                if 'column' in stream_archive_load_files_values:
                    incremental_key_column_name = stream_archive_load_files_values['column']
                    incremental_key_value = o['record'][incremental_key_column_name]
                    min_value = stream_archive_load_files_values['min']
                    max_value = stream_archive_load_files_values['max']

                    if min_value is None or min_value > incremental_key_value:
                        stream_archive_load_files_values['min'] = incremental_key_value

                    if max_value is None or max_value < incremental_key_value:
                        stream_archive_load_files_values['max'] = incremental_key_value

            flush = False
            if row_count[stream] >= batch_size_rows:
                flush = True
                LOGGER.info("Flush triggered by batch_size_rows (%s) reached in %s",
                            batch_size_rows, stream)
            elif (batch_wait_limit_seconds and
                  datetime.utcnow() >= (flush_timestamp + timedelta(seconds=batch_wait_limit_seconds))):
                flush = True
                LOGGER.info("Flush triggered by batch_wait_limit_seconds (%s)",
                            batch_wait_limit_seconds)
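            # Illustration (assumed config): with batch_wait_limit_seconds=600 a
            # partially filled batch is flushed at most ~10 minutes after the
            # previous flush, even if batch_size_rows has not been reached.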

            if flush:
                # flush all streams, delete records if needed, reset counts and then emit current state
                if config.get('flush_all_streams'):
                    filter_streams = None
                else:
                    filter_streams = [stream]

                # Flush and return a new state dict with new positions only for the flushed streams
                flushed_state = flush_streams(
                    records_to_load,
                    row_count,
                    stream_to_sync,
                    config,
                    state,
                    flushed_state,
                    archive_load_files_data,
                    filter_streams=filter_streams)

                flush_timestamp = datetime.utcnow()

                # emit last encountered state
                emit_state(copy.deepcopy(flushed_state))

        elif t == 'SCHEMA':
            if 'stream' not in o:
                raise Exception(f"Line is missing required key 'stream': {line}")

            stream = o['stream']
            new_schema = stream_utils.float_to_decimal(o['schema'])

            # Update and flush only if the schema is new or different from
            # the previously used version of the schema
            if stream not in schemas or schemas[stream] != new_schema:

                schemas[stream] = new_schema
                validators[stream] = Draft7Validator(schemas[stream], format_checker=FormatChecker())

                # flush records from previous stream SCHEMA
                # if same stream has been encountered again, it means the schema might have been altered
                # so previous records need to be flushed
                if row_count.get(stream, 0) > 0:
                    flushed_state = flush_streams(records_to_load,
                                                  row_count,
                                                  stream_to_sync,
                                                  config,
                                                  state,
                                                  flushed_state,
                                                  archive_load_files_data)

                    # emit latest encountered state
                    emit_state(flushed_state)

                # key_properties key must be available in the SCHEMA message.
                if 'key_properties' not in o:
                    raise Exception("key_properties field is required")

                # Log based and Incremental replications on tables with no Primary Key
                # cause duplicates when merging UPDATE events.
                # Stop loading data by default if no Primary Key.
                #
                # If you want to load tables with no Primary Key:
                #  1) Set ` 'primary_key_required': false ` in the target-snowflake config.json
                #  or
                #  2) Use fastsync [postgres-to-snowflake, mysql-to-snowflake, etc.]
                if config.get('primary_key_required', True) and len(o['key_properties']) == 0:
                    LOGGER.critical('Primary key is set to mandatory but not defined in the [%s] stream', stream)
                    raise Exception("key_properties field is required")

                key_properties[stream] = o['key_properties']

                if config.get('add_metadata_columns') or config.get('hard_delete'):
                    stream_to_sync[stream] = DbSync(config,
                                                    add_metadata_columns_to_schema(o),
                                                    table_cache,
                                                    file_format_type)
                else:
                    stream_to_sync[stream] = DbSync(config, o, table_cache, file_format_type)

                if archive_load_files:
                    archive_load_files_data[stream] = {
                        'tap': config.get('tap_id'),
                    }

                    # In case of incremental replication, track min/max of the replication key.
                    # Incremental replication is assumed if o['bookmark_properties'][0] is one of the columns.
                    incremental_key_column_name = stream_utils.get_incremental_key(o)
                    if incremental_key_column_name:
                        LOGGER.info("Using %s as incremental_key_column_name", incremental_key_column_name)
                        archive_load_files_data[stream].update(
                            column=incremental_key_column_name,
                            min=None,
                            max=None
                        )
                    else:
                        LOGGER.warning(
                            "archive_load_files is enabled, but no incremental_key_column_name was found. "
                            "Min/max values will not be added to metadata for stream %s.", stream
                        )
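                    # Illustrative shape (assumed tap_id and replication key names) of the
                    # per-stream entry built above when an incremental key is found:
                    #   archive_load_files_data[stream] == {
                    #       'tap': 'my_tap', 'column': 'updated_at', 'min': None, 'max': None
                    #   }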

                stream_to_sync[stream].create_schema_if_not_exists()
                stream_to_sync[stream].sync_table()

                row_count[stream] = 0
                total_row_count[stream] = 0

        elif t == 'ACTIVATE_VERSION':
            LOGGER.debug('ACTIVATE_VERSION message')

        elif t == 'STATE':
            LOGGER.debug('Setting state to %s', o['value'])
            state = o['value']

            # Initially set flushed state
            if not flushed_state:
                flushed_state = copy.deepcopy(state)

        else:
            raise Exception(f"Unknown message type {o['type']} in message {o}")

    # if some bucket has records that need to be flushed but haven't reached batch size
    # then flush all buckets.
    if sum(row_count.values()) > 0:
        # flush all streams one last time, delete records if needed, reset counts and then emit current state
        flushed_state = flush_streams(records_to_load, row_count, stream_to_sync, config, state, flushed_state,
                                      archive_load_files_data)

    # emit latest state
    emit_state(copy.deepcopy(flushed_state))
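# A minimal driver sketch (hypothetical, not the target's real entry point): it only
# illustrates how persist_lines() above is typically fed with singer messages read
# from stdin; loading the config file is simplified here and the optional table_cache
# and file format detection are skipped.
def _example_main() -> None:
    import io
    import json
    import sys

    # Assumed config file location for this sketch
    with open('config.json', encoding='utf-8') as config_file:
        config = json.load(config_file)

    # Read singer messages from stdin as UTF-8 text lines
    singer_messages = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    persist_lines(config, singer_messages)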
def persist_lines(config, lines, information_schema_cache=None) -> None:
    state = None
    schemas = {}
    key_properties = {}
    validators = {}
    records_to_load = {}
    csv_files_to_load = {}
    row_count = {}
    stream_to_sync = {}
    total_row_count = {}
    batch_size_rows = config.get("batch_size_rows", 100000)
    tables_created = list()

    # Loop over lines from stdin
    for line in lines:
        try:
            o = json.loads(line)
        except json.decoder.JSONDecodeError:
            logger.error("Unable to parse:\n{}".format(line))
            raise

        if "type" not in o:
            raise Exception(
                "Line is missing required key 'type': {}".format(line))

        t = o["type"]

        if t == "RECORD":
            if "stream" not in o:
                raise Exception(
                    "Line is missing required key 'stream': {}".format(line))
            if o["stream"] not in schemas:
                raise Exception(
                    "A record for stream {} was encountered before a corresponding schema"
                    .format(o["stream"]))

            # Get schema for this record's stream
            stream = o["stream"]

            # Validate record (validation errors are silently ignored in this legacy variant)
            try:
                validators[stream].validate(float_to_decimal(o["record"]))
            except Exception:
                pass

            primary_key_string = stream_to_sync[
                stream].record_primary_key_string(o["record"])
            if not primary_key_string:
                primary_key_string = "RID-{}".format(total_row_count[stream])

            if stream not in records_to_load:
                records_to_load[stream] = {}

            # increment row count with every record line
            row_count[stream] += 1
            total_row_count[stream] += 1

            # append record
            if config.get("add_metadata_columns") or config.get("hard_delete"):
                records_to_load[stream][
                    primary_key_string] = add_metadata_values_to_record(
                        o, stream_to_sync[stream])
            else:
                records_to_load[stream][primary_key_string] = o["record"]

            if row_count[stream] >= batch_size_rows:
                # create target tables for any loaded stream that doesn't have one yet
                for loaded_stream in records_to_load.keys():
                    if loaded_stream not in tables_created:
                        stream_to_sync[loaded_stream].create_targets()
                        tables_created.append(loaded_stream)

                # flush all streams, delete records if needed, reset counts and then emit current state
                flush_all_streams(records_to_load, row_count, stream_to_sync,
                                  config)
                # emit last encountered state
                emit_state(state)

        elif t == "SCHEMA":
            if "stream" not in o:
                raise Exception(
                    "Line is missing required key 'stream': {}".format(line))

            stream = o["stream"]

            schemas[stream] = o
            schema = float_to_decimal(o["schema"])
            validators[stream] = Draft4Validator(
                schema, format_checker=FormatChecker())

            # flush records from previous stream SCHEMA
            # if same stream has been encountered again, it means the schema might have been altered
            # so previous records need to be flushed
            if row_count.get(stream, 0) > 0:
                # create target tables for any loaded stream that doesn't have one yet;
                # a separate loop variable keeps `stream` pointing at the current SCHEMA's stream
                for loaded_stream in records_to_load.keys():
                    if loaded_stream not in tables_created:
                        stream_to_sync[loaded_stream].create_targets()
                        tables_created.append(loaded_stream)

                flush_all_streams(records_to_load, row_count, stream_to_sync,
                                  config)
                # emit latest encountered state
                emit_state(state)

            # key_properties key must be available in the SCHEMA message.
            if "key_properties" not in o:
                raise Exception("key_properties field is required")

            # Log based and Incremental replications on tables with no Primary Key
            # cause duplicates when merging UPDATE events.
            # Stop loading data by default if no Primary Key.
            #
            # If you want to load tables with no Primary Key:
            #  1) Set ` 'primary_key_required': false ` in the target-snowflake config.json
            #  or
            #  2) Use fastsync [postgres-to-snowflake, mysql-to-snowflake, etc.]
            if config.get("primary_key_required", True) and len(
                    o["key_properties"]) == 0:
                logger.critical(
                    "Primary key is set to mandatory but not defined in the [{}] stream"
                    .format(stream))
                raise Exception("key_properties field is required")

            key_properties[stream] = o["key_properties"]

            if config.get("add_metadata_columns") or config.get("hard_delete"):
                stream_to_sync[stream] = DbSync(
                    config, add_metadata_columns_to_schema(o),
                    information_schema_cache)
            else:
                stream_to_sync[stream] = DbSync(config, o,
                                                information_schema_cache)

            try:
                stream_to_sync[stream].create_schema_if_not_exists()
                stream_to_sync[stream].sync_table()
            except Exception as e:
                logger.error("""
                    Cannot sync table structure in Snowflake schema: {} .
                    Try to delete {}.COLUMNS table to reset information_schema cache. Maybe it's outdated.
                """.format(
                    stream_to_sync[stream].schema_name,
                    stream_to_sync[stream].pipelinewise_schema.upper(),
                ))
                raise e

            row_count[stream] = 0
            total_row_count[stream] = 0
            csv_files_to_load[stream] = NamedTemporaryFile(mode="w+b")

        elif t == "ACTIVATE_VERSION":
            logger.debug("ACTIVATE_VERSION message")

        elif t == "STATE":
            logger.debug("Setting state to {}".format(o["value"]))
            state = o["value"]

        else:
            raise Exception("Unknown message type {} in message {}".format(
                o["type"], o))

    # if some bucket has records that need to be flushed but haven't reached batch size
    # then flush all buckets.
    if sum(row_count.values()) > 0:
        for stream in records_to_load.keys():
            if stream not in tables_created:
                stream_to_sync[stream].create_targets()
                tables_created.append(stream)

        # flush all streams one last time, delete records if needed, reset counts and then emit current state
        flush_all_streams(records_to_load, row_count, stream_to_sync, config)
    for stream in tables_created:
        stream_to_sync[stream].finalize_table()
    # emit latest state
    emit_state(state)
def flush_records(stream: str,
                  records: List[Dict],
                  db_sync: DbSync,
                  temp_dir: str = None,
                  no_compression: bool = False,
                  archive_load_files: Dict = None) -> None:
    """
    Takes a list of record messages and loads it into the snowflake target table

    Args:
        stream: Name of the stream
        records: List of dictionaries, each representing one CSV line. Dict key is the column name, value is the
                 column value
        db_sync: A DbSync object
        temp_dir: Directory where intermediate temporary files will be created. (Default: OS specific temp directory)
        no_compression: Disable compression of the generated files. (Default: False)
        archive_load_files: Data needed for archive load files. (Default: None)

    Returns:
        None
    """
    # Generate file on disk in the required format
    filepath = db_sync.file_format.formatter.records_to_file(
        records,
        db_sync.flatten_schema,
        compression=not no_compression,
        dest_dir=temp_dir,
        data_flattening_max_level=db_sync.data_flattening_max_level)

    # Get file stats
    row_count = len(records)
    size_bytes = os.path.getsize(filepath)

    # Upload to s3 and load into Snowflake
    s3_key = db_sync.put_to_stage(filepath, stream, row_count, temp_dir=temp_dir)
    db_sync.load_file(s3_key, row_count, size_bytes)

    # Delete file from local disk
    os.remove(filepath)

    if archive_load_files:
        stream_name_parts = stream_utils.stream_name_to_dict(stream)
        if 'schema_name' not in stream_name_parts or 'table_name' not in stream_name_parts:
            raise Exception("Failed to extract schema and table names from stream '{}'".format(stream))

        archive_schema = stream_name_parts['schema_name']
        archive_table = stream_name_parts['table_name']
        archive_tap = archive_load_files['tap']

        archive_metadata = {
            'tap': archive_tap,
            'schema': archive_schema,
            'table': archive_table,
            'archived-by': 'pipelinewise_target_snowflake'
        }

        if 'column' in archive_load_files:
            archive_metadata.update({
                'incremental-key': archive_load_files['column'],
                'incremental-key-min': str(archive_load_files['min']),
                'incremental-key-max': str(archive_load_files['max'])
            })

        # Use same file name as in import
        archive_file = os.path.basename(s3_key)
        archive_key = "{}/{}/{}".format(archive_tap, archive_table, archive_file)

        db_sync.copy_to_archive(s3_key, archive_key, archive_metadata)

    # Delete file from S3
    db_sync.delete_from_stage(stream, s3_key)
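# A minimal usage sketch (hypothetical stream name and records): flushing one bucket
# of already validated rows with an initialised DbSync instance, no temp dir override,
# compression enabled and the archive step skipped.
def _example_flush(db_sync: DbSync) -> None:
    records = [
        {'c_pk': 1, 'c_varchar': 'a'},
        {'c_pk': 2, 'c_varchar': 'b'},
    ]
    flush_records(
        stream='my_schema-my_table',   # hypothetical singer stream name
        records=records,
        db_sync=db_sync,
        temp_dir=None,                 # use the OS default temp directory
        no_compression=False,          # keep compressed files
        archive_load_files=None        # do not archive the load file
    )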