Example #1
def gen_schema_from_data(data):
    generator = SchemaGenerator('dict', keep_nulls=True)
    schema_map, error_logs = generator.deduce_schema(data)
    if error_logs:
        raise PyGBQError('Could not generate schema, please provide a schema')
    schema = generator.flatten_schema(schema_map)
    return schema
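A minimal usage sketch for the helper above (the sample rows are illustrative; SchemaGenerator comes from bigquery_schema_generator.generate_schema):

rows = [
    {'name': 'alice', 'nickname': None},
    {'name': 'bob', 'nickname': None},
]
schema = gen_schema_from_data(rows)
# With keep_nulls=True, 'nickname' stays in the schema (as a NULLABLE STRING)
# even though every value is null; with keep_nulls=False it would be dropped.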
Example #2
    def verify_data_chunk(self, chunk_count, chunk):
        data_flags = chunk['data_flags']
        input_format = 'csv' if ('csv' in data_flags) else 'json'
        keep_nulls = ('keep_nulls' in data_flags)
        infer_mode = ('infer_mode' in data_flags)
        quoted_values_are_strings = ('quoted_values_are_strings' in data_flags)
        sanitize_names = ('sanitize_names' in data_flags)
        records = chunk['records']
        expected_errors = chunk['errors']
        expected_error_map = chunk['error_map']
        expected_schema = chunk['schema']

        print("Test chunk %s: First record: %s" % (chunk_count, records[0]))
        # Generate schema.
        generator = SchemaGenerator(
            input_format=input_format,
            infer_mode=infer_mode,
            keep_nulls=keep_nulls,
            quoted_values_are_strings=quoted_values_are_strings,
            sanitize_names=sanitize_names)
        schema_map, error_logs = generator.deduce_schema(records)
        schema = generator.flatten_schema(schema_map)

        # Check the schema, preserving order
        expected = json.loads(expected_schema, object_pairs_hook=OrderedDict)
        self.assertEqual(expected, schema)

        # Check the error messages
        self.assertEqual(len(expected_errors), len(error_logs))
        self.assert_error_messages(expected_error_map, error_logs)
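For orientation, a chunk of roughly this shape would exercise the method above (the field values are illustrative, not taken from the real test data file):

chunk = {
    'data_flags': ['json', 'keep_nulls'],
    'records': ['{"a": 1}', '{"a": null}'],
    'errors': [],
    'error_map': {},
    'schema': '[{"mode": "NULLABLE", "name": "a", "type": "INTEGER"}]',
}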
Example #3
    def verify_data_chunk(self, chunk_count, chunk):
        data_flags = chunk['data_flags']
        keep_nulls = ('keep_nulls' in data_flags)
        quoted_values_are_strings = ('quoted_values_are_strings' in data_flags)
        records = chunk['records']
        expected_errors = chunk['errors']
        expected_error_map = chunk['error_map']
        expected_schema = chunk['schema']

        print("Test chunk %s: First record: %s" % (chunk_count, records[0]))

        # Generate schema.
        generator = SchemaGenerator(
            keep_nulls=keep_nulls,
            quoted_values_are_strings=quoted_values_are_strings)
        schema_map, error_logs = generator.deduce_schema(records)
        schema = generator.flatten_schema(schema_map)

        # Check the schema
        expected = sort_schema(json.loads(expected_schema))
        self.assertEqual(expected, schema)

        # Check the error messages
        self.assertEqual(len(expected_errors), len(error_logs))
        self.assert_error_messages(expected_error_map, error_logs)
Example #4
    def process(self, batch):
        logging.info(f"Got {len(batch)} bad rows")
        table_id = f"{self.bq_dataset}.{self.bq_table}"

        generator = SchemaGenerator(input_format='dict',
                                    quoted_values_are_strings=True)

        # Fetch the table's existing schema to seed deduce_schema. If the
        # table doesn't exist, proceed with an empty original_schema_map.
        try:
            table_file_name = f"original_schema_{self.bq_table}.json"
            table = self.client.get_table(table_id)
            self.client.schema_to_json(table.schema, table_file_name)
            original_schema_map = read_existing_schema_from_file(
                table_file_name)
        except Exception:
            logging.info(
                f"{table_id} does not exist; proceeding without an existing schema")
            original_schema_map = {}

        # generate the new schema
        schema_map, error_logs = generator.deduce_schema(
            input_data=batch, schema_map=original_schema_map)
        schema = generator.flatten_schema(schema_map)

        job_config = bigquery.LoadJobConfig(
            source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
            schema_update_options=[
                bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION
            ],
            write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
            schema=schema)

        try:
            load_job = self.client.load_table_from_json(
                batch,
                table_id,
                job_config=job_config,
            )  # Make an API request.

            load_job.result()  # Waits for the job to complete.
            if load_job.errors:
                logging.info(f"error_result =  {load_job.error_result}")
                logging.info(f"errors =  {load_job.errors}")
            else:
                logging.info(f'Loaded {len(batch)} rows.')

        except Exception as error:
            logging.error(f'Error loading batch: {error}')
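The pattern worth noting above: deduce_schema is seeded with the table's current schema via read_existing_schema_from_file, so a new batch can only extend it. A condensed sketch of just that hand-off, with an illustrative file name and row:

from bigquery_schema_generator.generate_schema import (
    SchemaGenerator, read_existing_schema_from_file)

generator = SchemaGenerator(input_format='dict', quoted_values_are_strings=True)
# 'existing_schema.json' stands in for the file written by client.schema_to_json().
existing_map = read_existing_schema_from_file('existing_schema.json')
schema_map, error_logs = generator.deduce_schema(
    input_data=[{'id': 1, 'new_field': 'x'}], schema_map=existing_map)
schema = generator.flatten_schema(schema_map)  # existing fields plus 'new_field'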
Example #5
    def test_run_with_input_and_output(self):
        generator = SchemaGenerator()
        input = StringIO('{ "name": "1" }')
        output = StringIO()
        generator.run(input, output)
        expected = """\
[
  {
    "mode": "NULLABLE",
    "name": "name",
    "type": "INTEGER"
  }
]
"""
        self.assertEqual(expected, output.getvalue())
Example #6
    def test_quoted_values_are_strings(self):
        generator = SchemaGenerator(quoted_values_are_strings=True)
        self.assertEqual('STRING', generator.infer_value_type('abcd'))

        self.assertEqual('INTEGER', generator.infer_value_type(1))
        self.assertEqual('STRING', generator.infer_value_type('1'))

        self.assertEqual('FLOAT', generator.infer_value_type(1.0))
        self.assertEqual('STRING', generator.infer_value_type('1.0'))

        self.assertEqual('BOOLEAN', generator.infer_value_type(True))
        self.assertEqual('STRING', generator.infer_value_type('True'))
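For contrast, with the default quoted_values_are_strings=False the same quoted values infer as quoted-numeric intermediate types, which flatten to INTEGER/FLOAT (this is why the quoted "1" in Example #5 produced an INTEGER column). A one-line sketch:

default_generator = SchemaGenerator()  # quoted_values_are_strings defaults to False
assert default_generator.infer_value_type('1') == 'QINTEGER'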
Example #7
    def verify_data_chunk(self, chunk):
        chunk_count = chunk['chunk_count']
        line_number = chunk['line_number']
        data_flags = chunk['data_flags']
        input_format = 'csv' if ('csv' in data_flags) else 'json'
        keep_nulls = ('keep_nulls' in data_flags)
        infer_mode = ('infer_mode' in data_flags)
        quoted_values_are_strings = ('quoted_values_are_strings' in data_flags)
        sanitize_names = ('sanitize_names' in data_flags)
        ignore_invalid_lines = ('ignore_invalid_lines' in data_flags)
        records = chunk['records']
        expected_errors = chunk['errors']
        expected_error_map = chunk['error_map']
        expected_schema = chunk['schema']
        existing_schema = chunk['existing_schema']

        print(f"Test chunk: {chunk_count}; line_number: {line_number}; "
              f"first record: {records[0]}")
        # Generate schema.
        generator = SchemaGenerator(
            input_format=input_format,
            infer_mode=infer_mode,
            keep_nulls=keep_nulls,
            quoted_values_are_strings=quoted_values_are_strings,
            sanitize_names=sanitize_names,
            ignore_invalid_lines=ignore_invalid_lines)
        existing_schema_map = None
        if existing_schema:
            existing_schema_map = bq_schema_to_map(json.loads(existing_schema))
        schema_map, error_logs = generator.deduce_schema(
            records, schema_map=existing_schema_map)
        schema = generator.flatten_schema(schema_map)

        # Check the schema, preserving order
        expected = json.loads(expected_schema, object_pairs_hook=OrderedDict)
        self.assertEqual(expected, schema)

        # Check the error messages
        try:
            self.assertEqual(len(expected_errors), len(error_logs))
        except AssertionError as e:
            print(f"Number of errors mismatched, expected:"
                  f" {len(expected_errors)} got: {len(error_logs)}")
            print(f"Errors: {error_logs}")
            print(f"Expected Errors: {expected_errors}")
            raise e
        self.assert_error_messages(expected_error_map, error_logs)
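bq_schema_to_map, used above, converts a BigQuery-style schema (a list of field dicts) into the internal schema_map that deduce_schema can extend; Example #8 below round-trips it through flatten_schema. A minimal sketch:

from bigquery_schema_generator.generate_schema import bq_schema_to_map

existing = [{'name': 'id', 'type': 'INTEGER', 'mode': 'NULLABLE'}]
schema_map = bq_schema_to_map(existing)
# Ready to pass as generator.deduce_schema(records, schema_map=schema_map).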
Example #8
    def test_bq_schema_to_map_round_trip_permutations(self):
        """This checks that each possible type of consititued schema, when
        generated, then converted to a schema_map, then back to the schema, they
        are equal.

        This function is really ugly but has good coverage. This was migrated
        from pytest fixtures which were a bit cleaner but we ideally did not
        want to add a new dependency / library that is used for testing.
        """
        valid_types = BQ_TYPES
        valid_modes = ['NULLABLE', 'REQUIRED', 'REPEATED']
        valid_input_formats_and_modes = [
            ('csv', True),
            ('csv', False),
            ('json', False),
        ]
        valid_keep_null_params = [True, False]
        valid_quoted_values_are_strings = [True, False]
        for valid_type in valid_types:
            for valid_mode in valid_modes:
                bq_entry = self.make_bq_schema_entry(valid_mode, valid_type)
                schema = [bq_entry]
                schema_map = bq_schema_to_map(schema)
                for input_format_and_mode in valid_input_formats_and_modes:
                    for keep_null_param in valid_keep_null_params:
                        for quotes_are_strings in \
                                valid_quoted_values_are_strings:
                            generator = SchemaGenerator(
                                input_format=input_format_and_mode[0],
                                infer_mode=input_format_and_mode[1],
                                keep_nulls=keep_null_param,
                                quoted_values_are_strings=quotes_are_strings)
                            flattened = generator.flatten_schema(schema_map)
                            try:
                                self.assertEqual(schema, flattened)
                            except AssertionError as e:
                                print("test_bq_schema_to_map_permutations"
                                      " failed for case where: "
                                      f"bq_entry={bq_entry}\n"
                                      "schema_generator created with values:"
                                      f"{input_format_and_mode[0]}"
                                      f"-{input_format_and_mode[1]}"
                                      f"-{keep_null_param}"
                                      f"-{quotes_are_strings}")
                                raise e
Example #9
def generate_schema(temp_data, replace_time_types=True, extra_types=None):
    """
    Generate a BigQuery schema by first running the BigQuery SchemaGenerator,
    then keeping only TIME and related types, plus any extra_types passed to
    the function. Everything else is set to STRING.
    """
    # Avoid a mutable default argument.
    extra_types = extra_types or []

    # Find out what data format to read:
    if os.path.exists(temp_data):
        generator = SchemaGenerator(keep_nulls=True)
    elif os.path.exists(temp_data.replace('.json', '.csv')):
        generator = SchemaGenerator(input_format='csv', keep_nulls=True)
        temp_data = temp_data.replace('.json', '.csv')
    else:
        raise Exception('unknown temp_data file extension')

    # Deduce schema:
    with open(temp_data, 'r') as f:
        schema_map, error_logs = generator.deduce_schema(f)
    schema = generator.flatten_schema(schema_map)

    # Set requested fields to string:
    schema_crawl_to_str(schema, replace_time_types, extra_types)

    print(schema)

    return [bigquery.SchemaField.from_api_repr(field) for field in schema]
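The returned SchemaField list can be passed straight to the BigQuery client. A hypothetical call, with an illustrative path:

schema_fields = generate_schema('/tmp/export.json')
job_config = bigquery.LoadJobConfig(
    schema=schema_fields,
    source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON)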
Example #10
def generate_bq_schema(obj: list, options: dict = None):
    """Generates a BigQuery-compliant schema using the
    bigquery-schema-generator library.

    Args:
        obj: the list of records (dicts) to create a BigQuery schema for
        options (optional): a dictionary of options to pass to the
            SchemaGenerator class. If nothing is specified, default values
            are used.
    Returns:
        A BigQuery schema as a list of field dictionaries
    """

    if not options:
        options = {
            'input_format': None,
            'infer_mode': False,
            'keep_nulls': False,
            'quoted_values_are_strings': False,
            'debugging_interval': 500,
            'debugging_map': False
        }

    generator = SchemaGenerator(
        input_format=options['input_format'],
        infer_mode=options['infer_mode'],
        keep_nulls=options['keep_nulls'],
        quoted_values_are_strings=options['quoted_values_are_strings'],
        debugging_interval=options['debugging_interval'],
        debugging_map=options['debugging_map'])

    schema_map, error_logs = generator.deduce_schema(
        [json.dumps(item) for item in obj])
    schema = generator.flatten_schema(schema_map)

    if error_logs:
        logging.debug(error_logs)

    return schema
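A hypothetical call; note that the records are serialized to JSON lines internally before deduce_schema sees them:

records = [{'id': 1, 'price': '9.99'}, {'id': 2, 'price': '10.50'}]
schema = generate_bq_schema(records)
# With the default quoted_values_are_strings=False, 'price' is deduced as
# FLOAT even though the values are quoted in the input.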
Example #11
    def test_run_with_invalid_input_throws_exception(self):
        generator = SchemaGenerator()
        output = StringIO()

        input = StringIO('[]')
        with self.assertRaises(Exception):
            generator.run(input, output)

        input = StringIO('this is not a JSON')
        with self.assertRaises(Exception):
            generator.run(input, output)
Example #12
    def test_infer_value_type(self):
        generator = SchemaGenerator()
        self.assertEqual('TIME', generator.infer_value_type('12:34:56'))
        self.assertEqual('DATE', generator.infer_value_type('2018-02-08'))
        self.assertEqual('TIMESTAMP',
                         generator.infer_value_type('2018-02-08T12:34:56'))
        self.assertEqual('STRING', generator.infer_value_type('abc'))
        self.assertEqual('BOOLEAN', generator.infer_value_type(True))
        self.assertEqual('INTEGER', generator.infer_value_type(1))
        self.assertEqual('FLOAT', generator.infer_value_type(2.0))
        self.assertEqual('RECORD', generator.infer_value_type({
            'a': 1,
            'b': 2
        }))
        self.assertEqual('__null__', generator.infer_value_type(None))
        self.assertEqual('__empty_record__', generator.infer_value_type({}))
        self.assertEqual('__empty_array__', generator.infer_value_type([]))
        self.assertEqual('__array__', generator.infer_value_type([1, 2, 3]))
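Example #15 below repeats this test with fuller coverage, including the quoted QBOOLEAN/QINTEGER/QFLOAT intermediate types.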
Example #13
    def test_infer_array_type(self):
        generator = SchemaGenerator()

        self.assertEqual('INTEGER', generator.infer_array_type([1, 1]))
        self.assertEqual('FLOAT', generator.infer_array_type([1.0, 2.0]))
        self.assertEqual('BOOLEAN', generator.infer_array_type([True, False]))
        self.assertEqual('STRING', generator.infer_array_type(['a', 'b']))
        self.assertEqual(
            'DATE', generator.infer_array_type(['2018-02-09', '2018-02-10']))
        self.assertEqual('TIME',
                         generator.infer_array_type(['10:44:00', '10:44:01']))
        self.assertEqual(
            'TIMESTAMP',
            generator.infer_array_type(
                ['2018-02-09T11:00:00', '2018-02-10T11:00:01']))
        self.assertEqual('RECORD', generator.infer_array_type([{'a': 1}]))

        # Special types are supported
        self.assertEqual('__null__', generator.infer_array_type([None]))
        self.assertEqual('__empty_record__', generator.infer_array_type([{}]))
        self.assertEqual('__empty_array__', generator.infer_array_type([[]]))

        # Mixed TIME, DATE, TIMESTAMP converts to STRING
        self.assertEqual(
            'STRING', generator.infer_array_type(['2018-02-09', '10:44:00']))
        self.assertEqual(
            'STRING',
            generator.infer_array_type(['2018-02-09T11:00:00', '10:44:00']))
        self.assertEqual(
            'STRING',
            generator.infer_array_type(['2018-02-09', '2018-02-09T10:44:00']))
        self.assertEqual('STRING',
                         generator.infer_array_type(['time', '10:44:00']))
        self.assertEqual('STRING',
                         generator.infer_array_type(['date', '2018-02-09']))
        self.assertEqual(
            'STRING',
            generator.infer_array_type(['timestamp', '2018-02-09T10:44:00']))

        # Mixed FLOAT and INTEGER returns FLOAT
        self.assertEqual('FLOAT', generator.infer_array_type([1, 2.0]))
        self.assertEqual('FLOAT', generator.infer_array_type([1.0, 2]))

        # Invalid mixed arrays
        self.assertIsNone(generator.infer_array_type([None, 1]))
        self.assertIsNone(generator.infer_array_type([1, True]))
        self.assertIsNone(generator.infer_array_type([1, '2018-02-09']))
        self.assertIsNone(generator.infer_array_type(['a', 1]))
        self.assertIsNone(generator.infer_array_type(['a', []]))
        self.assertIsNone(generator.infer_array_type(['a', {}]))
        self.assertIsNone(generator.infer_array_type([{}, []]))
        self.assertIsNone(generator.infer_array_type([{'a': 1}, []]))
        self.assertIsNone(generator.infer_array_type([{'a': 1}, [2]]))
        self.assertIsNone(generator.infer_array_type([{}, [2]]))
Example #14
    def test_infer_bigquery_type(self):
        generator = SchemaGenerator()

        self.assertEqual(('NULLABLE', 'TIME'),
                         generator.infer_bigquery_type('12:33:01'))
        self.assertEqual(('NULLABLE', 'DATE'),
                         generator.infer_bigquery_type('2018-02-08'))
        self.assertEqual(('NULLABLE', 'TIMESTAMP'),
                         generator.infer_bigquery_type('2018-02-08T12:34:56'))
        self.assertEqual(('NULLABLE', 'STRING'),
                         generator.infer_bigquery_type('abc'))
        self.assertEqual(('NULLABLE', 'BOOLEAN'),
                         generator.infer_bigquery_type(True))
        self.assertEqual(('NULLABLE', 'INTEGER'),
                         generator.infer_bigquery_type(1))
        self.assertEqual(('NULLABLE', 'FLOAT'),
                         generator.infer_bigquery_type(2.0))
        # yapf: disable
        self.assertEqual(('NULLABLE', 'RECORD'),
                         generator.infer_bigquery_type({ 'a': 1, 'b': 2 }))
        # yapf: enable
        self.assertEqual(('NULLABLE', '__null__'),
                         generator.infer_bigquery_type(None))
        self.assertEqual(('NULLABLE', '__empty_record__'),
                         generator.infer_bigquery_type({}))
        self.assertEqual(('NULLABLE', '__empty_array__'),
                         generator.infer_bigquery_type([]))

        self.assertEqual(
            ('REPEATED', 'TIME'),
            generator.infer_bigquery_type(['00:00:00', '00:00:01',
                                           '00:00:02']))
        self.assertEqual(
            ('REPEATED', 'DATE'),
            generator.infer_bigquery_type(['2018-02-08', '2018-02-09']))
        self.assertEqual(('REPEATED', 'TIMESTAMP'),
                         generator.infer_bigquery_type(
                             ['2018-02-08T12:34:56', '2018-02-08T12:34:56']))
        self.assertEqual(('REPEATED', 'STRING'),
                         generator.infer_bigquery_type(['a', 'b', 'c']))
        self.assertEqual(('REPEATED', 'BOOLEAN'),
                         generator.infer_bigquery_type([True, False, True]))
        self.assertEqual(('REPEATED', 'INTEGER'),
                         generator.infer_bigquery_type([1, 2, 3]))
        self.assertEqual(('REPEATED', 'FLOAT'),
                         generator.infer_bigquery_type([1.0, 2.0]))
        # yapf: disable
        self.assertEqual(('REPEATED', 'RECORD'),
                         generator.infer_bigquery_type([
                            { 'a': 1, 'b': 2 },
                            { 'c': 3 }]))
        # yapf: enable
        self.assertEqual(('REPEATED', '__empty_record__'),
                         generator.infer_bigquery_type([{}]))

        # Cannot have arrays of nulls (REPEATED __null__)
        with self.assertRaises(Exception):
            generator.infer_bigquery_type([None])

        # Cannot have arrays of empty arrays: (REPEATED __empty_array__)
        with self.assertRaises(Exception):
            generator.infer_bigquery_type([[], []])

        # Cannot have arrays of arrays: (REPEATED __array__)
        with self.assertRaises(Exception):
            generator.infer_bigquery_type([[1, 2], [2]])
Example #15
    def test_infer_value_type(self):
        generator = SchemaGenerator()

        # STRING and date/time
        self.assertEqual('STRING', generator.infer_value_type('abc'))
        self.assertEqual('TIME', generator.infer_value_type('12:34:56'))
        self.assertEqual('DATE', generator.infer_value_type('2018-02-08'))
        self.assertEqual('TIMESTAMP',
                         generator.infer_value_type('2018-02-08T12:34:56'))

        # BOOLEAN
        self.assertEqual('BOOLEAN', generator.infer_value_type(True))
        self.assertEqual('QBOOLEAN', generator.infer_value_type('True'))
        self.assertEqual('QBOOLEAN', generator.infer_value_type('False'))
        self.assertEqual('QBOOLEAN', generator.infer_value_type('true'))
        self.assertEqual('QBOOLEAN', generator.infer_value_type('false'))

        # INTEGER
        self.assertEqual('INTEGER', generator.infer_value_type(1))
        self.assertEqual('INTEGER',
                         generator.infer_value_type(9223372036854775807))
        self.assertEqual('INTEGER',
                         generator.infer_value_type(-9223372036854775808))
        self.assertEqual('FLOAT',
                         generator.infer_value_type(9223372036854775808))
        self.assertEqual('FLOAT',
                         generator.infer_value_type(-9223372036854775809))

        # Quoted INTEGER
        self.assertEqual('QINTEGER', generator.infer_value_type('2'))
        self.assertEqual('QINTEGER', generator.infer_value_type('-1000'))
        self.assertEqual('QINTEGER',
                         generator.infer_value_type('9223372036854775807'))
        self.assertEqual('QINTEGER',
                         generator.infer_value_type('-9223372036854775808'))
        self.assertEqual('QFLOAT',
                         generator.infer_value_type('9223372036854775808'))
        self.assertEqual('QFLOAT',
                         generator.infer_value_type('-9223372036854775809'))

        # FLOAT
        self.assertEqual('FLOAT', generator.infer_value_type(2.0))

        # Quoted FLOAT
        self.assertEqual('QFLOAT', generator.infer_value_type('3.0'))
        self.assertEqual('QFLOAT', generator.infer_value_type('-5.4'))

        # RECORD
        self.assertEqual('RECORD', generator.infer_value_type({
            'a': 1,
            'b': 2
        }))

        # Special
        self.assertEqual('__null__', generator.infer_value_type(None))
        self.assertEqual('__empty_record__', generator.infer_value_type({}))
        self.assertEqual('__empty_array__', generator.infer_value_type([]))
        self.assertEqual('__array__', generator.infer_value_type([1, 2, 3]))
Example #16
    def verify_data_chunk_as_csv_json_dict(self, *, chunk, as_dict):
        """Verify the given chunk from the testdata.txt file. If `as_dict` is
        True, then if the input_format of the chunk is 'json', pretend
        that the input data was given as an internal Python dict, and verify
        the 'input_format=dict' code path in SchemaGenerator.
        """
        chunk_count = chunk['chunk_count']
        line_number = chunk['line_number']
        data_flags = chunk['data_flags']
        input_format = 'csv' if ('csv' in data_flags) else 'json'
        keep_nulls = ('keep_nulls' in data_flags)
        infer_mode = ('infer_mode' in data_flags)
        quoted_values_are_strings = ('quoted_values_are_strings' in data_flags)
        sanitize_names = ('sanitize_names' in data_flags)
        ignore_invalid_lines = ('ignore_invalid_lines' in data_flags)
        records = chunk['records']
        expected_errors = chunk['errors']
        expected_error_map = chunk['error_map']
        expected_schema = chunk['schema']
        existing_schema = chunk['existing_schema']

        if as_dict:
            if input_format == 'json':
                print(
                    f"Test chunk: {chunk_count}; line_number: {line_number}; "
                    f"input_format='dict'")
                input_format = 'dict'
                records = json_reader(records)
            else:
                # Don't bother converting CSV data chunks into Python dict.
                return
        else:
            print(f"Test chunk: {chunk_count}; line_number: {line_number}; "
                  f"first record: {records[0]}")

        # Generate schema.
        generator = SchemaGenerator(
            input_format=input_format,
            infer_mode=infer_mode,
            keep_nulls=keep_nulls,
            quoted_values_are_strings=quoted_values_are_strings,
            sanitize_names=sanitize_names,
            ignore_invalid_lines=ignore_invalid_lines)
        existing_schema_map = None
        if existing_schema:
            existing_schema_map = bq_schema_to_map(json.loads(existing_schema))
        schema_map, error_logs = generator.deduce_schema(
            records, schema_map=existing_schema_map)
        schema = generator.flatten_schema(schema_map)

        # Check the schema, preserving order
        expected = json.loads(expected_schema, object_pairs_hook=OrderedDict)
        self.assertEqual(expected, schema)

        # Check the error messages
        try:
            self.assertEqual(len(expected_errors), len(error_logs))
        except AssertionError as e:
            print(f"Number of errors mismatched, expected:"
                  f" {len(expected_errors)} got: {len(error_logs)}")
            print(f"Errors: {error_logs}")
            print(f"Expected Errors: {expected_errors}")
            raise e
        self.assert_error_messages(expected_error_map, error_logs)
Example #17
    def process(self, df):
        # table where we're going to store the data
        table_id = f"{self.bq_dataset}.{self.bq_table}"

        # Helper for the JSON -> BigQuery schema transformations.
        generator = SchemaGenerator(input_format='dict',
                                    quoted_values_are_strings=True,
                                    keep_nulls=True)

        # Fetch the table's existing schema to seed deduce_schema. If the
        # table doesn't exist, proceed with an empty original_schema_map.
        try:
            table = self.client.get_table(table_id)
            self.client.schema_to_json(table.schema, "original_schema.json")
            with open("original_schema.json") as f:
                original_schema = json.load(f)
            # Convert the BigQuery-style schema into the internal schema_map.
            # (Running deduce_schema on the schema JSON itself would deduce a
            # schema *of* the schema, not the table's schema.)
            original_schema_map = bq_schema_to_map(original_schema)
        except Exception:
            logging.info(
                f"{table_id} does not exist; proceeding without an existing schema")
            original_schema_map = {}

        # Convert the dataframe to a list of record dicts.
        records = df.to_dict('records')

        # Generate the new schema. It is written to a file below because
        # schema_from_json only accepts a JSON file as input.
        schema_map, error_logs = generator.deduce_schema(
            input_data=records, schema_map=original_schema_map)
        schema = generator.flatten_schema(schema_map)

        schema_file_name = "schema_map.json"
        with open(schema_file_name, "w") as output_file:
            json.dump(schema, output_file)

        # convert the generated schema to a version that BQ understands
        bq_schema = self.client.schema_from_json(schema_file_name)

        job_config = bigquery.LoadJobConfig(
            source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
            schema_update_options=[
                bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
                bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION
            ],
            write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
            schema=bq_schema
        )

        load_job = None
        try:
            load_job = self.client.load_table_from_json(
                records,
                table_id,
                job_config=job_config,
            )  # Make an API request.

            load_job.result()  # Waits for the job to complete.
            if load_job.errors:
                logging.info(f"error_result = {load_job.error_result}")
                logging.info(f"errors = {load_job.errors}")
            else:
                logging.info(f'Loaded {len(df)} rows.')

        except Exception as error:
            logging.error(f'Error loading dataframe: {error}')

            if load_job and load_job.errors:
                logging.error(f"error_result = {load_job.error_result}")
                logging.error(f"errors = {load_job.errors}")