from google.cloud import bigquery
from google.cloud.bigquery import LoadJobConfig


def DTSTableDefinition_to_BQLoadJobConfig(dts_tabledef):
    """
    https://cloud.google.com/bigquery/docs/reference/data-transfer/partner/rpc/google.cloud.bigquery.datatransfer.v1#tabledefinition
    TO
    https://googlecloudplatform.github.io/google-cloud-python/latest/bigquery/reference.html#google.cloud.bigquery.job.LoadJob

    :param dts_tabledef: a BQ DTS TableDefinition dict
    :return: an equivalent bigquery.LoadJobConfig
    """
    from bq_dts import rest_client

    job_config = LoadJobConfig()

    # RPCRecordSchema_to_GCloudSchema is defined elsewhere in this module.
    dts_schema = RPCRecordSchema_to_GCloudSchema(dts_tabledef['schema'])
    job_config.schema = dts_schema

    # BQ DTS does not provide controls for the following dispositions
    job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE

    if 'format' in dts_tabledef:
        dts_format = dts_tabledef['format']
        source_format = rest_client.BQ_DTS_FORMAT_TO_BQ_SOURCE_FORMAT_MAP[dts_format]
        assert source_format is not None
        job_config.source_format = source_format

    if 'max_bad_records' in dts_tabledef:
        job_config.max_bad_records = dts_tabledef['max_bad_records']

    if 'encoding' in dts_tabledef:
        dts_encoding = dts_tabledef['encoding']
        job_config.encoding = rest_client.BQ_DTS_ENCODING_TO_BQ_ENCODING_MAP[dts_encoding]

    if 'csv_options' in dts_tabledef:
        csv_opts = dts_tabledef['csv_options']
        if 'field_delimiter' in csv_opts:
            job_config.field_delimiter = csv_opts['field_delimiter']
        if 'allow_quoted_newlines' in csv_opts:
            job_config.allow_quoted_newlines = csv_opts['allow_quoted_newlines']
        if 'quote_char' in csv_opts:
            job_config.quote_character = csv_opts['quote_char']
        if 'skip_leading_rows' in csv_opts:
            job_config.skip_leading_rows = csv_opts['skip_leading_rows']

    return job_config
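# A hedged usage sketch of the converter above: `dts_tabledef` would come from
# a BQ DTS transfer-run message, and the client, source URIs, and destination
# table here are placeholders rather than anything defined in this module.
def _example_dts_load(bq_client, dts_tabledef, source_uris, destination):
    job_config = DTSTableDefinition_to_BQLoadJobConfig(dts_tabledef)
    load_job = bq_client.load_table_from_uri(source_uris, destination,
                                             job_config=job_config)
    return load_job.result()  # block until the load job finishes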
from typing import List

from google.cloud import storage
from google.cloud.bigquery import Client, Dataset, LoadJob, LoadJobConfig


def load_stage(dst_dataset: Dataset, bq_client: Client, bucket_name: str,
               gcs_client: storage.Client) -> List[LoadJob]:
    """
    Stage files from a bucket to a dataset

    :param dst_dataset: reference to destination dataset object
    :param bq_client: a BigQuery client object
    :param bucket_name: the location in GCS containing the vocabulary files
    :param gcs_client: a Cloud Storage client object
    :return: list of completed load jobs
    """
    blobs = list(gcs_client.list_blobs(bucket_name))

    table_blobs = [_filename_to_table_name(blob.name) for blob in blobs]
    missing_blobs = [
        table for table in VOCABULARY_TABLES if table not in table_blobs
    ]
    if missing_blobs:
        raise RuntimeError(
            f'Bucket {bucket_name} is missing files for tables {missing_blobs}')

    load_jobs = []
    for blob in blobs:
        table_name = _filename_to_table_name(blob.name)

        # ignore any non-vocabulary files
        if table_name not in VOCABULARY_TABLES:
            continue

        destination = dst_dataset.table(table_name)
        safe_schema = safe_schema_for(table_name)
        job_config = LoadJobConfig()
        job_config.schema = safe_schema
        job_config.skip_leading_rows = 1
        job_config.field_delimiter = FIELD_DELIMITER
        job_config.max_bad_records = MAX_BAD_RECORDS
        job_config.source_format = 'CSV'
        job_config.quote_character = ''
        source_uri = f'gs://{bucket_name}/{blob.name}'
        load_job = bq_client.load_table_from_uri(source_uri,
                                                 destination,
                                                 job_config=job_config)
        LOGGER.info(f'table:{destination} job_id:{load_job.job_id}')
        load_jobs.append(load_job)
        load_job.result()
    return load_jobs
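def _example_load_stage() -> None:
    # A hedged usage sketch: assumes default GCP application credentials; the
    # project, dataset, and bucket names are placeholders, not from this module.
    from google.cloud import bigquery

    bq_client = bigquery.Client()
    gcs_client = storage.Client()
    dst_dataset = bq_client.get_dataset('my_project.vocabulary')
    jobs = load_stage(dst_dataset, bq_client, 'my-vocab-bucket', gcs_client)
    LOGGER.info(f'loaded {len(jobs)} vocabulary tables')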
from typing import Set

from google.cloud import bigquery

# ValidatedRecordsHints, cant_handle_hint, quiet_remove and _assert_never are
# provided by the surrounding records library.


def add_load_job_csv_config(unhandled_hints: Set[str],
                            hints: ValidatedRecordsHints,
                            fail_if_cant_handle_hint: bool,
                            config: bigquery.LoadJobConfig) -> None:
    # source_format: File format of the data.
    config.source_format = 'CSV'

    # encoding: The character encoding of the data.
    #
    # "The supported values are UTF-8 or ISO-8859-1."
    if hints.encoding == 'UTF8':
        config.encoding = 'UTF-8'
    else:
        # Currently records hints don't support ISO-8859-1
        cant_handle_hint(fail_if_cant_handle_hint, 'encoding', hints)
    quiet_remove(unhandled_hints, 'encoding')

    # field_delimiter: The separator for fields in a CSV file.
    assert isinstance(hints.field_delimiter, str)
    config.field_delimiter = hints.field_delimiter
    quiet_remove(unhandled_hints, 'field-delimiter')

    # allow_jagged_rows: Allow missing trailing optional columns (CSV only).

    # null_marker: Represents a null value (CSV only).
    #
    # (documentation is mangled for this one, but I assume the default is
    # '' or something sensible, so not messing with it)

    # quote_character: Character used to quote data sections (CSV only).
    #
    # [Optional] The value that is used to quote data sections in
    # a CSV file. BigQuery converts the string to ISO-8859-1
    # encoding, and then uses the first byte of the encoded string
    # to split the data in its raw, binary state. The default
    # value is a double-quote ('"'). If your data does not contain
    # quoted sections, set the property value to an empty
    # string. If your data contains quoted newline characters, you
    # must also set the allowQuotedNewlines property to true.
    #
    # @default "

    # I tried a few combinations and found that when you leave
    # quote_character as the default:
    #
    # * Fields quoted with "" are loaded without the surrounding quotes
    #   in the string
    # * "" becomes " in a quoted field
    # * "" stays "" in a non-quoted field
    # * nonnumeric quoting works fine
    # * full quoting works fine

    if hints.quoting is None:
        config.quote_character = ''
    elif hints.quoting in ('all', 'minimal', 'nonnumeric'):
        # allow_quoted_newlines: Allow quoted data containing newline
        # characters (CSV only).
        config.allow_quoted_newlines = True

        assert isinstance(hints.quotechar, str)
        config.quote_character = hints.quotechar
        if hints.doublequote:
            pass
        else:
            cant_handle_hint(fail_if_cant_handle_hint, 'doublequote', hints)
    else:
        _assert_never(hints.quoting)
    quiet_remove(unhandled_hints, 'quoting')
    quiet_remove(unhandled_hints, 'quotechar')
    quiet_remove(unhandled_hints, 'doublequote')

    # No mention of escaping in BigQuery documentation, and in
    # practice backslashes come through without being interpreted.
    if hints.escape is None:
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'escape', hints)
    quiet_remove(unhandled_hints, 'escape')

    # skip_leading_rows: Number of rows to skip when reading data (CSV only).
    if hints.header_row:
        config.skip_leading_rows = 1
    else:
        config.skip_leading_rows = 0
    quiet_remove(unhandled_hints, 'header-row')

    # "When you load CSV or JSON data, values in DATE columns must
    # use the dash (-) separator and the date must be in the
    # following format: YYYY-MM-DD (year-month-day)."
    if hints.dateformat == 'YYYY-MM-DD':
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'dateformat', hints)
    quiet_remove(unhandled_hints, 'dateformat')

    # "When you load JSON or CSV data, values in TIMESTAMP columns
    # must use a dash (-) separator for the date portion of the
    # timestamp, and the date must be in the following format:
    # YYYY-MM-DD (year-month-day).
    # The hh:mm:ss (hour-minute-second) portion of the timestamp must
    # use a colon (:) separator."
    #
    # To test, log into the BigQuery web console and try SQL like this
    # (assumption is that the same timestamp parser is used during
    # CSV loads):
    #
    #   select TIMESTAMP("2000-01-02 16:34:56.789012US/Eastern") as a;
    #
    # Tests performed and result displayed on console query:
    #
    # DATE:
    # * 01-02-2019 (rejected)
    # * 01/02/19 (rejected)
    # * 2019-01-01 (accepted): 2019-01-01
    #
    # DATETIME:
    # * 2019-01-01 1:00pm (rejected)
    # * 2019-01-01 1:00:00pm (rejected)
    # * 2019-01-01 1:00PM (rejected)
    # * 2019-01-01 13:00 (rejected)
    # * 2019-01-01 13:00:00 (accepted): 2019-01-01T13:00:00
    # * 2019-01-01 1:00pm US/Eastern (rejected)
    # * 2019-01-01 1:00:00pm US/Eastern (rejected)
    # * 2019-01-01 13:00:00 US/Eastern (rejected)
    # * 2019-01-01 13:00:00 EST (rejected)
    # * 1997-12-17 07:37:16-08 (rejected)
    # * 2019-01-01T13:00:00 (accepted): 2019-01-01T13:00:00
    #
    # TIME:
    # * 1:00pm (rejected)
    # * 1:00:00pm (rejected)
    # * 13:00 (rejected)
    # * 13:00:00 (accepted): 13:00:00
    # * 1:00pm US/Eastern (rejected)
    # * 1:00pm EST (rejected)
    # * 07:37:16-08 (rejected)
    #
    # TIMESTAMP ("Required format is YYYY-MM-DD
    # HH:MM[:SS[.SSSSSS]]", which is BS, as it doesn't specify the
    # timezone format):
    #
    # * 2019-01-01 1:00pm (rejected)
    # * 2019-01-01 1:00:00pm (rejected)
    # * 2019-01-01 1:00PM (rejected)
    # * 2019-01-01 13:00 (rejected)
    # * 2019-01-01 13:00:00 (accepted): 2019-01-01T13:00:00
    # * 2019-01-01 1:00pm US/Eastern (rejected)
    # * 2019-01-01 1:00:00pm US/Eastern (rejected)
    # * 2019-01-01 13:00:00 US/Eastern (rejected)
    # * 2019-01-01 13:00:00 EST (rejected)
    # * 1997-12-17 07:37:16-08 (accepted): 1997-12-17 15:37:16 UTC
    # * 2019-01-01T13:00:00-08 (accepted): 2019-01-01 21:00:00 UTC
    # * 2000-01-02 16:34:56.789012+0000 (rejected)
    # * 2000-01-02 16:34:56.789012+00:00 (accepted)
    # * 2000-01-02 16:34:56.789012EST (rejected)
    # * 2000-01-02 16:34:56.789012US/Eastern (rejected)
    # * 2000-01-02 16:34:56.789012UTC (accepted): 2000-01-02 16:34:56.789012 UTC
    # * 2000-01-02 16:34:56.789012 UTC (accepted): 2000-01-02 16:34:56.789012 UTC
    #
    # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#examples
    # https://stackoverflow.com/questions/47466296/bigquery-datetime-format-csv-to-bigquery-yyyy-mm-dd-hhmmss-ssssss
    #
    # BigQuery supports exactly one format for ingesting timestamps
    # with timezones (what they call 'TIMESTAMP'; timestamps without
    # timezones they call 'DATETIME').
    #
    # The format they accept is ISO 8601, which sounds all nice and
    # standardy. Usable timestamps look like
    # 2000-01-02 16:34:56.789012+00:00.
    #
    # Cool cool. The only issue is that Python's strftime doesn't
    # actually provide a way to add the ':' in the timezone
    # offset. The only timezone offset code, %z, does not provide the
    # colon. Other implementations (GNU libc) offer the %:z option,
    # but that doesn't exist in Python and thus in Pandas.
    #
    # So if you're using Python to export timestamps with timezones,
    # you should probably use the `YYYY-MM-DD HH24:MI:SS` format and
    # express them in UTC.
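    # A minimal illustration of the gap (stock CPython datetime; note
    # that isoformat() does emit the colon that %z lacks):
    #
    #   from datetime import datetime, timezone
    #   ts = datetime(2000, 1, 2, 16, 34, 56, 789012, tzinfo=timezone.utc)
    #   ts.strftime('%Y-%m-%d %H:%M:%S.%f%z')
    #   # '2000-01-02 16:34:56.789012+0000' (rejected, per the table above)
    #   ts.isoformat(sep=' ')
    #   # '2000-01-02 16:34:56.789012+00:00' (accepted)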
    # https://stackoverflow.com/questions/44836581/does-python-time-strftime-process-timezone-options-correctly-for-rfc-3339
    # https://stackoverflow.com/questions/28729212/pandas-save-date-in-iso-format
    if hints.datetimeformat in ['YYYY-MM-DD HH24:MI:SS',
                                'YYYY-MM-DD HH:MI:SS']:
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformat', hints)
    quiet_remove(unhandled_hints, 'datetimeformat')

    if hints.datetimeformattz in ['YYYY-MM-DD HH:MI:SSOF',
                                  'YYYY-MM-DD HH24:MI:SSOF',
                                  'YYYY-MM-DD HH:MI:SS']:
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformattz', hints)
    quiet_remove(unhandled_hints, 'datetimeformattz')

    if hints.timeonlyformat in ['HH24:MI:SS', 'HH:MI:SS']:
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'timeonlyformat', hints)
    quiet_remove(unhandled_hints, 'timeonlyformat')

    # No options to change this. Tested with unix newlines, dos
    # newlines and mac newlines, and all were understood.
    if hints.record_terminator in ['\n', '\r\n', '\r', None]:
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'record-terminator', hints)
    quiet_remove(unhandled_hints, 'record-terminator')

    # No way to flag compression, but tested uncompressed and with
    # gzip, and both work great. .bz2 gives "400 Unsupported
    # compression type". Not sure about .lzo, but pandas can't
    # handle it regardless, so doubt it's handled.
    if hints.compression is None or hints.compression == 'GZIP':
        pass
    else:
        cant_handle_hint(fail_if_cant_handle_hint, 'compression', hints)
    quiet_remove(unhandled_hints, 'compression')
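# A hedged sketch of driving add_load_job_csv_config end to end. The real
# hints argument is a ValidatedRecordsHints from the records library; here a
# types.SimpleNamespace stands in for it (an assumption for illustration
# only), and the hint names mirror the quiet_remove calls above.
def _example_csv_config() -> bigquery.LoadJobConfig:
    from types import SimpleNamespace

    hints = SimpleNamespace(
        encoding='UTF8', field_delimiter=',', quoting='minimal',
        quotechar='"', doublequote=True, escape=None, header_row=True,
        dateformat='YYYY-MM-DD', datetimeformat='YYYY-MM-DD HH24:MI:SS',
        datetimeformattz='YYYY-MM-DD HH:MI:SSOF', timeonlyformat='HH24:MI:SS',
        record_terminator='\n', compression=None)
    unhandled = {'encoding', 'field-delimiter', 'quoting', 'quotechar',
                 'doublequote', 'escape', 'header-row', 'dateformat',
                 'datetimeformat', 'datetimeformattz', 'timeonlyformat',
                 'record-terminator', 'compression'}
    config = bigquery.LoadJobConfig()
    add_load_job_csv_config(unhandled, hints, True, config)
    # Every hint should have been applied or explicitly waived by now.
    assert not unhandled
    return config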