from typing import List

from google.cloud import bigquery, storage
from google.cloud.bigquery import (Client, Dataset, LoadJob, LoadJobConfig,
                                   SchemaField)


def upload_tweets():
    """Create a 'Tweets' dataset and load tweets.csv into a one-column table."""
    big_query_client = bigquery.Client.from_service_account_json(
        'my-beam-project-b2834963a4ae.json')
    dataset_ref = big_query_client.dataset('Tweets')
    dataset = Dataset(dataset_ref)
    dataset.description = 'This represents tweets of trending topics'
    dataset = big_query_client.create_dataset(dataset)

    SCHEMA = [
        SchemaField('Tweets', 'STRING', mode='NULLABLE'),
    ]
    table_ref = big_query_client.dataset('Tweets').table('tabletweet')

    load_config = LoadJobConfig()
    load_config.skip_leading_rows = 0
    load_config.schema = SCHEMA
    load_config.allow_quoted_newlines = True
    load_config.ignore_unknown_values = False
    load_config.max_bad_records = 200

    with open('tweets.csv', 'rb') as readable:
        load_job = big_query_client.load_table_from_file(
            readable, table_ref, job_config=load_config)
    # load_table_from_file only starts the job; wait for it so that any load
    # errors surface before we report success
    load_job.result()
    print('tweets file uploaded to big query')
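# A minimal, hedged verification sketch for upload_tweets(): it assumes the
# same service-account JSON is present and that the load above succeeded.
# verify_tweets_upload is a name introduced here for illustration only.
def verify_tweets_upload():
    client = bigquery.Client.from_service_account_json(
        'my-beam-project-b2834963a4ae.json')
    table = client.get_table(client.dataset('Tweets').table('tabletweet'))
    print(f'tabletweet now holds {table.num_rows} rows')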
def DTSTableDefinition_to_BQLoadJobConfig(dts_tabledef):
    """Translate a BQ Data Transfer Service TableDefinition into a LoadJobConfig.

    https://cloud.google.com/bigquery/docs/reference/data-transfer/partner/rpc/google.cloud.bigquery.datatransfer.v1#tabledefinition
    TO
    https://googlecloudplatform.github.io/google-cloud-python/latest/bigquery/reference.html#google.cloud.bigquery.job.LoadJob

    :param dts_tabledef: TableDefinition dict from the BQ DTS API
    :return: an equivalent LoadJobConfig
    """
    from bq_dts import rest_client
    job_config = LoadJobConfig()

    # schema conversion helper defined elsewhere in this module
    dts_schema = RPCRecordSchema_to_GCloudSchema(dts_tabledef['schema'])
    job_config.schema = dts_schema

    # BQ DTS does not provide controls for the following dispositions
    job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE

    if 'format' in dts_tabledef:
        dts_format = dts_tabledef['format']
        source_format = rest_client.BQ_DTS_FORMAT_TO_BQ_SOURCE_FORMAT_MAP[
            dts_format]
        assert source_format is not None
        job_config.source_format = source_format

    if 'max_bad_records' in dts_tabledef:
        job_config.max_bad_records = dts_tabledef['max_bad_records']

    if 'encoding' in dts_tabledef:
        dts_encoding = dts_tabledef['encoding']
        job_config.encoding = rest_client.BQ_DTS_ENCODING_TO_BQ_ENCODING_MAP[
            dts_encoding]

    if 'csv_options' in dts_tabledef:
        csv_opts = dts_tabledef['csv_options']

        if 'field_delimiter' in csv_opts:
            job_config.field_delimiter = csv_opts['field_delimiter']
        if 'allow_quoted_newlines' in csv_opts:
            job_config.allow_quoted_newlines = csv_opts['allow_quoted_newlines']
        if 'quote_char' in csv_opts:
            job_config.quote_character = csv_opts['quote_char']
        if 'skip_leading_rows' in csv_opts:
            job_config.skip_leading_rows = csv_opts['skip_leading_rows']

    return job_config
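# Hedged, illustrative input for the converter above. The key names follow
# the BQ DTS TableDefinition message, but every value here is made up for
# demonstration; in a real transfer run 'schema' would be the RPC
# RecordSchema handed to RPCRecordSchema_to_GCloudSchema.
EXAMPLE_DTS_TABLEDEF = {
    'schema': {'fields': [{'field_name': 'tweet', 'type': 'STRING'}]},
    'format': 'CSV',
    'max_bad_records': 10,
    'encoding': 'UTF8',
    'csv_options': {
        'field_delimiter': ',',
        'allow_quoted_newlines': False,
        'quote_char': '"',
        'skip_leading_rows': 1,
    },
}
# job_config = DTSTableDefinition_to_BQLoadJobConfig(EXAMPLE_DTS_TABLEDEF)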
def load_stage(dst_dataset: Dataset, bq_client: Client, bucket_name: str,
               gcs_client: storage.Client) -> List[LoadJob]:
    """
    Stage files from a bucket to a dataset

    :param dst_dataset: reference to destination dataset object
    :param bq_client: a BigQuery client object
    :param bucket_name: the location in GCS containing the vocabulary files
    :param gcs_client: a Cloud Storage client object
    :return: list of completed load jobs
    """
    blobs = list(gcs_client.list_blobs(bucket_name))

    table_blobs = [_filename_to_table_name(blob.name) for blob in blobs]
    missing_blobs = [
        table for table in VOCABULARY_TABLES if table not in table_blobs
    ]
    if missing_blobs:
        raise RuntimeError(
            f'Bucket {bucket_name} is missing files for tables {missing_blobs}')

    load_jobs = []
    for blob in blobs:
        table_name = _filename_to_table_name(blob.name)

        # ignore any non-vocabulary files
        if table_name not in VOCABULARY_TABLES:
            continue

        destination = dst_dataset.table(table_name)
        safe_schema = safe_schema_for(table_name)
        job_config = LoadJobConfig()
        job_config.schema = safe_schema
        job_config.skip_leading_rows = 1
        job_config.field_delimiter = FIELD_DELIMITER
        job_config.max_bad_records = MAX_BAD_RECORDS
        job_config.source_format = 'CSV'
        job_config.quote_character = ''
        source_uri = f'gs://{bucket_name}/{blob.name}'
        load_job = bq_client.load_table_from_uri(source_uri,
                                                 destination,
                                                 job_config=job_config)
        LOGGER.info(f'table:{destination} job_id:{load_job.job_id}')
        load_jobs.append(load_job)
        load_job.result()
    return load_jobs
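# Hedged usage sketch for load_stage(). The project id, dataset name, and
# bucket name below are placeholders, run_vocabulary_stage is a name
# introduced here for illustration, and VOCABULARY_TABLES, FIELD_DELIMITER,
# MAX_BAD_RECORDS, LOGGER, safe_schema_for, and _filename_to_table_name are
# assumed to be defined elsewhere in this module.
def run_vocabulary_stage():
    bq_client = Client(project='my-project')
    gcs_client = storage.Client(project='my-project')
    dst_dataset = Dataset(bq_client.dataset('vocabulary'))
    jobs = load_stage(dst_dataset, bq_client, 'my-vocab-bucket', gcs_client)
    print(f'completed {len(jobs)} load jobs')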