Example #1
    def execute(self, context: Any) -> None:
        sheet_hook = GSheetsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        with NamedTemporaryFile("w+") as temp_file:
            # Download data
            gcs_hook.download(
                bucket_name=self.bucket_name,
                object_name=self.object_name,
                filename=temp_file.name,
            )

            # Upload data
            values = list(csv.reader(temp_file))
            sheet_hook.update_values(
                spreadsheet_id=self.spreadsheet_id,
                range_=self.spreadsheet_range,
                values=values,
            )
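
For context, a minimal sketch of how this operator might be wired into a DAG; the module path, connection defaults, and all bucket/spreadsheet values below are assumptions for illustration:

    from datetime import datetime
    from airflow import DAG
    from airflow.providers.google.suite.transfers.gcs_to_sheets import GCSToGoogleSheetsOperator

    with DAG(dag_id="gcs_to_sheets_example", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
        upload_csv = GCSToGoogleSheetsOperator(
            task_id="upload_csv_to_sheet",
            spreadsheet_id="<spreadsheet-id>",   # hypothetical
            bucket_name="<bucket>",              # hypothetical
            object_name="data/report.csv",       # hypothetical
            spreadsheet_range="Sheet1",          # hypothetical
        )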
Example #2
    def execute(self, context: 'Context') -> None:
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )

        presto_hook = PrestoHook(presto_conn_id=self.presto_conn_id)

        with NamedTemporaryFile("w+") as temp_file:
            self.log.info("Downloading data from %s", self.source_object)
            gcs_hook.download(
                bucket_name=self.source_bucket,
                object_name=self.source_object,
                filename=temp_file.name,
            )

            data = list(csv.reader(temp_file))
            fields = tuple(data[0])
            rows = []
            for row in data[1:]:
                rows.append(tuple(row))

            self.log.info("Inserting data into %s", self.presto_table)
            presto_hook.insert_rows(table=self.presto_table, rows=rows, target_fields=fields)
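
Note that the first CSV row is passed as target_fields, i.e. it must contain the Presto column names. A standalone illustration of that split, using only the standard library and made-up data:

    import csv
    import io

    sample = io.StringIO("id,name\n1,alice\n2,bob\n")
    data = list(csv.reader(sample))
    fields = tuple(data[0])                  # ('id', 'name') -> target_fields
    rows = [tuple(row) for row in data[1:]]  # [('1', 'alice'), ('2', 'bob')]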
Example #3
    def execute(self, context: Dict):
        hook = GCSHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)

        with NamedTemporaryFile() as source_file, NamedTemporaryFile() as destination_file:
            self.log.info("Downloading file from %s", self.source_bucket)
            hook.download(
                bucket_name=self.source_bucket, object_name=self.source_object, filename=source_file.name
            )

            self.log.info("Starting the transformation")
            cmd = [self.transform_script] if isinstance(self.transform_script, str) else self.transform_script
            cmd += [source_file.name, destination_file.name]
            process = subprocess.Popen(
                args=cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True
            )
            self.log.info("Process output:")
            if process.stdout:
                for line in iter(process.stdout.readline, b''):
                    self.log.info(line.decode(self.output_encoding).rstrip())

            process.wait()
            if process.returncode:
                raise AirflowException("Transform script failed: {0}".format(process.returncode))

            self.log.info("Transformation succeeded. Output temporarily located at %s", destination_file.name)

            self.log.info("Uploading file to %s as %s", self.destination_bucket, self.destination_object)
            hook.upload(
                bucket_name=self.destination_bucket,
                object_name=self.destination_object,
                filename=destination_file.name,
            )
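
The operator appends the downloaded source path and the output path as the last two arguments of transform_script, so any executable honoring that contract works. A minimal hypothetical transform script (the name transform.py and the uppercasing logic are illustrative):

    #!/usr/bin/env python
    # transform.py <source> <destination> -- hypothetical example script
    import sys

    source, destination = sys.argv[1], sys.argv[2]
    with open(source) as src, open(destination, "w") as dst:
        # a real script would do meaningful work here
        dst.write(src.read().upper())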
Example #4
    def _copy_single_object(
        self,
        gcs_hook: GCSHook,
        sftp_hook: SFTPHook,
        source_object: str,
        destination_path: str,
    ) -> None:
        """Helper function to copy single object."""
        self.log.info(
            "Executing copy of gs://%s/%s to %s",
            self.source_bucket,
            source_object,
            destination_path,
        )

        dir_path = os.path.dirname(destination_path)
        sftp_hook.create_directory(dir_path)

        with NamedTemporaryFile("w") as tmp:
            gcs_hook.download(
                bucket_name=self.source_bucket,
                object_name=source_object,
                filename=tmp.name,
            )
            sftp_hook.store_file(destination_path, tmp.name)

        if self.move_object:
            self.log.info("Executing delete of gs://%s/%s", self.source_bucket, source_object)
            gcs_hook.delete(self.source_bucket, source_object)
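
The same copy can be sketched with the two hooks outside the operator; a sketch assuming default Airflow connections and hypothetical bucket and paths:

    from tempfile import NamedTemporaryFile
    from airflow.providers.google.cloud.hooks.gcs import GCSHook
    from airflow.providers.sftp.hooks.sftp import SFTPHook

    gcs_hook = GCSHook()    # uses google_cloud_default
    sftp_hook = SFTPHook()  # uses sftp_default
    with NamedTemporaryFile("w") as tmp:
        gcs_hook.download(bucket_name="my-bucket", object_name="data/file.csv", filename=tmp.name)
        sftp_hook.store_file("/upload/file.csv", tmp.name)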
Example #5
    def execute(self, context: 'Context') -> None:
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )

        trino_hook = TrinoHook(trino_conn_id=self.trino_conn_id)

        with NamedTemporaryFile("w+") as temp_file:
            self.log.info("Downloading data from %s", self.source_object)
            gcs_hook.download(
                bucket_name=self.source_bucket,
                object_name=self.source_object,
                filename=temp_file.name,
            )

            data = csv.reader(temp_file)
            rows = (tuple(row) for row in data)
            self.log.info("Inserting data into %s", self.trino_table)
            if self.schema_fields:
                trino_hook.insert_rows(table=self.trino_table,
                                       rows=rows,
                                       target_fields=self.schema_fields)
            elif self.schema_object:
                blob = gcs_hook.download(
                    bucket_name=self.source_bucket,
                    object_name=self.schema_object,
                )
                schema_fields = json.loads(blob.decode("utf-8"))
                trino_hook.insert_rows(table=self.trino_table,
                                       rows=rows,
                                       target_fields=schema_fields)
            else:
                trino_hook.insert_rows(table=self.trino_table, rows=rows)
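
Since the schema_object branch feeds the json.loads output straight into target_fields, that object is expected to be a JSON array of column names, e.g. (hypothetical columns):

    ["id", "name", "amount"]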
Example #6
    def execute(self, context: 'Context') -> None:
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )

        ga_hook = GoogleAnalyticsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )

        with NamedTemporaryFile("w+") as tmp_file:
            self.log.info(
                "Downloading file from GCS: %s/%s ",
                self.storage_bucket,
                self.storage_name_object,
            )
            gcs_hook.download(
                bucket_name=self.storage_bucket,
                object_name=self.storage_name_object,
                filename=tmp_file.name,
            )

            ga_hook.upload_data(
                tmp_file.name,
                self.account_id,
                self.web_property_id,
                self.custom_data_source_id,
                self.resumable_upload,
            )
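
Judging from the parameter names, this snippet appears to come from GoogleAnalyticsDataImportUploadOperator in the marketing_platform provider; under that assumption, a usage sketch with placeholder values:

    from airflow.providers.google.marketing_platform.operators.analytics import (
        GoogleAnalyticsDataImportUploadOperator,
    )

    upload = GoogleAnalyticsDataImportUploadOperator(
        task_id="ga_upload",
        storage_bucket="<bucket>",                 # hypothetical
        storage_name_object="ga/data.csv",         # hypothetical
        account_id="<account-id>",                 # hypothetical
        web_property_id="UA-000000-1",             # hypothetical
        custom_data_source_id="<data-source-id>",  # hypothetical
    )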
Example #7
    def execute(self, context):
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        with NamedTemporaryFile("w+") as tmp_file:
            # Download file from GCS
            self.log.info(
                "Downloading file from GCS: %s/%s ", self.storage_bucket, self.storage_name_object,
            )

            gcs_hook.download(
                bucket_name=self.storage_bucket, object_name=self.storage_name_object, filename=tmp_file.name,
            )

            # Modify file
            self.log.info("Modifying temporary file %s", tmp_file.name)
            self._modify_column_headers(
                tmp_file_location=tmp_file.name,
                custom_dimension_header_mapping=self.custom_dimension_header_mapping,
            )

            # Upload newly formatted file to cloud storage
            self.log.info(
                "Uploading file to GCS: %s/%s ", self.storage_bucket, self.storage_name_object,
            )
            gcs_hook.upload(
                bucket_name=self.storage_bucket, object_name=self.storage_name_object, filename=tmp_file.name,
            )
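
The custom_dimension_header_mapping passed to _modify_column_headers maps Google Analytics custom dimension names to human-readable column headers; a hypothetical example:

    custom_dimension_header_mapping = {"ga:dimension1": "user_type"}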
Example #8
    def execute(self, context: 'Context'):
        self.log.info('Executing download: %s, %s, %s', self.bucket,
                      self.object_name, self.filename)
        hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )

        if self.store_to_xcom_key:
            file_size = hook.get_size(bucket_name=self.bucket,
                                      object_name=self.object_name)
            if file_size < MAX_XCOM_SIZE:
                file_bytes = hook.download(bucket_name=self.bucket,
                                           object_name=self.object_name)
                context['ti'].xcom_push(key=self.store_to_xcom_key,
                                        value=str(file_bytes,
                                                  self.file_encoding))
            else:
                raise AirflowException(
                    'The size of the downloaded file is too large to push to XCom!'
                )
        else:
            hook.download(bucket_name=self.bucket,
                          object_name=self.object_name,
                          filename=self.filename)
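
When store_to_xcom_key is set, a downstream task can pull the decoded file contents back out of XCom; a sketch with hypothetical task IDs and key:

    from airflow.operators.python import PythonOperator

    def read_downloaded_file(ti):
        # pulls the string pushed under store_to_xcom_key
        return ti.xcom_pull(task_ids="download_file", key="file_content")

    read_task = PythonOperator(task_id="read_file", python_callable=read_downloaded_file)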
Example #9
    def execute(self, context):
        self.log.info('Executing download: %s, %s, %s', self.bucket, self.object, self.filename)
        hook = GCSHook(
            google_cloud_storage_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )

        if self.store_to_xcom_key:
            file_bytes = hook.download(bucket_name=self.bucket, object_name=self.object)
            if sys.getsizeof(file_bytes) < MAX_XCOM_SIZE:
                context['ti'].xcom_push(key=self.store_to_xcom_key, value=file_bytes)
            else:
                raise AirflowException('The size of the downloaded file is too large to push to XCom!')
        else:
            hook.download(bucket_name=self.bucket, object_name=self.object, filename=self.filename)
Example #10
    def execute(self, context) -> List[str]:
        # list all files in a Google Cloud Storage bucket
        hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.google_impersonation_chain,
        )

        self.log.info(
            'Getting list of the files. Bucket: %s; Delimiter: %s; Prefix: %s',
            self.bucket,
            self.delimiter,
            self.prefix,
        )

        files = hook.list(bucket_name=self.bucket,
                          prefix=self.prefix,
                          delimiter=self.delimiter)

        s3_hook = S3Hook(aws_conn_id=self.dest_aws_conn_id,
                         verify=self.dest_verify,
                         extra_args=self.dest_s3_extra_args)

        if not self.replace:
            # if we are not replacing -> list all files in the S3 bucket
            # and only keep those files which are present in
            # Google Cloud Storage and not in S3
            bucket_name, prefix = S3Hook.parse_s3_url(self.dest_s3_key)
            # look for the bucket and the prefix to avoid looking into
            # parent directories/keys
            existing_files = s3_hook.list_keys(bucket_name, prefix=prefix)
            # in case no files exist, return an empty list to avoid errors
            existing_files = existing_files if existing_files is not None else []
            # remove the prefix for the existing files to allow the match
            existing_files = [
                file.replace(prefix, '', 1) for file in existing_files
            ]
            files = list(set(files) - set(existing_files))

        if files:

            for file in files:
                file_bytes = hook.download(object_name=file,
                                           bucket_name=self.bucket)

                dest_key = self.dest_s3_key + file
                self.log.info("Saving file to %s", dest_key)

                s3_hook.load_bytes(cast(bytes, file_bytes),
                                   key=dest_key,
                                   replace=self.replace,
                                   acl_policy=self.s3_acl_policy)

            self.log.info("All done, uploaded %d files to S3", len(files))
        else:
            self.log.info("In sync, no files needed to be uploaded to S3")

        return files
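
A usage sketch for this GCS-to-S3 sync, assuming the operator is GCSToS3Operator from the Amazon provider; all values are placeholders:

    from airflow.providers.amazon.aws.transfers.gcs_to_s3 import GCSToS3Operator

    sync_to_s3 = GCSToS3Operator(
        task_id="gcs_to_s3",
        bucket="<gcs-bucket>",                 # hypothetical
        prefix="data/",                        # hypothetical
        delimiter=".csv",                      # hypothetical
        dest_s3_key="s3://<s3-bucket>/data/",  # hypothetical
        replace=False,                         # skip files already in S3
    )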
Example #11
class GoogleCloudBucketHelper:
    """GoogleCloudStorageHook helper class to download a GCS object."""
    GCS_PREFIX_LENGTH = 5

    def __init__(self,
                 gcp_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None) -> None:
        self._gcs_hook = GCSHook(gcp_conn_id, delegate_to)

    def google_cloud_to_local(self, file_name: str) -> str:
        """
        Checks whether the file specified by file_name is stored in Google Cloud
        Storage (GCS); if so, downloads the file and saves it locally. The full
        path of the saved file will be returned. Otherwise the local file_name
        is returned immediately.

        :param file_name: The full path of the input file.
        :type file_name: str
        :return: The full path of the local file.
        :rtype: str
        """
        if not file_name.startswith('gs://'):
            return file_name

        # Extract bucket_id and object_id by first removing the 'gs://' prefix
        # and then splitting the remainder on the path delimiter '/'.
        path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
        if len(path_components) < 2:
            raise Exception(
                'Invalid Google Cloud Storage (GCS) object path: {}'
                .format(file_name))

        bucket_id = path_components[0]
        object_id = '/'.join(path_components[1:])
        local_file = os.path.join(
            tempfile.gettempdir(),
            'dataflow{}-{}'.format(str(uuid.uuid4())[:8], path_components[-1])
        )
        self._gcs_hook.download(bucket_id, object_id, local_file)

        if os.stat(local_file).st_size > 0:
            return local_file
        raise Exception(
            'Failed to download Google Cloud Storage (GCS) object: {}'
            .format(file_name))
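
Usage is a single call: gs:// paths are downloaded to a temp file and local paths are returned untouched. A sketch with a hypothetical object path:

    helper = GoogleCloudBucketHelper()
    local_path = helper.google_cloud_to_local("gs://my-bucket/dataflow/job.py")
    # a non-GCS path comes back unchanged:
    assert helper.google_cloud_to_local("/tmp/job.py") == "/tmp/job.py"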
Example #12
    def apply_validate_fn(*args, templates_dict, **kwargs):
        prediction_path = templates_dict["prediction_path"]
        scheme, bucket, obj, _, _ = urlsplit(prediction_path)
        if scheme != "gs" or not bucket or not obj:
            raise ValueError("Wrong format prediction_path: {}".format(prediction_path))
        summary = os.path.join(obj.strip("/"), "prediction.summary.json")
        gcs_hook = GCSHook()
        summary = json.loads(gcs_hook.download(bucket, summary))
        return validate_fn(summary)
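
The tuple unpacking relies on urlsplit returning (scheme, netloc, path, query, fragment), so the bucket lands in netloc and the object path in path. A quick standalone check:

    from urllib.parse import urlsplit

    scheme, bucket, obj, _, _ = urlsplit("gs://my-bucket/predictions/run1/")
    # scheme == "gs", bucket == "my-bucket", obj == "/predictions/run1/"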
Example #13
    def execute(self, context: Dict):
        gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id,
                           delegate_to=self.delegate_to)
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )

        self.log.info("Uploading file %s...")
        # Saving file in the temporary directory,
        # downloaded file from the GCS could be a 1GB size or even more
        with tempfile.NamedTemporaryFile("w+") as f:
            line_items = gcs_hook.download(
                bucket_name=self.bucket_name,
                object_name=self.object_name,
                filename=f.name,
            )
            f.flush()
            hook.upload_line_items(line_items=line_items)
Example #14
    def execute(self, context):
        # use the superclass to list all files in a Google Cloud Storage bucket
        files = super().execute(context)
        s3_hook = S3Hook(aws_conn_id=self.dest_aws_conn_id,
                         verify=self.dest_verify)

        if not self.replace:
            # if we are not replacing -> list all files in the S3 bucket
            # and only keep those files which are present in
            # Google Cloud Storage and not in S3
            bucket_name, prefix = S3Hook.parse_s3_url(self.dest_s3_key)
            # look for the bucket and the prefix to avoid looking into
            # parent directories/keys
            existing_files = s3_hook.list_keys(bucket_name, prefix=prefix)
            # in case no files exist, return an empty list to avoid errors
            existing_files = existing_files if existing_files is not None else []
            # remove the prefix for the existing files to allow the match
            existing_files = [
                file.replace(prefix, '', 1) for file in existing_files
            ]
            files = list(set(files) - set(existing_files))

        if files:
            hook = GCSHook(google_cloud_storage_conn_id=self.gcp_conn_id,
                           delegate_to=self.delegate_to)

            for file in files:
                file_bytes = hook.download(self.bucket, file)

                dest_key = self.dest_s3_key + file
                self.log.info("Saving file to %s", dest_key)

                s3_hook.load_bytes(file_bytes,
                                   key=dest_key,
                                   replace=self.replace)

            self.log.info("All done, uploaded %d files to S3", len(files))
        else:
            self.log.info("In sync, no files needed to be uploaded to S3")

        return files
Example #15
class GCSToGoogleDriveOperator(BaseOperator):
    """
    Copies objects from a Google Cloud Storage service to a Google Drive service,
    with renaming if requested.

    Using this operator requires the following OAuth 2.0 scope:

    .. code-block:: none

        https://www.googleapis.com/auth/drive

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GCSToGoogleDriveOperator`

    :param source_bucket: The source Google Cloud Storage bucket where the object is. (templated)
    :type source_bucket: str
    :param source_object: The source name of the object to copy in the Google Cloud
        Storage bucket. (templated)
        You can use only one wildcard for objects (filenames) within your bucket. The wildcard can appear
        inside the object name or at the end of the object name. Appending a wildcard to the bucket name
        is unsupported.
    :type source_object: str
    :param destination_object: The destination name of the object in the destination Google Drive
        service. (templated)
        If a wildcard is supplied in the source_object argument, this is the prefix that will be prepended
        to the final destination objects' paths.
        Note that the source path's part before the wildcard will be removed;
        if it needs to be retained it should be appended to destination_object.
        For example, with prefix ``foo/*`` and destination_object ``blah/``, the file ``foo/baz`` will be
        copied to ``blah/baz``; to retain the prefix write the destination_object as e.g. ``blah/foo``, in
        which case the copied file will be named ``blah/foo/baz``.
    :type destination_object: str
    :param move_object: When move object is True, the object is moved instead of copied to the new location.
        This is the equivalent of a mv command as opposed to a cp command.
    :type move_object: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide delegation enabled.
    :type delegate_to: str
    """

    template_fields = ("source_bucket", "source_object", "destination_object")
    ui_color = "#f0eee4"

    @apply_defaults
    def __init__(self,
                 source_bucket: str,
                 source_object: str,
                 destination_object: Optional[str] = None,
                 move_object: bool = False,
                 gcp_conn_id: str = "google_cloud_default",
                 delegate_to: Optional[str] = None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)

        self.source_bucket = source_bucket
        self.source_object = source_object
        self.destination_object = destination_object
        self.move_object = move_object
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.gcs_hook = None  # type: Optional[GCSHook]
        self.gdrive_hook = None  # type: Optional[GoogleDriveHook]

    def execute(self, context):

        self.gcs_hook = GCSHook(google_cloud_storage_conn_id=self.gcp_conn_id,
                                delegate_to=self.delegate_to)
        self.gdrive_hook = GoogleDriveHook(gcp_conn_id=self.gcp_conn_id,
                                           delegate_to=self.delegate_to)

        if WILDCARD in self.source_object:
            total_wildcards = self.source_object.count(WILDCARD)
            if total_wildcards > 1:
                error_msg = (
                    "Only one wildcard '*' is allowed in source_object parameter. "
                    "Found {} in {}.".format(total_wildcards,
                                             self.source_object))

                raise AirflowException(error_msg)

            prefix, delimiter = self.source_object.split(WILDCARD, 1)
            objects = self.gcs_hook.list(self.source_bucket,
                                         prefix=prefix,
                                         delimiter=delimiter)

            for source_object in objects:
                if self.destination_object is None:
                    destination_object = source_object
                else:
                    destination_object = source_object.replace(
                        prefix, self.destination_object, 1)

                self._copy_single_object(source_object=source_object,
                                         destination_object=destination_object)
        else:
            self._copy_single_object(
                source_object=self.source_object,
                destination_object=self.destination_object)

    def _copy_single_object(self, source_object, destination_object):
        self.log.info(
            "Executing copy of gs://%s/%s to gdrive://%s",
            self.source_bucket,
            source_object,
            destination_object,
        )

        with tempfile.NamedTemporaryFile() as file:
            filename = file.name
            self.gcs_hook.download(bucket_name=self.source_bucket,
                                   object_name=source_object,
                                   filename=filename)
            self.gdrive_hook.upload_file(local_location=filename,
                                         remote_location=destination_object)

        if self.move_object:
            self.gcs_hook.delete(self.source_bucket, source_object)
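
A usage sketch honoring the single-wildcard rule documented above; the module path and all values are placeholders:

    from airflow.providers.google.suite.transfers.gcs_to_gdrive import GCSToGoogleDriveOperator

    copy_reports = GCSToGoogleDriveOperator(
        task_id="gcs_to_gdrive",
        source_bucket="<bucket>",       # hypothetical
        source_object="reports/*.csv",  # at most one wildcard
        destination_object="backup/",   # prefix prepended to matched names
    )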
Example #16
File: gcs.py Project: iVerner/airflow
    def execute(self, context: "Context") -> List[str]:
        # Define intervals and prefixes.
        try:
            timespan_start = context["data_interval_start"]
            timespan_end = context["data_interval_end"]
        except KeyError:
            timespan_start = pendulum.instance(context["execution_date"])
            following_execution_date = context["dag"].following_schedule(
                context["execution_date"])
            if following_execution_date is None:
                timespan_end = None
            else:
                timespan_end = pendulum.instance(following_execution_date)

        if timespan_end is None:  # Only possible in Airflow before 2.2.
            self.log.warning(
                "No following schedule found, setting timespan end to max %s",
                timespan_end)
            timespan_end = DateTime.max
        elif timespan_start >= timespan_end:  # Airflow 2.2 sets start == end for non-periodic schedules.
            self.log.warning(
                "DAG schedule not periodic, setting timespan end to max %s",
                timespan_end)
            timespan_end = DateTime.max

        timespan_start = timespan_start.in_timezone(timezone.utc)
        timespan_end = timespan_end.in_timezone(timezone.utc)

        source_prefix_interp = GCSTimeSpanFileTransformOperator.interpolate_prefix(
            self.source_prefix,
            timespan_start,
        )
        destination_prefix_interp = GCSTimeSpanFileTransformOperator.interpolate_prefix(
            self.destination_prefix,
            timespan_start,
        )

        source_hook = GCSHook(
            gcp_conn_id=self.source_gcp_conn_id,
            impersonation_chain=self.source_impersonation_chain,
        )
        destination_hook = GCSHook(
            gcp_conn_id=self.destination_gcp_conn_id,
            impersonation_chain=self.destination_impersonation_chain,
        )

        # Fetch list of files.
        blobs_to_transform = source_hook.list_by_timespan(
            bucket_name=self.source_bucket,
            prefix=source_prefix_interp,
            timespan_start=timespan_start,
            timespan_end=timespan_end,
        )

        with TemporaryDirectory() as temp_input_dir, TemporaryDirectory() as temp_output_dir:
            temp_input_dir_path = Path(temp_input_dir)
            temp_output_dir_path = Path(temp_output_dir)

            # TODO: download in parallel.
            for blob_to_transform in blobs_to_transform:
                destination_file = temp_input_dir_path / blob_to_transform
                destination_file.parent.mkdir(parents=True, exist_ok=True)
                try:
                    source_hook.download(
                        bucket_name=self.source_bucket,
                        object_name=blob_to_transform,
                        filename=str(destination_file),
                        chunk_size=self.chunk_size,
                        num_max_attempts=self.download_num_attempts,
                    )
                except GoogleCloudError:
                    if self.download_continue_on_fail:
                        continue
                    raise

            self.log.info("Starting the transformation")
            cmd = [self.transform_script] if isinstance(
                self.transform_script, str) else self.transform_script
            cmd += [
                str(temp_input_dir_path),
                str(temp_output_dir_path),
                timespan_start.replace(microsecond=0).isoformat(),
                timespan_end.replace(microsecond=0).isoformat(),
            ]
            with subprocess.Popen(args=cmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  close_fds=True) as process:
                self.log.info("Process output:")
                if process.stdout:
                    for line in iter(process.stdout.readline, b''):
                        self.log.info(
                            line.decode(self.output_encoding).rstrip())

                process.wait()
                if process.returncode:
                    raise AirflowException(
                        f"Transform script failed: {process.returncode}")

            self.log.info(
                "Transformation succeeded. Output temporarily located at %s",
                temp_output_dir_path)

            files_uploaded = []

            # TODO: upload in parallel.
            for upload_file in temp_output_dir_path.glob("**/*"):
                if upload_file.is_dir():
                    continue

                upload_file_name = str(
                    upload_file.relative_to(temp_output_dir_path))

                if self.destination_prefix is not None:
                    upload_file_name = f"{destination_prefix_interp}/{upload_file_name}"

                self.log.info("Uploading file %s to %s", upload_file,
                              upload_file_name)

                try:
                    destination_hook.upload(
                        bucket_name=self.destination_bucket,
                        object_name=upload_file_name,
                        filename=str(upload_file),
                        chunk_size=self.chunk_size,
                        num_max_attempts=self.upload_num_attempts,
                    )
                    files_uploaded.append(str(upload_file_name))
                except GoogleCloudError:
                    if self.upload_continue_on_fail:
                        continue
                    raise

            return files_uploaded
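
Here the transform script receives four arguments: the input directory, the output directory, and the ISO-formatted timespan start and end. A minimal hypothetical script matching that contract (requires Python 3.8+ for dirs_exist_ok):

    #!/usr/bin/env python
    # timespan_transform.py -- hypothetical pass-through script
    import shutil
    import sys

    input_dir, output_dir, timespan_start, timespan_end = sys.argv[1:5]
    # a real script would transform files for the given timespan;
    # this one just copies the input tree into the (existing) output dir
    shutil.copytree(input_dir, output_dir, dirs_exist_ok=True)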
Example #17
    def execute(self, context):
        bq_hook = BigQueryHook(
            bigquery_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )

        if not self.schema_fields:
            if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
                gcs_hook = GCSHook(
                    gcp_conn_id=self.google_cloud_storage_conn_id,
                    delegate_to=self.delegate_to,
                    impersonation_chain=self.impersonation_chain,
                )
                blob = gcs_hook.download(
                    bucket_name=self.bucket,
                    object_name=self.schema_object,
                )
                schema_fields = json.loads(blob.decode("utf-8"))
            elif self.schema_object is None and self.autodetect is False:
                raise AirflowException(
                    'At least one of `schema_fields`, `schema_object`, or `autodetect` must be passed.'
                )
            else:
                schema_fields = None

        else:
            schema_fields = self.schema_fields

        source_uris = [
            f'gs://{self.bucket}/{source_object}'
            for source_object in self.source_objects
        ]
        conn = bq_hook.get_conn()
        cursor = conn.cursor()

        if self.external_table:
            cursor.create_external_table(
                external_project_dataset_table=self.destination_project_dataset_table,
                schema_fields=schema_fields,
                source_uris=source_uris,
                source_format=self.source_format,
                compression=self.compression,
                skip_leading_rows=self.skip_leading_rows,
                field_delimiter=self.field_delimiter,
                max_bad_records=self.max_bad_records,
                quote_character=self.quote_character,
                ignore_unknown_values=self.ignore_unknown_values,
                allow_quoted_newlines=self.allow_quoted_newlines,
                allow_jagged_rows=self.allow_jagged_rows,
                encoding=self.encoding,
                src_fmt_configs=self.src_fmt_configs,
                encryption_configuration=self.encryption_configuration,
                labels=self.labels,
                description=self.description,
            )
        else:
            cursor.run_load(
                destination_project_dataset_table=self.destination_project_dataset_table,
                schema_fields=schema_fields,
                source_uris=source_uris,
                source_format=self.source_format,
                autodetect=self.autodetect,
                create_disposition=self.create_disposition,
                skip_leading_rows=self.skip_leading_rows,
                write_disposition=self.write_disposition,
                field_delimiter=self.field_delimiter,
                max_bad_records=self.max_bad_records,
                quote_character=self.quote_character,
                ignore_unknown_values=self.ignore_unknown_values,
                allow_quoted_newlines=self.allow_quoted_newlines,
                allow_jagged_rows=self.allow_jagged_rows,
                encoding=self.encoding,
                schema_update_options=self.schema_update_options,
                src_fmt_configs=self.src_fmt_configs,
                time_partitioning=self.time_partitioning,
                cluster_fields=self.cluster_fields,
                encryption_configuration=self.encryption_configuration,
                labels=self.labels,
                description=self.description,
            )

        if cursor.use_legacy_sql:
            escaped_table_name = f'[{self.destination_project_dataset_table}]'
        else:
            escaped_table_name = f'`{self.destination_project_dataset_table}`'

        if self.max_id_key:
            cursor.execute(
                f'SELECT MAX({self.max_id_key}) FROM {escaped_table_name}')
            row = cursor.fetchone()
            max_id = row[0] if row[0] else 0
            self.log.info(
                'Loaded BQ data with max %s.%s=%s',
                self.destination_project_dataset_table,
                self.max_id_key,
                max_id,
            )
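
In this operator the schema_object is decoded into schema_fields for BigQuery, so the JSON is expected to be an array of BigQuery field definitions rather than bare column names; a hypothetical example:

    [
        {"name": "id", "type": "INTEGER", "mode": "REQUIRED"},
        {"name": "name", "type": "STRING", "mode": "NULLABLE"}
    ]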
Example #18
    def execute(self, context: 'Context'):
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )

        if not self.schema_fields:
            if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
                gcs_hook = GCSHook(
                    gcp_conn_id=self.gcp_conn_id,
                    delegate_to=self.delegate_to,
                    impersonation_chain=self.impersonation_chain,
                )
                blob = gcs_hook.download(
                    bucket_name=self.bucket,
                    object_name=self.schema_object,
                )
                schema_fields = json.loads(blob.decode("utf-8"))
            else:
                schema_fields = None
        else:
            schema_fields = self.schema_fields

        self.source_objects = (self.source_objects if isinstance(
            self.source_objects, list) else [self.source_objects])
        source_uris = [
            f'gs://{self.bucket}/{source_object}'
            for source_object in self.source_objects
        ]

        if self.external_table:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", DeprecationWarning)
                bq_hook.create_external_table(
                    external_project_dataset_table=self.destination_project_dataset_table,
                    schema_fields=schema_fields,
                    source_uris=source_uris,
                    source_format=self.source_format,
                    autodetect=self.autodetect,
                    compression=self.compression,
                    skip_leading_rows=self.skip_leading_rows,
                    field_delimiter=self.field_delimiter,
                    max_bad_records=self.max_bad_records,
                    quote_character=self.quote_character,
                    ignore_unknown_values=self.ignore_unknown_values,
                    allow_quoted_newlines=self.allow_quoted_newlines,
                    allow_jagged_rows=self.allow_jagged_rows,
                    encoding=self.encoding,
                    src_fmt_configs=self.src_fmt_configs,
                    encryption_configuration=self.encryption_configuration,
                    labels=self.labels,
                    description=self.description,
                )
        else:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", DeprecationWarning)
                bq_hook.run_load(
                    destination_project_dataset_table=self.destination_project_dataset_table,
                    schema_fields=schema_fields,
                    source_uris=source_uris,
                    source_format=self.source_format,
                    autodetect=self.autodetect,
                    create_disposition=self.create_disposition,
                    skip_leading_rows=self.skip_leading_rows,
                    write_disposition=self.write_disposition,
                    field_delimiter=self.field_delimiter,
                    max_bad_records=self.max_bad_records,
                    quote_character=self.quote_character,
                    ignore_unknown_values=self.ignore_unknown_values,
                    allow_quoted_newlines=self.allow_quoted_newlines,
                    allow_jagged_rows=self.allow_jagged_rows,
                    encoding=self.encoding,
                    schema_update_options=self.schema_update_options,
                    src_fmt_configs=self.src_fmt_configs,
                    time_partitioning=self.time_partitioning,
                    cluster_fields=self.cluster_fields,
                    encryption_configuration=self.encryption_configuration,
                    labels=self.labels,
                    description=self.description,
                )

        if self.max_id_key:
            select_command = f'SELECT MAX({self.max_id_key}) FROM `{self.destination_project_dataset_table}`'
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", DeprecationWarning)
                job_id = bq_hook.run_query(
                    sql=select_command,
                    use_legacy_sql=False,
                )
            row = list(bq_hook.get_job(job_id).result())
            if row:
                max_id = row[0] if row[0] else 0
                self.log.info(
                    'Loaded BQ data with max %s.%s=%s',
                    self.destination_project_dataset_table,
                    self.max_id_key,
                    max_id,
                )
            else:
                raise RuntimeError(f"The {select_command} returned no rows!")
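
A closing usage sketch for this loader; the module path and all values are placeholders:

    from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator

    load_csv = GCSToBigQueryOperator(
        task_id="gcs_to_bq",
        bucket="<bucket>",                                  # hypothetical
        source_objects=["data/sales.csv"],                  # hypothetical
        destination_project_dataset_table="proj.ds.sales",  # hypothetical
        schema_object="schemas/sales.json",                 # hypothetical
        source_format="CSV",
        write_disposition="WRITE_TRUNCATE",
    )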