def _copy_single_object(
    self,
    gcs_hook: GCSHook,
    sftp_hook: SFTPHook,
    source_object: str,
    destination_path: str,
) -> None:
    """Helper function to copy a single object."""
    self.log.info(
        "Executing copy of gs://%s/%s to %s",
        self.source_bucket,
        source_object,
        destination_path,
    )

    dir_path = os.path.dirname(destination_path)
    sftp_hook.create_directory(dir_path)

    with NamedTemporaryFile("w") as tmp:
        gcs_hook.download(
            bucket_name=self.source_bucket,
            object_name=source_object,
            filename=tmp.name,
        )
        sftp_hook.store_file(destination_path, tmp.name)

    if self.move_object:
        self.log.info("Executing delete of gs://%s/%s", self.source_bucket, source_object)
        gcs_hook.delete(self.source_bucket, source_object)
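# A minimal usage sketch, assuming the helper above is a method of a GCS-to-SFTP
# transfer operator such as GCSToSFTPOperator; the operator name and the
# bucket/path values below are assumptions, not confirmed by this snippet.
copy_file = GCSToSFTPOperator(
    task_id="gcs_to_sftp_example",
    source_bucket="example-bucket",            # hypothetical bucket
    source_object="data/report.csv",
    destination_path="/upload/report.csv",     # remote SFTP path
    move_object=False,                         # True would delete the GCS object after copy
)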
class GoogleCloudBucketHelper:
    """GoogleCloudStorageHook helper class to download GCS objects."""

    GCS_PREFIX_LENGTH = 5

    def __init__(self,
                 gcp_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None) -> None:
        self._gcs_hook = GCSHook(gcp_conn_id, delegate_to)

    def google_cloud_to_local(self, file_name: str) -> str:
        """
        Checks whether the file specified by file_name is stored in Google
        Cloud Storage (GCS); if so, downloads the file, saves it locally, and
        returns the full path of the saved file. Otherwise the local
        file_name is returned immediately.

        :param file_name: The full path of the input file.
        :type file_name: str
        :return: The full path of the local file.
        :rtype: str
        """
        if not file_name.startswith('gs://'):
            return file_name

        # Extract bucket_id and object_id by first removing the 'gs://' prefix
        # and then splitting the remainder by the path delimiter '/'.
        path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
        if len(path_components) < 2:
            raise Exception(
                'Invalid Google Cloud Storage (GCS) object path: {}'.format(file_name))

        bucket_id = path_components[0]
        object_id = '/'.join(path_components[1:])
        local_file = os.path.join(
            tempfile.gettempdir(),
            'dataflow{}-{}'.format(str(uuid.uuid4())[:8], path_components[-1]))
        self._gcs_hook.download(bucket_id, object_id, local_file)

        if os.stat(local_file).st_size > 0:
            return local_file
        raise Exception(
            'Failed to download Google Cloud Storage (GCS) object: {}'.format(file_name))
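# A minimal sketch of how the helper above resolves GCS vs. local paths; the
# connection ID mirrors the default above, and the bucket/file names are
# hypothetical.
helper = GoogleCloudBucketHelper(gcp_conn_id='google_cloud_default')

# A plain local path is returned unchanged.
assert helper.google_cloud_to_local('/tmp/job.py') == '/tmp/job.py'

# A gs:// path is downloaded to a uniquely named file under tempfile.gettempdir(),
# e.g. /tmp/dataflow1a2b3c4d-job.py, and that local path is returned.
local_path = helper.google_cloud_to_local('gs://example-bucket/pipelines/job.py')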
def apply_validate_fn(*args, **kwargs):
    prediction_path = kwargs["templates_dict"]["prediction_path"]
    scheme, bucket, obj, _, _ = urlsplit(prediction_path)
    if scheme != "gs" or not bucket or not obj:
        raise ValueError("Wrong format prediction_path: {}".format(prediction_path))
    summary = os.path.join(obj.strip("/"), "prediction.summary.json")
    gcs_hook = GCSHook()
    summary = json.loads(gcs_hook.download(bucket, summary))
    # validate_fn is supplied by the enclosing scope.
    return validate_fn(summary)
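# A sketch of how apply_validate_fn above is typically invoked: via a
# PythonOperator whose templates_dict supplies the templated prediction_path
# read from kwargs. provide_context=True reflects the Airflow 1.x calling
# convention this snippet appears to assume; the task_id and path are
# hypothetical.
evaluate_validation = PythonOperator(
    task_id="evaluate-validation",
    python_callable=apply_validate_fn,
    provide_context=True,
    templates_dict={"prediction_path": "gs://example-bucket/predictions"},
)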
def execute(self, context):
    self.log.info('Executing download: %s, %s, %s', self.bucket, self.object, self.filename)
    hook = GCSHook(
        google_cloud_storage_conn_id=self.gcp_conn_id,
        delegate_to=self.delegate_to
    )

    if self.store_to_xcom_key:
        file_bytes = hook.download(bucket_name=self.bucket, object_name=self.object)
        if sys.getsizeof(file_bytes) < MAX_XCOM_SIZE:
            context['ti'].xcom_push(key=self.store_to_xcom_key, value=file_bytes)
        else:
            raise AirflowException(
                'The size of the downloaded file is too large to push to XCom!'
            )
    else:
        hook.download(bucket_name=self.bucket, object_name=self.object, filename=self.filename)
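# A usage sketch for the download operator that owns this execute method; the
# operator name GoogleCloudStorageDownloadOperator is an assumption inferred
# from the fields referenced above, and the bucket/object names are hypothetical.
download_file = GoogleCloudStorageDownloadOperator(
    task_id="download_file",
    bucket="example-bucket",
    object="data/file.txt",
    filename="/tmp/file.txt",             # written to disk...
    # store_to_xcom_key="file_contents",  # ...or pushed to XCom instead (must fit MAX_XCOM_SIZE)
)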
def execute(self, context: Dict):
    hook = GCSHook(gcp_conn_id=self.gcp_conn_id)

    with NamedTemporaryFile() as source_file, NamedTemporaryFile() as destination_file:
        self.log.info("Downloading file from %s", self.source_bucket)
        hook.download(
            bucket_name=self.source_bucket,
            object_name=self.source_object,
            filename=source_file.name,
        )

        self.log.info("Starting the transformation")
        cmd = [self.transform_script] if isinstance(self.transform_script, str) else self.transform_script
        cmd += [source_file.name, destination_file.name]
        process = subprocess.Popen(
            args=cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            close_fds=True,
        )
        self.log.info("Process output:")
        for line in iter(process.stdout.readline, b''):
            self.log.info(line.decode(self.output_encoding).rstrip())
        process.wait()
        # Any non-zero return code (including negative codes from signal
        # termination) means the script failed.
        if process.returncode:
            raise AirflowException("Transform script failed: {0}".format(process.returncode))

        self.log.info(
            "Transformation succeeded. Output temporarily located at %s",
            destination_file.name,
        )

        self.log.info("Uploading file to %s as %s", self.destination_bucket, self.destination_object)
        hook.upload(
            bucket_name=self.destination_bucket,
            object_name=self.destination_object,
            filename=destination_file.name,
        )
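# A usage sketch for the transform operator above; the name
# GCSFileTransformOperator is an assumption from the fields used, and the
# buckets and script path are hypothetical. The script is invoked as
# <transform_script> <source_file> <destination_file>.
transform_file = GCSFileTransformOperator(
    task_id="transform_file",
    source_bucket="example-src-bucket",
    source_object="data/input.csv",
    destination_bucket="example-dst-bucket",
    destination_object="data/output.csv",
    transform_script=["python", "/opt/scripts/transform.py"],  # a str or a list of args
)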
def execute(self, context):
    # use the superclass to list all files in a Google Cloud Storage bucket
    files = super().execute(context)

    s3_hook = S3Hook(aws_conn_id=self.dest_aws_conn_id, verify=self.dest_verify)

    if not self.replace:
        # if we are not replacing -> list all files in the S3 bucket
        # and only keep those files which are present in
        # Google Cloud Storage and not in S3
        bucket_name, prefix = S3Hook.parse_s3_url(self.dest_s3_key)
        # look for the bucket and the prefix to avoid looking into
        # parent directories/keys
        existing_files = s3_hook.list_keys(bucket_name, prefix=prefix)
        # in case no files exist, fall back to an empty list to avoid errors
        existing_files = existing_files if existing_files is not None else []
        # remove the prefix from the existing files to allow the match
        existing_files = [file.replace(prefix, '', 1) for file in existing_files]
        files = list(set(files) - set(existing_files))

    if files:
        hook = GCSHook(
            google_cloud_storage_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to
        )

        for file in files:
            file_bytes = hook.download(self.bucket, file)
            dest_key = self.dest_s3_key + file
            self.log.info("Saving file to %s", dest_key)
            s3_hook.load_bytes(file_bytes, key=dest_key, replace=self.replace)

        self.log.info("All done, uploaded %d files to S3", len(files))
    else:
        self.log.info("In sync, no files needed to be uploaded to S3")

    return files
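# A usage sketch for the GCS-to-S3 operator above; the name GCSToS3Operator is
# an assumption, and the bucket names are hypothetical. Note that dest_s3_key
# acts as a prefix: each GCS object name is appended to it to form the S3 key.
sync_buckets = GCSToS3Operator(
    task_id="gcs_to_s3",
    bucket="example-gcs-bucket",
    dest_aws_conn_id="aws_default",
    dest_s3_key="s3://example-s3-bucket/incoming/",
    replace=False,  # skip objects that already exist under the prefix
)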
def execute(self, context):
    bq_hook = BigQueryHook(
        bigquery_conn_id=self.bigquery_conn_id,
        delegate_to=self.delegate_to,
        location=self.location,
    )

    if not self.schema_fields:
        if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GCSHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
            )
            schema_fields = json.loads(
                gcs_hook.download(self.bucket, self.schema_object).decode("utf-8")
            )
        elif self.schema_object is None and self.autodetect is False:
            raise AirflowException(
                'At least one of `schema_fields`, `schema_object`, '
                'or `autodetect` must be passed.'
            )
        else:
            schema_fields = None
    else:
        schema_fields = self.schema_fields

    source_uris = [
        'gs://{}/{}'.format(self.bucket, source_object)
        for source_object in self.source_objects
    ]
    conn = bq_hook.get_conn()
    cursor = conn.cursor()

    if self.external_table:
        cursor.create_external_table(
            external_project_dataset_table=self.destination_project_dataset_table,
            schema_fields=schema_fields,
            source_uris=source_uris,
            source_format=self.source_format,
            compression=self.compression,
            skip_leading_rows=self.skip_leading_rows,
            field_delimiter=self.field_delimiter,
            max_bad_records=self.max_bad_records,
            quote_character=self.quote_character,
            ignore_unknown_values=self.ignore_unknown_values,
            allow_quoted_newlines=self.allow_quoted_newlines,
            allow_jagged_rows=self.allow_jagged_rows,
            encoding=self.encoding,
            src_fmt_configs=self.src_fmt_configs,
            encryption_configuration=self.encryption_configuration,
        )
    else:
        cursor.run_load(
            destination_project_dataset_table=self.destination_project_dataset_table,
            schema_fields=schema_fields,
            source_uris=source_uris,
            source_format=self.source_format,
            autodetect=self.autodetect,
            create_disposition=self.create_disposition,
            skip_leading_rows=self.skip_leading_rows,
            write_disposition=self.write_disposition,
            field_delimiter=self.field_delimiter,
            max_bad_records=self.max_bad_records,
            quote_character=self.quote_character,
            ignore_unknown_values=self.ignore_unknown_values,
            allow_quoted_newlines=self.allow_quoted_newlines,
            allow_jagged_rows=self.allow_jagged_rows,
            encoding=self.encoding,
            schema_update_options=self.schema_update_options,
            src_fmt_configs=self.src_fmt_configs,
            time_partitioning=self.time_partitioning,
            cluster_fields=self.cluster_fields,
            encryption_configuration=self.encryption_configuration,
        )

    if self.max_id_key:
        cursor.execute('SELECT MAX({}) FROM {}'.format(
            self.max_id_key, self.destination_project_dataset_table))
        row = cursor.fetchone()
        max_id = row[0] if row[0] else 0
        self.log.info(
            'Loaded BQ data with max %s.%s=%s',
            self.destination_project_dataset_table, self.max_id_key, max_id,
        )
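# A usage sketch for the load operator above; the name
# GoogleCloudStorageToBigQueryOperator is an assumption inferred from the
# fields used, and the bucket/table names are hypothetical. As enforced in
# execute(), at least one of schema_fields, schema_object, or autodetect must
# be provided.
load_csv = GoogleCloudStorageToBigQueryOperator(
    task_id="gcs_to_bq",
    bucket="example-bucket",
    source_objects=["data/part-001.csv"],
    destination_project_dataset_table="my_project.my_dataset.my_table",
    schema_object="schemas/my_table.json",  # JSON schema stored in the same bucket
    source_format="CSV",
    write_disposition="WRITE_TRUNCATE",
    max_id_key="id",  # logs MAX(id) after the load completes
)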
class GCSToGoogleDriveOperator(BaseOperator):
    """
    Copies objects from a Google Cloud Storage service to a Google Drive service,
    with renaming if requested.

    Using this operator requires the following OAuth 2.0 scope:

    .. code-block:: none

        https://www.googleapis.com/auth/drive

    :param source_bucket: The source Google Cloud Storage bucket where the object is. (templated)
    :type source_bucket: str
    :param source_object: The source name of the object to copy in the Google Cloud Storage
        bucket. (templated)
        You can use only one wildcard for objects (filenames) within your bucket. The wildcard
        can appear inside the object name or at the end of the object name. Appending a wildcard
        to the bucket name is unsupported.
    :type source_object: str
    :param destination_object: The destination name of the object in the destination Google Drive
        service. (templated)
        If a wildcard is supplied in the source_object argument, this is the prefix that will be
        prepended to the final destination objects' paths.
        Note that the source path's part before the wildcard will be removed;
        if it needs to be retained it should be appended to destination_object.
        For example, with prefix ``foo/*`` and destination_object ``blah/``, the file ``foo/baz``
        will be copied to ``blah/baz``; to retain the prefix write the destination_object as
        e.g. ``blah/foo``, in which case the copied file will be named ``blah/foo/baz``.
    :type destination_object: str
    :param move_object: When move object is True, the object is moved instead of copied to the
        new location. This is the equivalent of a mv command as opposed to a cp command.
    :type move_object: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    """

    template_fields = ("source_bucket", "source_object", "destination_object")
    ui_color = "#f0eee4"

    @apply_defaults
    def __init__(self,
                 source_bucket: str,
                 source_object: str,
                 destination_object: Optional[str] = None,
                 move_object: bool = False,
                 gcp_conn_id: str = "google_cloud_default",
                 delegate_to: Optional[str] = None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)

        self.source_bucket = source_bucket
        self.source_object = source_object
        self.destination_object = destination_object
        self.move_object = move_object
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.gcs_hook = None  # type: Optional[GCSHook]
        self.gdrive_hook = None  # type: Optional[GoogleDriveHook]

    def execute(self, context):
        self.gcs_hook = GCSHook(
            google_cloud_storage_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
        )
        self.gdrive_hook = GoogleDriveHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
        )

        if WILDCARD in self.source_object:
            total_wildcards = self.source_object.count(WILDCARD)
            if total_wildcards > 1:
                error_msg = (
                    "Only one wildcard '*' is allowed in source_object parameter. "
                    "Found {} in {}.".format(total_wildcards, self.source_object)
                )
                raise AirflowException(error_msg)

            prefix, delimiter = self.source_object.split(WILDCARD, 1)
            objects = self.gcs_hook.list(self.source_bucket, prefix=prefix, delimiter=delimiter)

            for source_object in objects:
                if self.destination_object is None:
                    destination_object = source_object
                else:
                    destination_object = source_object.replace(prefix, self.destination_object, 1)
                self._copy_single_object(
                    source_object=source_object, destination_object=destination_object
                )
        else:
            self._copy_single_object(
                source_object=self.source_object, destination_object=self.destination_object
            )

    def _copy_single_object(self, source_object, destination_object):
        self.log.info(
            "Executing copy of gs://%s/%s to gdrive://%s",
            self.source_bucket,
            source_object,
            destination_object,
        )

        with tempfile.NamedTemporaryFile() as file:
            filename = file.name
            self.gcs_hook.download(
                bucket_name=self.source_bucket,
                object_name=source_object,
                filename=filename,
            )
            self.gdrive_hook.upload_file(
                local_location=filename, remote_location=destination_object
            )

        if self.move_object:
            self.gcs_hook.delete(self.source_bucket, source_object)