def execute(self, *args):
    """
    Convert the character encoding of every file matched by src_pattern
    under src_dir from encoding_from to encoding_to.

    If dest_dir is set, each converted file is written there under its
    original basename. Otherwise the file is converted in place through a
    hidden temporary file created in the same directory.
    """
    # essential parameters check
    valid = EssentialParameters(
        self.__class__.__name__,
        [self._src_dir, self._src_pattern, self._encoding_from, self._encoding_to],
    )
    valid()

    files = super().get_target_files(self._src_dir, self._src_pattern)
    if len(files) == 0:
        self._logger.info("No files are found. Nothing to do.")
        return

    for file in files:
        basename = os.path.basename(file)
        if self._dest_dir:
            File().convert_encoding(
                file,
                os.path.join(self._dest_dir, basename),
                self._encoding_from,
                self._encoding_to,
            )
        else:
            # Convert into a hidden temp file next to the original.
            # The random prefix avoids clashes with concurrent runs.
            tmpfile = os.path.join(
                os.path.dirname(file),
                "." + StringUtil().random_str(10) + "." + basename,
            )
            File().convert_encoding(
                file, tmpfile, self._encoding_from, self._encoding_to
            )
            # BUGFIX: was os.remove(file) followed by os.rename(tmpfile, file),
            # a non-atomic pair that could leave the original deleted with no
            # replacement if the process died in between. os.replace overwrites
            # the destination atomically and works on both POSIX and Windows.
            os.replace(tmpfile, file)

        self._logger.info("Encoded file %s" % basename)
def execute(self, *args):
    """
    Sort (and optionally de-duplicate) each file matched by src_pattern
    under src_dir, writing the result to dest_dir under the same filename.

    Sorting is delegated to a throwaway sqlite database: each source file
    is imported into a temp table and re-exported with the configured
    ORDER BY columns.
    """
    # essential parameters check
    valid = EssentialParameters(
        self.__class__.__name__,
        [
            self._order,
            self._src_dir,
            self._src_pattern,
            self._dest_dir,
        ],
    )
    valid()

    files = super().get_target_files(self._src_dir, self._src_pattern)
    self._logger.info("Files found %s" % files)

    # Timestamp + random suffix keeps the scratch db unique, so
    # concurrent executions cannot collide on the same file.
    ymd_hms = datetime.now().strftime("%Y%m%d%H%M%S%f")
    dbname = ".%s_%s.db" % (ymd_hms, StringUtil().random_str(8))
    tblname = "temp_table"

    sqlite = SqliteAdapter()
    sqlite.connect(dbname)
    try:
        for file in files:
            _, filename = os.path.split(file)
            dest_file = os.path.join(self._dest_dir, filename)
            sqlite.import_table(file, tblname, encoding=self._encoding)
            sqlite.export_table(
                tblname,
                dest_file,
                encoding=self._encoding,
                order=self._order,
                no_duplicate=self._no_duplicate,
            )
    finally:
        # BUGFIX: close() used to sit inside the try body, so any failure
        # in import/export leaked the sqlite connection and the scratch db
        # file was removed while still open. Always close, then remove.
        sqlite.close()
        os.remove(dbname)
def _save_as_file_via_gcs(self):
    """
    Export BigQuery data to GCS, then download the exported objects
    into dest_dir.

    If a query is configured, its result is first materialized into the
    referenced table (a temporary one when no tblname is set), extracted
    to GCS under a unique prefix, downloaded, and finally the temporary
    table and GCS objects are cleaned up.
    """
    self._logger.info("Save data as a file via GCS")
    os.makedirs(self._dest_dir, exist_ok=True)

    # Unique GCS prefix per run so downloads/cleanup only touch this run's objects.
    ymd_hms = datetime.now().strftime("%Y%m%d%H%M%S%f")
    path = "%s-%s" % (StringUtil().random_str(self._RANDOM_STR_LENGTH), ymd_hms,)
    prefix = "%s/%s/%s" % (self._dataset, self._tblname, path)

    if isinstance(self._credentials, str):
        self._logger.warning(
            (
                "DeprecationWarning: "
                "In the near future, "
                "the `credentials` will be changed to accept only dictionary types. "
                "Please see more information "
                "https://github.com/BrainPad/cliboa/blob/master/docs/modules/bigquery_read.md"
            )
        )
        key_filepath = self._credentials
    else:
        key_filepath = self._source_path_reader(self._credentials)
    gbq_client = BigQuery.get_bigquery_client(key_filepath)

    # NOTE(review): if self._dataset is falsy neither branch assigns table_ref
    # and the extract below raises NameError — presumably dataset is validated
    # upstream; confirm before relying on it.
    if self._dataset and self._tblname:
        table_ref = gbq_client.dataset(self._dataset).table(self._tblname)
    elif self._dataset and not self._tblname:
        tmp_tbl = (
            "tmp_" + StringUtil().random_str(self._RANDOM_STR_LENGTH) + "_" + ymd_hms
        )
        table_ref = gbq_client.dataset(self._dataset).table(tmp_tbl)
    gcs_client = Gcs.get_gcs_client(key_filepath)
    gcs_bucket = gcs_client.bucket(self._bucket)

    # extract job config settings
    ext_job_config = BigQuery.get_extract_job_config()
    ext_job_config.compression = BigQuery.get_compression_type()
    ext = ".csv"
    if self._filename:
        _, ext = os.path.splitext(self._filename)
        support_ext = [".csv", ".json"]
        if ext not in support_ext:
            raise InvalidParameter("%s is not supported as filename." % ext)
    ext_job_config.destination_format = BigQuery.get_destination_format(ext)

    comp_format_and_ext = {"GZIP": ".gz"}
    # BUGFIX: default to "" — .get() previously returned None for any
    # non-GZIP compression type, which was interpolated as the literal
    # string "None" into the GCS destination path.
    comp_ext = comp_format_and_ext.get(str(BigQuery.get_compression_type()), "")
    if self._filename:
        dest_gcs = "gs://%s/%s/%s%s" % (
            self._bucket,
            prefix,
            self._filename,
            comp_ext,
        )
    else:
        dest_gcs = "gs://%s/%s/*%s%s" % (self._bucket, prefix, ext, comp_ext)

    # Execute query.
    if self._query:
        query_job_config = BigQuery.get_query_job_config()
        query_job_config.destination = table_ref
        query_job_config.write_disposition = BigQuery.get_write_disposition()
        query_job = gbq_client.query(
            self._query, location=self._location, job_config=query_job_config
        )
        query_job.result()

    # Extract to GCS
    extract_job = gbq_client.extract_table(
        table_ref, dest_gcs, job_config=ext_job_config, location=self._location
    )
    extract_job.result()

    # Download from gcs
    for blob in gcs_bucket.list_blobs(prefix=prefix):
        dest = os.path.join(self._dest_dir, os.path.basename(blob.name))
        blob.download_to_filename(dest)

    # Cleanup temporary table
    if self._query:
        gbq_client.delete_table(table_ref)

    # Cleanup temporary files
    for blob in gcs_bucket.list_blobs(prefix=prefix):
        blob.delete()
def _save_as_file_via_gcs(self):
    """
    Export BigQuery data to GCS, then download the exported objects
    into dest_dir.

    If a query is configured, its result is first materialized into the
    referenced table (a temporary one when no tblname is set), extracted
    to GCS under a unique prefix, downloaded, and finally the temporary
    table and GCS objects are cleaned up.
    """
    self._logger.info("Save data as a file via GCS")
    os.makedirs(self._dest_dir, exist_ok=True)

    # Unique GCS prefix per run so downloads/cleanup only touch this run's objects.
    ymd_hms = datetime.now().strftime("%Y%m%d%H%M%S%f")
    path = "%s-%s" % (
        StringUtil().random_str(self._RANDOM_STR_LENGTH),
        ymd_hms,
    )
    prefix = "%s/%s/%s" % (self._dataset, self._tblname, path)

    gbq_client = BigQuery.get_bigquery_client(self._credentials)
    # NOTE(review): if self._dataset is falsy neither branch assigns table_ref
    # and the extract below raises NameError — presumably dataset is validated
    # upstream; confirm before relying on it.
    if self._dataset and self._tblname:
        table_ref = gbq_client.dataset(self._dataset).table(self._tblname)
    elif self._dataset and not self._tblname:
        tmp_tbl = ("tmp_" + StringUtil().random_str(self._RANDOM_STR_LENGTH) + "_" + ymd_hms)
        table_ref = gbq_client.dataset(self._dataset).table(tmp_tbl)
    gcs_client = Gcs.get_gcs_client(self._credentials)
    gcs_bucket = gcs_client.get_bucket(self._bucket)

    # extract job config settings
    ext_job_config = BigQuery.get_extract_job_config()
    ext_job_config.compression = BigQuery.get_compression_type()
    ext = ".csv"
    if self._filename:
        _, ext = os.path.splitext(self._filename)
        support_ext = [".csv", ".json"]
        if ext not in support_ext:
            raise InvalidParameter("%s is not supported as filename." % ext)
    ext_job_config.destination_format = BigQuery.get_destination_format(ext)

    comp_format_and_ext = {"GZIP": ".gz"}
    # BUGFIX: default to "" — .get() previously returned None for any
    # non-GZIP compression type, which was interpolated as the literal
    # string "None" into the GCS destination path.
    comp_ext = comp_format_and_ext.get(str(BigQuery.get_compression_type()), "")
    if self._filename:
        dest_gcs = "gs://%s/%s/%s%s" % (
            self._bucket,
            prefix,
            self._filename,
            comp_ext,
        )
    else:
        dest_gcs = "gs://%s/%s/*%s%s" % (self._bucket, prefix, ext, comp_ext)

    # Execute query.
    if self._query:
        query_job_config = BigQuery.get_query_job_config()
        query_job_config.destination = table_ref
        query_job_config.write_disposition = BigQuery.get_write_disposition()
        query_job = gbq_client.query(
            self._query, location=self._location, job_config=query_job_config
        )
        query_job.result()

    # Extract to GCS
    extract_job = gbq_client.extract_table(
        table_ref, dest_gcs, job_config=ext_job_config, location=self._location
    )
    extract_job.result()

    # Download from gcs
    for blob in gcs_bucket.list_blobs(prefix=prefix):
        dest = os.path.join(self._dest_dir, os.path.basename(blob.name))
        blob.download_to_filename(dest)

    # Cleanup temporary table
    if self._query:
        gbq_client.delete_table(table_ref)

    # Cleanup temporary files
    for blob in gcs_bucket.list_blobs(prefix=prefix):
        blob.delete()