def main(): """ Functional testing of minio python library. """ fake = Factory.create() client = Minio('s3.amazonaws.com', os.getenv('ACCESS_KEY'), os.getenv('SECRET_KEY')) _http = urllib3.PoolManager( cert_reqs='CERT_REQUIRED', ca_certs=certifi.where() ) # Get unique bucket_name, object_name. bucket_name = uuid.uuid4().__str__() object_name = uuid.uuid4().__str__() # Enable trace # client.trace_on(sys.stderr) # Make a new bucket. bucket_name = 'minio-pytest' print(client.make_bucket(bucket_name)) print(client.make_bucket(bucket_name+'.unique', location='us-west-1')) ## Check if return codes a valid from server. try: client.make_bucket(bucket_name+'.unique', location='us-west-1') except ResponseError as err: if str(err.code) in ['BucketAlreadyOwnedByYou', 'BucketAlreadyExists']: pass else: raise # Check if bucket was created properly. print(client.bucket_exists(bucket_name)) print(client.bucket_exists(bucket_name+'.unique')) # List all buckets. buckets = client.list_buckets() for bucket in buckets: print(bucket.name, bucket.creation_date) with open('testfile', 'wb') as file_data: file_data.write(fake.text().encode('utf-8')) file_data.close() # Put a file file_stat = os.stat('testfile') with open('testfile', 'rb') as file_data: client.put_object(bucket_name, object_name, file_data, file_stat.st_size) file_data.close() # Fput a file print(client.fput_object(bucket_name, object_name+'-f', 'testfile')) # Fetch stats on your object. print(client.stat_object(bucket_name, object_name)) # Get a full object object_data = client.get_object(bucket_name, object_name) with open('newfile', 'wb') as file_data: for data in object_data: file_data.write(data) file_data.close() # Get a full object locally. print(client.fget_object(bucket_name, object_name, 'newfile-f')) # List all object paths in bucket. objects = client.list_objects(bucket_name, recursive=True) for obj in objects: print(obj.bucket_name, obj.object_name, obj.last_modified, \ obj.etag, obj.size, obj.content_type) presigned_get_object_url = client.presigned_get_object(bucket_name, object_name) response = _http.urlopen('GET', presigned_get_object_url) if response.status != 200: response_error = ResponseError(response) raise response_error.get(bucket_name, object_name) presigned_put_object_url = client.presigned_put_object(bucket_name, object_name) value = fake.text().encode('utf-8') data = io.BytesIO(value).getvalue() response = _http.urlopen('PUT', presigned_put_object_url, body=data) if response.status != 200: response_error = ResponseError(response) raise response_error.put(bucket_name, object_name) object_data = client.get_object(bucket_name, object_name) if object_data.read() != value: raise ValueError('Bytes not equal') # Post policy. policy = PostPolicy() policy.set_bucket_name(bucket_name) policy.set_key_startswith('objectPrefix/') expires_date = datetime.utcnow()+timedelta(days=10) policy.set_expires(expires_date) print(client.presigned_post_policy(policy)) # Remove an object. print(client.remove_object(bucket_name, object_name)) print(client.remove_object(bucket_name, object_name+'-f')) # Remove a bucket. This operation will only work if your bucket is empty. print(client.remove_bucket(bucket_name)) print(client.remove_bucket(bucket_name+'.unique')) # Remove temporary files. os.remove('testfile') os.remove('newfile') os.remove('newfile-f')
class StorageClient:

    def __init__(self, endpoint_url, access_key, secret_key):
        url = sub(r"https?://", "", endpoint_url)
        self.client = Minio(
            url, access_key=access_key, secret_key=secret_key, secure=False)

    def __create_bucket(self, bucket_name):
        try:
            if not self.client.bucket_exists(bucket_name):
                self.client.make_bucket(bucket_name)
        except Exception as err:
            raise Exception("Unable to create new bucket: %s" % bucket_name)

    def upload_file(self, file_name, bucket_name, object_name):
        self.__create_bucket(bucket_name)
        if not object_name:
            object_name = file_name
        try:
            self.client.fput_object(bucket_name, object_name, file_name)
        except Exception as err:
            raise Exception("Unable to upload notebook:\n%s" % err)

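# A hypothetical usage sketch for the StorageClient above; the endpoint,
# credentials, and file names are placeholders, not values from the source.
if __name__ == "__main__":
    storage = StorageClient("http://localhost:9000", "ACCESS_KEY", "SECRET_KEY")
    # Uploads notebook.ipynb into the "notebooks" bucket, creating it if needed.
    storage.upload_file("notebook.ipynb", "notebooks", "runs/notebook.ipynb")
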
def sequential_random_rw(data, size, profile, bucket_name, filename):
    with tempfile.TemporaryDirectory(prefix="format", suffix="-tmp") as temp_dir:
        if profile == "gcs":
            storage_client = storage.Client()
            gc_bucket = storage_client.get_bucket(bucket_name)
            # Prepare for data read and write (update existing data).
            for i in range(0, size):
                blob = gc_bucket.blob(str(i))
                blob.upload_from_filename(filename)
        elif profile == "minio":
            gc_bucket = Minio('d23.dev.qopius.net:9000',
                              access_key='minioadmin',
                              secret_key='minioadmin',
                              secure=False)
            # Prepare for data read and write (update existing data).
            for i in range(0, size):
                gc_bucket.fput_object(bucket_name, str(i), filename)

        start = time.time()
        for i in range(0, size):
            choice = random.randint(1, 2)
            # path = os.path.join(temp_dir, str(i))
            if choice == 1:
                read_with_profile(profile, temp_dir, i, gc_bucket, bucket_name)
            else:
                write_with_profile(profile, temp_dir, i, data, gc_bucket,
                                   bucket_name, filename)
        end = time.time()
        # The original message said "posix", but this path measures the
        # selected object-storage profile.
        print("time " + profile + " seq read/write = " + str(end - start) + "s")

class MinIO(ObjectStorage):

    def create_client(self, *args, **kwargs):
        ip = kwargs.get('ip', "localhost")
        access_key = kwargs.get('access_key', None)
        secret_key = kwargs.get('secret_key', None)
        self.client = Minio(ip + ':9000',
                            access_key=access_key,
                            secret_key=secret_key,
                            secure=False)

    def retrieve_from_bucket(self, source_bucket, file_name):
        try:
            self.client.fget_object(source_bucket, file_name, "/tmp/" + file_name)
        except Exception as e:
            raise Exception(
                "There was an error retrieving object from the bucket: " + str(e))

    def store_to_bucket(self, destination_bucket, file_name, img_path):
        try:
            self.client.fput_object(destination_bucket, file_name, img_path)
        except Exception as e:
            raise Exception(
                "There was an error storing object to the bucket: " + str(e))

def export_to_s3(args):
    """S3 export routine"""
    # Retrieve s3 access and secret keys
    access_key = get_secret(
        's3_access_key') if not args.s3accesskey else args.s3accesskey
    secret_key = get_secret(
        's3_secret_key') if not args.s3secretkey else args.s3secretkey

    # Initialize minioClient with an endpoint and access/secret keys.
    minioClient = Minio('%s:%s' % (args.s3endpoint, args.s3port),
                        access_key=access_key,
                        secret_key=secret_key,
                        secure=args.secure)
    minioClient._http.connection_pool_kw['timeout'] = Timeout(
        connect=args.timeout, read=3 * args.timeout)

    # Make a bucket with the make_bucket API call.
    try:
        if not minioClient.bucket_exists(args.s3bucket):
            minioClient.make_bucket(args.s3bucket, location=args.s3location)
    except MaxRetryError:
        logging.error(
            'Connection timeout! Removing traces older than %d minutes',
            args.agemin)
        cleanup(args)
    except BucketAlreadyOwnedByYou:
        pass
    except BucketAlreadyExists:
        pass
    except ResponseError:
        raise
    else:
        # Upload traces to the server
        try:
            traces = [f for f in files(args.dir)]
            traces.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
            # Upload complete traces, exclude most recent log
            for trace in traces[:-1]:
                minioClient.fput_object(
                    args.s3bucket,
                    '%s.%s.sf' % (os.path.basename(trace), args.nodeip),
                    trace,
                    metadata={
                        'x-amz-meta-nodename': args.nodename,
                        'x-amz-meta-nodeip': args.nodeip,
                        'x-amz-meta-podname': args.podname,
                        'x-amz-meta-podip': args.podip,
                        'x-amz-meta-podservice': args.podservice,
                        'x-amz-meta-podns': args.podns,
                        'x-amz-meta-poduuid': args.poduuid
                    })
                os.remove(trace)
                logging.info('Uploaded trace %s', trace)
            # Upload partial trace without removing it
            # minioClient.fput_object(args.s3bucket, os.path.basename(traces[-1]),
            #                         traces[-1], metadata={'X-Amz-Meta-Trace': 'partial'})
            # logging.info('Uploaded trace %s', traces[-1])
        except ResponseError:
            logging.exception(
                'Caught exception while uploading traces to object store')

def _inner_2s3(self, dataset_type, instance_config, filename, job_id, dataset_uid):
    job = models.Job.query.get(job_id)
    dataset = _retrieve_dataset_and_set_state(dataset_type, job.id)
    instance = job.instance
    logger = get_instance_logger(instance, task_id=job_id)
    try:
        filename = zip_if_needed(filename)
        config = MinioConfig()
        client = Minio(endpoint=config.host, access_key=config.key,
                       secret_key=config.secret, secure=False)
        dt_now_str = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        tags = {
            "coverage": instance_config.name,
            "datetime": dt_now_str,
            "data_type": dataset_type,
            "tyr_data_path": filename,
        }
        file_key = "{coverage}/{dataset_type}.zip".format(
            coverage=instance_config.name, dataset_type=dataset_type
        )
        with collect_metric("{dataset_type}2s3".format(dataset_type=dataset_type),
                            job, dataset_uid):
            client.fput_object(config.bucket, file_key, filename,
                               metadata=tags, content_type="application/zip")
        dataset.state = "done"
    except:
        logger.exception("")
        job.state = "failed"
        dataset.state = "failed"
        raise
    finally:
        models.db.session.commit()

def main():
    submissionspec = sys.argv[1]
    log.info('staging out data according to specfile %s', submissionspec)

    subdata = json.load(open(submissionspec))
    pubspec = subdata['publisher_spec']
    parameters = subdata['parameters']
    state = subdata['state']
    resultfile = subdata['resultfile']

    log.info('pub: \n' + json.dumps(pubspec, indent=4))
    log.info('pars: \n' + json.dumps(parameters, indent=4))
    log.info('stat: \n' + json.dumps(state, indent=4))

    ydgconfig = json.load(open(os.environ.get('YDGCONFIG', 'ydgconfig.json')))

    state = LocalFSGlobalObjectsState.fromJSON(state)
    parameters = TypedLeafs(parameters, state.datamodel)

    teardown_spec, pubdata = publish(pubspec, parameters, state)
    for upload in teardown_spec['uploads']:
        state.put_file(upload['source'], upload['target'])

    with open('result.json', 'wb') as fl:
        fl.write(json.dumps(pubdata).encode('utf-8'))

    client = Minio(ydgconfig['resultstorage']['host'],
                   access_key=ydgconfig['resultstorage']['access_key'],
                   secret_key=ydgconfig['resultstorage']['secret_key'],
                   secure=True)
    client.fput_object(ydgconfig['resultstorage']['bucket'], resultfile, 'result.json')
    log.info('writing result data to: %s', resultfile)

class MinioClient(BaseStorageClient):

    def __init__(self, *args, **kwargs):
        hostname = kwargs.get("hostname")
        access_key = kwargs.get("access_key")
        secret_key = kwargs.get("secret_key")
        self.storage_client = Minio(hostname,
                                    access_key=access_key,
                                    secret_key=secret_key,
                                    secure=False)

    def stat_file(self, bucket_name, prefix=None, recursive=False):
        return self.storage_client.list_objects(bucket_name, prefix=prefix,
                                                recursive=recursive)

    def download_file(self, uri, file_path):
        parsed = urlparse(uri)
        if parsed.scheme != "minio":
            raise ValueError(f"Unknown URI scheme: {parsed.scheme}")
        bucket_name = parsed.path.split("/")[1]
        object_name = "/".join(parsed.path.split("/")[2:])
        return self.storage_client.fget_object(bucket_name, object_name, file_path)

    def upload_file(self, bucket_name, object_name, file_path):
        try:
            self.storage_client.make_bucket(bucket_name)
        except (BucketAlreadyExists, BucketAlreadyOwnedByYou):
            pass
        self.storage_client.fput_object(bucket_name, object_name, file_path)
        return self.storage_client.stat_object(bucket_name, object_name)

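# A hedged usage sketch for the MinioClient above; the hostname, keys, and
# the minio:// URI are illustrative placeholders.
client = MinioClient(hostname="localhost:9000",
                     access_key="ACCESS_KEY",
                     secret_key="SECRET_KEY")
# download_file expects a URI of the form minio://<bucket>/<object path>.
client.download_file("minio://my-bucket/path/to/object.bin", "/tmp/object.bin")
stat = client.upload_file("my-bucket", "path/to/object.bin", "/tmp/object.bin")
print(stat.size)
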
class MinioBlobMechanism:

    def __init__(self):
        self.minioClient = Minio(os.environ['MINIO_ENDPOINT'],
                                 access_key=os.environ['MINIO_ACCESS_KEY'],
                                 secret_key=os.environ['MINIO_SECRET_KEY'],
                                 secure=False)
        if not self.minioClient.bucket_exists(DataBlob):
            self.minioClient.make_bucket(DataBlob)
        if not self.minioClient.bucket_exists(ToolsBlob):
            self.minioClient.make_bucket(ToolsBlob)
        if not self.minioClient.bucket_exists(ResultsBlob):
            self.minioClient.make_bucket(ResultsBlob)

    def download_blob(self, container_name, blob_name):
        return self.minioClient.get_object(container_name, blob_name)

    def get_blob_to_path(self, container_name, blob_name, file_path):
        self.minioClient.fget_object(container_name, blob_name, file_path)

    def create_blob_from_path(self, container_name, blob_name, file_path):
        self.minioClient.fput_object(container_name, blob_name, file_path)

    def list_blobs(self, container_name):
        blobObjects = self.minioClient.list_objects(container_name)
        objects = [BlobFile(blobObject.object_name) for blobObject in blobObjects]
        return objects

    def delete_blob(self, container_name, blob_name):
        self.minioClient.remove_object(container_name, blob_name)

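# A hedged example of driving MinioBlobMechanism; DataBlob, ToolsBlob, and
# ResultsBlob are module-level bucket-name constants assumed by the class,
# and the values and file path here are placeholders.
DataBlob, ToolsBlob, ResultsBlob = "data", "tools", "results"
blobs = MinioBlobMechanism()  # reads MINIO_* settings from the environment
blobs.create_blob_from_path(DataBlob, "input.csv", "/tmp/input.csv")
for blob in blobs.list_blobs(DataBlob):
    print(blob)
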
def handle(req):
    client = Minio(os.environ['minio_hostname'],
                   access_key=os.environ['minio_access_key'],
                   secret_key=os.environ['minio_secret_key'],
                   secure=False)

    client.fget_object("incoming", "q_2.png", "/tmp/q_2.png")

    img = cv.imread("/tmp/q_2.png")
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    h, w = img.shape[:2]
    # cv.resize expects (width, height); the original swapped the two.
    img = cv.resize(img, (w // 2, h // 2), interpolation=cv.INTER_LINEAR)
    img = imutils.rotate_bound(img, -90)
    cv.imwrite("/tmp/q_2.png", img)

    if not client.bucket_exists("processed"):
        client.make_bucket("processed")
    else:
        print("Bucket 'processed' already exists.")

    client.fput_object("processed", "q_2.png", "/tmp/q_2.png")
    return req

class DataStoreHandler:

    def __init__(self, endpoint, access_key, secret_key, bucket_name):
        self.bucket_name = bucket_name
        self.minioClient = Minio(endpoint=endpoint,
                                 access_key=access_key,
                                 secret_key=secret_key,
                                 secure=False)
        print('Connected to DataStore')
        try:
            self.minioClient.make_bucket(bucket_name)
        except BucketAlreadyOwnedByYou:
            print('BucketAlreadyOwnedByYou')
        except BucketAlreadyExists:
            print('BucketAlreadyExists')
        except ResponseError:
            print('ResponseError')

    def upload(self, from_path, to_path):
        try:
            print("Uploading...")
            self.minioClient.fput_object(self.bucket_name, to_path, from_path)
            print("Upload Success")
        except ResponseError as err:
            return err

    def download(self, from_path, to_path):
        try:
            print("Downloading...")
            self.minioClient.fget_object(self.bucket_name, from_path, to_path)
            print("Download Success")
        except ResponseError as err:
            print(err)

def upload(
    url: str,
    access_key: str,
    secret_key: str,
    bucket_name: str,
    object_name: str,
    local_file: Path,
):
    # Create a client for the MinIO server with
    # its access key and secret key.
    client = Minio(
        url,
        access_key=access_key,
        secret_key=secret_key,
        http_client=http_client,
    )

    # Make the bucket if it does not exist.
    found = client.bucket_exists(bucket_name)
    if not found:
        client.make_bucket(bucket_name)
    else:
        print(f"Bucket '{bucket_name}' already exists")

    # Upload local_file under the given object name.
    client.fput_object(
        bucket_name,
        object_name,
        local_file,
    )
    print(f"'{str(local_file)}' is successfully uploaded as "
          f"object '{object_name}' to bucket '{bucket_name}'.")

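# A possible call site for upload() above. Note that upload() relies on a
# module-level http_client; the one defined here, and all the values below,
# are assumptions for illustration only.
from pathlib import Path
import urllib3

http_client = urllib3.PoolManager()  # assumed module-level dependency of upload()
upload(
    url="localhost:9000",
    access_key="ACCESS_KEY",
    secret_key="SECRET_KEY",
    bucket_name="artifacts",
    object_name="builds/app.tar.gz",
    local_file=Path("app.tar.gz"),
)
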
def handle(req):
    res = json.loads(req)
    minioClient = Minio('172.17.67.176:9000',
                        access_key='AKIAIOSFODNN7EXAMPLE',
                        secret_key='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
                        secure=False)

    json_file = json.dumps(res)
    with open("/tmp/dict.json", "w") as f:
        f.write(json_file)

    if res["result"] == "Success":
        # try:
        minioClient.fput_object('psuccess', str(randrange(100)) + '.json',
                                '/tmp/dict.json')
        # except ResponseError as err:
        #     print(err)
    else:
        # try:
        minioClient.fput_object('pnotsuccess', str(randrange(100)) + '.json',
                                '/tmp/dict.json')
        # except ResponseError as err:
        #     print(err)
    return "Completed"

class FileRepository:

    def __init__(self, config: RefitConfig):
        self._minio = Minio(endpoint=config.minio_host,
                            access_key=config.minio_access_key,
                            secret_key=config.minio_secret_key,
                            secure=False)

    def upload_file(self, bucket_name: str, object_name: str, file_path: str):
        try:
            self._minio.make_bucket(bucket_name, location="us-east-1")
        except BucketAlreadyOwnedByYou:
            pass
        except BucketAlreadyExists:
            pass
        except ResponseError:
            raise

        try:
            self._minio.fput_object(bucket_name, object_name, file_path)
        except ResponseError as err:
            print("Error putting file")
            print(err)
        return True

    def download_file(self, bucket_name: str, object_name: str):
        response = None
        try:
            response = self._minio.get_object(bucket_name, object_name)
            # Read the payload before releasing the connection; the original
            # closed the response first and returned an unusable object, and
            # raised NameError in the finally block if get_object failed.
            return response.read()
        finally:
            if response is not None:
                response.close()
                response.release_conn()

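# A hypothetical round trip with FileRepository; the RefitConfig fields are
# assumed from the constructor above, and all names are placeholders.
config = RefitConfig()  # assumed to carry minio_host / minio_access_key / minio_secret_key
repo = FileRepository(config)
repo.upload_file("models", "model-v1.bin", "/tmp/model-v1.bin")
payload = repo.download_file("models", "model-v1.bin")  # returns the object bytes
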
def main():
    url = generate_upload_url("dsadsa")
    print(url)
    return  # NOTE: everything below is unreachable while this early return is in place.

    # Create a client with the MinIO server playground, its access key
    # and secret key.
    client = Minio("localhost:9000",
                   access_key="MEDDLER",
                   secret_key="SUPERDUPERSECRET",
                   secure=False)

    # Make 'asiatrip' bucket if it does not exist.
    found = client.bucket_exists("asiatrip")
    if not found:
        client.make_bucket("asiatrip")
    else:
        print("Bucket 'asiatrip' already exists")

    # Upload './test_file.tar' as object name
    # 'asiaphotos-2015.zip' to bucket 'asiatrip'.
    client.fput_object(
        "asiatrip",
        "asiaphotos-2015.zip",
        "./test_file.tar",
    )
    print("'./test_file.tar' is successfully uploaded as "
          "object 'asiaphotos-2015.zip' to bucket 'asiatrip'.")

def handle(req):
    client = Minio(os.environ['minio_hostname'],
                   access_key=os.environ['minio_access_key'],
                   secret_key=os.environ['minio_secret_key'],
                   secure=False)

    r = requests.get(req)
    with open("/tmp/image.png", "wb") as file:
        file.write(r.content)

    if not client.bucket_exists("incoming"):
        client.make_bucket("incoming")
    else:
        print("Bucket 'incoming' already exists.")

    client.fput_object("incoming", "image.png", "/tmp/image.png")

    gateway_hostname = os.getenv("gateway_hostname", "gateway.openfaas")
    url = "http://" + gateway_hostname + ":8080/function/resize"
    r = requests.post(url)
    return req

def test1():
    client = Minio("localhost:9000",
                   access_key="minio",
                   secret_key="abcde12345",
                   secure=False)

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name)

    bucket_name = "files"
    # Create the bucket if it does not exist.
    if not client.bucket_exists(bucket_name):
        client.make_bucket(bucket_name)

    # List every file under "files".
    files = client.list_objects(bucket_name, recursive=True)
    for f in files:
        print(f.object_name)

    object_name = "mydir/test.py"  # A directory prefix is optional.
    try:
        # Fetch the object's status.
        r = client.stat_object(bucket_name, object_name)
        print(object_name, r.size)
    except NoSuchKey:
        print("not exists", object_name)

    # Write a file to the object.
    client.fput_object(bucket_name, object_name, "test1.py")

    content = client.get_object(bucket_name, object_name)
    print(object_name, len(content.data))

class S3Manager:

    def __init__(self):
        self.endpoint = os.getenv('SA_BUCKET_ENDPOINT')
        self.accessKey = os.getenv('SA_BUCKET_ACCESS_KEY')
        self.secretKey = os.getenv('SA_BUCKET_SECRET_KEY')
        self.bucketName = os.getenv('SA_BUCKET_NAME')
        self.bucketPath = os.getenv('SA_BUCKET_PATH')
        self.signObject = os.getenv('SA_BUCKET_SIGN_OBJECT')
        self.minioClient = Minio(self.endpoint,
                                 access_key=self.accessKey,
                                 secret_key=self.secretKey)

    def init_bucket(self):
        if not self.minioClient.bucket_exists(self.bucketName):
            self.minioClient.make_bucket(self.bucketName)

    def save_into_s3(self, qr_id: str, qr_path: str, path_under_bucket: str = None):
        self.minioClient.fput_object(
            self.bucketName,
            "{}/{}.png".format(
                self.bucketPath if path_under_bucket is None else path_under_bucket,
                qr_id),
            qr_path)
        os.remove(qr_path)
        if self.signObject:
            return self.minioClient.presigned_get_object(
                self.bucketName, "{}/{}.png".format(self.bucketPath, qr_id))
        else:
            return ""

def main():
    # Create a client with the MinIO server, its access key
    # and secret key.
    client = Minio(
        os.environ['MINIO_SERVER_AND_PORT'],
        access_key=os.environ['MINIO_ACCESS_KEY'],
        secret_key=os.environ['MINIO_SECRET_KEY'],
        secure=False,
    )

    new_bucket_str = make_new_bucket_string()
    found = client.bucket_exists(new_bucket_str)
    while found:
        new_bucket_str = make_new_bucket_string()
        found = client.bucket_exists(new_bucket_str)

    # The bucket doesn't already exist, so create it.
    client.make_bucket(new_bucket_str)

    # Upload every file under './files-to-upload/' to the new bucket,
    # keeping the file name as the object name.
    for root, dirs, files in os.walk("./files-to-upload/"):
        for filename in files:
            full_path_to_file = "./files-to-upload/" + filename
            client.fput_object(new_bucket_str, filename, full_path_to_file)
            print(full_path_to_file + " is successfully uploaded as" +
                  " object " + filename + " to bucket '" + new_bucket_str + "'.")

def handle(event, context):
    bin_data = event.body
    bin_length = len(bin_data)
    # Convert it into a binary stream
    bin_stream = io.BytesIO(bin_data)

    # Fetch the content-type together with the multipart boundary separator
    # value from the header. OpenFaaS passes HTTP header values through the
    # environment.
    input_content_type = event.headers.get('Content-Type')

    # Parse the multipart form data using the cgi FieldStorage library
    form = cgi.FieldStorage(fp=bin_stream,
                            environ={'REQUEST_METHOD': 'POST',
                                     'CONTENT_LENGTH': bin_length,
                                     'CONTENT_TYPE': input_content_type})
    filename = form["filename"].value
    file = form["file"].file.read()

    data = {"result": "Not found", "text": str(file)}
    words = file.split()
    for word in words:
        # file.read() returns bytes, so compare against a bytes literal;
        # the original compared against the str "son", which never matches.
        if word == b"son":
            data = {"result": "Success", "text": str(file)}

    json_file = json.dumps(data)
    with open("/tmp/dict.json", "w") as f:
        f.write(json_file)

    minioClient = Minio('192.168.1.223:9002',
                        access_key='AKIAIOSFODNN7EXAMPLE',
                        secret_key='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
                        secure=False)
    minioClient.fput_object('processedpocketsphinx', filename + ".json",
                            '/tmp/dict.json')

    return {
        "statusCode": 200,
        "body": "Success"
    }

class MinioBackend(Storage):

    def __init__(self, logger=None):
        self.client = Minio(os.getenv("MINIO_URI"),
                            access_key=os.getenv("MINIO_ACCESS_KEY_ID"),
                            secret_key=os.getenv("MINIO_SECRET_ACCESS_KEY"),
                            secure=False)
        self.logger = logger if logger else logging.getLogger("Minio")

    def get_bucket(self, bucket_name):
        # NOTE: Minio exposes no Bucket() handle; this looks like a leftover
        # from a boto3-style backend and will fail if called.
        return self.client.Bucket(name=bucket_name)

    def delete(self, filepath, bucket_name=None):
        try:
            self.client.remove_object(bucket_name, filepath)
            self.logger.info(
                f"Successfully deleted file {filepath} from {bucket_name}")
            return True, None
        except Exception as e:
            self.logger.error(f"Cannot delete {filepath} from {bucket_name}." +
                              f" Unexpected exception: {str(e)}")
            return False, e

    def list(self, bucket_name=None):
        try:
            self.logger.info(f"Successfully got the contents of {bucket_name}")
            return True, [obj for obj in self.client.list_objects(bucket_name)]
        except Exception as e:
            self.logger.error(f"Cannot read contents from {bucket_name}." +
                              f" Unexpected exception: {str(e)}")
            return False, e

    def read(self, filepath, bucket_name=None):
        try:
            content = self.client.get_object(bucket_name, filepath)
            self.logger.info(
                f"Successfully got the content of {filepath} from {bucket_name}")
            return True, content
        except Exception as e:
            self.logger.error(f"Cannot read {filepath} from {bucket_name}." +
                              f" Unexpected exception: {str(e)}")
            return False, e

    def save_file(self, filename, key=None, bucket_name=None):
        try:
            self.client.fput_object(bucket_name, key, filename)
            self.logger.info(f"Successfully uploaded to {bucket_name}")
            return True, None
        except Exception as e:
            self.logger.error(f"Cannot upload to {bucket_name}." +
                              f" Unexpected exception: {str(e)}")
            return False, e

    def save(self, filename, content, bucket_name=None):
        import uuid
        random_filename = str(uuid.uuid4())
        content = content.encode("utf-8")
        with open(random_filename, "wb") as f:
            f.write(content)
        self.save_file(random_filename, filename, bucket_name=bucket_name)
        os.remove(random_filename)

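# A brief sketch of how MinioBackend might be exercised; the bucket name and
# content are illustrative only, and the MINIO_* environment variables are
# assumed to be set.
backend = MinioBackend()
backend.save("report.txt", "hello from minio", bucket_name="reports")
ok, content = backend.read("report.txt", bucket_name="reports")
if ok:
    print(content.read())
ok, err = backend.delete("report.txt", bucket_name="reports")
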
def kafka_consumer(topic_name):
    consumer = KafkaConsumer(topic_name,
                             bootstrap_servers=['172.18.0.2:9092'],
                             auto_offset_reset='latest')

    for message in consumer:
        json_data = json.loads(message.value)
        bucket_name = json_data['Key'].split('/')[0]
        if (json_data['EventName'] == 's3:ObjectCreated:Put'
                and bucket_name == sys.argv[1]):
            print('\nInput Bucket :', bucket_name)
            print('\nInput File name :', json_data['Key'].split('/')[1])
            with open('kafka_log.json', 'w') as outfile:
                json.dump(json_data, outfile)

            # Initialize minioClient with an endpoint and access/secret keys.
            minioClient = Minio('52.116.33.131:9000',
                                access_key='sanity',
                                secret_key='CloudforAll!',
                                secure=False)

            # Put a json object log with contents from the kafka consumer
            # in the 'store' bucket.
            try:
                minioClient.fput_object('store', 'kafka_log.json', 'kafka_log.json')
                output_reference = process(json_data['Key'])
                print('\nOutput File reference :', output_reference)
            except ResponseError as err:
                print(err)

def process_data(access_key='minio_access_key', secret_key='minio_secret_key'):
    client = Minio('host.docker.internal:9000',
                   access_key=access_key,
                   secret_key=secret_key,
                   secure=False)

    required_buckets = ["my-bucket", "good", "bad"]
    for bucket in required_buckets:
        if client.bucket_exists(bucket):
            print(bucket + " exists")
        else:
            client.make_bucket(bucket)

    objects = client.list_objects("my-bucket", recursive=True)
    data_check = DataQualityCheck()
    for obj in objects:
        obj_name = obj.object_name
        client.fget_object("my-bucket", obj_name, obj_name)

        good_lines = []
        bad_lines = []
        with open(obj_name) as f:
            for line in f:
                line = str(line)
                if data_check.is_valid_json(line):
                    line = data_check.quality_check(line)
                else:
                    bad_lines.append(line)
                    continue
                if len(line['error']) == 0:
                    good_lines.append(line)
                else:
                    bad_lines.append(line)

        badline_file = 'bad_line' + str(datetime.now()) + '.txt'
        with open(badline_file, 'w') as filehandle:
            for bad in bad_lines:
                filehandle.write('%s\n' % bad)

        goodline_file = 'good_line' + str(datetime.now()) + '.txt'
        with open(goodline_file, 'w') as filehandle:
            # The original iterated over the file name string here;
            # iterate over the collected good lines instead.
            for good in good_lines:
                filehandle.write('%s\n' % good)

        print(good_lines)
        if len(good_lines) > 0:
            make_pandas(good_lines)

        client.fput_object("bad", badline_file, badline_file)
        client.fput_object("good", goodline_file, goodline_file)
        client.remove_object("my-bucket", obj_name)

def load_task(
    endpoint: str,
    bucket: str,
    train_images: str,
    train_labels: str,
    test_images: str,
    test_labels: str,
) -> NamedTuple('Data', [('filename', str)]):
    """Transforms MNIST data from upstream format into numpy array."""

    from gzip import GzipFile
    from pathlib import Path
    from tensorflow.python.keras.utils import get_file
    import numpy as np
    import struct

    from minio import Minio

    mclient = Minio(
        endpoint,
        access_key=Path('/secrets/accesskey').read_text(),
        secret_key=Path('/secrets/secretkey').read_text(),
        secure=False,
    )

    filename = 'mnist.npz'

    def load(path):
        """Ensures that a file is downloaded locally, then unzips and reads it."""
        return GzipFile(get_file(Path(path).name, path)).read()

    def parse_labels(b: bytes) -> np.array:
        """Parses numeric labels from input data."""
        assert struct.unpack('>i', b[:4])[0] == 0x801
        return np.frombuffer(b[8:], dtype=np.uint8)

    def parse_images(b: bytes) -> np.array:
        """Parses images from input data."""
        assert struct.unpack('>i', b[:4])[0] == 0x803
        count = struct.unpack('>i', b[4:8])[0]
        rows = struct.unpack('>i', b[8:12])[0]
        cols = struct.unpack('>i', b[12:16])[0]
        return np.frombuffer(b[16:], dtype=np.uint8).reshape((count, rows, cols))

    np.savez_compressed(
        f'/output/{filename}',
        **{
            'train_x': parse_images(load(train_images)),
            'train_y': parse_labels(load(train_labels)),
            'test_x': parse_images(load(test_images)),
            'test_y': parse_labels(load(test_labels)),
        },
    )

    mclient.fput_object(bucket, filename, f'/output/{filename}')

    return filename,

def init():
    client = Minio('minio:9000',
                   access_key='minioadmin',
                   secret_key='minioadmin',
                   secure=False)
    if not client.bucket_exists('files'):
        client.make_bucket('files')
    # list_objects yields Object instances, so compare against object names;
    # the original membership test against the raw string always succeeded.
    existing = {obj.object_name for obj in client.list_objects('files')}
    if 'sample.c10' not in existing:
        client.fput_object('files', 'sample.c10', 'sample.c10')

def upload_artifact(bucket_name, object_name, file_path, url, access_key, secret_key):
    minioClient = Minio(url,
                        access_key=access_key,
                        secret_key=secret_key,
                        secure=False)
    minioClient.fput_object(bucket_name, object_name, file_path)
    return minioClient.stat_object(bucket_name, object_name)

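# Example call for upload_artifact() above; the endpoint and credentials are
# placeholders. The returned stat carries etag/size metadata for the object.
stat = upload_artifact("artifacts", "ci/build-123.tar.gz", "./build.tar.gz",
                       "localhost:9000", "ACCESS_KEY", "SECRET_KEY")
print(stat.etag, stat.size)
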
def upload_file(body):
    if body:
        try:
            minioClient = Minio(BACKEND,
                                access_key=ACCESS_KEY_ID,
                                secret_key=SECRET_KEY_ID,
                                secure=True)
            try:
                bucket = minioClient.bucket_exists(BUCKET)
                if not bucket:
                    try:
                        # Create the bucket if it does not exist.
                        minioClient.make_bucket(BUCKET, location='us-east-1')
                        print("Bucket has been created successfully.")
                    except Exception as e:
                        return {'Code': 404, 'Error': e}

                # Upload the object/file.
                file_path = body['uri']
                version_of_package = body['version']
                file_name = list(file_path.split('/'))[-1]
                ver = list(version_of_package.split('.'))
                first_version, second_version, third_version = ver[0], ver[1], ver[2]
                backend_file = (first_version + "/" + second_version + "/" +
                                third_version + "/" + file_name)
                minioClient.fput_object(BUCKET, backend_file, file_path)
                return {'code': 200, 'message': 'File uploaded successfully.'}
            except Exception as e:
                return {'Code': 404, 'Error': e}
        except Exception as e:
            print(e)

class S3Upload(Upload):

    def __init__(self):
        self.logger = logging.getLogger('run.s3upload')
        self.minio = Minio(config['s3_server'],
                           access_key=config['s3_access_key'],
                           secret_key=config['s3_secret_key'],
                           secure=True)

    def upload_item(self, item_path, item_name):
        self.minio.fput_object('matsuri', item_name, item_path)

class S3Upload(Upload):

    def __init__(self):
        self.logger = get_logger('S3Upload')
        self.minio = Minio(s3_server,
                           access_key=s3_access_key,
                           secret_key=s3_secret_key,
                           secure=True)

    def upload_item(self, item_path, item_name):
        self.minio.fput_object('matsuri', item_name, item_path)

def main(): client = Minio("127.0.0.1:9000", access_key="minioadmin", secret_key="minioadmin", secure=False) client.fput_object( "test-bucket", "test_object", "data/a.log", )
def put_object(client: Minio, src_file: str, dst_filename: str, bucket_name: str):
    # Upload 'src_file' under the object name 'dst_filename'
    # to the bucket 'bucket_name'.
    client.fput_object(
        bucket_name,
        dst_filename,
        src_file,
    )
    print(
        f"'{src_file}' is successfully uploaded as "
        f"object '{dst_filename}' to bucket '{bucket_name}'. 😃"
    )

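# Example invocation of the put_object helper above, with placeholder
# credentials, bucket, and paths.
client = Minio("localhost:9000",
               access_key="ACCESS_KEY",
               secret_key="SECRET_KEY",
               secure=False)
put_object(client, "backup.tar.gz", "backups/2021/backup.tar.gz", "my-bucket")
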
# Delete files on the local machine.
for obj in deleteObjects:
    path = obj["name"]
    # Remove the file from the machine.
    os.remove(dir + path)
    # Remove the file from the list of current files.
    for file in filesNow:
        if file["name"] == path:
            filesNow.remove(file)

# Upload new files to the server.
for file in newFiles:
    path = file["name"]
    # Upload the file to the server.
    try:
        minioClient.fput_object(bucketName, path, dir + path)
    except BaseException as err:
        errorPutsFiles.append(file)
    else:
        # Add it to the list of current objects.
        objectTime = datetime.datetime.fromtimestamp(
            minioClient.stat_object(bucketName, path).last_modified)
        objectTime = objectTime.replace(tzinfo=timeZone)
        objectsNow.append({"name": path, "time": objectTime})

# Delete files from the server.
for file in deleteFiles:
    path = file["name"]
    # Remove the object on the server.
    minioClient.remove_object(bucketName, path)

#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
# and my-filepath are dummy values; please replace them with original values.

from minio import Minio
from minio.error import ResponseError

client = Minio('s3.amazonaws.com',
               access_key='YOUR-ACCESSKEYID',
               secret_key='YOUR-SECRETACCESSKEY')

# Put an object 'my-objectname' with contents from 'my-filepath'.
try:
    client.fput_object('my-bucketname', 'my-objectname', 'my-filepath')
except ResponseError as err:
    print(err)

# Put an object 'my-objectname-csv' with contents from
# 'my-filepath.csv' as 'application/csv'.
try:
    client.fput_object('my-bucketname', 'my-objectname-csv',
                       'my-filepath.csv', content_type='application/csv')
except ResponseError as err:
    print(err)