def tearDown(self):
    """Remove the remote test bucket and every local artifact the test created."""
    if self.data_saver is not None:
        # Pick the backend matching the endpoint, then empty and drop the bucket.
        if self.check_nas(self.data_config.endpoint):
            client = NAS(self.data_config.endpoint)
        else:
            client = Minio(**self.storage_config)
        bucket = self.data_config.bucket_name
        for obj in client.list_objects(bucket, recursive=True):
            client.remove_object(bucket, obj.object_name)
        client.remove_bucket(bucket)
        # Delete the files the saver wrote locally.
        for path in self.data_saver.get_filelist:
            if os.path.exists(path):
                os.remove(path)
    if self.dataset is not None and os.path.exists(self.dataset.cache_path):
        os.remove(self.dataset.cache_path)
    if self.data_config_file is not None and os.path.exists(self.data_config_file):
        os.remove(self.data_config_file)
def _check_bucket(self):
    """
    Check bucket name is exist. If not exist, create new bucket
    If bucket and metadata sub folder exist, get metadata(attributes, compressor) from there.

    Raises:
        FileNotFoundError: the bucket exists but holds no ``metadata.json``
            (the bucket is removed before raising).
        ValueError: the stored endpoint differs from the current one.
    """
    _client = (Minio(self.endpoint,
                     access_key=self.access_key,
                     secret_key=self.secret_key,
                     secure=self.secure,
                     region=self.region)
               if not check_nas(self.endpoint) else NAS(self.endpoint))
    if _client.bucket_exists(self.bucket_name):
        try:
            _metadata = _client.get_object(self.bucket_name, "metadata.json")
        # was a bare `except:` — narrow it so KeyboardInterrupt/SystemExit
        # are not swallowed, and chain the original cause for debugging.
        except Exception as err:
            # A bucket without metadata.json is unusable: remove it so a
            # fresh one can be created on the next run.
            _client.remove_bucket(self.bucket_name)
            raise FileNotFoundError(
                "metadata.json is not in bucket name {}"
                ", So this bucket will be removed".format(
                    self.bucket_name)) from err
        metadata_dict = json.loads(_metadata.read().decode("utf-8"))
        if self.endpoint != metadata_dict["endpoint"]:
            raise ValueError(
                "Already created endpoint({}) doesn't current endpoint str({})"
                " It may occurs permission denied error".format(
                    metadata_dict["endpoint"], self.endpoint))
        # Reuse the compressor and full metadata recorded at creation time.
        self.compressor = metadata_dict["compressor"]
        self.metadata = metadata_dict
    else:
        logger.info("{} {} is not exist!".format(self.optimizer_name,
                                                 str(self.additional)))
def _create_client(self):
    """Return the storage client for the configured endpoint (NAS or Minio)."""
    if check_nas(self.config.endpoint):
        return NAS(self.config.endpoint)
    return Minio(
        endpoint=self.config.endpoint,
        access_key=self.config.access_key,
        secret_key=self.config.secret_key,
        secure=self.config.secure,
    )
def _create_client(self):
    """Return the storage client for this endpoint (NAS path or Minio server)."""
    if check_nas(self.endpoint):
        return NAS(self.endpoint)
    return Minio(
        endpoint=self.endpoint,
        access_key=self.access_key,
        secret_key=self.secret_key,
        secure=self.secure,
        region=self.region,
    )
def tearDown(self):
    """Drop the remote model bucket and delete the temporary config file."""
    if self.model_manager is not None:
        # Select the backend for the endpoint, then empty and remove the bucket.
        if self.check_nas(self.model_config.endpoint):
            client = NAS(self.model_config.endpoint)
        else:
            client = Minio(**self.storage_config)
        bucket = self.model_config.bucket_name
        for obj in client.list_objects(bucket, recursive=True):
            client.remove_object(bucket, obj.object_name)
        client.remove_bucket(bucket)
    if self.model_config_file is not None and os.path.exists(self.model_config_file):
        os.remove(self.model_config_file)
def __init__( self, config, multipart_upload_size=5 * _MB, num_worker_threads=4, inmemory=False, refresh=False, ): self.config = config # Storage configuration self.multipart_upload_size = multipart_upload_size self.num_worker_threads = num_worker_threads # HDF5 configuration self.inmemory = inmemory self.filter = tb.Filters(**config.compressor) self._filelist = [] self._file, self._earray = self._get_newfile() self._disconnected = False self._client = (Minio( endpoint=self.config.endpoint, access_key=self.config.access_key, secret_key=self.config.secret_key, secure=self.config.secure, region=self.config.region, ) if not check_nas(self.config.endpoint) else NAS( self.config.endpoint)) self._check_and_create_bucket(refresh=refresh) self._uploader = Uploader( client=self._client, bucket=self.config.bucket_name, num_worker_threads=self.num_worker_threads, multipart_upload_size=self.multipart_upload_size, inmemory=self.inmemory, ) atexit.register(self._exit)
def _check_bucket(self):
    """
    Check bucket name is exist. If not exist, create new bucket
    If bucket and metadata sub folder exist, get metadata(attributes, compressor) from there.

    Returns:
        :obj: `None`:

    Raises:
        ValueError: the stored endpoint differs from the current one.
    """
    _client = (Minio(
        self.endpoint,
        access_key=self.access_key,
        secret_key=self.secret_key,
        secure=self.secure,
    ) if not check_nas(self.endpoint) else NAS(self.endpoint))
    if _client.bucket_exists(self.bucket_name):
        # Take the first object under metadata/ (if any) as the metadata file.
        objects = _client.list_objects(self.bucket_name, prefix="metadata/")
        first = next(iter(objects), None)
        _metadata = (_client.get_object(self.bucket_name, first.object_name)
                     if first is not None else None)
        if not _metadata:
            # No metadata stored yet; nothing to restore.
            return
        metadata_dict = json.loads(_metadata.read().decode("utf-8"))
        if self.endpoint != metadata_dict["endpoint"]:
            raise ValueError(
                "Already created endpoint({}) doesn't current endpoint str({})"
                " It may occurs permission denied error".format(
                    metadata_dict["endpoint"], self.endpoint))
        self.compressor = metadata_dict["compressor"]
        self.attributes = [
            DataAttribute(**item) for item in metadata_dict["attributes"]
        ]
    else:
        # logger.warn is deprecated since Python 3.3; use logger.warning.
        logger.warning("{} {} is not exist!".format(self.dataset_name,
                                                    str(self.additional)))
def __init__(self,
             config,
             num_worker_threads=4,
             multipart_upload_size=5 * _MB):
    """Store the upload settings and wire up the client and uploader.

    Args:
        config: storage configuration (endpoint, credentials, bucket name).
        num_worker_threads (int): worker threads for the background uploader.
        multipart_upload_size (int): chunk size for multipart uploads.
    """
    self.config = config
    self.num_worker_threads = num_worker_threads
    self.multipart_upload_size = multipart_upload_size

    # NAS endpoints use a filesystem-backed client; anything else is Minio.
    if check_nas(self.config.endpoint):
        self._client = NAS(self.config.endpoint)
    else:
        self._client = Minio(
            endpoint=self.config.endpoint,
            access_key=self.config.access_key,
            secret_key=self.config.secret_key,
            secure=self.config.secure,
        )
    self._uploader = Uploader(
        client=self._client,
        bucket=self.config.bucket_name,
        num_worker_threads=self.num_worker_threads,
        multipart_upload_size=self.multipart_upload_size,
        inmemory=True,
    )
class DataSaverTest(StorageTest, unittest.TestCase):
    """Check that the NAS backend mirrors Minio's bucket/object behavior."""

    minio_config = Minio(
        endpoint="127.0.0.1:9001",
        access_key="minio",
        secret_key="miniosecretkey",
        secure=False,
    )
    nas_config = NAS(path="/tmp/unittest")

    def _create_bucket(self):
        # One bucket per backend so results can be compared pairwise.
        self.minio_config.make_bucket('testminio')
        self.nas_config.make_bucket('testnas')

    def _get_obj_names(self, generator):
        # Sort so listing order differences don't affect comparisons.
        return sorted(item.object_name for item in generator)

    def _check_assertEqual(self, prefix=''):
        # Both recursive and flat listings must agree between backends.
        for recursive in (True, False):
            minio_names = self._get_obj_names(
                self.minio_config.list_objects('testminio',
                                               prefix=prefix,
                                               recursive=recursive))
            nas_names = self._get_obj_names(
                self.nas_config.list_objects('testnas',
                                             prefix=prefix,
                                             recursive=recursive))
            self.assertEqual(minio_names, nas_names)

    def test_bucket_exists(self):
        self._create_bucket()
        self.assertEqual(self.minio_config.bucket_exists('testminio'),
                         self.nas_config.bucket_exists('testnas'))

    def test_fput_object_with_list_objects(self):
        self._create_bucket()
        with open('test', 'w') as fp:
            fp.write('this is test')
        # Upload the same fixture under both a plain and a nested key.
        for object_name in ['test', 'metadata/test']:
            self.minio_config.fput_object(bucket_name='testminio',
                                          object_name=object_name,
                                          file_path='test')
        for object_name in ['test', 'metadata/test']:
            self.nas_config.fput_object(bucket_name='testnas',
                                        object_name=object_name,
                                        file_path='test')
        # Prefix matching must behave identically at every granularity.
        for prefix in ('', 'metadata/', 'metadata', 'meta', 'metadata/t',
                       'metadata/test'):
            self._check_assertEqual(prefix=prefix)
        # A trailing-slash prefix on a non-directory: flat listings agree...
        self.assertEqual(
            self._get_obj_names(
                self.minio_config.list_objects('testminio',
                                               prefix='metadata/test/')),
            self._get_obj_names(
                self.nas_config.list_objects('testnas',
                                             prefix='metadata/test/')))
        # ...but recursive listings are expected to differ between backends.
        self.assertNotEqual(
            self._get_obj_names(
                self.minio_config.list_objects('testminio',
                                               prefix='metadata/test/',
                                               recursive=True)),
            self._get_obj_names(
                self.nas_config.list_objects('testnas',
                                             prefix='metadata/test/',
                                             recursive=True)))