def _check_directory_paths(self, datadir_path, directory_paths, priority_paths): """ Checks if directory_path is already present in directory_paths. :datadir_path is directory path. :datadir_paths is set of all directory paths. :raises: BadStoreConfiguration exception if same directory path is already present in directory_paths. """ if datadir_path in directory_paths: msg = (_("Directory %(datadir_path)s specified " "multiple times in filesystem_store_datadirs " "option of filesystem configuration") % { 'datadir_path': datadir_path }) # If present with different priority it's a bad configuration if datadir_path not in priority_paths: LOG.exception(msg) raise exceptions.BadStoreConfiguration(store_name="filesystem", reason=msg) # Present with same prio (exact duplicate) only deserves a warning LOG.warning(msg)
def _create_image_directories(self, directory_paths): """ Create directories to write image files if it does not exist. :directory_paths is a list of directories belonging to glance store. :raises: BadStoreConfiguration exception if creating a directory fails. """ for datadir in directory_paths: if os.path.exists(datadir): self._check_write_permission(datadir) self._set_exec_permission(datadir) else: msg = _("Directory to write image files does not exist " "(%s). Creating.") % datadir LOG.info(msg) try: os.makedirs(datadir) self._check_write_permission(datadir) self._set_exec_permission(datadir) except (IOError, OSError): if os.path.exists(datadir): # NOTE(markwash): If the path now exists, some other # process must have beat us in the race condition. # But it doesn't hurt, so we can safely ignore # the error. self._check_write_permission(datadir) self._set_exec_permission(datadir) continue reason = _("Unable to create datadir: %s") % datadir LOG.error(reason) raise exceptions.BadStoreConfiguration( store_name="filesystem", reason=reason)
def _option_get(self, param): result = getattr(self.conf.glance_store, param) if not result: reason = (_("Could not find %(param)s in configuration " "options.") % {'param': param}) raise exceptions.BadStoreConfiguration( store_name='vmware_datastore', reason=reason) return result
def _sanity_check(self): if self.backend_group: store_conf = getattr(self.conf, self.backend_group) else: store_conf = self.conf.glance_store if store_conf.vmware_api_retry_count <= 0: msg = _('vmware_api_retry_count should be greater than zero') LOG.error(msg) raise exceptions.BadStoreConfiguration( store_name='vmware_datastore', reason=msg) if store_conf.vmware_task_poll_interval <= 0: msg = _('vmware_task_poll_interval should be greater than zero') LOG.error(msg) raise exceptions.BadStoreConfiguration( store_name='vmware_datastore', reason=msg)
def __init__(self, store, store_location, context=None, allow_reauth=False): # no context - no party if context is None: reason = _("Multi-tenant Swift storage requires a user context.") raise exceptions.BadStoreConfiguration(store_name="swift", reason=reason) super(MultiTenantConnectionManager, self).__init__( store, store_location, context, allow_reauth)
def _option_get(self, param): result = getattr(CONF, param) if not result: reason = _("Could not find %(param)s in configuration options.") \ % locals() LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="irods", reason=reason) return result
def _option_get(self, param): result = getattr(self.conf.glance_store, param) if not result: reason = (_("Could not find %(param)s in configuration options.") % param) LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="swift", reason=reason) return result
def get_cinderclient(conf, context=None, backend=None): if backend: glance_store = getattr(conf, backend) else: glance_store = conf.glance_store user_overriden = is_user_overriden(conf, backend=backend) if user_overriden: username = glance_store.cinder_store_user_name password = glance_store.cinder_store_password project = glance_store.cinder_store_project_name url = glance_store.cinder_store_auth_address else: username = context.user password = context.auth_token project = context.tenant if glance_store.cinder_endpoint_template: url = glance_store.cinder_endpoint_template % context.to_dict() else: info = glance_store.cinder_catalog_info service_type, service_name, interface = info.split(':') try: catalog = keystone_sc.ServiceCatalogV2(context.service_catalog) url = catalog.url_for( region_name=glance_store.cinder_os_region_name, service_type=service_type, service_name=service_name, interface=interface) except keystone_exc.EndpointNotFound: reason = _("Failed to find Cinder from a service catalog.") raise exceptions.BadStoreConfiguration(store_name="cinder", reason=reason) c = cinderclient.Client(username, password, project, auth_url=url, region_name=glance_store.cinder_os_region_name, insecure=glance_store.cinder_api_insecure, retries=glance_store.cinder_http_retries, cacert=glance_store.cinder_ca_certificates_file) LOG.debug( 'Cinderclient connection created for user %(user)s using URL: ' '%(url)s.', { 'user': username, 'url': url }) # noauth extracts user_id:project_id from auth_token if not user_overriden: c.client.auth_token = context.auth_token or '%s:%s' % (username, project) c.client.management_url = url return c
def get_cinderclient(self, context=None, legacy_update=False): # NOTE: For legacy image update from single store to multiple # stores we need to use admin context rather than user provided # credentials if legacy_update: user_overriden = False context = context.elevated() else: user_overriden = self.is_user_overriden() if user_overriden: username = self.store_conf.cinder_store_user_name password = self.store_conf.cinder_store_password project = self.store_conf.cinder_store_project_name url = self.store_conf.cinder_store_auth_address else: username = context.user_id password = context.auth_token project = context.project_id if self.store_conf.cinder_endpoint_template: template = self.store_conf.cinder_endpoint_template url = template % context.to_dict() else: info = self.store_conf.cinder_catalog_info service_type, service_name, interface = info.split(':') try: catalog = keystone_sc.ServiceCatalogV2( context.service_catalog) url = catalog.url_for( region_name=self.store_conf.cinder_os_region_name, service_type=service_type, service_name=service_name, interface=interface) except keystone_exc.EndpointNotFound: reason = _("Failed to find Cinder from a service catalog.") raise exceptions.BadStoreConfiguration(store_name="cinder", reason=reason) c = cinderclient.Client( username, password, project, auth_url=url, region_name=self.store_conf.cinder_os_region_name, insecure=self.store_conf.cinder_api_insecure, retries=self.store_conf.cinder_http_retries, cacert=self.store_conf.cinder_ca_certificates_file) LOG.debug( 'Cinderclient connection created for user %(user)s using URL: ' '%(url)s.', {'user': username, 'url': url}) # noauth extracts user_id:project_id from auth_token if not user_overriden: c.client.auth_token = context.auth_token or '%s:%s' % (username, project) c.client.management_url = url return c
def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration` """ self.s3_host = self._option_get('s3_store_host') self.access_key = self._option_get('s3_store_access_key') self.secret_key = self._option_get('s3_store_secret_key') self.bucket = self._option_get('s3_store_bucket') self.scheme = 's3' if self.s3_host.startswith('https://'): self.scheme = 's3+https' self.full_s3_host = self.s3_host elif self.s3_host.startswith('http://'): self.full_s3_host = self.s3_host else: # Defaults http self.full_s3_host = 'http://' + self.s3_host _s3_obj_size = self._option_get('s3_store_large_object_size') self.s3_store_large_object_size = _s3_obj_size * units.Mi _s3_ck_size = self._option_get('s3_store_large_object_chunk_size') _s3_ck_min = DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE if _s3_ck_size < _s3_ck_min: reason = _("s3_store_large_object_chunk_size must be at " "least %d MB.") % _s3_ck_min LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="s3", reason=reason) self.s3_store_large_object_chunk_size = _s3_ck_size * units.Mi self.s3_store_thread_pools = self._option_get('s3_store_thread_pools') if self.s3_store_thread_pools <= 0: reason = _("s3_store_thread_pools must be a positive " "integer. %s") % self.s3_store_thread_pools LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="s3", reason=reason) if self.backend_group: self._set_url_prefix()
def validate_buffering(buffer_dir): if buffer_dir is None: msg = _('Configuration option "swift_upload_buffer_dir" is ' 'not set. Please set it to a valid path to buffer ' 'during Swift uploads.') raise exceptions.BadStoreConfiguration(store_name='swift', reason=msg) # NOTE(dharinic): Ensure that the provided directory path for # buffering is valid try: _tmpfile = tempfile.TemporaryFile(dir=buffer_dir) except OSError as err: msg = (_('Unable to use buffer directory set with ' '"swift_upload_buffer_dir". Error: %s') % encodeutils.exception_to_unicode(err)) raise exceptions.BadStoreConfiguration(store_name='swift', reason=msg) else: _tmpfile.close() return True
def _validate_metadata(self, metadata_file): """Validate metadata against json schema. If metadata is valid then cache metadata and use it when creating new image. :param metadata_file: JSON metadata file path :raises: BadStoreConfiguration exception if metadata is not valid. """ try: with open(metadata_file, 'rb') as fptr: metadata = jsonutils.load(fptr) if isinstance(metadata, dict): # If metadata is of type dictionary # i.e. - it contains only one mountpoint # then convert it to list of dictionary. metadata = [metadata] # Validate metadata against json schema jsonschema.validate(metadata, MULTI_FILESYSTEM_METADATA_SCHEMA) glance_store.check_location_metadata(metadata) self.FILESYSTEM_STORE_METADATA = metadata except (jsonschema.exceptions.ValidationError, exceptions.BackendException, ValueError) as vee: err_msg = encodeutils.exception_to_unicode(vee) reason = _('The JSON in the metadata file %(file)s is ' 'not valid and it can not be used: ' '%(vee)s.') % dict(file=metadata_file, vee=err_msg) LOG.error(reason) raise exceptions.BadStoreConfiguration( store_name="filesystem", reason=reason) except IOError as ioe: err_msg = encodeutils.exception_to_unicode(ioe) reason = _('The path for the metadata file %(file)s could ' 'not be accessed: ' '%(ioe)s.') % dict(file=metadata_file, ioe=err_msg) LOG.error(reason) raise exceptions.BadStoreConfiguration( store_name="filesystem", reason=reason)
def _option_get(self, param): result = getattr(self.conf.glance_store, param) if not result: reason = ("Could not find %(param)s in configuration " "options." % { 'param': param }) LOG.debug(reason) raise exceptions.BadStoreConfiguration(store_name="s3", reason=reason) return result
def _get_endpoint(self, context): self.container = self.conf.glance_store.swift_store_container if context is None: reason = _("Multi-tenant Swift storage requires a context.") raise exceptions.BadStoreConfiguration(store_name="swift", reason=reason) if context.service_catalog is None: reason = _("Multi-tenant Swift storage requires " "a service catalog.") raise exceptions.BadStoreConfiguration(store_name="swift", reason=reason) self.storage_url = auth.get_endpoint(context.service_catalog, service_type=self.service_type, endpoint_region=self.region, endpoint_type=self.endpoint_type) if self.storage_url.startswith('http://'): self.scheme = 'swift+http' else: self.scheme = 'swift+https' return self.storage_url
def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration` """ try: if self.backend_group: chunk = getattr(self.conf, self.backend_group).rbd_store_chunk_size pool = getattr(self.conf, self.backend_group).rbd_store_pool user = getattr(self.conf, self.backend_group).rbd_store_user conf_file = getattr(self.conf, self.backend_group).rbd_store_ceph_conf connect_timeout = getattr( self.conf, self.backend_group).rados_connect_timeout thin_provisioning = getattr(self.conf, self.backend_group).\ rbd_thin_provisioning rbd_size_by_diff = getattr(self.conf, self.backend_group).rbd_size_by_diff else: chunk = self.conf.glance_store.rbd_store_chunk_size pool = self.conf.glance_store.rbd_store_pool user = self.conf.glance_store.rbd_store_user conf_file = self.conf.glance_store.rbd_store_ceph_conf connect_timeout = self.conf.glance_store.rados_connect_timeout thin_provisioning = \ self.conf.glance_store.rbd_thin_provisioning rbd_size_by_diff = self.conf.glance_store.rbd_size_by_diff self.thin_provisioning = thin_provisioning self.chunk_size = chunk * units.Mi self.READ_CHUNKSIZE = self.chunk_size self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE self.size_by_diff = rbd_size_by_diff # these must not be unicode since they will be passed to a # non-unicode-aware C library self.pool = str(pool) self.user = str(user) self.conf_file = str(conf_file) self.connect_timeout = connect_timeout except cfg.ConfigFileValueError as e: reason = _("Error in store configuration: %s") % e LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name='rbd', reason=reason) if self.backend_group: self._set_url_prefix() self.size = 0 self.resize_amount = self.WRITE_CHUNKSIZE
def _check_write_permission(self, datadir): """ Checks if directory created to write image files has write permission. :datadir is a directory path in which glance wites image files. :raises: BadStoreConfiguration exception if datadir is read-only. """ if not os.access(datadir, os.W_OK): msg = (_("Permission to write in %s denied") % datadir) LOG.exception(msg) raise exceptions.BadStoreConfiguration( store_name="filesystem", reason=msg)
def configure_add(self): default_ref = self.conf.glance_store.default_swift_reference default_swift_reference = self.ref_params.get(default_ref) if default_swift_reference: self.auth_address = default_swift_reference.get('auth_address') if (not default_swift_reference) or (not self.auth_address): reason = _("A value for swift_store_auth_address is required.") LOG.error(reason) raise exceptions.BadStoreConfiguration(message=reason) if self.auth_address.startswith('http://'): self.scheme = 'swift+http' else: self.scheme = 'swift+https' self.container = self.conf.glance_store.swift_store_container self.user = default_swift_reference.get('user') self.key = default_swift_reference.get('key') if not (self.user or self.key): reason = _("A value for swift_store_ref_params is required.") LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="swift", reason=reason)
def configure(self, re_raise_bsc=False): self._sanity_check() self.scheme = STORE_SCHEME self.server_host = self._option_get('vmware_server_host') self.server_username = self._option_get('vmware_server_username') self.server_password = self._option_get('vmware_server_password') self.api_retry_count = self.conf.glance_store.vmware_api_retry_count self.tpoll_interval = self.conf.glance_store.vmware_task_poll_interval self.api_insecure = self.conf.glance_store.vmware_api_insecure if api is None: msg = _("Missing dependencies: oslo_vmware") raise exceptions.BadStoreConfiguration( store_name="vmware_datastore", reason=msg) self.session = self.reset_session() super(Store, self).configure(re_raise_bsc=re_raise_bsc)
def _option_get(self, param): if self.backend_group: store_conf = getattr(self.conf, self.backend_group) else: store_conf = self.conf.glance_store result = getattr(store_conf, param) if not result: if param == 's3_store_create_bucket_on_put': return result reason = _("Could not find %s in configuration options.") % param LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="s3", reason=reason) return result
def _check_directory_paths(self, datadir_path, directory_paths): """ Checks if directory_path is already present in directory_paths. :datadir_path is directory path. :datadir_paths is set of all directory paths. :raise BadStoreConfiguration exception if same directory path is already present in directory_paths. """ if datadir_path in directory_paths: msg = (_("Directory %(datadir_path)s specified " "multiple times in filesystem_store_datadirs " "option of filesystem configuration") % {'datadir_path': datadir_path}) LOG.exception(msg) raise exceptions.BadStoreConfiguration( store_name="filesystem", reason=msg)
def _load_config(self): if self.backend_group: scf = getattr(self.conf, self.backend_group).swift_store_config_file else: scf = self.conf.glance_store.swift_store_config_file try: conf_file = self.conf.find_file(scf) CONFIG.read(conf_file) except Exception as e: msg = (_("swift config file " "%(conf)s:%(exc)s not found"), { 'conf': scf, 'exc': e }) LOG.error(msg) raise exceptions.BadStoreConfiguration(store_name='swift', reason=msg) account_params = {} account_references = CONFIG.sections() for ref in account_references: reference = {} try: for param in ('auth_address', 'user', 'key', 'project_domain_id', 'project_domain_name', 'user_domain_id', 'user_domain_name'): reference[param] = CONFIG.get(ref, param) try: reference['auth_version'] = CONFIG.get(ref, 'auth_version') except configparser.NoOptionError: if self.backend_group: av = getattr( self.conf, self.backend_group).swift_store_auth_version else: av = self.conf.glance_store.swift_store_auth_version reference['auth_version'] = av account_params[ref] = reference except (ValueError, SyntaxError, configparser.NoOptionError): LOG.exception(_LE("Invalid format of swift store config cfg")) return account_params
def get_connection(self, conffile, rados_id): client = rados.Rados(conffile=conffile, rados_id=rados_id) try: client.connect(timeout=self.connect_timeout) except (rados.Error, rados.ObjectNotFound) as e: if self.backend_group and len(self.conf.enabled_backends) > 1: reason = _("Error in store configuration: %s") % e LOG.debug(reason) raise exceptions.BadStoreConfiguration( store_name=self.backend_group, reason=reason) else: msg = _LE("Error connecting to ceph cluster.") LOG.exception(msg) raise exceptions.BackendException() try: yield client finally: client.shutdown()
def configure(self, re_raise_bsc=False): glance_conf = self.conf.glance_store _obj_size = self._option_get('swift_store_large_object_size') self.large_object_size = _obj_size * ONE_MB _chunk_size = self._option_get('swift_store_large_object_chunk_size') self.large_object_chunk_size = _chunk_size * ONE_MB self.admin_tenants = glance_conf.swift_store_admin_tenants self.region = glance_conf.swift_store_region self.service_type = glance_conf.swift_store_service_type self.conf_endpoint = glance_conf.swift_store_endpoint self.endpoint_type = glance_conf.swift_store_endpoint_type self.insecure = glance_conf.swift_store_auth_insecure self.ssl_compression = glance_conf.swift_store_ssl_compression self.cacert = glance_conf.swift_store_cacert if swiftclient is None: msg = _("Missing dependency python_swiftclient.") raise exceptions.BadStoreConfiguration(store_name="swift", reason=msg) super(BaseStore, self).configure(re_raise_bsc=re_raise_bsc)
def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration` """ if pymongo is None: msg = _("Missing dependencies: pymongo") raise exceptions.BadStoreConfiguration(store_name="gridfs", reason=msg) self.mongodb_uri = self._option_get('mongodb_store_uri') parsed = uri_parser.parse_uri(self.mongodb_uri) self.mongodb_db = self._option_get('mongodb_store_db') or \ parsed.get("database") self.mongodb = pymongo.MongoClient(self.mongodb_uri) self.fs = gridfs.GridFS(self.mongodb[self.mongodb_db])
def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadStoreConfiguration` """ self.host = self._option_get('irods_store_host') self.port = self._option_get('irods_store_port') self.zone = self._option_get('irods_store_zone') self.path = self._option_get('irods_store_path').rstrip('/') self.user = self._option_get('irods_store_user') self.password = self._option_get('irods_store_password') if self.host is None or self.zone is None \ or self.path is None or self.user is None: reason = (_("Invalid configuration options, host = '%(host)s', " + "port = '%(port)s', zone = '%(zone)s', " + "path = '%(path)s', user = '******'") % ({ 'host': self.host, 'port': self.port, 'zone': self.zone, 'path': self.path, 'user': self.user })) LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="irods", reason=reason) self.irods_manager = IrodsManager({ 'host': self.host, 'port': self.port, 'zone': self.zone, 'path': self.path, 'user': self.user, 'password': self.password, })
def _build_datastore_weighted_map(self, datastores): """Build an ordered map where the key is a weight and the value is a Datastore object. :param: a list of datastores in the format datacenter_path:datastore_name:weight :return: a map with key-value <weight>:<Datastore> """ ds_map = {} for ds in datastores: dc_path, name, weight = self._parse_datastore_info_and_weight(ds) # Fetch the server side reference. ds_obj = self._get_datastore(dc_path, name) if not ds_obj: msg = (_("Could not find datastore %(ds_name)s " "in datacenter %(dc_path)s") % {'ds_name': name, 'dc_path': dc_path}) LOG.error(msg) raise exceptions.BadStoreConfiguration( store_name='vmware_datastore', reason=msg) ds_map.setdefault(int(weight), []).append(ds_obj) return ds_map
def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration` """ try: chunk = self.conf.glance_store.rbd_store_chunk_size self.chunk_size = chunk * (1024**2) self.READ_CHUNKSIZE = self.chunk_size self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE # these must not be unicode since they will be passed to a # non-unicode-aware C library self.pool = str(self.conf.glance_store.rbd_store_pool) self.user = str(self.conf.glance_store.rbd_store_user) self.conf_file = str(self.conf.glance_store.rbd_store_ceph_conf) except cfg.ConfigFileValueError as e: reason = _("Error in store configuration: %s") % e LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name='rbd', reason=reason)
def configure_add(self): self.datacenter_path = self.conf.glance_store.vmware_datacenter_path self.datastore_name = self._option_get('vmware_datastore_name') global _datastore_info_valid if not _datastore_info_valid: search_index_moref = self._service_content.searchIndex inventory_path = ('%s/datastore/%s' % (self.datacenter_path, self.datastore_name)) ds_moref = self._session.invoke_api(self._session.vim, 'FindByInventoryPath', search_index_moref, inventoryPath=inventory_path) if ds_moref is None: reason = (_("Could not find datastore %(ds_name)s " "in datacenter %(dc_path)s") % { 'ds_name': self.datastore_name, 'dc_path': self.datacenter_path }) raise exceptions.BadStoreConfiguration( store_name='vmware_datastore', reason=reason) else: _datastore_info_valid = True self.store_image_dir = self.conf.glance_store.vmware_store_image_dir
def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration` """ if self.backend_group: store_conf = getattr(self.conf, self.backend_group) else: store_conf = self.conf.glance_store fdir = store_conf.filesystem_store_datadir fdirs = store_conf.filesystem_store_datadirs fstore_perm = store_conf.filesystem_store_file_perm meta_file = store_conf.filesystem_store_metadata_file self.chunk_size = store_conf.filesystem_store_chunk_size self.READ_CHUNKSIZE = self.chunk_size self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE if not (fdir or fdirs): reason = (_("Specify at least 'filesystem_store_datadir' or " "'filesystem_store_datadirs' option")) LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="filesystem", reason=reason) if fdir and fdirs: reason = (_("Specify either 'filesystem_store_datadir' or " "'filesystem_store_datadirs' option")) LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="filesystem", reason=reason) if fstore_perm > 0: perm = int(str(fstore_perm), 8) if not perm & stat.S_IRUSR: reason = _LE("Specified an invalid " "'filesystem_store_file_perm' option which " "could make image file to be unaccessible by " "glance service.") LOG.error(reason) reason = _("Invalid 'filesystem_store_file_perm' option.") raise exceptions.BadStoreConfiguration(store_name="filesystem", reason=reason) self.multiple_datadirs = False directory_paths = set() if fdir: self.datadir = fdir directory_paths.add(self.datadir) else: self.multiple_datadirs = True self.priority_data_map = {} for datadir in fdirs: (datadir_path, priority) = self._get_datadir_path_and_priority(datadir) priority_paths = self.priority_data_map.setdefault( int(priority), []) self._check_directory_paths(datadir_path, directory_paths, priority_paths) directory_paths.add(datadir_path) priority_paths.append(datadir_path) self.priority_list = sorted(self.priority_data_map, reverse=True) self._create_image_directories(directory_paths) if meta_file: self._validate_metadata(meta_file)
def configure(self): raise exceptions.BadStoreConfiguration()