def setup_openstack_logging(client_config, logger):
    """Configure openstack-sdk related loggers from ``client_config``.

    Pops the ``logging`` section out of ``client_config``, overlays it on
    the plugin's default logging configuration, and applies the result to
    every known openstack/keystoneauth logger. When the cloudify-redirect
    flag is enabled, a ``CloudifyLogHandler`` wrapping ``logger`` is
    attached to each of them.

    :param client_config: mutable mapping; its ``'logging'`` key is removed.
    :param logger: the cloudify context logger to redirect records to.
    """
    logging_config = client_config.pop('logging', dict())
    # Flag controlling whether all openstack logs are also redirected to
    # the cloudify logger.
    use_cfy_logger = logging_config.get(KEY_USE_CFY_LOGGER)
    # Logger groups whose handlers should be configured.
    groups_config = logging_config.get(KEY_GROUPS, {})
    # Extra individually named loggers to configure.
    loggers_config = logging_config.get(KEY_LOGGERS, {})

    # Start from a copy of the defaults and overlay the user's settings.
    final_logging_cfg = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
    if use_cfy_logger:
        final_logging_cfg[KEY_USE_CFY_LOGGER] = use_cfy_logger
    else:
        use_cfy_logger = final_logging_cfg[KEY_USE_CFY_LOGGER]
    final_logging_cfg[KEY_GROUPS].update(groups_config)
    final_logging_cfg[KEY_LOGGERS].update(loggers_config)

    # Map each individual logger name to the level of its group.
    configured_loggers = {
        v: final_logging_cfg[KEY_GROUPS][k]
        for k, values in LOGGING_GROUPS.items()
        for v in values
    }
    # Individually configured loggers override their group level.
    configured_loggers.update(final_logging_cfg[KEY_LOGGERS])

    # openstacksdk gives openstack & keystoneauth the same level when it
    # sets up logging, more info:
    # 1. https://github.com/openstack/openstacksdk/blob/master/openstack/_log.py#L106 # NOQA
    # 2. https://github.com/openstack/openstacksdk/blob/master/openstack/_log.py#L107 # NOQA
    # 3. https://docs.openstack.org/openstacksdk/latest/user/guides/logging.html#python-logging # NOQA
    os_level = final_logging_cfg[KEY_GROUPS]['openstack']
    configured_loggers.update({
        'keystoneauth': os_level,
        'keystoneauth.discovery': logging.WARNING,
        'keystoneauth.identity.base': logging.WARNING,
        'keystoneauth.identity.generic.base': logging.WARNING,
    })

    # Only attach the cloudify handler when redirection is enabled.
    ctx_log_handler = CloudifyLogHandler(logger) if use_cfy_logger else None

    for logger_name, logger_level in configured_loggers.items():
        # Level names are case-insensitive in config, but the logging
        # module expects upper case.
        # BUG FIX: the original check was
        # ``isinstance(logger_level, str) or isinstance(logger_level,
        # unicode)`` — ``unicode`` does not exist on Python 3, so any
        # string level raised NameError. ``str`` alone is correct there.
        if isinstance(logger_level, str):
            logger_level = logger_level.upper()
        setup_logging(logger_name, [ctx_log_handler], logger_level)
def iterate_timeout(timeout, message, wait=2):
    """Yield an incrementing counter until ``timeout`` seconds elapse.

    Sleeps ``wait`` seconds between yields; once the deadline passes,
    raises ``exceptions.ResourceTimeout`` carrying ``message``. A
    ``timeout`` of None iterates forever.
    """
    log = _log.setup_logging('openstack.iterate_timeout')

    # None as a wait winds up flowing well in the per-resource cache
    # flow; treat it as "no value given" and fall back to the default.
    if wait is None:
        wait = 2
    elif wait == 0:
        # wait should be < timeout, unless timeout is None
        wait = 0.1 if timeout is None else min(0.1, timeout)
    try:
        wait = float(wait)
    except ValueError:
        raise exceptions.SDKException(
            "Wait value must be an int or float value. {wait} given"
            " instead".format(wait=wait))

    deadline = None if timeout is None else time.time() + timeout
    count = 0
    while deadline is None or time.time() < deadline:
        count += 1
        yield count
        log.debug('Waiting %s seconds', wait)
        time.sleep(wait)
    raise exceptions.ResourceTimeout(message)
def __init__(self, name=None, region_name=None, config=None,
             force_ipv4=False, auth_plugin=None, openstack_config=None,
             session_constructor=None, app_name=None, app_version=None,
             session=None, discovery_cache=None):
    """Capture the settings that describe one region of one cloud.

    All arguments are simply stored on the instance; no I/O happens
    here.  ``session_constructor`` falls back to the keystoneauth
    ``Session`` class when not supplied.
    """
    self._name = name
    self.region_name = region_name
    # Raw configuration mapping for this cloud region.
    self.config = config
    self.log = _log.setup_logging('openstack.config')
    self._force_ipv4 = force_ipv4
    self._auth = auth_plugin
    self._openstack_config = openstack_config
    # An externally supplied keystoneauth session, if any.
    self._keystone_session = session
    self._session_constructor = session_constructor or ks_session.Session
    self._app_name = app_name
    self._app_version = app_version
    # ``or None`` normalizes falsy values (e.g. {}) to None.
    self._discovery_cache = discovery_cache or None
def __init__(self, name=None, region_name=None, config=None,
             force_ipv4=False, auth_plugin=None, openstack_config=None,
             session_constructor=None, app_name=None, app_version=None,
             session=None, discovery_cache=None, extra_config=None,
             cache_expiration_time=0, cache_expirations=None,
             cache_path=None, cache_class='dogpile.cache.null',
             cache_arguments=None, password_callback=None):
    """Capture settings for one cloud region, including cache options.

    All arguments are stored on the instance; no I/O happens here.
    ``cache_class`` defaults to the dogpile null backend, i.e. caching
    is disabled unless configured otherwise.
    """
    self._name = name
    self.region_name = region_name
    # Keys are normalized by the helper (presumably dash/underscore
    # handling — see _util.normalize_keys) before being stored.
    self.config = _util.normalize_keys(config)
    self._extra_config = extra_config or {}
    self.log = _log.setup_logging('openstack.config')
    self._force_ipv4 = force_ipv4
    self._auth = auth_plugin
    self._openstack_config = openstack_config
    # An externally supplied keystoneauth session, if any.
    self._keystone_session = session
    self._session_constructor = session_constructor or ks_session.Session
    self._app_name = app_name
    self._app_version = app_version
    # ``or None`` normalizes falsy values (e.g. {}) to None.
    self._discovery_cache = discovery_cache or None
    self._cache_expiration_time = cache_expiration_time
    self._cache_expirations = cache_expirations or {}
    self._cache_path = cache_path
    self._cache_class = cache_class
    self._cache_arguments = cache_arguments
    self._password_callback = password_callback
def __init__(self, session, statsd_client=None, statsd_prefix=None,
             prometheus_counter=None, prometheus_histogram=None,
             influxdb_config=None, influxdb_client=None,
             *args, **kwargs):
    """Initialize the service proxy around a keystoneauth session.

    Optional metrics emitters (statsd, prometheus, influxdb) are stored
    as-is; remaining positional/keyword arguments are forwarded to the
    keystoneauth adapter base class.
    """
    # NOTE(dtantsur): keystoneauth defaults retriable_status_codes to None,
    # override it with a class-level value.
    kwargs.setdefault('retriable_status_codes',
                      self.retriable_status_codes)
    super(Proxy, self).__init__(session=session, *args, **kwargs)
    self._statsd_client = statsd_client
    self._statsd_prefix = statsd_prefix
    self._prometheus_counter = prometheus_counter
    self._prometheus_histogram = prometheus_histogram
    self._influxdb_client = influxdb_client
    self._influxdb_config = influxdb_config
    # Scope the logger to the service when known (e.g.
    # "openstack.compute"); otherwise use the root openstack namespace.
    if self.service_type:
        log_name = 'openstack.{0}'.format(self.service_type)
    else:
        log_name = 'openstack'
    self.log = _log.setup_logging(log_name)
def iterate_timeout(timeout, message, wait=2):
    """Generator yielding a counter until ``timeout`` seconds elapse.

    Between yields it sleeps ``wait`` seconds. When the deadline is
    reached it raises ``exceptions.ResourceTimeout`` with ``message``;
    with ``timeout=None`` it never stops on its own.
    """
    log = _log.setup_logging('openstack.iterate_timeout')

    # A wait of None means "no preference" and gets the default; a wait
    # of 0 is bumped to a small positive value so we never busy-spin.
    if wait is None:
        wait = 2
    elif wait == 0:
        # wait should be < timeout, unless timeout is None
        wait = 0.1 if timeout is None else min(0.1, timeout)
    try:
        wait = float(wait)
    except ValueError:
        raise exceptions.SDKException(
            "Wait value must be an int or float value. {wait} given"
            " instead".format(wait=wait))

    deadline = None if timeout is None else time.time() + timeout
    count = 0
    while deadline is None or time.time() < deadline:
        count += 1
        yield count
        log.debug('Waiting %s seconds', wait)
        time.sleep(wait)
    raise exceptions.ResourceTimeout(message)
def find_best_address(addresses, public=False, cloud_public=True):
    """Pick the address from ``addresses`` most likely to be reachable.

    :param addresses: list of candidate IP address strings.
    :param public: whether the caller asked for a public address.
    :param cloud_public: whether the cloud considers these addresses public.
    :returns: one address from the list, or None when the list is empty.
    """
    # Only probe reachability when the caller's notion of "public" agrees
    # with the cloud's.
    do_check = public == cloud_public
    if not addresses:
        return None
    if len(addresses) == 1:
        return addresses[0]
    if len(addresses) > 1 and do_check:
        # We only want to do this check if the address is supposed to be
        # reachable. Otherwise we're just debug log spamming on every
        # listing of private ip addresses
        for address in addresses:
            # Return the first one that is reachable
            try:
                for res in socket.getaddrinfo(
                        address, 22, socket.AF_UNSPEC,
                        socket.SOCK_STREAM, 0):
                    family, socktype, proto, _, sa = res
                    connect_socket = socket.socket(family, socktype, proto)
                    # BUG FIX: the probe socket was never closed, leaking
                    # one file descriptor per probed address.
                    try:
                        connect_socket.settimeout(1)
                        connect_socket.connect(sa)
                        return address
                    finally:
                        connect_socket.close()
            except Exception:
                pass

    # Give up and return the first - none work as far as we can tell
    if do_check:
        log = _log.setup_logging('openstack')
        log.debug(
            'The cloud returned multiple addresses, and none of them seem'
            ' to work. That might be what you wanted, but we have no clue'
            " what's going on, so we just picked one at random")
    return addresses[0]
def find_best_address(addresses, public=False, cloud_public=True):
    """Pick the address from ``addresses`` most likely to be reachable.

    :param addresses: list of candidate IP address strings.
    :param public: whether the caller asked for a public address.
    :param cloud_public: whether the cloud considers these addresses public.
    :returns: one address from the list, or None when the list is empty.
    """
    # Only probe reachability when the caller's notion of "public" agrees
    # with the cloud's.
    do_check = public == cloud_public
    if not addresses:
        return None
    if len(addresses) == 1:
        return addresses[0]
    if len(addresses) > 1 and do_check:
        # We only want to do this check if the address is supposed to be
        # reachable. Otherwise we're just debug log spamming on every
        # listing of private ip addresses
        for address in addresses:
            # Return the first one that is reachable
            try:
                for res in socket.getaddrinfo(address, 22, socket.AF_UNSPEC,
                                              socket.SOCK_STREAM, 0):
                    family, socktype, proto, _, sa = res
                    connect_socket = socket.socket(family, socktype, proto)
                    # BUG FIX: the probe socket was never closed, leaking
                    # one file descriptor per probed address.
                    try:
                        connect_socket.settimeout(1)
                        connect_socket.connect(sa)
                        return address
                    finally:
                        connect_socket.close()
            except Exception:
                pass

    # Give up and return the first - none work as far as we can tell
    if do_check:
        log = _log.setup_logging('openstack')
        log.debug(
            'The cloud returned multiple addresses, and none of them seem'
            ' to work. That might be what you wanted, but we have no clue'
            " what's going on, so we just picked one at random")
    return addresses[0]
def cleanup_task(graph, service, fn):
    """Run one cleanup callable and always mark its graph node done.

    Exceptions raised by ``fn`` are logged and swallowed so that a single
    failing service cannot stall the overall cleanup; ``graph.node_done``
    is invoked on success and failure alike.
    """
    try:
        fn()
    except Exception:
        _log.setup_logging('openstack.project_cleanup').exception(
            'Error in the %s cleanup function' % service)
    finally:
        # Always release the node so the dependency graph keeps moving.
        graph.node_done(service)
def __init__(self, name=None, region_name=None, config=None,
             force_ipv4=False, auth_plugin=None, openstack_config=None,
             session_constructor=None, app_name=None, app_version=None,
             session=None, discovery_cache=None, extra_config=None,
             cache_expiration_time=0, cache_expirations=None,
             cache_path=None, cache_class='dogpile.cache.null',
             cache_arguments=None, password_callback=None,
             statsd_host=None, statsd_port=None, statsd_prefix=None,
             influxdb_config=None, collector_registry=None):
    """Capture settings for one cloud region, including cache and metrics.

    All arguments are stored on the instance; the metrics client objects
    themselves start as None and only their connection settings are kept
    here.
    """
    self._name = name
    self.config = _util.normalize_keys(config)
    # NOTE(efried): For backward compatibility: a) continue to accept the
    # region_name kwarg; b) make it take precedence over (non-service_type-
    # specific) region_name set in the config dict.
    if region_name is not None:
        self.config['region_name'] = region_name
    self._extra_config = extra_config or {}
    self.log = _log.setup_logging('openstack.config')
    self._force_ipv4 = force_ipv4
    self._auth = auth_plugin
    self._openstack_config = openstack_config
    # An externally supplied keystoneauth session, if any.
    self._keystone_session = session
    self._session_constructor = session_constructor or ks_session.Session
    self._app_name = app_name
    self._app_version = app_version
    # ``or None`` normalizes falsy values (e.g. {}) to None.
    self._discovery_cache = discovery_cache or None
    self._cache_expiration_time = cache_expiration_time
    self._cache_expirations = cache_expirations or {}
    self._cache_path = cache_path
    self._cache_class = cache_class
    self._cache_arguments = cache_arguments
    self._password_callback = password_callback
    # Metrics settings; client objects start unset.
    self._statsd_host = statsd_host
    self._statsd_port = statsd_port
    self._statsd_prefix = statsd_prefix
    self._statsd_client = None
    self._influxdb_config = influxdb_config
    self._influxdb_client = None
    self._collector_registry = collector_registry
    self._service_type_manager = os_service_types.ServiceTypes()
def _call_client_and_retry(client, url, retry_on=None,
                           call_retries=3, retry_wait=2,
                           **kwargs):
    """Method to provide retry operations.

    Some APIs utilize HTTP errors on certain operations to indicate that
    the resource is presently locked, and as such this mechanism provides
    the ability to retry upon known error codes.

    :param object client: The client method, such as:
                          ``self.baremetal_client.post``
    :param string url: The URL to perform the operation upon.
    :param integer retry_on: A list of error codes that can be retried on.
                             The method also supports a single integer to
                             be defined.
    :param integer call_retries: The number of times to retry the call upon
                                 the error code defined by the 'retry_on'
                                 parameter. Default: 3
    :param integer retry_wait: The time in seconds to wait between retry
                               attempts. Default: 2

    :returns: The object returned by the client call.
    :raises exc.OpenStackCloudHTTPError: when the error is not retryable,
        or when retries have been exhausted.
    """
    # NOTE(TheJulia): This method, as of this note, does not have direct
    # unit tests, although is fairly well tested by the tests checking
    # retry logic in test_baremetal_node.py.
    log = _log.setup_logging('shade.http')

    # Allow a bare integer as shorthand for a one-element list.
    if isinstance(retry_on, int):
        retry_on = [retry_on]

    count = 0
    while count < call_retries:
        count += 1
        try:
            return client(url, **kwargs)
        except exc.OpenStackCloudHTTPError as e:
            # Only retry on the caller-listed status codes.
            if retry_on is None or e.response.status_code not in retry_on:
                raise
            # BUG FIX: stdlib logging interpolates %-style, so the old
            # '{err}'/'{wait}' placeholders were emitted verbatim.
            log.debug(
                'Received retryable error %(err)s, waiting '
                '%(wait)s seconds to retry',
                {'err': e.response.status_code, 'wait': retry_wait})
            if count >= call_retries:
                # BUG FIX: previously the loop fell through to
                # ``return ret_val`` with ret_val unbound
                # (UnboundLocalError); surface the last HTTP error instead.
                raise
            time.sleep(retry_wait)
    # Only reachable when call_retries < 1; previously this path also
    # crashed with UnboundLocalError.
    raise ValueError(
        'call_retries must be at least 1, got %r' % call_retries)
def simple_logging(debug=False, http_debug=False):
    """Set up convenient console logging for the openstack/shade loggers.

    :param debug: emit DEBUG-level records instead of INFO.
    :param http_debug: also enable HTTP request/response tracing;
        implies ``debug``.
    """
    # HTTP tracing is only useful at debug verbosity.
    if http_debug:
        debug = True
    if debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    if http_debug:
        # Enable HTTP level tracing
        log = _log.setup_logging('keystoneauth')
        log.addHandler(logging.StreamHandler())
        log.setLevel(log_level)
        # We only want extra shade HTTP tracing in http debug mode
        log = _log.setup_logging('openstack.cloud.http')
        log.setLevel(log_level)
    else:
        # We only want extra shade HTTP tracing in http debug mode
        log = _log.setup_logging('openstack.cloud.http')
        log.setLevel(logging.WARNING)
    # The main shade logger always gets a stream handler at the chosen
    # level.
    log = _log.setup_logging('openstack.cloud')
    log.addHandler(logging.StreamHandler())
    log.setLevel(log_level)
    # Suppress warning about keystoneauth loggers
    log = _log.setup_logging('keystoneauth.identity.base')
    log = _log.setup_logging('keystoneauth.identity.generic.base')
def _call_client_and_retry(client, url, retry_on=None,
                           call_retries=3, retry_wait=2,
                           **kwargs):
    """Method to provide retry operations.

    Some APIs utilize HTTP errors on certain operations to indicate that
    the resource is presently locked, and as such this mechanism provides
    the ability to retry upon known error codes.

    :param object client: The client method, such as:
                          ``self.baremetal_client.post``
    :param string url: The URL to perform the operation upon.
    :param integer retry_on: A list of error codes that can be retried on.
                             The method also supports a single integer to
                             be defined.
    :param integer call_retries: The number of times to retry the call upon
                                 the error code defined by the 'retry_on'
                                 parameter. Default: 3
    :param integer retry_wait: The time in seconds to wait between retry
                               attempts. Default: 2

    :returns: The object returned by the client call.
    :raises exc.OpenStackCloudHTTPError: when the error is not retryable,
        or when retries have been exhausted.
    """
    # NOTE(TheJulia): This method, as of this note, does not have direct
    # unit tests, although is fairly well tested by the tests checking
    # retry logic in test_baremetal_node.py.
    log = _log.setup_logging('shade.http')

    # Allow a bare integer as shorthand for a one-element list.
    if isinstance(retry_on, int):
        retry_on = [retry_on]

    count = 0
    while count < call_retries:
        count += 1
        try:
            return client(url, **kwargs)
        except exc.OpenStackCloudHTTPError as e:
            # Only retry on the caller-listed status codes.
            if retry_on is None or e.response.status_code not in retry_on:
                raise
            # BUG FIX: stdlib logging interpolates %-style, so the old
            # '{err}'/'{wait}' placeholders were emitted verbatim.
            log.debug(
                'Received retryable error %(err)s, waiting '
                '%(wait)s seconds to retry',
                {'err': e.response.status_code, 'wait': retry_wait})
            if count >= call_retries:
                # BUG FIX: previously the loop fell through to
                # ``return ret_val`` with ret_val unbound
                # (UnboundLocalError); surface the last HTTP error instead.
                raise
            time.sleep(retry_wait)
    # Only reachable when call_retries < 1; previously this path also
    # crashed with UnboundLocalError.
    raise ValueError(
        'call_retries must be at least 1, got %r' % call_retries)
def setUp(self):
    """Build a mocked adapter session for the image proxy tests."""
    super(TestImage, self).setUp()
    # A canned response whose json() mirrors its (empty) body.
    self.resp = mock.Mock()
    self.resp.body = None
    self.resp.json = mock.Mock(return_value=self.resp.body)
    # Spec the session against keystoneauth's Adapter so unknown
    # attribute access fails loudly.
    self.sess = mock.Mock(spec=adapter.Adapter)
    self.sess.post = mock.Mock(return_value=self.resp)
    self.sess.put = mock.Mock(return_value=FakeResponse({}))
    self.sess.delete = mock.Mock(return_value=FakeResponse({}))
    self.sess.get = mock.Mock(return_value=FakeResponse({}))
    self.sess.default_microversion = None
    self.sess.retriable_status_codes = None
    self.sess.log = _log.setup_logging('openstack')
def setUp(self):
    """Build a mocked adapter session for the image proxy tests.

    This variant mocks ``fetch`` rather than ``get``.
    """
    super(TestImage, self).setUp()
    # A canned response whose json() mirrors its (empty) body.
    self.resp = mock.Mock()
    self.resp.body = None
    self.resp.json = mock.Mock(return_value=self.resp.body)
    # Spec the session against keystoneauth's Adapter so unknown
    # attribute access fails loudly.
    self.sess = mock.Mock(spec=adapter.Adapter)
    self.sess.post = mock.Mock(return_value=self.resp)
    self.sess.put = mock.Mock(return_value=FakeResponse({}))
    self.sess.delete = mock.Mock(return_value=FakeResponse({}))
    self.sess.fetch = mock.Mock(return_value=FakeResponse({}))
    self.sess.default_microversion = None
    self.sess.retriable_status_codes = None
    self.sess.log = _log.setup_logging('openstack')
def __init__(self, name, region, config,
             force_ipv4=False, auth_plugin=None,
             openstack_config=None, session_constructor=None,
             app_name=None, app_version=None):
    """Capture the settings describing one region of one cloud.

    All arguments are simply stored on the instance; the keystone
    session starts unset and ``session_constructor`` falls back to the
    default ``session.Session`` class.
    """
    self.name = name
    self.region = region
    # Raw configuration mapping for this cloud region.
    self.config = config
    self.log = _log.setup_logging(__name__)
    self._force_ipv4 = force_ipv4
    self._auth = auth_plugin
    self._openstack_config = openstack_config
    # Created lazily; no session exists until something builds it.
    self._keystone_session = None
    self._session_constructor = session_constructor or session.Session
    self._app_name = app_name
    self._app_version = app_version
def __init__(self, name=None, region_name=None, config=None,
             force_ipv4=False, auth_plugin=None, openstack_config=None,
             session_constructor=None, app_name=None, app_version=None,
             session=None, discovery_cache=None):
    """Capture the settings that describe one region of one cloud.

    All arguments are simply stored on the instance; no I/O happens
    here.  ``session_constructor`` falls back to the keystoneauth
    ``Session`` class when not supplied.
    """
    self._name = name
    self.region_name = region_name
    # Raw configuration mapping for this cloud region.
    self.config = config
    self.log = _log.setup_logging('openstack.config')
    self._force_ipv4 = force_ipv4
    self._auth = auth_plugin
    self._openstack_config = openstack_config
    # An externally supplied keystoneauth session, if any.
    self._keystone_session = session
    self._session_constructor = session_constructor or ks_session.Session
    self._app_name = app_name
    self._app_version = app_version
    # ``or None`` normalizes falsy values (e.g. {}) to None.
    self._discovery_cache = discovery_cache or None
def find_best_address(addresses, public=False, cloud_public=True):
    """Pick the address from ``addresses`` most likely to be reachable.

    Probes TCP port 22 on each candidate (retrying for up to 5 seconds
    per address) when the caller's notion of "public" matches the
    cloud's.

    :param addresses: list of candidate IP address strings.
    :param public: whether the caller asked for a public address.
    :param cloud_public: whether the cloud considers these addresses public.
    :returns: one address from the list, or None when the list is empty.
    """
    do_check = public == cloud_public
    if not addresses:
        return None
    if len(addresses) == 1:
        return addresses[0]
    if len(addresses) > 1 and do_check:
        # We only want to do this check if the address is supposed to be
        # reachable. Otherwise we're just debug log spamming on every
        # listing of private ip addresses
        for address in addresses:
            try:
                for count in utils.iterate_timeout(
                        5, "Timeout waiting for %s" % address, wait=0.1):
                    # Return the first one that is reachable
                    try:
                        for res in socket.getaddrinfo(
                                address, 22, socket.AF_UNSPEC,
                                socket.SOCK_STREAM, 0):
                            family, socktype, proto, _, sa = res
                            connect_socket = socket.socket(
                                family, socktype, proto)
                            # BUG FIX: the probe socket was never closed,
                            # leaking one file descriptor per attempt.
                            try:
                                connect_socket.settimeout(1)
                                connect_socket.connect(sa)
                                return address
                            finally:
                                connect_socket.close()
                    except socket.error:
                        # Sometimes a "no route to address" type error
                        # will fail fast, but can often come alive
                        # when retried.
                        continue
            except Exception:
                pass

    # Give up and return the first - none work as far as we can tell
    if do_check:
        log = _log.setup_logging('openstack')
        log.debug(
            "The cloud returned multiple addresses %s:, and we could not "
            "connect to port 22 on either. That might be what you wanted, "
            "but we have no clue what's going on, so we picked the first one "
            "%s" % (addresses, addresses[0]))
    return addresses[0]
def maximum_supported_microversion(adapter, client_maximum):
    """Determine the maximum microversion supported by both client and server.

    :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance.
    :param client_maximum: Maximum microversion supported by the client.
        If ``None``, ``None`` is returned.

    :returns: the maximum supported microversion as string or ``None``.
    """
    if client_maximum is None:
        return None

    # NOTE(dtantsur): if we cannot determine supported microversions, fall
    # back to the default one.
    try:
        endpoint_data = adapter.get_endpoint_data()
    except keystoneauth1.exceptions.discovery.DiscoveryFailure:
        endpoint_data = None

    if endpoint_data is None:
        _log.setup_logging('openstack').warning(
            'Cannot determine endpoint data for service %s',
            adapter.service_type or adapter.service_name)
        return None

    server_maximum = endpoint_data.max_microversion
    if not server_maximum:
        return None

    client_max = discover.normalize_version_number(client_maximum)
    server_max = discover.normalize_version_number(server_maximum)

    server_minimum = endpoint_data.min_microversion
    if server_minimum:
        server_min = discover.normalize_version_number(server_minimum)
        if client_max < server_min:
            # NOTE(dtantsur): we may want to raise in this case, but this
            # keeps the current behavior intact.
            return None

    return discover.version_to_string(min(client_max, server_max))
def find_best_address(addresses, public=False, cloud_public=True):
    """Pick the address from ``addresses`` most likely to be reachable.

    Probes TCP port 22 on each candidate (retrying for up to 5 seconds
    per address) when the caller's notion of "public" matches the
    cloud's.

    :param addresses: list of candidate IP address strings.
    :param public: whether the caller asked for a public address.
    :param cloud_public: whether the cloud considers these addresses public.
    :returns: one address from the list, or None when the list is empty.
    """
    do_check = public == cloud_public
    if not addresses:
        return None
    if len(addresses) == 1:
        return addresses[0]
    if len(addresses) > 1 and do_check:
        # We only want to do this check if the address is supposed to be
        # reachable. Otherwise we're just debug log spamming on every
        # listing of private ip addresses
        for address in addresses:
            try:
                for count in utils.iterate_timeout(
                        5, "Timeout waiting for %s" % address, wait=0.1):
                    # Return the first one that is reachable
                    try:
                        for res in socket.getaddrinfo(
                                address, 22, socket.AF_UNSPEC,
                                socket.SOCK_STREAM, 0):
                            family, socktype, proto, _, sa = res
                            connect_socket = socket.socket(
                                family, socktype, proto)
                            # BUG FIX: the probe socket was never closed,
                            # leaking one file descriptor per attempt.
                            try:
                                connect_socket.settimeout(1)
                                connect_socket.connect(sa)
                                return address
                            finally:
                                connect_socket.close()
                    except socket.error:
                        # Sometimes a "no route to address" type error
                        # will fail fast, but can often come alive
                        # when retried.
                        continue
            except Exception:
                pass

    # Give up and return the first - none work as far as we can tell
    if do_check:
        log = _log.setup_logging('openstack')
        log.debug(
            "The cloud returned multiple addresses %s:, and we could not "
            "connect to port 22 on either. That might be what you wanted, "
            "but we have no clue what's going on, so we picked the first one "
            "%s" % (addresses, addresses[0]))
    return addresses[0]
def maximum_supported_microversion(adapter, client_maximum):
    """Determine the maximum microversion supported by both client and server.

    :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance.
    :param client_maximum: Maximum microversion supported by the client.
        If ``None``, ``None`` is returned.

    :returns: the maximum supported microversion as string or ``None``.
    """
    if client_maximum is None:
        return None

    # NOTE(dtantsur): if we cannot determine supported microversions, fall
    # back to the default one.
    try:
        endpoint_data = adapter.get_endpoint_data()
    except keystoneauth1.exceptions.discovery.DiscoveryFailure:
        endpoint_data = None

    if endpoint_data is None:
        _log.setup_logging('openstack').warning(
            'Cannot determine endpoint data for service %s',
            adapter.service_type or adapter.service_name)
        return None

    server_maximum = endpoint_data.max_microversion
    if not server_maximum:
        return None

    client_max = discover.normalize_version_number(client_maximum)
    server_max = discover.normalize_version_number(server_maximum)

    server_minimum = endpoint_data.min_microversion
    if server_minimum:
        server_min = discover.normalize_version_number(server_minimum)
        if client_max < server_min:
            # NOTE(dtantsur): we may want to raise in this case, but this
            # keeps the current behavior intact.
            return None

    return discover.version_to_string(min(client_max, server_max))
def __init__(
        self, session, statsd_client=None, statsd_prefix=None,
        prometheus_counter=None, prometheus_histogram=None,
        *args, **kwargs):
    """Initialize the service proxy around a keystoneauth session.

    Optional statsd/prometheus metrics emitters are stored as-is;
    remaining positional/keyword arguments are forwarded to the
    keystoneauth adapter base class.
    """
    # NOTE(dtantsur): keystoneauth defaults retriable_status_codes to None,
    # override it with a class-level value.
    kwargs.setdefault('retriable_status_codes',
                      self.retriable_status_codes)
    super(Proxy, self).__init__(session=session, *args, **kwargs)
    self._statsd_client = statsd_client
    self._statsd_prefix = statsd_prefix
    self._prometheus_counter = prometheus_counter
    self._prometheus_histogram = prometheus_histogram
    # Scope the logger to the service when known (e.g.
    # "openstack.compute"); otherwise use the root openstack namespace.
    if self.service_type:
        log_name = 'openstack.{0}'.format(self.service_type)
    else:
        log_name = 'openstack'
    self.log = _log.setup_logging(log_name)
def _get_supplemental_addresses(cloud, server):
    """Augment ``server['addresses']`` with floating IPs neutron knows about.

    When nova already reports a floating IP, the addresses are returned
    unchanged.  Otherwise, ports and floating IPs are looked up via the
    network service and merged into the matching fixed-IP network entry.
    Best-effort: cloud API failures leave the addresses as-is.
    """
    # Map fixed IP -> network name so floating IPs can be attached to the
    # right network entry below.
    fixed_ip_mapping = {}
    for name, network in server['addresses'].items():
        for address in network:
            # Skip IPv6 entries; only v4 fixed IPs are mapped here.
            if address['version'] == 6:
                continue
            if address.get('OS-EXT-IPS:type') == 'floating':
                # We have a floating IP that nova knows about, do nothing
                return server['addresses']
            fixed_ip_mapping[address['addr']] = name
    try:
        # Don't bother doing this before the server is active, it's a waste
        # of an API call while polling for a server to come up
        if (cloud.has_service('network') and cloud._has_floating_ips()
                and server['status'] == 'ACTIVE'):
            for port in cloud.search_ports(
                    filters=dict(device_id=server['id'])):
                # This SHOULD return one and only one FIP - but doing it as a
                # search/list lets the logic work regardless
                for fip in cloud.search_floating_ips(
                        filters=dict(port_id=port['id'])):
                    fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address'])
                    if fixed_net is None:
                        log = _log.setup_logging('openstack')
                        log.debug(
                            "The cloud returned floating ip %(fip)s attached"
                            " to server %(server)s but the fixed ip associated"
                            " with the floating ip in the neutron listing"
                            " does not exist in the nova listing. Something"
                            " is exceptionally broken.",
                            dict(fip=fip['id'], server=server['id']))
                    else:
                        server['addresses'][fixed_net].append(
                            _make_address_dict(fip, port))
    except exc.OpenStackCloudException:
        # If something goes wrong with a cloud call, that's cool - this is
        # an attempt to provide additional data and should not block forward
        # progress
        pass
    return server['addresses']
def _get_supplemental_addresses(cloud, server):
    """Augment ``server['addresses']`` with floating IPs neutron knows about.

    When nova already reports a floating IP, the addresses are returned
    unchanged.  Otherwise, ports and floating IPs are looked up via the
    network service and merged into the matching fixed-IP network entry.
    Best-effort: cloud API failures leave the addresses as-is.
    """
    # Map fixed IP -> network name so floating IPs can be attached to the
    # right network entry below.
    fixed_ip_mapping = {}
    for name, network in server['addresses'].items():
        for address in network:
            # Skip IPv6 entries; only v4 fixed IPs are mapped here.
            if address['version'] == 6:
                continue
            if address.get('OS-EXT-IPS:type') == 'floating':
                # We have a floating IP that nova knows about, do nothing
                return server['addresses']
            fixed_ip_mapping[address['addr']] = name
    try:
        # Don't bother doing this before the server is active, it's a waste
        # of an API call while polling for a server to come up
        if (cloud.has_service('network') and cloud._has_floating_ips()
                and server['status'] == 'ACTIVE'):
            for port in cloud.search_ports(filters=dict(
                    device_id=server['id'])):
                # This SHOULD return one and only one FIP - but doing it as a
                # search/list lets the logic work regardless
                for fip in cloud.search_floating_ips(filters=dict(
                        port_id=port['id'])):
                    fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address'])
                    if fixed_net is None:
                        log = _log.setup_logging('openstack')
                        log.debug(
                            "The cloud returned floating ip %(fip)s attached"
                            " to server %(server)s but the fixed ip associated"
                            " with the floating ip in the neutron listing"
                            " does not exist in the nova listing. Something"
                            " is exceptionally broken.",
                            dict(fip=fip['id'], server=server['id']))
                    else:
                        server['addresses'][fixed_net].append(
                            _make_address_dict(fip, port))
    except exc.OpenStackCloudException:
        # If something goes wrong with a cloud call, that's cool - this is
        # an attempt to provide additional data and should not block forward
        # progress
        pass
    return server['addresses']
class Proxy(proxy.Proxy):
    """Proxy for the object-store (swift) v1 API."""

    skip_discovery = True

    Account = _account.Account
    Container = _container.Container
    Object = _obj.Object

    log = _log.setup_logging('openstack')

    def _extract_name(self, url, service_type=None, project_id=None):
        """Classify a swift URL path for metrics/naming purposes.

        Returns ``['account']``, ``['endpoints']``, ``['container']`` or
        ``['object']`` depending on how many path components remain once
        the version prefix and the project id are stripped.
        """
        url_path = parse.urlparse(url).path.strip()
        # Remove / from the beginning to keep the list indexes of interesting
        # things consistent
        if url_path.startswith('/'):
            url_path = url_path[1:]

        # Split url into parts and exclude potential project_id in some urls
        url_parts = [
            x for x in url_path.split('/')
            if (x != project_id
                and (not project_id
                     or (project_id and x != 'AUTH_' + project_id)))
        ]
        # Strip leading version piece so that
        # GET /v1/AUTH_xxx
        # returns ['AUTH_xxx']
        if (url_parts[0]
                and url_parts[0][0] == 'v'
                and url_parts[0][1] and url_parts[0][1].isdigit()):
            url_parts = url_parts[1:]

        # Strip out anything that's empty or None
        parts = [part for part in url_parts if part]

        # Getting the root of an endpoint is doing version discovery
        if not parts:
            return ['account']
        if len(parts) == 1:
            if 'endpoints' in parts:
                return ['endpoints']
            else:
                return ['container']
        else:
            return ['object']

    def get_account_metadata(self):
        """Get metadata for this account.

        :rtype:
            :class:`~openstack.object_store.v1.account.Account`
        """
        return self._head(_account.Account)

    def set_account_metadata(self, **metadata):
        """Set metadata for this account.

        :param kwargs metadata: Key/value pairs to be set as metadata
            on the container. Custom metadata can be set.
            Custom metadata are keys and values defined by the user.
        """
        account = self._get_resource(_account.Account, None)
        account.set_metadata(self, metadata)

    def delete_account_metadata(self, keys):
        """Delete metadata for this account.

        :param keys: The keys of metadata to be deleted.
        """
        account = self._get_resource(_account.Account, None)
        account.delete_metadata(self, keys)

    def containers(self, **query):
        """Obtain Container objects for this account.

        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned.

        :rtype: A generator of
            :class:`~openstack.object_store.v1.container.Container` objects.
        """
        return self._list(_container.Container, paginated=True, **query)

    def create_container(self, name, **attrs):
        """Create a new container from attributes

        :param container: Name of the container to create.
        :param dict attrs: Keyword arguments which will be used to create
            a :class:`~openstack.object_store.v1.container.Container`,
            comprised of the properties on the Container class.

        :returns: The results of container creation
        :rtype: :class:`~openstack.object_store.v1.container.Container`
        """
        return self._create(_container.Container, name=name, **attrs)

    def delete_container(self, container, ignore_missing=True):
        """Delete a container

        :param container: The value can be either the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the container does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            server.

        :returns: ``None``
        """
        self._delete(_container.Container, container,
                     ignore_missing=ignore_missing)

    def get_container_metadata(self, container):
        """Get metadata for a container

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :returns: One :class:`~openstack.object_store.v1.container.Container`
        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        return self._head(_container.Container, container)

    def set_container_metadata(self, container, refresh=True, **metadata):
        """Set metadata for a container.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param refresh: Flag to trigger refresh of container object re-fetch.
        :param kwargs metadata: Key/value pairs to be set as metadata
            on the container. Both custom and system metadata can be set.
            Custom metadata are keys and values defined by the user. System
            metadata are keys defined by the Object Store and values defined
            by the user. The system metadata keys are:

            - `content_type`
            - `is_content_type_detected`
            - `versions_location`
            - `read_ACL`
            - `write_ACL`
            - `sync_to`
            - `sync_key`
        """
        res = self._get_resource(_container.Container, container)
        res.set_metadata(self, metadata, refresh=refresh)
        return res

    def delete_container_metadata(self, container, keys):
        """Delete metadata for a container.

        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param keys: The keys of metadata to be deleted.
        """
        res = self._get_resource(_container.Container, container)
        res.delete_metadata(self, keys)
        return res

    def objects(self, container, **query):
        """Return a generator that yields the Container's objects.

        :param container: A container object or the name of a container
            that you want to retrieve objects from.
        :type container:
            :class:`~openstack.object_store.v1.container.Container`
        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned.

        :rtype: A generator of
            :class:`~openstack.object_store.v1.obj.Object` objects.
        """
        container = self._get_container_name(container=container)

        for obj in self._list(_obj.Object, container=container,
                              paginated=True, format='json', **query):
            # Stamp the container back onto each object so callers can use
            # it for subsequent per-object operations.
            obj.container = container
            yield obj

    def _get_container_name(self, obj=None, container=None):
        # Resolve a container name, preferring the one recorded on the
        # object (when given) and falling back to the explicit container.
        if obj is not None:
            obj = self._get_resource(_obj.Object, obj)
            if obj.container is not None:
                return obj.container
        if container is not None:
            container = self._get_resource(_container.Container, container)
            return container.name

        raise ValueError("container must be specified")

    def get_object(self, obj, container=None):
        """Get the data associated with an object

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :returns: The contents of the object.  Use the
            :func:`~get_object_metadata` method if you want an object
            resource.
        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        container_name = self._get_container_name(obj=obj,
                                                  container=container)

        return self._get(_obj.Object, obj, container=container_name)

    def download_object(self, obj, container=None, **attrs):
        """Download the data contained inside an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        container_name = self._get_container_name(obj=obj,
                                                  container=container)
        obj = self._get_resource(_obj.Object, obj,
                                 container=container_name, **attrs)
        return obj.download(self)

    def stream_object(self, obj, container=None, chunk_size=1024, **attrs):
        """Stream the data contained inside an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        :returns: An iterator that iterates over chunk_size bytes
        """
        # BUG FIX: a second, overwriting call
        # ``self._get_container_name(container=container)`` used to discard
        # the container resolved from the object and raised ValueError
        # whenever ``container`` was None even though the object carried its
        # own container. Resolve once, the same way download_object does.
        container_name = self._get_container_name(obj=obj,
                                                  container=container)
        obj = self._get_resource(_obj.Object, obj,
                                 container=container_name, **attrs)
        return obj.stream(self, chunk_size=chunk_size)

    def create_object(self, container, name, filename=None,
                      md5=None, sha256=None, segment_size=None,
                      use_slo=True, metadata=None,
                      generate_checksums=None, data=None, **headers):
        """Create a file object.

        Automatically uses large-object segments if needed.

        :param container: The name of the container to store the file in.
            This container will be created if it does not exist already.
        :param name: Name for the object within the container.
        :param filename: The path to the local file whose contents will be
            uploaded. Mutually exclusive with data.
        :param data: The content to upload to the object. Mutually exclusive
            with filename.
        :param md5: A hexadecimal md5 of the file. (Optional), if it is known
            and can be passed here, it will save repeating the expensive md5
            process. It is assumed to be accurate.
        :param sha256: A hexadecimal sha256 of the file. (Optional) See md5.
        :param segment_size: Break the uploaded object into segments of this
            many bytes. (Optional) SDK will attempt to discover the maximum
            value for this from the server if it is not specified, or will use
            a reasonable default.
        :param headers: These will be passed through to the object creation
            API as HTTP Headers.
        :param use_slo: If the object is large enough to need to be a Large
            Object, use a static rather than dynamic object. Static Objects
            will delete segment objects when the manifest object is deleted.
            (optional, defaults to True)
        :param generate_checksums: Whether to generate checksums on the client
            side that get added to headers for later prevention of double
            uploads of identical data. (optional, defaults to True)
        :param metadata: This dict will get changed into headers that set
            metadata of the object

        :raises: ``OpenStackCloudException`` on operation error.
        """
        if data is not None and filename:
            raise ValueError(
                "Both filename and data given. Please choose one.")
        if data is not None and not name:
            raise ValueError(
                "name is a required parameter when data is given")
        if data is not None and generate_checksums:
            raise ValueError(
                "checksums cannot be generated with data parameter")
        if generate_checksums is None:
            if data is not None:
                generate_checksums = False
            else:
                generate_checksums = True

        if not metadata:
            metadata = {}

        if not filename and data is None:
            filename = name

        if generate_checksums and (md5 is None or sha256 is None):
            (md5, sha256) = self._connection._get_file_hashes(filename)
        if md5:
            headers[self._connection._OBJECT_MD5_KEY] = md5 or ''
        if sha256:
            headers[self._connection._OBJECT_SHA256_KEY] = sha256 or ''
        for (k, v) in metadata.items():
            # Normalize user metadata keys onto the swift header namespace.
            if not k.lower().startswith('x-object-meta-'):
                headers['x-object-meta-' + k] = v
            else:
                headers[k] = v

        container_name = self._get_container_name(container=container)
        endpoint = '{container}/{name}'.format(container=container_name,
                                               name=name)

        if data is not None:
            self.log.debug(
                "swift uploading data to %(endpoint)s",
                {'endpoint': endpoint})
            # TODO(gtema): custom headers need to be somehow injected
            return self._create(
                _obj.Object, container=container_name,
                name=name, data=data, **headers)

        # segment_size gets used as a step value in a range call, so needs
        # to be an int
        if segment_size:
            segment_size = int(segment_size)
        segment_size = self.get_object_segment_size(segment_size)
        file_size = os.path.getsize(filename)

        if self.is_object_stale(container_name, name, filename, md5, sha256):

            self._connection.log.debug(
                "swift uploading %(filename)s to %(endpoint)s",
                {'filename': filename, 'endpoint': endpoint})

            if file_size <= segment_size:
                # TODO(gtema): replace with regular resource put, but
                # custom headers need to be somehow injected
                self._upload_object(endpoint, filename, headers)
            else:
                self._upload_large_object(
                    endpoint, filename, headers,
                    file_size, segment_size, use_slo)

    # Backwards compat
    upload_object = create_object

    def copy_object(self):
        """Copy an object."""
        raise NotImplementedError

    def delete_object(self, obj, ignore_missing=True, container=None):
        """Delete an object

        :param obj: The value can be either the name of an object or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the object does not exist. When set to ``True``, no exception
            will be set when attempting to delete a nonexistent server.

        :returns: ``None``
        """
        container_name = self._get_container_name(obj, container)

        self._delete(_obj.Object, obj, ignore_missing=ignore_missing,
                     container=container_name)

    def get_object_metadata(self, obj, container=None):
        """Get metadata for an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :returns: One :class:`~openstack.object_store.v1.obj.Object`
        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        container_name = self._get_container_name(obj, container)

        return self._head(_obj.Object, obj, container=container_name)

    def set_object_metadata(self, obj, container=None, **metadata):
        """Set metadata for an object.

        Note: This method will do an extra HEAD call.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param kwargs metadata: Key/value pairs to be set as metadata
            on the container. Both custom and system metadata can be set.
            Custom metadata are keys and values defined by the user. System
            metadata are keys defined by the Object Store and values defined
            by the user. The system metadata keys are:

            - `content_type`
            - `content_encoding`
            - `content_disposition`
            - `delete_after`
            - `delete_at`
            - `is_content_type_detected`
        """
        container_name = self._get_container_name(obj, container)

        res = self._get_resource(_obj.Object, obj, container=container_name)
        res.set_metadata(self, metadata)
        return res

    def delete_object_metadata(self, obj, container=None, keys=None):
        """Delete metadata for an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param keys: The keys of metadata to be deleted.
        """
        container_name = self._get_container_name(obj, container)

        res = self._get_resource(_obj.Object, obj, container=container_name)
        res.delete_metadata(self, keys)
        return res

    def is_object_stale(self, container, name, filename,
                        file_md5=None, file_sha256=None):
        """Check to see if an object matches the hashes of a file.

        :param container: Name of the container.
        :param name: Name of the object.
        :param filename: Path to the file.
        :param file_md5: Pre-calculated md5 of the file contents. Defaults to
            None which means calculate locally.
        :param file_sha256: Pre-calculated sha256 of the file contents.
            Defaults to None which means calculate locally.
        """
        metadata = self._connection.get_object_metadata(container, name)
        if not metadata:
            self._connection.log.debug(
                "swift stale check, no object: {container}/{name}".format(
                    container=container, name=name))
            return True

        if not (file_md5 or file_sha256):
            (file_md5, file_sha256) = \
                self._connection._get_file_hashes(filename)
        # Fall back to the legacy shade metadata keys when the current
        # SDK keys are absent.
        md5_key = metadata.get(
            self._connection._OBJECT_MD5_KEY,
            metadata.get(self._connection._SHADE_OBJECT_MD5_KEY, ''))
        sha256_key = metadata.get(
            self._connection._OBJECT_SHA256_KEY,
            metadata.get(self._connection._SHADE_OBJECT_SHA256_KEY, ''))
        up_to_date = self._connection._hashes_up_to_date(
            md5=file_md5, sha256=file_sha256,
            md5_key=md5_key, sha256_key=sha256_key)

        if not up_to_date:
            self._connection.log.debug(
                "swift checksum mismatch: "
                " %(filename)s!=%(container)s/%(name)s",
                {'filename': filename, 'container': container, 'name': name})
            return True

        self._connection.log.debug(
            "swift object up to date: %(container)s/%(name)s",
            {'container': container, 'name': name})
        return False

    def _upload_large_object(self, endpoint, filename, headers,
                             file_size, segment_size, use_slo):
        # If the object is big, we need to break it up into segments that
        # are no larger than segment_size, upload each of them individually
        # and then upload a manifest object. The segments can be uploaded in
        # parallel, so we'll use the async feature of the TaskManager.

        segment_futures = []
        segment_results = []
        retry_results = []
        retry_futures = []
        manifest = []

        # Get an OrderedDict with keys being the swift location for the
        # segment, the value a FileSegment file-like object that is a
        # slice of the data for the segment.
        segments = self._get_file_segments(
            endpoint, filename, file_size, segment_size)

        # Schedule the segments for upload
        for name, segment in segments.items():
            # Async call to put - schedules execution and returns a future
            segment_future = self._connection._pool_executor.submit(
                self.put,
                name, headers=headers, data=segment,
                raise_exc=False)
            segment_futures.append(segment_future)
            # TODO(mordred) Collect etags from results to add to this manifest
            # dict. Then sort the list of dicts by path.
            manifest.append(dict(
                path='/{name}'.format(name=name),
                size_bytes=segment.length))

        # Try once and collect failed results to retry
        segment_results, retry_results = self._connection._wait_for_futures(
            segment_futures, raise_on_error=False)

        self._add_etag_to_manifest(segment_results, manifest)

        for result in retry_results:
            # Grab the FileSegment for the failed upload so we can retry
            name = self._object_name_from_url(result.url)
            segment = segments[name]
            segment.seek(0)
            # Async call to put - schedules execution and returns a future.
            # No raise_exc=False here: a second failure should surface.
            segment_future = self._connection._pool_executor.submit(
                self.put, name, headers=headers, data=segment)
            # TODO(mordred) Collect etags from results to add to this manifest
            # dict. Then sort the list of dicts by path.
            retry_futures.append(segment_future)

        # If any segments fail the second time, just throw the error
        segment_results, retry_results = self._connection._wait_for_futures(
            retry_futures, raise_on_error=True)

        self._add_etag_to_manifest(segment_results, manifest)

        if use_slo:
            return self._finish_large_object_slo(endpoint, headers, manifest)
        else:
            return self._finish_large_object_dlo(endpoint, headers)

    def _finish_large_object_slo(self, endpoint, headers, manifest):
        # TODO(mordred) send an etag of the manifest, which is the md5sum
        # of the concatenation of the etags of the results
        headers = headers.copy()
        return self.put(
            endpoint,
            params={'multipart-manifest': 'put'},
            headers=headers, data=json.dumps(manifest))

    def _finish_large_object_dlo(self, endpoint, headers):
        headers = headers.copy()
        headers['X-Object-Manifest'] = endpoint
        return self.put(endpoint, headers=headers)

    def _upload_object(self, endpoint, filename, headers):
        # Stream the file straight off disk; the context manager guarantees
        # the handle is closed even if the PUT raises.
        with open(filename, 'rb') as dt:
            return proxy._json_response(self.put(
                endpoint, headers=headers, data=dt))

    def _get_file_segments(self, endpoint, filename, file_size, segment_size):
        # Use an ordered dict here so that testing can replicate things
        segments = collections.OrderedDict()
        for (index, offset) in enumerate(range(0, file_size, segment_size)):
            remaining = file_size - (index * segment_size)
            segment = _utils.FileSegment(
                filename, offset,
                segment_size if segment_size < remaining else remaining)
            name = '{endpoint}/{index:0>6}'.format(
                endpoint=endpoint, index=index)
            segments[name] = segment
        return segments

    def get_object_segment_size(self, segment_size):
        """Get a segment size that will work given capabilities"""
        if segment_size is None:
            segment_size = DEFAULT_OBJECT_SEGMENT_SIZE
        min_segment_size = 0
        try:
            caps = self.get_info()
        except exceptions.SDKException as e:
            # NOTE(review): assumes e.response is always set on SDKException
            # here — confirm; an exception without a response would raise
            # AttributeError instead of being handled.
            if e.response.status_code in (404, 412):
                server_max_file_size = DEFAULT_MAX_FILE_SIZE
                self._connection.log.info(
                    "Swift capabilities not supported. "
                    "Using default max file size.")
            else:
                raise
        else:
            server_max_file_size = caps.swift.get('max_file_size', 0)
            min_segment_size = caps.slo.get('min_segment_size', 0)

        # Clamp the requested size into the server-advertised bounds.
        if segment_size > server_max_file_size:
            return server_max_file_size
        if segment_size < min_segment_size:
            return min_segment_size
        return segment_size

    def _object_name_from_url(self, url):
        """Get container_name/object_name from the full URL called.

        Remove the Swift endpoint from the front of the URL, and remove
        the leading / that will be left behind.
        """
        endpoint = self.get_endpoint()
        object_name = url.replace(endpoint, '')
        if object_name.startswith('/'):
            object_name = object_name[1:]
        return object_name

    def _add_etag_to_manifest(self, segment_results, manifest):
        for result in segment_results:
            if 'Etag' not in result.headers:
                continue
            name = self._object_name_from_url(result.url)
            for entry in manifest:
                if entry['path'] == '/{name}'.format(name=name):
                    entry['etag'] = result.headers['Etag']

    def get_info(self):
        """Get infomation about the object-storage service

        The object-storage service publishes a set of capabilities that
        include metadata about maximum values and thresholds.
        """
        return self._get(_info.Info)

    def set_account_temp_url_key(self, key, secondary=False):
        """Set the temporary URL key for the account.

        :param key: Text of the key to use.
        :param bool secondary: Whether this should set the secondary key.
            (defaults to False)
        """
        account = self._get_resource(_account.Account, None)
        account.set_temp_url_key(self, key, secondary)

    def set_container_temp_url_key(self, container, key, secondary=False):
        """Set the temporary URL key for a container.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param key: Text of the key to use.
        :param bool secondary: Whether this should set the secondary key.
            (defaults to False)
        """
        res = self._get_resource(_container.Container, container)
        res.set_temp_url_key(self, key, secondary)

    def get_temp_url_key(self, container=None):
        """Get the best temporary url key for a given container.

        Will first try to return Temp-URL-Key-2 then Temp-URL-Key for
        the container, and if neither exist, will attempt to return
        Temp-URL-Key-2 then Temp-URL-Key for the account. If neither
        exist, will return None.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        """
        temp_url_key = None
        if container:
            container_meta = self.get_container_metadata(container)
            temp_url_key = (container_meta.meta_temp_url_key_2
                            or container_meta.meta_temp_url_key)
        if not temp_url_key:
            account_meta = self.get_account_metadata()
            temp_url_key = (account_meta.meta_temp_url_key_2
                            or account_meta.meta_temp_url_key)
        if temp_url_key and not isinstance(temp_url_key, bytes):
            temp_url_key = temp_url_key.encode('utf8')
        return temp_url_key

    def _check_temp_url_key(self, container=None, temp_url_key=None):
        # Normalize an explicit key to bytes, or discover one from the
        # container/account metadata; fail loudly if neither exists.
        if temp_url_key:
            if not isinstance(temp_url_key, bytes):
                temp_url_key = temp_url_key.encode('utf8')
        else:
            temp_url_key = self.get_temp_url_key(container)
        if not temp_url_key:
            raise exceptions.SDKException(
                'temp_url_key was not given, nor was a temporary url key'
                ' found for the account or the container.')
        return temp_url_key

    def generate_form_signature(self, container, object_prefix, redirect_url,
                                max_file_size, max_upload_count, timeout,
                                temp_url_key=None):
        """Generate a signature for a FormPost upload.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param object_prefix: Prefix to apply to limit all object names
            created using this signature.
        :param redirect_url: The URL to redirect the browser to after the
            uploads have completed.
        :param max_file_size: The maximum file size per file uploaded.
        :param max_upload_count: The maximum number of uploaded files allowed.
        :param timeout: The number of seconds from now to allow the form post
            to begin.
        :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account.
            Optional, if omitted, the key will be fetched from the container
            or the account.
        """
        max_file_size = int(max_file_size)
        if max_file_size < 1:
            raise exceptions.SDKException(
                'Please use a positive max_file_size value.')
        max_upload_count = int(max_upload_count)
        if max_upload_count < 1:
            raise exceptions.SDKException(
                'Please use a positive max_upload_count value.')
        if timeout < 1:
            raise exceptions.SDKException(
                'Please use a positive <timeout> value.')
        expires = int(time.time() + int(timeout))

        temp_url_key = self._check_temp_url_key(container=container,
                                                temp_url_key=temp_url_key)

        res = self._get_resource(_container.Container, container)
        endpoint = parse.urlparse(self.get_endpoint())
        path = '/'.join([endpoint.path, res.name, object_prefix])

        # FormPost signature body layout is mandated by the swift formpost
        # middleware: path, redirect, max sizes, count, expiry.
        data = '%s\n%s\n%s\n%s\n%s' % (path, redirect_url, max_file_size,
                                       max_upload_count, expires)
        data = data.encode('utf8')
        sig = hmac.new(temp_url_key, data, sha1).hexdigest()

        return (expires, sig)

    def generate_temp_url(self, path, seconds, method, absolute=False,
                          prefix=False, iso8601=False, ip_range=None,
                          temp_url_key=None):
        """Generates a temporary URL that gives unauthenticated access to the
        Swift object.

        :param path: The full path to the Swift object or prefix if
            a prefix-based temporary URL should be generated. Example:
            /v1/AUTH_account/c/o or /v1/AUTH_account/c/prefix.
        :param seconds: time in seconds or ISO 8601 timestamp.
            If absolute is False and this is the string representation of an
            integer, then this specifies the amount of time in seconds for
            which the temporary URL will be valid.
            If absolute is True then this specifies an absolute time at which
            the temporary URL will expire.
        :param method: A HTTP method, typically either GET or PUT, to allow
            for this temporary URL.
        :param absolute: if True then the seconds parameter is interpreted as
            a Unix timestamp, if seconds represents an integer.
        :param prefix: if True then a prefix-based temporary URL will be
            generated.
        :param iso8601: if True, a URL containing an ISO 8601 UTC timestamp
            instead of a UNIX timestamp will be created.
        :param ip_range: if a valid ip range, restricts the temporary URL to
            the range of ips.
        :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account.
            Optional, if omitted, the key will be fetched from the container
            or the account.
        :raises ValueError: if timestamp or path is not in valid format.
        :return: the path portion of a temporary URL
        """
        try:
            try:
                timestamp = float(seconds)
            except ValueError:
                formats = (
                    EXPIRES_ISO8601_FORMAT,
                    EXPIRES_ISO8601_FORMAT[:-1],
                    SHORT_EXPIRES_ISO8601_FORMAT)
                for f in formats:
                    try:
                        t = time.strptime(seconds, f)
                    except ValueError:
                        t = None
                    else:
                        if f == EXPIRES_ISO8601_FORMAT:
                            timestamp = timegm(t)
                        else:
                            # Use local time if UTC designator is missing.
                            timestamp = int(time.mktime(t))

                        absolute = True
                        break

                if t is None:
                    raise ValueError()
            else:
                if not timestamp.is_integer():
                    raise ValueError()
                timestamp = int(timestamp)
                if timestamp < 0:
                    raise ValueError()
        except ValueError:
            raise ValueError('time must either be a whole number '
                             'or in specific ISO 8601 format.')

        if isinstance(path, bytes):
            try:
                path_for_body = path.decode('utf-8')
            except UnicodeDecodeError:
                raise ValueError('path must be representable as UTF-8')
        else:
            path_for_body = path

        parts = path_for_body.split('/', 4)
        if len(parts) != 5 or parts[0] or not all(
                parts[1:(4 if prefix else 5)]):
            if prefix:
                raise ValueError('path must at least contain /v1/a/c/')
            else:
                raise ValueError('path must be full path to an object'
                                 ' e.g. /v1/a/c/o')

        standard_methods = ['GET', 'PUT', 'HEAD', 'POST', 'DELETE']
        if method.upper() not in standard_methods:
            self.log.warning('Non default HTTP method %s for tempurl '
                             'specified, possibly an error', method.upper())

        if not absolute:
            expiration = int(time.time() + timestamp)
        else:
            expiration = timestamp
        hmac_parts = [
            method.upper(), str(expiration),
            ('prefix:' if prefix else '') + path_for_body]

        if ip_range:
            if isinstance(ip_range, bytes):
                try:
                    ip_range = ip_range.decode('utf-8')
                except UnicodeDecodeError:
                    raise ValueError(
                        'ip_range must be representable as UTF-8')
            hmac_parts.insert(0, "ip=%s" % ip_range)

        hmac_body = u'\n'.join(hmac_parts)

        temp_url_key = self._check_temp_url_key(temp_url_key=temp_url_key)

        sig = hmac.new(temp_url_key, hmac_body.encode('utf-8'),
                       sha1).hexdigest()

        if iso8601:
            expiration = time.strftime(
                EXPIRES_ISO8601_FORMAT, time.gmtime(expiration))

        temp_url = u'{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format(
            path=path_for_body, sig=sig, exp=expiration)

        if ip_range:
            temp_url += u'&temp_url_ip_range={}'.format(ip_range)

        if prefix:
            temp_url += u'&temp_url_prefix={}'.format(parts[4])
        # Have return type match path from caller
        if isinstance(path, bytes):
            return temp_url.encode('utf-8')
        else:
            return temp_url
class Proxy(proxy.Proxy):
    """Proxy for the object-store (swift) v1 API.

    Exposes account/container/object CRUD plus the large-object upload
    helpers (SLO/DLO segmenting) and temp-URL / form-post signature
    utilities.
    """

    skip_discovery = True

    Account = _account.Account
    Container = _container.Container
    Object = _obj.Object

    # Class-level logger shared by all proxy instances.
    log = _log.setup_logging('openstack')

    def get_account_metadata(self):
        """Get metadata for this account.

        :rtype:
            :class:`~openstack.object_store.v1.account.Account`
        """
        return self._head(_account.Account)

    def set_account_metadata(self, **metadata):
        """Set metadata for this account.

        :param kwargs metadata: Key/value pairs to be set as metadata
            on the account. Custom metadata can be set. Custom metadata
            are keys and values defined by the user.
        """
        account = self._get_resource(_account.Account, None)
        account.set_metadata(self, metadata)

    def delete_account_metadata(self, keys):
        """Delete metadata for this account.

        :param keys: The keys of metadata to be deleted.
        """
        account = self._get_resource(_account.Account, None)
        account.delete_metadata(self, keys)

    def containers(self, **query):
        """Obtain Container objects for this account.

        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned.

        :rtype: A generator of
            :class:`~openstack.object_store.v1.container.Container` objects.
        """
        return self._list(_container.Container, paginated=True, **query)

    def create_container(self, name, **attrs):
        """Create a new container from attributes.

        :param name: Name of the container to create.
        :param dict attrs: Keyword arguments which will be used to create a
            :class:`~openstack.object_store.v1.container.Container`,
            comprised of the properties on the Container class.

        :returns: The results of container creation
        :rtype: :class:`~openstack.object_store.v1.container.Container`
        """
        return self._create(_container.Container, name=name, **attrs)

    def delete_container(self, container, ignore_missing=True):
        """Delete a container.

        :param container: The value can be either the name of a container or
            a :class:`~openstack.object_store.v1.container.Container`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the container does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            server.

        :returns: ``None``
        """
        self._delete(_container.Container, container,
                     ignore_missing=ignore_missing)

    def get_container_metadata(self, container):
        """Get metadata for a container.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :returns: One :class:`~openstack.object_store.v1.container.Container`
        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        return self._head(_container.Container, container)

    def set_container_metadata(self, container, refresh=True, **metadata):
        """Set metadata for a container.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param refresh: Flag to trigger refresh of container object re-fetch.
        :param kwargs metadata: Key/value pairs to be set as metadata
            on the container. Both custom and system metadata can be set.
            Custom metadata are keys and values defined by the user. System
            metadata are keys defined by the Object Store and values defined
            by the user. The system metadata keys are:

            - `content_type`
            - `is_content_type_detected`
            - `versions_location`
            - `read_ACL`
            - `write_ACL`
            - `sync_to`
            - `sync_key`
        """
        res = self._get_resource(_container.Container, container)
        res.set_metadata(self, metadata, refresh=refresh)
        return res

    def delete_container_metadata(self, container, keys):
        """Delete metadata for a container.

        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param keys: The keys of metadata to be deleted.
        """
        res = self._get_resource(_container.Container, container)
        res.delete_metadata(self, keys)
        return res

    def objects(self, container, **query):
        """Return a generator that yields the Container's objects.

        :param container: A container object or the name of a container
            that you want to retrieve objects from.
        :type container:
            :class:`~openstack.object_store.v1.container.Container`
        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned.

        :rtype: A generator of
            :class:`~openstack.object_store.v1.obj.Object` objects.
        """
        container = self._get_container_name(container=container)

        for obj in self._list(
                _obj.Object, container=container,
                paginated=True, format='json', **query):
            # Stamp the container back onto each object so callers can use
            # it without re-resolving.
            obj.container = container
            yield obj

    def _get_container_name(self, obj=None, container=None):
        # Resolve a container name, preferring the container recorded on
        # the object itself, then the explicit ``container`` argument.
        if obj is not None:
            obj = self._get_resource(_obj.Object, obj)
            if obj.container is not None:
                return obj.container
        if container is not None:
            container = self._get_resource(_container.Container, container)
            return container.name

        raise ValueError("container must be specified")

    def get_object(self, obj, container=None):
        """Get the data associated with an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :returns: The contents of the object.  Use the
            :func:`~get_object_metadata` method if you want an object
            resource.
        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        container_name = self._get_container_name(
            obj=obj, container=container)

        return self._get(_obj.Object, obj, container=container_name)

    def download_object(self, obj, container=None, **attrs):
        """Download the data contained inside an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        container_name = self._get_container_name(
            obj=obj, container=container)
        obj = self._get_resource(
            _obj.Object, obj, container=container_name, **attrs)
        return obj.download(self)

    def stream_object(self, obj, container=None, chunk_size=1024, **attrs):
        """Stream the data contained inside an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        :returns: An iterator that iterates over chunk_size bytes
        """
        container_name = self._get_container_name(
            obj=obj, container=container)
        # FIX: a stray second call _get_container_name(container=container)
        # used to overwrite container_name, discarding the fallback to the
        # object's own container and raising ValueError whenever only
        # ``obj`` carried the container.  Resolve the name exactly once.
        obj = self._get_resource(
            _obj.Object, obj, container=container_name, **attrs)
        return obj.stream(self, chunk_size=chunk_size)

    def create_object(
            self, container, name, filename=None,
            md5=None, sha256=None, segment_size=None,
            use_slo=True, metadata=None,
            generate_checksums=None, data=None,
            **headers):
        """Create a file object.

        Automatically uses large-object segments if needed.

        :param container: The name of the container to store the file in.
            This container will be created if it does not exist already.
        :param name: Name for the object within the container.
        :param filename: The path to the local file whose contents will be
            uploaded. Mutually exclusive with data.
        :param data: The content to upload to the object. Mutually exclusive
            with filename.
        :param md5: A hexadecimal md5 of the file. (Optional), if it is known
            and can be passed here, it will save repeating the expensive md5
            process. It is assumed to be accurate.
        :param sha256: A hexadecimal sha256 of the file. (Optional) See md5.
        :param segment_size: Break the uploaded object into segments of this
            many bytes. (Optional) SDK will attempt to discover the maximum
            value for this from the server if it is not specified, or will
            use a reasonable default.
        :param headers: These will be passed through to the object creation
            API as HTTP Headers.
        :param use_slo: If the object is large enough to need to be a Large
            Object, use a static rather than dynamic object. Static Objects
            will delete segment objects when the manifest object is deleted.
            (optional, defaults to True)
        :param generate_checksums: Whether to generate checksums on the
            client side that get added to headers for later prevention of
            double uploads of identical data. (optional, defaults to True)
        :param metadata: This dict will get changed into headers that set
            metadata of the object

        :raises: ``OpenStackCloudException`` on operation error.
        """
        # Validate the mutually-exclusive / co-dependent argument combos
        # before doing any work.
        if data is not None and filename:
            raise ValueError(
                "Both filename and data given. Please choose one.")
        if data is not None and not name:
            raise ValueError(
                "name is a required parameter when data is given")
        if data is not None and generate_checksums:
            raise ValueError(
                "checksums cannot be generated with data parameter")
        if generate_checksums is None:
            # Checksums default to on for file uploads; they are impossible
            # for in-memory data (no file to hash).
            generate_checksums = data is None
        if not metadata:
            metadata = {}
        if not filename and data is None:
            filename = name

        if generate_checksums and (md5 is None or sha256 is None):
            (md5, sha256) = self._connection._get_file_hashes(filename)
        # Only set the checksum headers when we actually have a value;
        # the previous ``md5 or ''`` fallback was unreachable here.
        if md5:
            headers[self._connection._OBJECT_MD5_KEY] = md5
        if sha256:
            headers[self._connection._OBJECT_SHA256_KEY] = sha256
        for (k, v) in metadata.items():
            headers['x-object-meta-' + k] = v

        container_name = self._get_container_name(container=container)
        endpoint = '{container}/{name}'.format(
            container=container_name, name=name)

        if data is not None:
            self.log.debug("swift uploading data to %(endpoint)s",
                           {'endpoint': endpoint})
            # TODO(gtema): custom headers need to be somehow injected
            return self._create(
                _obj.Object, container=container_name,
                name=name, data=data, **headers)

        # segment_size gets used as a step value in a range call, so needs
        # to be an int
        if segment_size:
            segment_size = int(segment_size)
        segment_size = self.get_object_segment_size(segment_size)
        file_size = os.path.getsize(filename)

        if self.is_object_stale(container_name, name, filename, md5, sha256):

            self._connection.log.debug(
                "swift uploading %(filename)s to %(endpoint)s",
                {'filename': filename, 'endpoint': endpoint})

            if file_size <= segment_size:
                # TODO(gtema): replace with regular resource put, but
                # custom headers need to be somehow injected
                self._upload_object(endpoint, filename, headers)
            else:
                self._upload_large_object(
                    endpoint, filename, headers,
                    file_size, segment_size, use_slo)

    # Backwards compat
    upload_object = create_object

    def copy_object(self):
        """Copy an object."""
        raise NotImplementedError

    def delete_object(self, obj, ignore_missing=True, container=None):
        """Delete an object.

        :param obj: The value can be either the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the object does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            server.

        :returns: ``None``
        """
        container_name = self._get_container_name(obj, container)

        self._delete(_obj.Object, obj, ignore_missing=ignore_missing,
                     container=container_name)

    def get_object_metadata(self, obj, container=None):
        """Get metadata for an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.

        :returns: One :class:`~openstack.object_store.v1.obj.Object`
        :raises: :class:`~openstack.exceptions.ResourceNotFound`
            when no resource can be found.
        """
        container_name = self._get_container_name(obj, container)

        return self._head(_obj.Object, obj, container=container_name)

    def set_object_metadata(self, obj, container=None, **metadata):
        """Set metadata for an object.

        Note: This method will do an extra HEAD call.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param kwargs metadata: Key/value pairs to be set as metadata
            on the object. Both custom and system metadata can be set.
            Custom metadata are keys and values defined by the user. System
            metadata are keys defined by the Object Store and values defined
            by the user. The system metadata keys are:

            - `content_type`
            - `content_encoding`
            - `content_disposition`
            - `delete_after`
            - `delete_at`
            - `is_content_type_detected`
        """
        container_name = self._get_container_name(obj, container)
        res = self._get_resource(_obj.Object, obj, container=container_name)
        res.set_metadata(self, metadata)
        return res

    def delete_object_metadata(self, obj, container=None, keys=None):
        """Delete metadata for an object.

        :param obj: The value can be the name of an object or a
            :class:`~openstack.object_store.v1.obj.Object` instance.
        :param container: The value can be the ID of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param keys: The keys of metadata to be deleted.
        """
        container_name = self._get_container_name(obj, container)
        res = self._get_resource(_obj.Object, obj, container=container_name)
        res.delete_metadata(self, keys)
        return res

    def is_object_stale(
            self, container, name, filename,
            file_md5=None, file_sha256=None):
        """Check to see if an object matches the hashes of a file.

        :param container: Name of the container.
        :param name: Name of the object.
        :param filename: Path to the file.
        :param file_md5: Pre-calculated md5 of the file contents. Defaults to
            None which means calculate locally.
        :param file_sha256: Pre-calculated sha256 of the file contents.
            Defaults to None which means calculate locally.
        """
        metadata = self._connection.get_object_metadata(container, name)
        if not metadata:
            # No remote object at all: the upload is definitely needed.
            self._connection.log.debug(
                "swift stale check, no object: {container}/{name}".format(
                    container=container, name=name))
            return True

        if not (file_md5 or file_sha256):
            (file_md5, file_sha256) = \
                self._connection._get_file_hashes(filename)
        # Fall back to the legacy shade-era metadata keys when the current
        # ones are absent.
        md5_key = metadata.get(
            self._connection._OBJECT_MD5_KEY,
            metadata.get(self._connection._SHADE_OBJECT_MD5_KEY, ''))
        sha256_key = metadata.get(
            self._connection._OBJECT_SHA256_KEY,
            metadata.get(self._connection._SHADE_OBJECT_SHA256_KEY, ''))
        up_to_date = self._connection._hashes_up_to_date(
            md5=file_md5, sha256=file_sha256,
            md5_key=md5_key, sha256_key=sha256_key)

        if not up_to_date:
            self._connection.log.debug(
                "swift checksum mismatch: "
                " %(filename)s!=%(container)s/%(name)s",
                {'filename': filename, 'container': container, 'name': name})
            return True

        self._connection.log.debug(
            "swift object up to date: %(container)s/%(name)s",
            {'container': container, 'name': name})
        return False

    def _upload_large_object(
            self, endpoint, filename,
            headers, file_size,
            segment_size, use_slo):
        # If the object is big, we need to break it up into segments that
        # are no larger than segment_size, upload each of them individually
        # and then upload a manifest object. The segments can be uploaded in
        # parallel, so we'll use the async feature of the TaskManager.

        segment_futures = []
        segment_results = []
        retry_results = []
        retry_futures = []
        manifest = []

        # Get an OrderedDict with keys being the swift location for the
        # segment, the value a FileSegment file-like object that is a
        # slice of the data for the segment.
        segments = self._get_file_segments(
            endpoint, filename, file_size, segment_size)

        # Schedule the segments for upload
        for name, segment in segments.items():
            # Async call to put - schedules execution and returns a future
            segment_future = self._connection._pool_executor.submit(
                self.put,
                name, headers=headers, data=segment,
                raise_exc=False)
            segment_futures.append(segment_future)
            # TODO(mordred) Collect etags from results to add to this
            # manifest dict. Then sort the list of dicts by path.
            manifest.append(dict(
                path='/{name}'.format(name=name),
                size_bytes=segment.length))

        # Try once and collect failed results to retry
        segment_results, retry_results = self._connection._wait_for_futures(
            segment_futures, raise_on_error=False)

        self._add_etag_to_manifest(segment_results, manifest)

        for result in retry_results:
            # Grab the FileSegment for the failed upload so we can retry
            name = self._object_name_from_url(result.url)
            segment = segments[name]
            # Rewind the segment so the retry re-reads it from the start.
            segment.seek(0)
            # Async call to put - schedules execution and returns a future
            segment_future = self._connection._pool_executor.submit(
                self.put, name, headers=headers, data=segment)
            # TODO(mordred) Collect etags from results to add to this
            # manifest dict. Then sort the list of dicts by path.
            retry_futures.append(segment_future)

        # If any segments fail the second time, just throw the error
        segment_results, retry_results = self._connection._wait_for_futures(
            retry_futures, raise_on_error=True)

        self._add_etag_to_manifest(segment_results, manifest)

        if use_slo:
            return self._finish_large_object_slo(endpoint, headers, manifest)
        else:
            return self._finish_large_object_dlo(endpoint, headers)

    def _finish_large_object_slo(self, endpoint, headers, manifest):
        # TODO(mordred) send an etag of the manifest, which is the md5sum
        # of the concatenation of the etags of the results
        headers = headers.copy()
        return self.put(
            endpoint,
            params={'multipart-manifest': 'put'},
            headers=headers, data=json.dumps(manifest))

    def _finish_large_object_dlo(self, endpoint, headers):
        headers = headers.copy()
        headers['X-Object-Manifest'] = endpoint
        return self.put(endpoint, headers=headers)

    def _upload_object(self, endpoint, filename, headers):
        # Stream the file straight from disk into the PUT request body.
        with open(filename, 'rb') as dt:
            return proxy._json_response(
                self.put(endpoint, headers=headers, data=dt))

    def _get_file_segments(self, endpoint, filename, file_size, segment_size):
        # Use an ordered dict here so that testing can replicate things
        segments = collections.OrderedDict()
        for (index, offset) in enumerate(range(0, file_size, segment_size)):
            remaining = file_size - (index * segment_size)
            segment = _utils.FileSegment(
                filename, offset,
                segment_size if segment_size < remaining else remaining)
            name = '{endpoint}/{index:0>6}'.format(
                endpoint=endpoint, index=index)
            segments[name] = segment
        return segments

    def get_object_segment_size(self, segment_size):
        """Get a segment size that will work given capabilities.

        Clamps the requested size to the server's max_file_size and
        min_segment_size limits, falling back to defaults when the server
        does not expose capabilities.
        """
        if segment_size is None:
            segment_size = DEFAULT_OBJECT_SEGMENT_SIZE
        min_segment_size = 0
        try:
            caps = self.get_info()
        except exceptions.SDKException as e:
            if e.response.status_code in (404, 412):
                # Clear the exception so that it doesn't linger
                # and get reported as an Inner Exception later
                _utils._exc_clear()
                server_max_file_size = DEFAULT_MAX_FILE_SIZE
                self._connection.log.info(
                    "Swift capabilities not supported. "
                    "Using default max file size.")
            else:
                raise
        else:
            server_max_file_size = caps.swift.get('max_file_size', 0)
            min_segment_size = caps.slo.get('min_segment_size', 0)

        if segment_size > server_max_file_size:
            return server_max_file_size
        if segment_size < min_segment_size:
            return min_segment_size
        return segment_size

    def _object_name_from_url(self, url):
        """Get container_name/object_name from the full URL called.

        Remove the Swift endpoint from the front of the URL, and remove
        the leaving / that will leave behind.
        """
        endpoint = self.get_endpoint()
        object_name = url.replace(endpoint, '')
        if object_name.startswith('/'):
            object_name = object_name[1:]
        return object_name

    def _add_etag_to_manifest(self, segment_results, manifest):
        for result in segment_results:
            if 'Etag' not in result.headers:
                continue
            name = self._object_name_from_url(result.url)
            for entry in manifest:
                if entry['path'] == '/{name}'.format(name=name):
                    entry['etag'] = result.headers['Etag']

    def get_info(self):
        """Get information about the object-storage service.

        The object-storage service publishes a set of capabilities that
        include metadata about maximum values and thresholds.
        """
        return self._get(_info.Info)

    def set_account_temp_url_key(self, key, secondary=False):
        """Set the temporary URL key for the account.

        :param key: Text of the key to use.
        :param bool secondary: Whether this should set the secondary key.
            (defaults to False)
        """
        account = self._get_resource(_account.Account, None)
        account.set_temp_url_key(self, key, secondary)

    def set_container_temp_url_key(self, container, key, secondary=False):
        """Set the temporary URL key for a container.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param key: Text of the key to use.
        :param bool secondary: Whether this should set the secondary key.
            (defaults to False)
        """
        res = self._get_resource(_container.Container, container)
        res.set_temp_url_key(self, key, secondary)

    def get_temp_url_key(self, container=None):
        """Get the best temporary url key for a given container.

        Will first try to return Temp-URL-Key-2 then Temp-URL-Key for the
        container, and if neither exist, will attempt to return
        Temp-URL-Key-2 then Temp-URL-Key for the account. If neither exist,
        will return None.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        """
        temp_url_key = None
        if container:
            container_meta = self.get_container_metadata(container)
            temp_url_key = (container_meta.meta_temp_url_key_2
                            or container_meta.meta_temp_url_key)
        if not temp_url_key:
            account_meta = self.get_account_metadata()
            temp_url_key = (account_meta.meta_temp_url_key_2
                            or account_meta.meta_temp_url_key)
        if temp_url_key and not isinstance(temp_url_key, six.binary_type):
            # hmac requires a bytes key.
            temp_url_key = temp_url_key.encode('utf8')
        return temp_url_key

    def generate_form_signature(
            self, container, object_prefix, redirect_url,
            max_file_size, max_upload_count, timeout,
            temp_url_key=None):
        """Generate a signature for a FormPost upload.

        :param container: The value can be the name of a container or a
            :class:`~openstack.object_store.v1.container.Container` instance.
        :param object_prefix: Prefix to apply to limit all object names
            created using this signature.
        :param redirect_url: The URL to redirect the browser to after the
            uploads have completed.
        :param max_file_size: The maximum file size per file uploaded.
        :param max_upload_count: The maximum number of uploaded files
            allowed.
        :param timeout: The number of seconds from now to allow the form
            post to begin.
        :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the
            account. Optional, if omitted, the key will be fetched from the
            container or the account.
        """
        max_file_size = int(max_file_size)
        if max_file_size < 1:
            raise exceptions.SDKException(
                'Please use a positive max_file_size value.')
        max_upload_count = int(max_upload_count)
        if max_upload_count < 1:
            raise exceptions.SDKException(
                'Please use a positive max_upload_count value.')
        # FIX: convert before comparing, matching the treatment of
        # max_file_size/max_upload_count above; comparing a raw string
        # timeout against 1 raises TypeError on Python 3.
        timeout = int(timeout)
        if timeout < 1:
            raise exceptions.SDKException(
                'Please use a positive <timeout> value.')
        expires = int(time.time() + timeout)

        if temp_url_key:
            if not isinstance(temp_url_key, six.binary_type):
                temp_url_key = temp_url_key.encode('utf8')
        else:
            temp_url_key = self.get_temp_url_key(container)
        if not temp_url_key:
            raise exceptions.SDKException(
                'temp_url_key was not given, nor was a temporary url key'
                ' found for the account or the container.')

        res = self._get_resource(_container.Container, container)
        endpoint = parse.urlparse(self.get_endpoint())
        path = '/'.join([endpoint.path, res.name, object_prefix])

        data = '%s\n%s\n%s\n%s\n%s' % (path, redirect_url, max_file_size,
                                       max_upload_count, expires)
        if six.PY3:
            data = data.encode('utf8')
        sig = hmac.new(temp_url_key, data, sha1).hexdigest()

        return (expires, sig)
def __init__(self, *args, **kwargs):
    """Construct the config object, delegating entirely to the parent.

    The only addition over the base class is a per-instance logger
    named after this module.
    """
    # All real construction happens in the parent class; pass everything
    # through untouched.
    super(CloudConfig, self).__init__(*args, **kwargs)
    # Attach a module-scoped logger for this instance.
    self.log = _log.setup_logging(__name__)
def __init__(self, config_files=None, vendor_files=None,
             override_defaults=None, force_ipv4=None,
             envvar_prefix=None, secure_files=None,
             pw_func=None, session_constructor=None,
             app_name=None, app_version=None,
             load_yaml_config=True,
             load_envvars=True):
    """Build the cloud-configuration registry.

    Merges, in order: built-in defaults, clouds.yaml-style config files,
    secure files, and (optionally) OS_* environment variables, then wires
    up ipv4/ipv6 preferences and dogpile cache settings.

    :param config_files: Ordered list of config file paths to search;
        falls back to CONFIG_FILES when not given.
    :param vendor_files: Ordered list of vendor file paths; falls back
        to VENDOR_FILES.
    :param override_defaults: Dict of values layered on top of the
        built-in defaults.
    :param force_ipv4: When not None, used verbatim instead of the
        env-var / client-config derived value.
    :param envvar_prefix: Prefix passed through to _get_os_environ when
        collecting environment variables.
    :param secure_files: Ordered list of secure file paths; falls back
        to SECURE_FILES.
    :param pw_func: Callback saved for later password prompting.
    :param session_constructor: Alternate session factory, stored as-is.
    :param app_name: Application name, stored as-is.
    :param app_version: Application version, stored as-is.
    :param load_yaml_config: When False, no config/secure/vendor files
        are searched (file lists start empty).
    :param load_envvars: When False, environment variables are not
        collected into an 'envvars' cloud.
    """
    self.log = _log.setup_logging('openstack.config')
    self._session_constructor = session_constructor
    self._app_name = app_name
    self._app_version = app_version
    self._load_envvars = load_envvars

    # File search paths are only populated when YAML loading is enabled.
    if load_yaml_config:
        self._config_files = config_files or CONFIG_FILES
        self._secure_files = secure_files or SECURE_FILES
        self._vendor_files = vendor_files or VENDOR_FILES
    else:
        self._config_files = []
        self._secure_files = []
        self._vendor_files = []

    # Explicit env-var overrides take priority over the search paths
    # (inserted at the front of the list).
    config_file_override = self._get_envvar('OS_CLIENT_CONFIG_FILE')
    if config_file_override:
        self._config_files.insert(0, config_file_override)

    secure_file_override = self._get_envvar('OS_CLIENT_SECURE_FILE')
    if secure_file_override:
        self._secure_files.insert(0, secure_file_override)

    self.defaults = self._defaults_module.get_defaults()
    if override_defaults:
        self.defaults.update(override_defaults)

    # First, use a config file if it exists where expected
    self.config_filename, self.cloud_config = self._load_config_file()
    # Secure file contents win over the regular config on merge.
    _, secure_config = self._load_secure_file()
    if secure_config:
        self.cloud_config = _merge_clouds(self.cloud_config,
                                          secure_config)

    # Guarantee a {'clouds': {...}} shape even with no config at all.
    if not self.cloud_config:
        self.cloud_config = {'clouds': {}}
    if 'clouds' not in self.cloud_config:
        self.cloud_config['clouds'] = {}

    # Grab ipv6 preference settings from env
    client_config = self.cloud_config.get('client', {})

    if force_ipv4 is not None:
        # If it's passed in to the constructor, honor it.
        self.force_ipv4 = force_ipv4
    else:
        # Get the backwards compat value
        prefer_ipv6 = get_boolean(
            self._get_envvar(
                'OS_PREFER_IPV6', client_config.get(
                    'prefer_ipv6', client_config.get(
                        'prefer-ipv6', True))))
        # 'broken-ipv6' is the legacy spelling of force_ipv4.
        force_ipv4 = get_boolean(
            self._get_envvar(
                'OS_FORCE_IPV4', client_config.get(
                    'force_ipv4', client_config.get(
                        'broken-ipv6', False))))

        self.force_ipv4 = force_ipv4
        if not prefer_ipv6:
            # this will only be false if someone set it explicitly
            # honor their wishes
            self.force_ipv4 = True

    # Next, process environment variables and add them to the mix
    self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars')
    if self.envvar_key in self.cloud_config['clouds']:
        # A file-based cloud with the same name as the env-var cloud is
        # ambiguous; refuse to continue.
        raise exceptions.ConfigException(
            '"{0}" defines a cloud named "{1}", but'
            ' OS_CLOUD_NAME is also set to "{1}". Please rename'
            ' either your environment based cloud, or one of your'
            ' file-based clouds.'.format(self.config_filename,
                                         self.envvar_key))

    self.default_cloud = self._get_envvar('OS_CLOUD')

    if load_envvars:
        envvars = self._get_os_environ(envvar_prefix=envvar_prefix)
        if envvars:
            self.cloud_config['clouds'][self.envvar_key] = envvars
            if not self.default_cloud:
                self.default_cloud = self.envvar_key

    if not self.default_cloud and self.cloud_config['clouds']:
        if len(self.cloud_config['clouds'].keys()) == 1:
            # If there is only one cloud just use it. This matches envvars
            # behavior and allows for much less typing.
            # TODO(mordred) allow someone to mark a cloud as "default" in
            # clouds.yaml.
            # The next/iter thing is for python3 compat where dict.keys
            # returns an iterator but in python2 it's a list.
            self.default_cloud = next(
                iter(self.cloud_config['clouds'].keys()))

    # Finally, fall through and make a cloud that starts with defaults
    # because we need somewhere to put arguments, and there are neither
    # config files or env vars
    if not self.cloud_config['clouds']:
        self.cloud_config = dict(
            clouds=dict(defaults=dict(self.defaults)))
        self.default_cloud = 'defaults'

    # Cache defaults: disabled (null backend) until a 'cache' section
    # says otherwise.
    self._cache_expiration_time = 0
    self._cache_path = CACHE_PATH
    self._cache_class = 'dogpile.cache.null'
    self._cache_arguments = {}
    self._cache_expiration = {}
    if 'cache' in self.cloud_config:
        cache_settings = _util.normalize_keys(self.cloud_config['cache'])

        # expiration_time used to be 'max_age' but the dogpile setting
        # is expiration_time. Support max_age for backwards compat.
        self._cache_expiration_time = cache_settings.get(
            'expiration_time', cache_settings.get(
                'max_age', self._cache_expiration_time))

        # If cache class is given, use that. If not, but if cache time
        # is given, default to memory. Otherwise, default to nothing.
        if self._cache_expiration_time:
            self._cache_class = 'dogpile.cache.memory'
        # NOTE(review): 'class' is read from the raw (un-normalized)
        # cache dict while the other settings use the normalized copy —
        # presumably intentional for backwards compat; confirm.
        self._cache_class = self.cloud_config['cache'].get(
            'class', self._cache_class)
        self._cache_path = os.path.expanduser(
            cache_settings.get('path', self._cache_path))
        self._cache_arguments = cache_settings.get(
            'arguments', self._cache_arguments)
        self._cache_expiration = cache_settings.get(
            'expiration', self._cache_expiration)

    # Flag location to hold the peeked value of an argparse timeout value
    self._argv_timeout = False

    # Save the password callback
    # password = self._pw_callback(prompt="Password: ")
    self._pw_callback = pw_func
def __init__(self, *args, **kwargs):
    """Initialize via the parent class and add a module logger.

    No arguments are inspected here; everything is forwarded verbatim
    to the base-class constructor.
    """
    # Forward construction to the parent unchanged.
    super(CloudConfig, self).__init__(*args, **kwargs)
    # Logger keyed on this module's import path.
    self.log = _log.setup_logging(__name__)
def __init__(self, config_files=None, vendor_files=None,
             override_defaults=None, force_ipv4=None,
             envvar_prefix=None, secure_files=None,
             pw_func=None, session_constructor=None,
             app_name=None, app_version=None,
             load_yaml_config=True,
             load_envvars=True):
    """Build the cloud-configuration registry.

    Layers, in order: built-in defaults, clouds.yaml-style config files,
    secure files, and (optionally) OS_* environment variables; then
    resolves ipv4/ipv6 preferences and dogpile cache settings.

    :param config_files: Ordered list of config file paths to search;
        falls back to CONFIG_FILES when not given.
    :param vendor_files: Ordered list of vendor file paths; falls back
        to VENDOR_FILES.
    :param override_defaults: Dict of values layered on top of the
        built-in defaults.
    :param force_ipv4: When not None, used verbatim instead of the
        env-var / client-config derived value.
    :param envvar_prefix: Prefix passed through to _get_os_environ when
        collecting environment variables.
    :param secure_files: Ordered list of secure file paths; falls back
        to SECURE_FILES.
    :param pw_func: Callback saved for later password prompting.
    :param session_constructor: Alternate session factory, stored as-is.
    :param app_name: Application name, stored as-is.
    :param app_version: Application version, stored as-is.
    :param load_yaml_config: When False, no config/secure/vendor files
        are searched (file lists start empty).
    :param load_envvars: When False, environment variables are not
        collected into an 'envvars' cloud.
    """
    self.log = _log.setup_logging('openstack.config')
    self._session_constructor = session_constructor
    self._app_name = app_name
    self._app_version = app_version
    self._load_envvars = load_envvars

    # File search paths are only populated when YAML loading is enabled.
    if load_yaml_config:
        self._config_files = config_files or CONFIG_FILES
        self._secure_files = secure_files or SECURE_FILES
        self._vendor_files = vendor_files or VENDOR_FILES
    else:
        self._config_files = []
        self._secure_files = []
        self._vendor_files = []

    # Explicit env-var overrides take priority over the search paths
    # (inserted at the front of the list).
    config_file_override = self._get_envvar('OS_CLIENT_CONFIG_FILE')
    if config_file_override:
        self._config_files.insert(0, config_file_override)

    secure_file_override = self._get_envvar('OS_CLIENT_SECURE_FILE')
    if secure_file_override:
        self._secure_files.insert(0, secure_file_override)

    self.defaults = self._defaults_module.get_defaults()
    if override_defaults:
        self.defaults.update(override_defaults)

    # First, use a config file if it exists where expected
    self.config_filename, self.cloud_config = self._load_config_file()
    # Secure file contents win over the regular config on merge.
    _, secure_config = self._load_secure_file()
    if secure_config:
        self.cloud_config = _merge_clouds(
            self.cloud_config, secure_config)

    # Guarantee a {'clouds': {...}} shape even with no config at all.
    if not self.cloud_config:
        self.cloud_config = {'clouds': {}}
    if 'clouds' not in self.cloud_config:
        self.cloud_config['clouds'] = {}

    # Grab ipv6 preference settings from env
    client_config = self.cloud_config.get('client', {})

    if force_ipv4 is not None:
        # If it's passed in to the constructor, honor it.
        self.force_ipv4 = force_ipv4
    else:
        # Get the backwards compat value
        prefer_ipv6 = get_boolean(
            self._get_envvar(
                'OS_PREFER_IPV6', client_config.get(
                    'prefer_ipv6', client_config.get(
                        'prefer-ipv6', True))))
        # 'broken-ipv6' is the legacy spelling of force_ipv4.
        force_ipv4 = get_boolean(
            self._get_envvar(
                'OS_FORCE_IPV4', client_config.get(
                    'force_ipv4', client_config.get(
                        'broken-ipv6', False))))

        self.force_ipv4 = force_ipv4
        if not prefer_ipv6:
            # this will only be false if someone set it explicitly
            # honor their wishes
            self.force_ipv4 = True

    # Next, process environment variables and add them to the mix
    self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars')
    if self.envvar_key in self.cloud_config['clouds']:
        # A file-based cloud with the same name as the env-var cloud is
        # ambiguous; refuse to continue.
        raise exceptions.ConfigException(
            '"{0}" defines a cloud named "{1}", but'
            ' OS_CLOUD_NAME is also set to "{1}". Please rename'
            ' either your environment based cloud, or one of your'
            ' file-based clouds.'.format(self.config_filename,
                                         self.envvar_key))

    self.default_cloud = self._get_envvar('OS_CLOUD')

    if load_envvars:
        envvars = self._get_os_environ(envvar_prefix=envvar_prefix)
        if envvars:
            self.cloud_config['clouds'][self.envvar_key] = envvars
            if not self.default_cloud:
                self.default_cloud = self.envvar_key

    if not self.default_cloud and self.cloud_config['clouds']:
        if len(self.cloud_config['clouds'].keys()) == 1:
            # If there is only one cloud just use it. This matches envvars
            # behavior and allows for much less typing.
            # TODO(mordred) allow someone to mark a cloud as "default" in
            # clouds.yaml.
            # The next/iter thing is for python3 compat where dict.keys
            # returns an iterator but in python2 it's a list.
            self.default_cloud = next(iter(
                self.cloud_config['clouds'].keys()))

    # Finally, fall through and make a cloud that starts with defaults
    # because we need somewhere to put arguments, and there are neither
    # config files or env vars
    if not self.cloud_config['clouds']:
        self.cloud_config = dict(
            clouds=dict(defaults=dict(self.defaults)))
        self.default_cloud = 'defaults'

    # Cache defaults: disabled (null backend) until a 'cache' section
    # says otherwise.
    self._cache_expiration_time = 0
    self._cache_path = CACHE_PATH
    self._cache_class = 'dogpile.cache.null'
    self._cache_arguments = {}
    self._cache_expiration = {}
    if 'cache' in self.cloud_config:
        cache_settings = self._normalize_keys(self.cloud_config['cache'])

        # expiration_time used to be 'max_age' but the dogpile setting
        # is expiration_time. Support max_age for backwards compat.
        self._cache_expiration_time = cache_settings.get(
            'expiration_time', cache_settings.get(
                'max_age', self._cache_expiration_time))

        # If cache class is given, use that. If not, but if cache time
        # is given, default to memory. Otherwise, default to nothing.
        if self._cache_expiration_time:
            self._cache_class = 'dogpile.cache.memory'
        # NOTE(review): 'class' is read from the raw (un-normalized)
        # cache dict while the other settings use the normalized copy —
        # presumably intentional for backwards compat; confirm.
        self._cache_class = self.cloud_config['cache'].get(
            'class', self._cache_class)
        self._cache_path = os.path.expanduser(
            cache_settings.get('path', self._cache_path))
        self._cache_arguments = cache_settings.get(
            'arguments', self._cache_arguments)
        self._cache_expiration = cache_settings.get(
            'expiration', self._cache_expiration)

    # Flag location to hold the peeked value of an argparse timeout value
    self._argv_timeout = False

    # Save the password callback
    # password = self._pw_callback(prompt="Password: ")
    self._pw_callback = pw_func
def wait_for_status(session, resource, status, failures, interval=None,
                    wait=None, attribute='status'):
    """Poll a resource until it reaches a desired status.

    :param session: The session to use for making this request.
    :type session: :class:`~keystoneauth1.adapter.Adapter`
    :param resource: The resource to wait on to reach the status. The
                     resource must have a status attribute specified via
                     ``attribute``.
    :type resource: :class:`~openstack.resource.Resource`
    :param status: Desired status of the resource.
    :param list failures: Statuses that would indicate the transition
                          failed such as 'ERROR'. Defaults to ['ERROR'].
    :param interval: Number of seconds to wait between checks.
                     Set to ``None`` to use the default interval.
    :param wait: Maximum number of seconds to wait for transition.
                 Set to ``None`` to wait forever.
    :param attribute: Name of the resource attribute that contains the
                      status.
    :return: The updated resource.
    :raises: :class:`~openstack.exceptions.ResourceTimeout` transition
             to status failed to occur in wait seconds.
    :raises: :class:`~openstack.exceptions.ResourceFailure` resource
             transitioned to one of the failure states.
    :raises: :class:`~AttributeError` if the resource does not have a
            status attribute
    """
    log = _log.setup_logging(__name__)

    wanted = status.lower()

    # Fast path: nothing to do if the resource is already in the
    # desired state.
    if _normalize_status(getattr(resource, attribute)) == wanted:
        return resource

    failure_states = ['ERROR'] if failures is None else failures
    failure_states = [state.lower() for state in failure_states]

    name = "{res}:{id}".format(res=resource.__class__.__name__,
                               id=resource.id)
    msg = "Timeout waiting for {name} to transition to {status}".format(
        name=name, status=status)

    # iterate_timeout raises ResourceTimeout (with msg) when `wait`
    # seconds elapse without a return/raise below.
    for _ in utils.iterate_timeout(timeout=wait, message=msg, wait=interval):
        resource = resource.get(session)
        if not resource:
            raise exceptions.ResourceFailure(
                "{name} went away while waiting for {status}".format(
                    name=name, status=status))

        observed = getattr(resource, attribute)
        normalized = _normalize_status(observed)
        if normalized == wanted:
            return resource
        if normalized in failure_states:
            raise exceptions.ResourceFailure(
                "{name} transitioned to failure state {status}".format(
                    name=name, status=observed))

        log.debug(
            'Still waiting for resource %s to reach state %s, '
            'current state is %s', name, status, observed)
def __init__(self):
    """Set up logging, auth/config plumbing and the resource caches."""
    super(_OpenStackCloudMixin, self).__init__()

    self.log = _log.setup_logging('openstack')

    # Pull the commonly-used settings off the config object once.
    self.name = self.config.name
    self.auth = self.config.get_auth_args()
    self.default_interface = self.config.get_interface()
    self.force_ipv4 = self.config.force_ipv4
    (self.verify, self.cert) = self.config.get_requests_verify_args()
    # Turn off urllib3 warnings about insecure certs if we have
    # explicitly configured requests to tell it we do not want
    # cert verification
    if not self.verify:
        self.log.debug(
            "Turning off Insecure SSL warnings since verify=False")
        category = requestsexceptions.InsecureRequestWarning
        if category:
            # InsecureRequestWarning references a Warning class or is None
            warnings.filterwarnings('ignore', category=category)

    self._disable_warnings = {}

    # Cache configuration: a dogpile.cache backend plus optional
    # per-resource expirations.
    cache_expiration_time = int(self.config.get_cache_expiration_time())
    cache_class = self.config.get_cache_class()
    cache_arguments = self.config.get_cache_arguments()

    self._resource_caches = {}

    if cache_class != 'dogpile.cache.null':
        # Real cache backend configured - enable caching.
        self.cache_enabled = True
        self._cache = self._make_cache(
            cache_class, cache_expiration_time, cache_arguments)
        expirations = self.config.get_cache_expirations()
        for expire_key in expirations.keys():
            # Only build caches for things we have list operations for
            if getattr(self, 'list_{0}'.format(expire_key), None):
                self._resource_caches[expire_key] = self._make_cache(
                    cache_class, expirations[expire_key], cache_arguments)

        self._SERVER_AGE = DEFAULT_SERVER_AGE
        self._PORT_AGE = DEFAULT_PORT_AGE
        self._FLOAT_AGE = DEFAULT_FLOAT_AGE
    else:
        # Null backend - disable caching and neutralize the cache
        # decorators applied to methods.
        self.cache_enabled = False

        def _fake_invalidate(unused):
            # No-op stand-in for the decorator-provided invalidate().
            pass

        class _FakeCache(object):
            # Minimal object satisfying the cache interface used here.
            def invalidate(self):
                pass

        # Don't cache list_servers if we're not caching things.
        # Replace this with a more specific cache configuration
        # soon.
        self._SERVER_AGE = 0
        self._PORT_AGE = 0
        self._FLOAT_AGE = 0
        self._cache = _FakeCache()
        # Undecorate cache decorated methods. Otherwise the call stacks
        # wind up being stupidly long and hard to debug
        for method in _utils._decorated_methods:
            meth_obj = getattr(self, method, None)
            if not meth_obj:
                continue
            if (hasattr(meth_obj, 'invalidate')
                    and hasattr(meth_obj, 'func')):
                # Replace the decorated method with a plain partial of
                # the undecorated function, keeping .invalidate callable.
                new_func = functools.partial(meth_obj.func, self)
                new_func.invalidate = _fake_invalidate
                setattr(self, method, new_func)

    # If server expiration time is set explicitly, use that. Otherwise
    # fall back to whatever it was before
    self._SERVER_AGE = self.config.get_cache_resource_expiration(
        'server', self._SERVER_AGE)
    self._PORT_AGE = self.config.get_cache_resource_expiration(
        'port', self._PORT_AGE)
    self._FLOAT_AGE = self.config.get_cache_resource_expiration(
        'floating_ip', self._FLOAT_AGE)

    self._container_cache = dict()
    self._file_hash_cache = dict()

    # self.__pool_executor = None

    self._raw_clients = {}

    # IPv6 support is only probed when IPv4 is not forced.
    self._local_ipv6 = (_utils.localhost_supports_ipv6()
                        if not self.force_ipv4 else False)
def __init__(self):
    """Set up logging, auth/config plumbing and the resource caches."""
    super(_OpenStackCloudMixin, self).__init__()

    self.log = _log.setup_logging('openstack')

    # Pull the commonly-used settings off the config object once.
    self.name = self.config.name
    self.auth = self.config.get_auth_args()
    self.default_interface = self.config.get_interface()
    self.force_ipv4 = self.config.force_ipv4
    (self.verify, self.cert) = self.config.get_requests_verify_args()
    # Turn off urllib3 warnings about insecure certs if we have
    # explicitly configured requests to tell it we do not want
    # cert verification
    if not self.verify:
        self.log.debug(
            "Turning off Insecure SSL warnings since verify=False")
        category = requestsexceptions.InsecureRequestWarning
        if category:
            # InsecureRequestWarning references a Warning class or is None
            warnings.filterwarnings('ignore', category=category)

    self._disable_warnings = {}

    # Cache configuration: a dogpile.cache backend plus optional
    # per-resource expirations.
    cache_expiration_time = int(self.config.get_cache_expiration_time())
    cache_class = self.config.get_cache_class()
    cache_arguments = self.config.get_cache_arguments()

    self._resource_caches = {}

    if cache_class != 'dogpile.cache.null':
        # Real cache backend configured - enable caching.
        self.cache_enabled = True
        self._cache = self._make_cache(
            cache_class, cache_expiration_time, cache_arguments)
        expirations = self.config.get_cache_expirations()
        for expire_key in expirations.keys():
            # Only build caches for things we have list operations for
            if getattr(
                    self, 'list_{0}'.format(expire_key), None):
                self._resource_caches[expire_key] = self._make_cache(
                    cache_class, expirations[expire_key], cache_arguments)

        self._SERVER_AGE = DEFAULT_SERVER_AGE
        self._PORT_AGE = DEFAULT_PORT_AGE
        self._FLOAT_AGE = DEFAULT_FLOAT_AGE
    else:
        # Null backend - disable caching and neutralize the cache
        # decorators applied to methods.
        self.cache_enabled = False

        def _fake_invalidate(unused):
            # No-op stand-in for the decorator-provided invalidate().
            pass

        class _FakeCache(object):
            # Minimal object satisfying the cache interface used here.
            def invalidate(self):
                pass

        # Don't cache list_servers if we're not caching things.
        # Replace this with a more specific cache configuration
        # soon.
        self._SERVER_AGE = 0
        self._PORT_AGE = 0
        self._FLOAT_AGE = 0
        self._cache = _FakeCache()
        # Undecorate cache decorated methods. Otherwise the call stacks
        # wind up being stupidly long and hard to debug
        for method in _utils._decorated_methods:
            meth_obj = getattr(self, method, None)
            if not meth_obj:
                continue
            if (hasattr(meth_obj, 'invalidate')
                    and hasattr(meth_obj, 'func')):
                # Replace the decorated method with a plain partial of
                # the undecorated function, keeping .invalidate callable.
                new_func = functools.partial(meth_obj.func, self)
                new_func.invalidate = _fake_invalidate
                setattr(self, method, new_func)

    # If server expiration time is set explicitly, use that. Otherwise
    # fall back to whatever it was before
    self._SERVER_AGE = self.config.get_cache_resource_expiration(
        'server', self._SERVER_AGE)
    self._PORT_AGE = self.config.get_cache_resource_expiration(
        'port', self._PORT_AGE)
    self._FLOAT_AGE = self.config.get_cache_resource_expiration(
        'floating_ip', self._FLOAT_AGE)

    self._container_cache = dict()
    self._file_hash_cache = dict()

    # self.__pool_executor = None

    self._raw_clients = {}

    # IPv6 support is only probed when IPv4 is not forced.
    self._local_ipv6 = (
        _utils.localhost_supports_ipv6()
        if not self.force_ipv4 else False)
def _filter_list(data, name_or_id, filters):
    """Filter a list by name/ID and arbitrary meta data.

    :param list data: The list of dictionary data to filter. It is expected
        that each dictionary contains an 'id' and 'name' key if a value for
        name_or_id is given.
    :param string name_or_id: The name or ID of the entity being filtered. Can
        be a glob pattern, such as 'nb01*'.
    :param filters: A dictionary of meta data to use for further filtering.
        Elements of this dictionary may, themselves, be dictionaries. Example::

            {'last_name': 'Smith', 'other': {'gender': 'Female'}}

        OR
        A string containing a jmespath expression for further filtering.
    :return: The filtered list (or, for a jmespath filter, whatever the
        expression evaluates to).
    """
    # The logger is openstack.fnmatch to allow a user/operator to
    # configure logging not to communicate about fnmatch misses
    # (they shouldn't be too spammy, but one never knows)
    log = _log.setup_logging('openstack.fnmatch')
    if name_or_id:
        # name_or_id might already be unicode
        name_or_id = _make_unicode(name_or_id)
        identifier_matches = []
        bad_pattern = False
        try:
            fn_reg = re.compile(fnmatch.translate(name_or_id))
        except re.error:
            # re.error is the public exception raised by re.compile;
            # sre_constants is private and deprecated since Python 3.12.
            # If the fnmatch re doesn't compile, then we don't care,
            # but log it in case the user DID pass a pattern but did
            # it poorly and wants to know what went wrong with their
            # search
            fn_reg = None
        for e in data:
            e_id = _make_unicode(e.get('id', None))
            e_name = _make_unicode(e.get('name', None))

            if ((e_id and e_id == name_or_id)
                    or (e_name and e_name == name_or_id)):
                identifier_matches.append(e)
            else:
                # Only try fnmatch if we don't match exactly
                if not fn_reg:
                    # If we don't have a pattern, skip this, but set the flag
                    # so that we log the bad pattern
                    bad_pattern = True
                    continue
                if ((e_id and fn_reg.match(e_id))
                        or (e_name and fn_reg.match(e_name))):
                    identifier_matches.append(e)
        if not identifier_matches and bad_pattern:
            # NOTE(review): exc_info=True here logs outside the handler,
            # so there may be no active exception at this point - confirm.
            log.debug("Bad pattern passed to fnmatch", exc_info=True)
        data = identifier_matches
    if not filters:
        return data

    # A string filter is treated as a jmespath expression.
    if isinstance(filters, six.string_types):
        return jmespath.search(filters, data)

    def _dict_filter(f, d):
        # Recursively check that every key in filter dict f matches in d.
        if not d:
            return False
        for key in f.keys():
            if isinstance(f[key], dict):
                if not _dict_filter(f[key], d.get(key, None)):
                    return False
            elif d.get(key, None) != f[key]:
                return False
        return True

    # Keep only entries matching every (possibly nested) filter key.
    filtered = []
    for e in data:
        filtered.append(e)
        for key in filters.keys():
            if isinstance(filters[key], dict):
                if not _dict_filter(filters[key], e.get(key, None)):
                    filtered.pop()
                    break
            elif e.get(key, None) != filters[key]:
                filtered.pop()
                break
    return filtered
def _filter_list(data, name_or_id, filters):
    """Filter a list by name/ID and arbitrary meta data.

    :param list data: The list of dictionary data to filter. It is expected
        that each dictionary contains an 'id' and 'name' key if a value for
        name_or_id is given.
    :param string name_or_id: The name or ID of the entity being filtered. Can
        be a glob pattern, such as 'nb01*'.
    :param filters: A dictionary of meta data to use for further filtering.
        Elements of this dictionary may, themselves, be dictionaries. Example::

            {'last_name': 'Smith', 'other': {'gender': 'Female'}}

        OR
        A string containing a jmespath expression for further filtering.
    :return: The filtered list (or, for a jmespath filter, whatever the
        expression evaluates to).
    """
    # The logger is openstack.fnmatch to allow a user/operator to
    # configure logging not to communicate about fnmatch misses
    # (they shouldn't be too spammy, but one never knows)
    log = _log.setup_logging('openstack.fnmatch')
    if name_or_id:
        # name_or_id might already be unicode
        name_or_id = _make_unicode(name_or_id)
        identifier_matches = []
        bad_pattern = False
        try:
            fn_reg = re.compile(fnmatch.translate(name_or_id))
        except re.error:
            # re.error is the public exception raised by re.compile;
            # sre_constants is private and deprecated since Python 3.12.
            # If the fnmatch re doesn't compile, then we don't care,
            # but log it in case the user DID pass a pattern but did
            # it poorly and wants to know what went wrong with their
            # search
            fn_reg = None
        for e in data:
            e_id = _make_unicode(e.get('id', None))
            e_name = _make_unicode(e.get('name', None))

            if ((e_id and e_id == name_or_id)
                    or (e_name and e_name == name_or_id)):
                identifier_matches.append(e)
            else:
                # Only try fnmatch if we don't match exactly
                if not fn_reg:
                    # If we don't have a pattern, skip this, but set the flag
                    # so that we log the bad pattern
                    bad_pattern = True
                    continue
                if ((e_id and fn_reg.match(e_id))
                        or (e_name and fn_reg.match(e_name))):
                    identifier_matches.append(e)
        if not identifier_matches and bad_pattern:
            # NOTE(review): exc_info=True here logs outside the handler,
            # so there may be no active exception at this point - confirm.
            log.debug("Bad pattern passed to fnmatch", exc_info=True)
        data = identifier_matches
    if not filters:
        return data

    # A string filter is treated as a jmespath expression.
    if isinstance(filters, six.string_types):
        return jmespath.search(filters, data)

    def _dict_filter(f, d):
        # Recursively check that every key in filter dict f matches in d.
        if not d:
            return False
        for key in f.keys():
            if isinstance(f[key], dict):
                if not _dict_filter(f[key], d.get(key, None)):
                    return False
            elif d.get(key, None) != f[key]:
                return False
        return True

    # Keep only entries matching every (possibly nested) filter key.
    filtered = []
    for e in data:
        filtered.append(e)
        for key in filters.keys():
            if isinstance(filters[key], dict):
                if not _dict_filter(filters[key], e.get(key, None)):
                    filtered.pop()
                    break
            elif e.get(key, None) != filters[key]:
                filtered.pop()
                break
    return filtered
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import warnings

import os_service_types

from openstack import _log
from openstack import exceptions

__all__ = [
    'ServiceDescription',
]

_logger = _log.setup_logging('openstack')
_service_type_manager = os_service_types.ServiceTypes()


class _ServiceDisabledProxyShim(object):
    """Stand-in proxy that raises ServiceDisabledException on any access.

    Installed in place of a real service proxy when the service's
    configuration could not be loaded; the stored reason is included
    in the exception message.
    """

    def __init__(self, service_type, reason):
        self.service_type = service_type
        self.reason = reason

    def __getattr__(self, name):
        # Any attribute access signals that the caller tried to use a
        # disabled service; explain why via the recorded reason.
        message = (
            "Service '{service_type}' is disabled because its configuration "
            "could not be loaded. {reason}".format(
                service_type=self.service_type, reason=self.reason or ''))
        raise exceptions.ServiceDisabledException(message)
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import warnings import os_service_types from openstack import _log from openstack import service_description _logger = _log.setup_logging('openstack') _service_type_manager = os_service_types.ServiceTypes() _DOC_TEMPLATE = ( ":class:`{class_name}` for {service_type} aka {project}") _PROXY_TEMPLATE = """Proxy for {service_type} aka {project} This proxy object could be an instance of {class_doc_strings} depending on client configuration and which version of the service is found on remotely on the cloud. """ class ConnectionMeta(type): def __new__(meta, name, bases, dct): for service in _service_type_manager.services: