def _retrieve_key(self, location):
    """Look up and return the boto ``Key`` for the image at *location*.

    :param location: `glance_store.location.Location` describing the image
    :returns: the boto ``Key`` object for the stored image data
    """
    loc = location.store_location
    host, port = netutils.parse_host_port(loc.s3serviceurl, 80)
    from boto.s3.connection import S3Connection

    url_format = self.conf.glance_store.s3_store_bucket_url_format
    fmt = get_calling_format(s3_store_bucket_url_format=url_format)
    conn = S3Connection(loc.accesskey, loc.secretkey,
                        host=host, port=port,
                        is_secure=(loc.scheme == 's3+https'),
                        calling_format=fmt)

    key = get_key(get_bucket(conn, loc.bucket), loc.key)

    details = {'s3_host': loc.s3serviceurl,
               'accesskey': loc.accesskey,
               'bucket': loc.bucket,
               'obj_name': loc.key}
    LOG.debug("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
              "access_key=%(accesskey)s, bucket=%(bucket)s, "
              "key=%(obj_name)s)" % details)
    return key
def delete(self, location, context=None):
    """Delete the image object that *location* points at in S3.

    :location `glance_store.location.Location` object, supplied
              from glance_store.location.get_location_from_uri()
    :raises NotFound if image does not exist
    """
    loc = location.store_location
    host, port = netutils.parse_host_port(loc.s3serviceurl, 80)
    from boto.s3.connection import S3Connection

    url_format = self.conf.glance_store.s3_store_bucket_url_format
    fmt = get_calling_format(s3_store_bucket_url_format=url_format)
    conn = S3Connection(loc.accesskey, loc.secretkey,
                        host=host, port=port,
                        is_secure=(loc.scheme == 's3+https'),
                        calling_format=fmt)

    key = get_key(get_bucket(conn, loc.bucket), loc.key)

    details = {'s3_host': loc.s3serviceurl,
               'accesskey': loc.accesskey,
               'bucket': loc.bucket,
               'obj_name': loc.key}
    LOG.debug(_("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % details)
    return key.delete()
def _retrieve_key(self, location):
    """Return the boto ``Key`` object backing the image at *location*."""
    store_loc = location.store_location
    s3_host, s3_port = netutils.parse_host_port(store_loc.s3serviceurl, 80)
    from boto.s3.connection import S3Connection

    bucket_url_format = self.conf.glance_store.s3_store_bucket_url_format
    calling_format = get_calling_format(
        s3_store_bucket_url_format=bucket_url_format)
    connection = S3Connection(store_loc.accesskey, store_loc.secretkey,
                              host=s3_host, port=s3_port,
                              is_secure=(store_loc.scheme == 's3+https'),
                              calling_format=calling_format)

    bucket_obj = get_bucket(connection, store_loc.bucket)
    key = get_key(bucket_obj, store_loc.key)

    LOG.debug("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
              "access_key=%(accesskey)s, bucket=%(bucket)s, "
              "key=%(obj_name)s)" % {'s3_host': store_loc.s3serviceurl,
                                     'accesskey': store_loc.accesskey,
                                     'bucket': store_loc.bucket,
                                     'obj_name': store_loc.key})
    return key
def get_port_from_address(address):
    """Extract the port number from *address*.

    ``urlparse`` handles URL-style values such as http://localhost:8000
    but not bare host:port values such as 0.0.0.0:8000; ``netutils``
    handles the latter but not the former, so the two are combined.
    """
    parsed = urlparse.urlparse(address)
    if parsed.port:
        return parsed.port
    return netutils.parse_host_port(address)[1]
def __init__(self, parsed_url):
    """Set the publisher's target host/port and the metric name prefix."""
    self.host, self.port = network_utils.parse_host_port(
        parsed_url.netloc,
        default_port=cfg.CONF.graphite.default_port)

    # Optionally qualify the prefix with the local (short) hostname.
    prefix = cfg.CONF.graphite.prefix
    if cfg.CONF.graphite.hypervisor_in_prefix:
        shortname = socket.gethostname().split('.')[0]
        prefix = prefix + shortname + "."
    self.prefix = prefix
def __init__(self, parsed_url):
    """Initialize the target endpoint and metric prefix from config."""
    self.host, self.port = network_utils.parse_host_port(
        parsed_url.netloc,
        default_port=cfg.CONF.graphite.default_port)

    if cfg.CONF.graphite.hypervisor_in_prefix:
        # Embed the unqualified local hostname into the prefix.
        self.prefix = "%s%s." % (cfg.CONF.graphite.prefix,
                                 socket.gethostname().split('.')[0])
    else:
        self.prefix = cfg.CONF.graphite.prefix
def test_parse_host_port(self):
    """parse_host_port splits host:port, honouring IPv6 brackets."""
    cases = [
        (('server01:80',), {}, ('server01', 80)),
        (('server01',), {}, ('server01', None)),
        (('server01',), {'default_port': 1234}, ('server01', 1234)),
        (('[::1]:80',), {}, ('::1', 80)),
        (('[::1]',), {}, ('::1', None)),
        (('[::1]',), {'default_port': 1234}, ('::1', 1234)),
        (('2001:db8:85a3::8a2e:370:7334',), {'default_port': 1234},
         ('2001:db8:85a3::8a2e:370:7334', 1234)),
    ]
    for args, kwargs, expected in cases:
        self.assertEqual(expected,
                         netutils.parse_host_port(*args, **kwargs))
def __init__(self, conf, url):
    """Prepare per-broker connection parameters and connect to qpid.

    Broker endpoints come either from *url* (TransportURL hosts) or,
    as a fallback, from the legacy ``qpid_hosts`` configuration option.

    :param conf: configuration object providing the qpid_* options
    :param url: transport URL; its ``hosts`` list takes precedence
    :raises ImportError: if qpid.messaging could not be imported
    """
    if not qpid_messaging:
        raise ImportError("Failed to import qpid.messaging")

    self.connection = None
    self.session = None
    self.consumers = {}
    self.conf = conf
    self._consume_loop_stopped = False

    self.brokers_params = []
    if url.hosts:
        for host in url.hosts:
            params = {
                'username': host.username or '',
                'password': host.password or '',
            }
            if host.port is not None:
                params['host'] = '%s:%d' % (host.hostname, host.port)
            else:
                params['host'] = host.hostname
            self.brokers_params.append(params)
    else:
        # Old configuration format
        for adr in self.conf.qpid_hosts:
            hostname, port = netutils.parse_host_port(
                adr, default_port=5672)

            # IPv6 addresses must be bracketed before ':port' is appended.
            if ':' in hostname:
                hostname = '[' + hostname + ']'

            params = {
                'host': '%s:%d' % (hostname, port),
                'username': self.conf.qpid_username,
                'password': self.conf.qpid_password,
            }
            self.brokers_params.append(params)

    # Randomize the broker order once; self.brokers then cycles over
    # the shuffled list indefinitely.
    random.shuffle(self.brokers_params)
    self.brokers = itertools.cycle(self.brokers_params)

    self._initial_pid = os.getpid()
    self.reconnect()
def __init__(self, conf, url):
    """Collect broker endpoints and open the initial qpid connection.

    Endpoints are taken from the TransportURL's host list when present;
    otherwise the legacy ``qpid_hosts`` option is used.

    :param conf: configuration object providing the qpid_* options
    :param url: transport URL whose ``hosts`` list, if any, wins
    :raises ImportError: if qpid.messaging could not be imported
    """
    if not qpid_messaging:
        raise ImportError("Failed to import qpid.messaging")

    self.connection = None
    self.session = None
    self.consumers = {}
    self.conf = conf
    self._consume_loop_stopped = False

    self.brokers_params = []
    if url.hosts:
        for host in url.hosts:
            params = {
                'username': host.username or '',
                'password': host.password or '',
            }
            if host.port is not None:
                params['host'] = '%s:%d' % (host.hostname, host.port)
            else:
                params['host'] = host.hostname
            self.brokers_params.append(params)
    else:
        # Old configuration format
        for adr in self.conf.qpid_hosts:
            hostname, port = netutils.parse_host_port(adr,
                                                      default_port=5672)

            # Bracket IPv6 literals so the appended ':port' is unambiguous.
            if ':' in hostname:
                hostname = '[' + hostname + ']'

            params = {
                'host': '%s:%d' % (hostname, port),
                'username': self.conf.qpid_username,
                'password': self.conf.qpid_password,
            }
            self.brokers_params.append(params)

    # Shuffle once, then cycle endlessly over the broker list.
    random.shuffle(self.brokers_params)
    self.brokers = itertools.cycle(self.brokers_params)

    self._initial_pid = os.getpid()
    self.reconnect()
def test_parse_host_port(self):
    """Exercise host/port splitting for hostnames and IPv6 literals."""
    def check(expected, *args, **kwargs):
        self.assertEqual(expected,
                         netutils.parse_host_port(*args, **kwargs))

    check(('server01', 80), 'server01:80')
    check(('server01', None), 'server01')
    check(('server01', 1234), 'server01', default_port=1234)
    # Bracketed IPv6 forms.
    check(('::1', 80), '[::1]:80')
    check(('::1', None), '[::1]')
    check(('::1', 1234), '[::1]', default_port=1234)
    # A bare (unbracketed) IPv6 address; the default port applies.
    check(('2001:db8:85a3::8a2e:370:7334', 1234),
          '2001:db8:85a3::8a2e:370:7334', default_port=1234)
def delete(self, location, context=None):
    """Remove the S3 object referenced by *location*.

    :location `glance_store.location.Location` object, supplied
              from glance_store.location.get_location_from_uri()
    :raises NotFound if image does not exist
    """
    store_loc = location.store_location
    s3_host, s3_port = netutils.parse_host_port(store_loc.s3serviceurl, 80)
    from boto.s3.connection import S3Connection

    bucket_url_format = self.conf.glance_store.s3_store_bucket_url_format
    calling_format = get_calling_format(
        s3_store_bucket_url_format=bucket_url_format)
    connection = S3Connection(store_loc.accesskey, store_loc.secretkey,
                              host=s3_host, port=s3_port,
                              is_secure=(store_loc.scheme == 's3+https'),
                              calling_format=calling_format)

    bucket_obj = get_bucket(connection, store_loc.bucket)
    key = get_key(bucket_obj, store_loc.key)

    msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
            "access_key=%(accesskey)s, bucket=%(bucket)s, "
            "key=%(obj_name)s)") % {'s3_host': store_loc.s3serviceurl,
                                    'accesskey': store_loc.accesskey,
                                    'bucket': store_loc.bucket,
                                    'obj_name': store_loc.key}
    LOG.debug(msg)
    return key.delete()
def parse_valid_host_port(host_port):
    """Validate a "host:port" string and return it as a (host, int) tuple.

    Accepts [IPv6]:port, IPv4:port and hostname/FQDN:port forms.

    :raises ValueError: if either the host or the port part is invalid
    """
    try:
        try:
            host, port = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError(_('Host and port "%s" is not valid.')
                             % host_port)

        if not is_valid_port(port):
            raise ValueError(_('Port "%s" is not valid.') % port)

        # First check for valid IPv6 and IPv4 addresses, then a generic
        # hostname. Failing those, if the host includes a period, then this
        # should pass a very generic FQDN check. The FQDN check for letters
        # at the tail end will weed out any hilariously absurd IPv4
        # addresses.
        valid_host = (is_valid_ipv6(host) or is_valid_ipv4(host) or
                      is_valid_hostname(host) or is_valid_fqdn(host))
        if not valid_host:
            raise ValueError(_('Host "%s" is not valid.') % host)
    except Exception as ex:
        raise ValueError(
            _('%s '
              'Please specify a host:port pair, where host is an '
              'IPv4 address, IPv6 address, hostname, or FQDN. If '
              'using an IPv6 address, enclose it in brackets '
              'separately from the port (i.e., '
              '"[fe80::a:b:c]:9876").') % ex)

    return (host, int(port))
def parse_valid_host_port(host_port):
    """
    Given a "host:port" string, attempts to parse it as intelligently as
    possible to determine if it is valid. This includes IPv6 [host]:port
    form, IPv4 ip:port form, and hostname:port or fqdn:port form.

    Invalid inputs will raise a ValueError, while valid inputs will return
    a (host, port) tuple where the port will always be of type int.
    """
    try:
        try:
            # netutils does the raw splitting; any failure is normalized
            # below into a ValueError mentioning the original input.
            host, port = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError(_('Host and port "%s" is not valid.')
                             % host_port)

        if not is_valid_port(port):
            raise ValueError(_('Port "%s" is not valid.') % port)

        # First check for valid IPv6 and IPv4 addresses, then a generic
        # hostname. Failing those, if the host includes a period, then this
        # should pass a very generic FQDN check. The FQDN check for letters
        # at the tail end will weed out any hilariously absurd IPv4
        # addresses.
        if not (is_valid_ipv6(host) or is_valid_ipv4(host) or
                is_valid_hostname(host) or is_valid_fqdn(host)):
            raise ValueError(_('Host "%s" is not valid.') % host)
    except Exception as ex:
        # Every failure is re-raised as one ValueError with usage guidance.
        raise ValueError(_('%s '
                           'Please specify a host:port pair, where host is '
                           'an '
                           'IPv4 address, IPv6 address, hostname, or FQDN. '
                           'If '
                           'using an IPv6 address, enclose it in brackets '
                           'separately from the port (i.e., '
                           '"[fe80::a:b:c]:9876").') % ex)
    return (host, int(port))
def __init__(self, conf, url):
    """Build the kombu connection URL and connect to the AMQP server.

    The URL is derived from, in order of precedence: the deprecated
    ``fake_rabbit`` option (in-memory transport), the TransportURL
    hosts, a bare ``kombu+<transport>`` scheme with no hosts, or the
    legacy ``rabbit_*`` configuration options.
    """
    self.consumers = []
    self.consumer_num = itertools.count(1)
    self.conf = conf
    self.max_retries = self.conf.rabbit_max_retries
    # Try forever?
    if self.max_retries <= 0:
        self.max_retries = None
    self.interval_start = self.conf.rabbit_retry_interval
    self.interval_stepping = self.conf.rabbit_retry_backoff
    # max retry-interval = 30 seconds
    self.interval_max = 30

    self._ssl_params = self._fetch_ssl_params()
    self._login_method = self.conf.rabbit_login_method

    if url.virtual_host is not None:
        virtual_host = url.virtual_host
    else:
        virtual_host = self.conf.rabbit_virtual_host

    self._url = ''
    if self.conf.fake_rabbit:
        LOG.warn("Deprecated: fake_rabbit option is deprecated, set "
                 "rpc_backend to kombu+memory or use the fake "
                 "driver instead.")
        self._url = 'memory://%s/' % virtual_host
    elif url.hosts:
        for host in url.hosts:
            # Map the oslo transport name to the kombu one: drop the
            # 'kombu+' prefix, then translate 'rabbit' to 'amqp'.
            # NOTE: the two replace calls must be chained; applying the
            # second one to url.transport again (as the code previously
            # did) left 'kombu+rabbit' as the invalid 'kombu+amqp'.
            transport = url.transport.replace('kombu+', '')
            transport = transport.replace('rabbit', 'amqp')
            self._url += '%s%s://%s:%s@%s:%s/%s' % (
                ";" if self._url else '',
                transport,
                parse.quote(host.username or ''),
                parse.quote(host.password or ''),
                host.hostname or '', str(host.port or 5672),
                virtual_host)
    elif url.transport.startswith('kombu+'):
        # NOTE(sileht): url have a + but no hosts
        # (like kombu+memory:///), pass it to kombu as-is
        transport = url.transport.replace('kombu+', '')
        self._url = "%s://%s" % (transport, virtual_host)
    else:
        # Legacy configuration: one amqp:// chunk per rabbit_hosts entry.
        for adr in self.conf.rabbit_hosts:
            hostname, port = netutils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)
            self._url += '%samqp://%s:%s@%s:%s/%s' % (
                ";" if self._url else '',
                parse.quote(self.conf.rabbit_userid),
                parse.quote(self.conf.rabbit_password),
                hostname, port,
                virtual_host)

    self._initial_pid = os.getpid()

    self.do_consume = True
    self._consume_loop_stopped = False

    self.channel = None
    self.connection = kombu.connection.Connection(
        self._url, ssl=self._ssl_params, login_method=self._login_method,
        failover_strategy="shuffle")

    LOG.info(_('Connecting to AMQP server on %(hostname)s:%(port)d'),
             {'hostname': self.connection.hostname,
              'port': self.connection.port})
    # NOTE(sileht): just ensure the connection is setuped at startup
    self.ensure(error_callback=None,
                method=lambda channel: True)
    LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
             {'hostname': self.connection.hostname,
              'port': self.connection.port})

    if self._url.startswith('memory://'):
        # Kludge to speed up tests.
        self.connection.transport.polling_interval = 0.0
def __init__(self, parsed_url):
    """Resolve the collector endpoint and create the UDP send socket."""
    endpoint = netutils.parse_host_port(
        parsed_url.netloc, default_port=cfg.CONF.collector.udp_port)
    self.host, self.port = endpoint
    # IPv4 datagram (UDP) socket.
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def add(self, image_id, image_file, image_size, context=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes

    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed

    S3 writes the image data using the scheme:
        s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
    where:
        <USER> = ``s3_store_user``
        <KEY> = ``s3_store_key``
        <S3_HOST> = ``s3_store_host``
        <BUCKET> = ``s3_store_bucket``
        <ID> = The id of the image being added

    NOTE(review): the placeholder names in the list above (<USER>,
    <KEY>, <S3_HOST>, <ID>) do not match those in the scheme line
    (<ACCESS_KEY>, <SECRET_KEY>, <S3_URL>, <OBJ>) — confirm the
    intended option names and align the two.
    """
    from boto.s3.connection import S3Connection

    loc = StoreLocation({'scheme': self.scheme,
                         'bucket': self.bucket,
                         'key': image_id,
                         's3serviceurl': self.full_s3_host,
                         'accesskey': self.access_key,
                         'secretkey': self.secret_key}, self.conf)

    s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
    uformat = self.conf.glance_store.s3_store_bucket_url_format
    calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

    s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                           host=s3host, port=s3port,
                           is_secure=(loc.scheme == 's3+https'),
                           calling_format=calling_format)

    create_bucket_if_missing(self.conf, self.bucket, s3_conn)

    bucket_obj = get_bucket(s3_conn, self.bucket)
    obj_name = str(image_id)

    # Refuse to overwrite an existing object with the same id.
    key = bucket_obj.get_key(obj_name)
    if key and key.exists():
        raise exceptions.Duplicate(message=_("S3 already has an image at "
                                             "location %s") %
                                   self._sanitize(loc.get_uri()))

    msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
            "access_key=%(access_key)s, bucket=%(bucket)s, "
            "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                     'access_key': self.access_key,
                                     'bucket': self.bucket,
                                     'obj_name': obj_name})
    LOG.debug(msg)
    LOG.debug("Uploading an image file to S3 for %s" %
              self._sanitize(loc.get_uri()))

    # Single-part upload below the large-object threshold, else multipart.
    if image_size < self.s3_store_large_object_size:
        return self.add_singlepart(image_file, bucket_obj, obj_name, loc)
    else:
        return self.add_multipart(image_file, image_size, bucket_obj,
                                  obj_name, loc)
def __init__(self, conf, url):
    """Assemble per-broker kombu connection parameters and connect.

    Broker endpoints come from the TransportURL hosts when present,
    otherwise from the legacy ``rabbit_hosts`` configuration option.
    """
    self.consumers = []
    self.conf = conf
    self.max_retries = self.conf.rabbit_max_retries
    # Try forever?
    if self.max_retries <= 0:
        self.max_retries = None
    self.interval_start = self.conf.rabbit_retry_interval
    self.interval_stepping = self.conf.rabbit_retry_backoff
    # max retry-interval = 30 seconds
    self.interval_max = 30
    self.memory_transport = False

    ssl_params = self._fetch_ssl_params()

    if url.virtual_host is not None:
        virtual_host = url.virtual_host
    else:
        virtual_host = self.conf.rabbit_virtual_host

    self.brokers_params = []
    if url.hosts:
        for host in url.hosts:
            params = {
                'hostname': host.hostname,
                'port': host.port or 5672,
                'userid': host.username or '',
                'password': host.password or '',
                'login_method': self.conf.rabbit_login_method,
                'virtual_host': virtual_host
            }
            # fake_rabbit swaps in kombu's in-memory transport.
            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params
            self.brokers_params.append(params)
    else:
        # Old configuration format
        for adr in self.conf.rabbit_hosts:
            hostname, port = netutils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'login_method': self.conf.rabbit_login_method,
                'virtual_host': virtual_host
            }

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            self.brokers_params.append(params)

    # Shuffle once; self.brokers cycles endlessly over the result.
    random.shuffle(self.brokers_params)
    self.brokers = itertools.cycle(self.brokers_params)
    self.memory_transport = self.conf.fake_rabbit

    self.connection = None
    self.do_consume = None
    self.reconnect()
def __init__(self, conf, url):
    """Build the kombu connection URL and connect to the AMQP server.

    The URL is derived from, in order of precedence: the ``fake_rabbit``
    option (in-memory transport), the TransportURL hosts, or the legacy
    ``rabbit_*`` configuration options.
    """
    self.consumers = []
    self.consumer_num = itertools.count(1)
    self.conf = conf
    self.max_retries = self.conf.rabbit_max_retries
    # Try forever?
    if self.max_retries <= 0:
        self.max_retries = None
    self.interval_start = self.conf.rabbit_retry_interval
    self.interval_stepping = self.conf.rabbit_retry_backoff
    # max retry-interval = 30 seconds
    self.interval_max = 30

    self._ssl_params = self._fetch_ssl_params()
    self._login_method = self.conf.rabbit_login_method

    if url.virtual_host is not None:
        virtual_host = url.virtual_host
    else:
        virtual_host = self.conf.rabbit_virtual_host

    self._url = ''
    if self.conf.fake_rabbit:
        # TODO(sileht): use memory://virtual_host into
        # unit tests to remove cfg.CONF.fake_rabbit
        self._url = 'memory://%s/' % virtual_host
    elif url.hosts:
        for host in url.hosts:
            # Map the oslo transport name to the kombu one: drop the
            # 'kombu+' prefix, then translate 'rabbit' to 'amqp'.
            # NOTE: the two replace calls must be chained; applying the
            # second one to url.transport again (as the code previously
            # did) left 'kombu+rabbit' as the invalid 'kombu+amqp'.
            transport = url.transport.replace('kombu+', '')
            transport = transport.replace('rabbit', 'amqp')
            self._url += '%s%s://%s:%s@%s:%s/%s' % (
                ";" if self._url else '',
                transport,
                parse.quote(host.username or ''),
                parse.quote(host.password or ''),
                host.hostname or '', str(host.port or 5672),
                virtual_host)
    else:
        # Legacy configuration: one amqp:// chunk per rabbit_hosts entry.
        for adr in self.conf.rabbit_hosts:
            hostname, port = netutils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)
            self._url += '%samqp://%s:%s@%s:%s/%s' % (
                ";" if self._url else '',
                parse.quote(self.conf.rabbit_userid),
                parse.quote(self.conf.rabbit_password),
                hostname, port,
                virtual_host)

    self.do_consume = True

    self.channel = None
    self.connection = kombu.connection.Connection(
        self._url, ssl=self._ssl_params, login_method=self._login_method,
        failover_strategy="shuffle")

    LOG.info(_('Connecting to AMQP server on %(hostname)s:%(port)d'),
             {'hostname': self.connection.hostname,
              'port': self.connection.port})
    # NOTE(sileht): just ensure the connection is setuped at startup
    self.ensure(error_callback=None,
                method=lambda channel: True)
    LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
             {'hostname': self.connection.hostname,
              'port': self.connection.port})

    if self.conf.fake_rabbit:
        # Kludge to speed up tests.
        self.connection.transport.polling_interval = 0.0
def add(self, image_id, image_file, image_size, context=None):
    """Write an image to S3 and return info about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes

    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed

    The stored object is addressed by a URI of the form
    s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>, where
    <OBJ> is the id of the image being added.
    """
    from boto.s3.connection import S3Connection

    store_loc = StoreLocation({'scheme': self.scheme,
                               'bucket': self.bucket,
                               'key': image_id,
                               's3serviceurl': self.full_s3_host,
                               'accesskey': self.access_key,
                               'secretkey': self.secret_key}, self.conf)

    s3_host, s3_port = netutils.parse_host_port(store_loc.s3serviceurl, 80)
    bucket_url_format = self.conf.glance_store.s3_store_bucket_url_format
    calling_format = get_calling_format(
        s3_store_bucket_url_format=bucket_url_format)

    connection = S3Connection(store_loc.accesskey, store_loc.secretkey,
                              host=s3_host, port=s3_port,
                              is_secure=(store_loc.scheme == 's3+https'),
                              calling_format=calling_format)
    create_bucket_if_missing(self.conf, self.bucket, connection)

    bucket_obj = get_bucket(connection, self.bucket)
    obj_name = str(image_id)

    # Refuse to overwrite an existing object with the same id.
    existing = bucket_obj.get_key(obj_name)
    if existing and existing.exists():
        raise exceptions.Duplicate(message=_("S3 already has an image at "
                                             "location %s") %
                                   self._sanitize(store_loc.get_uri()))

    LOG.debug(_("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % {'s3_host': self.s3_host,
                                        'access_key': self.access_key,
                                        'bucket': self.bucket,
                                        'obj_name': obj_name})
    LOG.debug("Uploading an image file to S3 for %s" %
              self._sanitize(store_loc.get_uri()))

    # Single-part upload below the large-object threshold, else multipart.
    if image_size < self.s3_store_large_object_size:
        return self.add_singlepart(image_file, bucket_obj, obj_name,
                                   store_loc)
    return self.add_multipart(image_file, image_size, bucket_obj,
                              obj_name, store_loc)