Example #1
    def _create_connection(self, loc):
        from boto.s3.connection import S3Connection

        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)
        use_proxy = self.conf.glance_store.s3_store_enable_proxy

        if use_proxy:
            proxy_host = self._option_get('s3_store_proxy_host')
            proxy_user = self.conf.glance_store.s3_store_proxy_user
            proxy_pass = self.conf.glance_store.s3_store_proxy_password
            proxy_port = self.conf.glance_store.s3_store_proxy_port

            return S3Connection(loc.accesskey,
                                loc.secretkey,
                                proxy=proxy_host,
                                proxy_port=proxy_port,
                                proxy_user=proxy_user,
                                proxy_pass=proxy_pass,
                                is_secure=(loc.scheme == 's3+https'),
                                calling_format=calling_format)

        return S3Connection(loc.accesskey,
                            loc.secretkey,
                            host=s3host,
                            port=s3port,
                            is_secure=(loc.scheme == 's3+https'),
                            calling_format=calling_format)
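
A quick, hedged illustration of the parse_host_port() call above (host names are invented, and the standard oslo_utils.netutils import is assumed): the positional 80 is the default_port argument, so it only applies when the S3 service URL carries no explicit port.

    from oslo_utils import netutils

    netutils.parse_host_port('s3.example.com', 80)        # ('s3.example.com', 80)
    netutils.parse_host_port('s3.example.com:8080', 80)   # ('s3.example.com', 8080)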
Example #2
    def _retrieve_key(self, location):
        loc = location.store_location
        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        from boto.s3.connection import S3Connection

        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=s3host, port=s3port,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = ("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
               "access_key=%(accesskey)s, bucket=%(bucket)s, "
               "key=%(obj_name)s)" % ({'s3_host': loc.s3serviceurl,
                                       'accesskey': loc.accesskey,
                                       'bucket': loc.bucket,
                                       'obj_name': loc.key}))
        LOG.debug(msg)

        return key
Example #3
    def __init__(self, parsed_url):
        self.kafka_client = None
        self.kafka_server = None

        self.host, self.port = netutils.parse_host_port(
            parsed_url.netloc, default_port=9092)

        self.local_queue = []

        params = urlparse.parse_qs(parsed_url.query)
        self.topic = params.get('topic', ['ceilometer'])[-1]
        self.policy = params.get('policy', ['default'])[-1]
        self.max_queue_length = int(params.get(
            'max_queue_length', [1024])[-1])
        self.max_retry = int(params.get('max_retry', [100])[-1])

        if self.policy in ['default', 'drop', 'queue']:
            LOG.info(('Publishing policy set to %s') % self.policy)
        else:
            LOG.warn(('Publishing policy is unknown (%s), forcing to default')
                     % self.policy)
            self.policy = 'default'

        try:
            self._get_client()
            self._get_server()
        except Exception as e:
            LOG.exception("Failed to connect to Kafka service: %s", e)
Example #4
    def _retrieve_key(self, location):
        loc = location.store_location
        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        from boto.s3.connection import S3Connection

        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey,
                               loc.secretkey,
                               host=s3host,
                               port=s3port,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = ("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
               "access_key=%(accesskey)s, bucket=%(bucket)s, "
               "key=%(obj_name)s)" % ({
                   's3_host': loc.s3serviceurl,
                   'accesskey': loc.accesskey,
                   'bucket': loc.bucket,
                   'obj_name': loc.key
               }))
        LOG.debug(msg)

        return key
Example #5
    def __init__(self, parsed_url):
        self.kafka_client = None
        self.kafka_server = None

        self.host, self.port = netutils.parse_host_port(
            parsed_url.netloc, default_port=9092)

        self.local_queue = []

        params = urlparse.parse_qs(parsed_url.query)
        self.topic = params.get('topic', ['ceilometer'])[-1]
        self.policy = params.get('policy', ['default'])[-1]
        self.max_queue_length = int(params.get(
            'max_queue_length', [1024])[-1])
        self.max_retry = int(params.get('max_retry', [100])[-1])

        if self.policy in ['default', 'drop', 'queue']:
            LOG.info(('Publishing policy set to %s') % self.policy)
        else:
            LOG.warn(('Publishing policy is unknown (%s), forcing to default')
                     % self.policy)
            self.policy = 'default'

        try:
            self._get_client()
            self._get_server()
        except Exception as e:
            LOG.exception("Failed to connect to Kafka service: %s", e)
Example #6
    def delete(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance_store.location.Location` object, supplied
                  from glance_store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        loc = location.store_location
        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        from boto.s3.connection import S3Connection

        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=s3host, port=s3port,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        LOG.debug(msg)

        return key.delete()
Example #7
    def _create_connection(self, loc):
        from boto.s3.connection import S3Connection

        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)
        use_proxy = self.conf.glance_store.s3_store_enable_proxy

        if use_proxy:
            proxy_host = self._option_get('s3_store_proxy_host')
            proxy_user = self.conf.glance_store.s3_store_proxy_user
            proxy_pass = self.conf.glance_store.s3_store_proxy_password
            proxy_port = self.conf.glance_store.s3_store_proxy_port

            return S3Connection(loc.accesskey, loc.secretkey,
                                proxy=proxy_host,
                                proxy_port=proxy_port,
                                proxy_user=proxy_user,
                                proxy_pass=proxy_pass,
                                is_secure=(loc.scheme == 's3+https'),
                                calling_format=calling_format)

        return S3Connection(loc.accesskey, loc.secretkey,
                            host=s3host, port=s3port,
                            is_secure=(loc.scheme == 's3+https'),
                            calling_format=calling_format)
Example #8
    def __init__(self, conf, parsed_url):
        super(KafkaBrokerPublisher, self).__init__(conf, parsed_url)
        options = urlparse.parse_qs(parsed_url.query)

        self._producer = None
        self._host, self._port = netutils.parse_host_port(parsed_url.netloc, default_port=9092)
        self._topic = options.get("topic", ["ceilometer"])[-1]
        self.max_retry = int(options.get("max_retry", [100])[-1])
Example #9
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc, default_port=cfg.CONF.collector.udp_port)
     if netutils.is_valid_ipv6(self.host):
         addr_family = socket.AF_INET6
     else:
         addr_family = socket.AF_INET
     self.socket = socket.socket(addr_family, socket.SOCK_DGRAM)
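
A hedged note on the branch above (addresses are illustrative): parse_host_port() strips the brackets from a bracketed IPv6 netloc, so the bare address can be fed to is_valid_ipv6() to pick the socket family.

    netutils.parse_host_port('[::1]:4952')    # ('::1', 4952)
    netutils.is_valid_ipv6('::1')             # True  -> AF_INET6
    netutils.is_valid_ipv6('192.0.2.7')       # False -> AF_INET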
Example #10
def get_port_from_address(address):
    parse_result = urlparse.urlparse(address)
    # urlparse does not parse values like 0.0.0.0:8000, and
    # netutils does not parse values like http://localhost:8000,
    # so a combined approach is used.
    if parse_result.port:
        return parse_result.port
    else:
        return netutils.parse_host_port(address)[1]
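
To make the comment above concrete, a hedged demonstration (values invented) of the two failure modes the function works around: urlparse() finds no netloc in a scheme-less host:port string, while parse_host_port() leaves a full URL with a scheme unsplit.

    from oslo_utils import netutils
    from six.moves.urllib import parse as urlparse

    urlparse.urlparse('0.0.0.0:8000').port              # None
    netutils.parse_host_port('0.0.0.0:8000')            # ('0.0.0.0', 8000)
    urlparse.urlparse('http://localhost:8000').port     # 8000
    netutils.parse_host_port('http://localhost:8000')   # ('http://localhost:8000', None)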
Example #11
    def __init__(self, parsed_url):
        super(KafkaBrokerPublisher, self).__init__(parsed_url)
        options = urlparse.parse_qs(parsed_url.query)

        self._producer = None
        self._host, self._port = netutils.parse_host_port(parsed_url.netloc,
                                                          default_port=9092)
        self._topic = options.get('topic', ['ceilometer'])[-1]
        self.max_retry = int(options.get('max_retry', [100])[-1])
Example #12
def get_port_from_address(address):
    parse_result = urlparse.urlparse(address)
    # urlparse does not parse values like 0.0.0.0:8000, and
    # netutils does not parse values like http://localhost:8000,
    # so a combined approach is used.
    if parse_result.port:
        return parse_result.port
    else:
        return netutils.parse_host_port(address)[1]
Example #13
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc,
         default_port=cfg.CONF.collector.udp_port)
     if netutils.is_valid_ipv6(self.host):
         addr_family = socket.AF_INET6
     else:
         addr_family = socket.AF_INET
     self.socket = socket.socket(addr_family,
                                 socket.SOCK_DGRAM)
Example #14
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc, default_port=cfg.CONF.graphite.default_port)
     self.hostname = socket.gethostname().split('.')[0]
     self.prefix_account = "testacct2.cloud." + self.hostname
     self.ks = self._get_keystone()
     if cfg.CONF.graphite.hypervisor_in_prefix:
         self.prefix = (cfg.CONF.graphite.prefix + self.hostname + ".")
     else:
         self.prefix = cfg.CONF.graphite.prefix
Example #15
    def __call__(self, value):
        addr, port = netutils.parse_host_port(value)

        addr = self._validate_addr(addr)
        port = self._validate_port(port)
        LOG.debug('addr: %s port: %s' % (addr, port))

        if addr and port:
            return '%s:%d' % (addr, port)
        raise ValueError('%s is not a valid host address with optional port' % value)
Example #16
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc,
         default_port=cfg.CONF.graphite.default_port)
     self.hostname = socket.gethostname().split('.')[0]
     self.prefix_account = "testacct2.cloud." + self.hostname
     self.ks = self._get_keystone()
     if cfg.CONF.graphite.hypervisor_in_prefix:
         self.prefix = (cfg.CONF.graphite.prefix + self.hostname + ".")
     else:
         self.prefix = cfg.CONF.graphite.prefix
Example #17
 def __call__(self, value):
     addr, port = netutils.parse_host_port(value)
      # NOTE(gmann): parse_host_port() returns the port as None if no port
      # is supplied in value, so convert it to the string 'None' here so the
      # port check can parse it and report an error instead of failing on
      # NoneType.
     port = 'None' if port is None else port
     addr = self.validate_addr(addr)
     port = self._validate_port(port)
     if not addr and not port:
          raise ValueError('%s is not a valid IP address with optional port' % value)
     return '%s:%d' % (addr, port)
Example #18
    def __init__(self, parsed_url):
        if not kafka:
            raise IndexError(_LE("Kafka could not be imported"))
        super(KafkaBrokerPublisher, self).__init__(parsed_url)
        options = urlparse.parse_qs(parsed_url.query)

        self._producer = None
        self._host, self._port = netutils.parse_host_port(
            parsed_url.netloc, default_port=9092)
        self._topic = options.get('topic', ['ceilometer'])[-1]
        self.max_retry = int(options.get('max_retry', [100])[-1])
Example #19
    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
                'connect_timeout': self.conf.rabbit_connect_timeout,
            }

            for sp_key, value in server_params.items():
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()
Example #20
def parse_valid_host_port(host_port):
    """
    Given a "host:port" string, attempts to parse it as intelligently as
    possible to determine if it is valid. This includes IPv6 [host]:port form,
    IPv4 ip:port form, and hostname:port or fqdn:port form.

    Invalid inputs will raise a ValueError, while valid inputs will return
    a (host, port) tuple where the port will always be of type int.
    """

    try:
        try:
            host, port = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError(_('Host and port "%s" is not valid.') % host_port)

        if not netutils.is_valid_port(port):
            raise ValueError(_('Port "%s" is not valid.') % port)

        # First check for valid IPv6 and IPv4 addresses, then a generic
        # hostname. Failing those, if the host includes a period, then this
        # should pass a very generic FQDN check. The FQDN check for letters at
        # the tail end will weed out any hilariously absurd IPv4 addresses.

        if not (
            netutils.is_valid_ipv6(host)
            or netutils.is_valid_ipv4(host)
            or is_valid_hostname(host)
            or is_valid_fqdn(host)
        ):
            raise ValueError(_('Host "%s" is not valid.') % host)

    except Exception as ex:
        raise ValueError(
            _(
                "%s "
                "Please specify a host:port pair, where host is an "
                "IPv4 address, IPv6 address, hostname, or FQDN. If "
                "using an IPv6 address, enclose it in brackets "
                "separately from the port (i.e., "
                '"[fe80::a:b:c]:9876").'
            )
            % ex
        )

    return (host, int(port))
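
A hedged usage sketch derived only from the docstring above (inputs are illustrative):

    parse_valid_host_port('192.0.2.10:9292')      # ('192.0.2.10', 9292)
    parse_valid_host_port('[fe80::a:b:c]:9876')   # ('fe80::a:b:c', 9876)
    parse_valid_host_port('not a host:port')      # raises ValueError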
Example #21
 def test_parse_host_port(self):
     self.assertEqual(('server01', 80),
                      netutils.parse_host_port('server01:80'))
     self.assertEqual(('server01', None),
                      netutils.parse_host_port('server01'))
     self.assertEqual(('server01', 1234),
                      netutils.parse_host_port('server01',
                                               default_port=1234))
     self.assertEqual(('::1', 80), netutils.parse_host_port('[::1]:80'))
     self.assertEqual(('::1', None), netutils.parse_host_port('[::1]'))
     self.assertEqual(('::1', 1234),
                      netutils.parse_host_port('[::1]', default_port=1234))
     self.assertEqual(
         ('2001:db8:85a3::8a2e:370:7334', 1234),
         netutils.parse_host_port('2001:db8:85a3::8a2e:370:7334',
                                  default_port=1234))
Example #22
    def __init__(self, conf, url, purpose):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.connection = None
        self.session = None
        self.consumers = {}
        self.conf = conf
        self.driver_conf = conf.oslo_messaging_qpid

        self._consume_loop_stopped = False

        self.brokers_params = []
        if url.hosts:
            for host in url.hosts:
                params = {
                    'username': host.username or '',
                    'password': host.password or '',
                }
                if host.port is not None:
                    params['host'] = '%s:%d' % (host.hostname, host.port)
                else:
                    params['host'] = host.hostname
                self.brokers_params.append(params)
        else:
            # Old configuration format
            for adr in self.driver_conf.qpid_hosts:
                hostname, port = netutils.parse_host_port(
                    adr, default_port=5672)

                if ':' in hostname:
                    hostname = '[' + hostname + ']'

                params = {
                    'host': '%s:%d' % (hostname, port),
                    'username': self.driver_conf.qpid_username,
                    'password': self.driver_conf.qpid_password,
                }
                self.brokers_params.append(params)

        random.shuffle(self.brokers_params)
        self.brokers = itertools.cycle(self.brokers_params)

        self._initial_pid = os.getpid()
        self.reconnect()
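
A hedged walk-through of the bracketing logic above (the address is invented): an unbracketed IPv6 address contains more than one ':', so parse_host_port() treats the whole string as the host and falls back to the default port, and the code then re-adds brackets before building the host:port string.

    hostname, port = netutils.parse_host_port('::1', default_port=5672)
    # ('::1', 5672)
    if ':' in hostname:
        hostname = '[' + hostname + ']'
    '%s:%d' % (hostname, port)    # '[::1]:5672'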
Example #23
    def __init__(self, conf, url, purpose):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.connection = None
        self.session = None
        self.consumers = {}
        self.conf = conf
        self.driver_conf = conf.oslo_messaging_qpid

        self._consume_loop_stopped = False

        self.brokers_params = []
        if url.hosts:
            for host in url.hosts:
                params = {
                    'username': host.username or '',
                    'password': host.password or '',
                }
                if host.port is not None:
                    params['host'] = '%s:%d' % (host.hostname, host.port)
                else:
                    params['host'] = host.hostname
                self.brokers_params.append(params)
        else:
            # Old configuration format
            for adr in self.driver_conf.qpid_hosts:
                hostname, port = netutils.parse_host_port(adr,
                                                          default_port=5672)

                if ':' in hostname:
                    hostname = '[' + hostname + ']'

                params = {
                    'host': '%s:%d' % (hostname, port),
                    'username': self.driver_conf.qpid_username,
                    'password': self.driver_conf.qpid_password,
                }
                self.brokers_params.append(params)

        random.shuffle(self.brokers_params)
        self.brokers = itertools.cycle(self.brokers_params)

        self._initial_pid = os.getpid()
        self.reconnect()
Example #24
def initialize(ceilometer_client):
    logging.debug("Ceilometer client info:%s", ceilometer_client)
    parse_target = netutils.urlsplit(ceilometer_client)
    if not parse_target.netloc:
        err_str = "Error:Invalid client format"
        logging.error("* Invalid client format")
        return err_str
    if parse_target.scheme == "udp":
        host, port = netutils.parse_host_port(parse_target.netloc)
        scheme = parse_target.scheme
        app_ip = host
        app_port = port
        if host is None or port is None:
            err_str = "* Error: Invalid IP Address format"
            logging.error("* Invalid IP Address format")
            return err_str
        thread.start_new(read_notification_from_ceilometer_over_udp,
                         (host, port,))
    elif parse_target.scheme == "kafka":
        thread.start_new(read_notification_from_ceilometer_over_kafka,
                         (parse_target,))
Example #25
 def test_parse_host_port(self):
     self.assertEqual(('server01', 80),
                      netutils.parse_host_port('server01:80'))
     self.assertEqual(('server01', None),
                      netutils.parse_host_port('server01'))
     self.assertEqual(('server01', 1234),
                      netutils.parse_host_port('server01',
                      default_port=1234))
     self.assertEqual(('::1', 80),
                      netutils.parse_host_port('[::1]:80'))
     self.assertEqual(('::1', None),
                      netutils.parse_host_port('[::1]'))
     self.assertEqual(('::1', 1234),
                      netutils.parse_host_port('[::1]',
                      default_port=1234))
     self.assertEqual(('2001:db8:85a3::8a2e:370:7334', 1234),
                      netutils.parse_host_port(
                          '2001:db8:85a3::8a2e:370:7334',
                          default_port=1234))
Example #26
    def __init__(self, app, name, listen, max_url_len=None):
        super(WSGIService, self).__init__(CONF.senlin_api.threads)
        self.app = app
        self.name = name

        self.listen = listen

        self.servers = []

        for address in self.listen:
            host, port = netutils.parse_host_port(address)
            server = wsgi.Server(CONF,
                                 name,
                                 app,
                                 host=host,
                                 port=port,
                                 pool_size=CONF.senlin_api.threads,
                                 use_ssl=sslutils.is_enabled(CONF),
                                 max_url_len=max_url_len)

            self.servers.append(server)
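
A small hedged example of the per-address loop above (addresses invented): each host:port entry in listen becomes one wsgi.Server, and an entry without a port comes back as (host, None).

    netutils.parse_host_port('0.0.0.0:8778')   # ('0.0.0.0', 8778)
    netutils.parse_host_port('[::]:8778')      # ('::', 8778)
    netutils.parse_host_port('0.0.0.0')        # ('0.0.0.0', None)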
Example #27
 def __init__(self, conf, parsed_url):
     super(UDPPublisher, self).__init__(conf, parsed_url)
     self.host, self.port = netutils.parse_host_port(parsed_url.netloc,
                                                     default_port=4952)
     addrinfo = None
     try:
         addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET6,
                                       socket.SOCK_DGRAM)[0]
     except socket.gaierror:
         try:
             addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET,
                                           socket.SOCK_DGRAM)[0]
         except socket.gaierror:
             pass
     if addrinfo:
         addr_family = addrinfo[0]
     else:
         LOG.warning("Cannot resolve host %s, creating AF_INET socket...",
                     self.host)
         addr_family = socket.AF_INET
     self.socket = socket.socket(addr_family, socket.SOCK_DGRAM)
Example #28
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc, default_port=cfg.CONF.collector.udp_port)
     addrinfo = None
     try:
         addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET6,
                                       socket.SOCK_DGRAM)[0]
     except socket.gaierror:
         try:
             addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET,
                                           socket.SOCK_DGRAM)[0]
         except socket.gaierror:
             pass
     if addrinfo:
         addr_family = addrinfo[0]
     else:
         LOG.warning(
             _LW("Cannot resolve host %s, creating AF_INET socket..."),
             self.host)
         addr_family = socket.AF_INET
     self.socket = socket.socket(addr_family, socket.SOCK_DGRAM)
Example #29
    def delete(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance_store.location.Location` object, supplied
                  from glance_store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        loc = location.store_location
        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        from boto.s3.connection import S3Connection

        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey,
                               loc.secretkey,
                               host=s3host,
                               port=s3port,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({
                    's3_host': loc.s3serviceurl,
                    'accesskey': loc.accesskey,
                    'bucket': loc.bucket,
                    'obj_name': loc.key
                })
        LOG.debug(msg)

        return key.delete()
Example #30
def parse_valid_host_port(host_port):
    """
    Given a "host:port" string, attempts to parse it as intelligently as
    possible to determine if it is valid. This includes IPv6 [host]:port form,
    IPv4 ip:port form, and hostname:port or fqdn:port form.

    Invalid inputs will raise a ValueError, while valid inputs will return
    a (host, port) tuple where the port will always be of type int.
    """

    try:
        try:
            host, port = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError(_('Host and port "%s" is not valid.') % host_port)

        if not netutils.is_valid_port(port):
            raise ValueError(_('Port "%s" is not valid.') % port)

        # First check for valid IPv6 and IPv4 addresses, then a generic
        # hostname. Failing those, if the host includes a period, then this
        # should pass a very generic FQDN check. The FQDN check for letters at
        # the tail end will weed out any hilariously absurd IPv4 addresses.

        if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host)
                or is_valid_hostname(host) or is_valid_fqdn(host)):
            raise ValueError(_('Host "%s" is not valid.') % host)

    except Exception as ex:
        raise ValueError(
            _('%s '
              'Please specify a host:port pair, where host is an '
              'IPv4 address, IPv6 address, hostname, or FQDN. If '
              'using an IPv6 address, enclose it in brackets '
              'separately from the port (i.e., '
              '"[fe80::a:b:c]:9876").') % ex)

    return (host, int(port))
Example #31
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc,
         default_port=cfg.CONF.collector.udp_port)
     addrinfo = None
     try:
         addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET6,
                                       socket.SOCK_DGRAM)[0]
     except socket.gaierror:
         try:
             addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET,
                                           socket.SOCK_DGRAM)[0]
         except socket.gaierror:
             pass
     if addrinfo:
         addr_family = addrinfo[0]
     else:
         LOG.warning(_LW(
                     "Cannot resolve host %s, creating AF_INET socket..."),
                     self.host)
         addr_family = socket.AF_INET
     self.socket = socket.socket(addr_family,
                                 socket.SOCK_DGRAM)
Example #32
def initialize(ceilometer_client):
    logging.debug("Ceilometer client info:%s", ceilometer_client)
    parse_target = netutils.urlsplit(ceilometer_client)
    if not parse_target.netloc:
        err_str = "Error:Invalid client format"
        logging.error("* Invalid client format")
        return err_str
    if parse_target.scheme == "udp":
        host, port = netutils.parse_host_port(parse_target.netloc)
        scheme = parse_target.scheme
        app_ip = host
        app_port = port
        if host is None or port is None:
            err_str = "* Error: Invalid IP Address format"
            logging.error("* Invalid IP Address format")
            return err_str
        thread.start_new(read_notification_from_ceilometer_over_udp, (
            host,
            port,
        ))
    elif parse_target.scheme == "kafka":
        thread.start_new(read_notification_from_ceilometer_over_kafka,
                         (parse_target, ))
Example #33
 def __init__(self, conf, parsed_url):
     super(UDPPublisher, self).__init__(conf, parsed_url)
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc, default_port=4952)
     addrinfo = None
     try:
         addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET6,
                                       socket.SOCK_DGRAM)[0]
     except socket.gaierror:
         try:
             addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET,
                                           socket.SOCK_DGRAM)[0]
         except socket.gaierror:
             pass
     if addrinfo:
         addr_family = addrinfo[0]
     else:
         LOG.warning(
             "Cannot resolve host %s, creating AF_INET socket...",
             self.host)
         addr_family = socket.AF_INET
     self.socket = socket.socket(addr_family,
                                 socket.SOCK_DGRAM)
Example #34
    def __init__(self, conf, url):
        self.consumers = []
        self.consumer_num = itertools.count(1)
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30

        self._ssl_params = self._fetch_ssl_params()
        self._login_method = self.conf.rabbit_login_method

        if url.virtual_host is not None:
            virtual_host = url.virtual_host
        else:
            virtual_host = self.conf.rabbit_virtual_host

        self._url = ''
        if self.conf.fake_rabbit:
            LOG.warn("Deprecated: fake_rabbit option is deprecated, set "
                     "rpc_backend to kombu+memory or use the fake "
                     "driver instead.")
            self._url = 'memory://%s/' % virtual_host
        elif url.hosts:
            for host in url.hosts:
                transport = url.transport.replace('kombu+', '')
                transport = transport.replace('rabbit', 'amqp')
                self._url += '%s%s://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '', transport,
                    parse.quote(host.username or ''),
                    parse.quote(host.password or ''), host.hostname
                    or '', str(host.port or 5672), virtual_host)
        elif url.transport.startswith('kombu+'):
            # NOTE(sileht): url has a + but no hosts
            # (like kombu+memory:///), pass it to kombu as-is
            transport = url.transport.replace('kombu+', '')
            self._url = "%s://%s" % (transport, virtual_host)
        else:
            for adr in self.conf.rabbit_hosts:
                hostname, port = netutils.parse_host_port(
                    adr, default_port=self.conf.rabbit_port)
                self._url += '%samqp://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    parse.quote(self.conf.rabbit_userid),
                    parse.quote(self.conf.rabbit_password), hostname, port,
                    virtual_host)

        self._initial_pid = os.getpid()

        self.do_consume = True
        self._consume_loop_stopped = False

        self.channel = None
        self.connection = kombu.connection.Connection(
            self._url,
            ssl=self._ssl_params,
            login_method=self._login_method,
            failover_strategy="shuffle")

        LOG.info(_LI('Connecting to AMQP server on %(hostname)s:%(port)d'), {
            'hostname': self.connection.hostname,
            'port': self.connection.port
        })
        # NOTE(sileht): just ensure the connection is set up at startup
        self.ensure(error_callback=None, method=lambda: True)
        LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d'), {
            'hostname': self.connection.hostname,
            'port': self.connection.port
        })

        # NOTE(sileht):
        # value chosen according to the best practice from kombu:
        # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
        self._poll_timeout = 1

        if self._url.startswith('memory://'):
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
            self._poll_timeout = 0.05
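
For the rabbit_hosts branch above, a hedged sketch (hosts, credentials and virtual host are invented) of how the semicolon-separated failover URL is assembled: each configured host is normalized with parse_host_port() using the configured default port.

    virtual_host = 'ceilometer'   # illustrative
    url = ''
    for adr in ['rabbit1.example.com:5671', 'rabbit2.example.com']:
        hostname, port = netutils.parse_host_port(adr, default_port=5672)
        url += '%samqp://%s:%s@%s:%s/%s' % (
            ";" if url else '', 'guest', 'secret', hostname, port, virtual_host)
    # url == 'amqp://guest:secret@rabbit1.example.com:5671/ceilometer'
    #        ';amqp://guest:secret@rabbit2.example.com:5672/ceilometer'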
Example #35
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc,
         default_port=cfg.CONF.collector.udp_port)
     self.socket = socket.socket(socket.AF_INET,
                                 socket.SOCK_DGRAM)
Example #36
    def __init__(self, conf, url, purpose):
        self.consumers = []
        self.consumer_num = itertools.count(1)
        self.conf = conf
        self.driver_conf = self.conf.oslo_messaging_rabbit
        self.max_retries = self.driver_conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.driver_conf.rabbit_retry_interval
        self.interval_stepping = self.driver_conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30

        self._login_method = self.driver_conf.rabbit_login_method

        if url.virtual_host is not None:
            virtual_host = url.virtual_host
        else:
            virtual_host = self.driver_conf.rabbit_virtual_host

        self._url = ''
        if self.driver_conf.fake_rabbit:
            LOG.warn("Deprecated: fake_rabbit option is deprecated, set "
                     "rpc_backend to kombu+memory or use the fake "
                     "driver instead.")
            self._url = 'memory://%s/' % virtual_host
        elif url.hosts:
            if url.transport.startswith('kombu+'):
                LOG.warn(_LW('Selecting the kombu transport through the '
                             'transport url (%s) is an experimental feature '
                             'and this is not yet supported.') % url.transport)
            for host in url.hosts:
                transport = url.transport.replace('kombu+', '')
                transport = transport.replace('rabbit', 'amqp')
                self._url += '%s%s://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    transport,
                    parse.quote(host.username or ''),
                    parse.quote(host.password or ''),
                    self._parse_url_hostname(host.hostname) or '',
                    str(host.port or 5672),
                    virtual_host)
        elif url.transport.startswith('kombu+'):
            # NOTE(sileht): url has a + but no hosts
            # (like kombu+memory:///), pass it to kombu as-is
            transport = url.transport.replace('kombu+', '')
            self._url = "%s://%s" % (transport, virtual_host)
        else:
            for adr in self.driver_conf.rabbit_hosts:
                hostname, port = netutils.parse_host_port(
                    adr, default_port=self.driver_conf.rabbit_port)
                self._url += '%samqp://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    parse.quote(self.driver_conf.rabbit_userid),
                    parse.quote(self.driver_conf.rabbit_password),
                    self._parse_url_hostname(hostname), port,
                    virtual_host)

        self._initial_pid = os.getpid()

        self.do_consume = True
        self._consume_loop_stopped = False
        self.channel = None

        # NOTE(sileht): if purpose is PURPOSE_LISTEN
        # we don't need the lock because we don't
        # have a heartbeat thread
        if purpose == rpc_amqp.PURPOSE_SEND:
            self._connection_lock = ConnectionLock()
        else:
            self._connection_lock = DummyConnectionLock()

        self.connection = kombu.connection.Connection(
            self._url, ssl=self._fetch_ssl_params(),
            login_method=self._login_method,
            failover_strategy="shuffle",
            heartbeat=self.driver_conf.heartbeat_timeout_threshold)

        LOG.info(_LI('Connecting to AMQP server on %(hostname)s:%(port)d'),
                 self.connection.info())

        # NOTE(sileht): kombu recommends running heartbeat_check every
        # second, but we hold a lock around the kombu connection. So, to
        # avoid holding the lock most of the time doing nothing but waiting
        # for events to drain, we run heartbeat_check and retrieve the
        # server heartbeat packet only twice as often as the minimum the
        # heartbeat needs to work (heartbeat_timeout/heartbeat_rate/2.0;
        # the default kombu heartbeat_rate is 2).
        self._heartbeat_wait_timeout = (
            float(self.driver_conf.heartbeat_timeout_threshold) /
            float(self.driver_conf.heartbeat_rate) / 2.0)
        self._heartbeat_support_log_emitted = False

        # NOTE(sileht): just ensure the connection is set up at startup
        self.ensure_connection()

        # NOTE(sileht): if purpose is PURPOSE_LISTEN
        # the consume code does the heartbeat stuff
        # we don't need a thread
        if purpose == rpc_amqp.PURPOSE_SEND:
            self._heartbeat_start()

        LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d'),
                 self.connection.info())

        # NOTE(sileht):
        # value chosen according to the best practice from kombu:
        # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
        self._poll_timeout = 1

        if self._url.startswith('memory://'):
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
            self._poll_timeout = 0.05
Example #37
    def __init__(self, conf, url, purpose):
        # NOTE(viktors): Parse config options
        driver_conf = conf.oslo_messaging_rabbit

        self.max_retries = driver_conf.rabbit_max_retries
        self.interval_start = driver_conf.rabbit_retry_interval
        self.interval_stepping = driver_conf.rabbit_retry_backoff

        self.login_method = driver_conf.rabbit_login_method
        self.fake_rabbit = driver_conf.fake_rabbit
        self.virtual_host = driver_conf.rabbit_virtual_host
        self.rabbit_hosts = driver_conf.rabbit_hosts
        self.rabbit_port = driver_conf.rabbit_port
        self.rabbit_userid = driver_conf.rabbit_userid
        self.rabbit_password = driver_conf.rabbit_password
        self.rabbit_ha_queues = driver_conf.rabbit_ha_queues
        self.heartbeat_timeout_threshold = \
            driver_conf.heartbeat_timeout_threshold
        self.heartbeat_rate = driver_conf.heartbeat_rate
        self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay
        self.amqp_durable_queues = driver_conf.amqp_durable_queues
        self.amqp_auto_delete = driver_conf.amqp_auto_delete
        self.rabbit_use_ssl = driver_conf.rabbit_use_ssl
        self.kombu_missing_consumer_retry_timeout = \
            driver_conf.kombu_missing_consumer_retry_timeout
        self.kombu_failover_strategy = driver_conf.kombu_failover_strategy

        if self.rabbit_use_ssl:
            self.kombu_ssl_version = driver_conf.kombu_ssl_version
            self.kombu_ssl_keyfile = driver_conf.kombu_ssl_keyfile
            self.kombu_ssl_certfile = driver_conf.kombu_ssl_certfile
            self.kombu_ssl_ca_certs = driver_conf.kombu_ssl_ca_certs

        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None

        # max retry-interval = 30 seconds
        self.interval_max = 30

        if url.virtual_host is not None:
            virtual_host = url.virtual_host
        else:
            virtual_host = self.virtual_host

        self._url = ''
        if self.fake_rabbit:
            LOG.warn(_LW("Deprecated: fake_rabbit option is deprecated, set "
                         "rpc_backend to kombu+memory or use the fake "
                         "driver instead."))
            self._url = 'memory://%s/' % virtual_host
        elif url.hosts:
            if url.transport.startswith('kombu+'):
                LOG.warn(_LW('Selecting the kombu transport through the '
                             'transport url (%s) is an experimental feature '
                             'and this is not yet supported.'), url.transport)
            if len(url.hosts) > 1:
                random.shuffle(url.hosts)
            for host in url.hosts:
                transport = url.transport.replace('kombu+', '')
                transport = transport.replace('rabbit', 'amqp')
                self._url += '%s%s://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    transport,
                    parse.quote(host.username or ''),
                    parse.quote(host.password or ''),
                    self._parse_url_hostname(host.hostname) or '',
                    str(host.port or 5672),
                    virtual_host)
        elif url.transport.startswith('kombu+'):
            # NOTE(sileht): url has a + but no hosts
            # (like kombu+memory:///), pass it to kombu as-is
            transport = url.transport.replace('kombu+', '')
            self._url = "%s://%s" % (transport, virtual_host)
        else:
            if len(self.rabbit_hosts) > 1:
                random.shuffle(self.rabbit_hosts)
            for adr in self.rabbit_hosts:
                hostname, port = netutils.parse_host_port(
                    adr, default_port=self.rabbit_port)
                self._url += '%samqp://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    parse.quote(self.rabbit_userid, ''),
                    parse.quote(self.rabbit_password, ''),
                    self._parse_url_hostname(hostname), port,
                    virtual_host)

        self._initial_pid = os.getpid()

        self._consumers = []
        self._new_consumers = []
        self._consume_loop_stopped = False
        self.channel = None

        # NOTE(sileht): if purpose is PURPOSE_LISTEN
        # we don't need the lock because we don't
        # have a heartbeat thread
        if purpose == rpc_common.PURPOSE_SEND:
            self._connection_lock = ConnectionLock()
        else:
            self._connection_lock = DummyConnectionLock()

        self.connection = kombu.connection.Connection(
            self._url, ssl=self._fetch_ssl_params(),
            login_method=self.login_method,
            heartbeat=self.heartbeat_timeout_threshold,
            failover_strategy=self.kombu_failover_strategy,
            transport_options={
                'confirm_publish': True,
                'on_blocked': self._on_connection_blocked,
                'on_unblocked': self._on_connection_unblocked,
            },
        )

        LOG.debug('Connecting to AMQP server on %(hostname)s:%(port)s',
                  self.connection.info())

        # NOTE(sileht): kombu recommends running heartbeat_check every
        # second, but we hold a lock around the kombu connection. So, to
        # avoid holding the lock most of the time doing nothing but waiting
        # for events to drain, we run heartbeat_check and retrieve the
        # server heartbeat packet only twice as often as the minimum the
        # heartbeat needs to work (heartbeat_timeout/heartbeat_rate/2.0;
        # the default kombu heartbeat_rate is 2).
        self._heartbeat_wait_timeout = (
            float(self.heartbeat_timeout_threshold) /
            float(self.heartbeat_rate) / 2.0)
        self._heartbeat_support_log_emitted = False

        # NOTE(sileht): just ensure the connection is set up at startup
        self.ensure_connection()

        # NOTE(sileht): if purpose is PURPOSE_LISTEN
        # the consume code does the heartbeat stuff
        # we don't need a thread
        self._heartbeat_thread = None
        if purpose == rpc_common.PURPOSE_SEND:
            self._heartbeat_start()

        LOG.debug('Connected to AMQP server on %(hostname)s:%(port)s '
                  'via [%(transport)s] client',
                  self.connection.info())

        # NOTE(sileht): value chosen according the best practice from kombu
        # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
        # For heartbeat, we can set a bigger timeout and check that we
        # receive the heartbeat packets regularly.
        if self._heartbeat_supported_and_enabled():
            self._poll_timeout = self._heartbeat_wait_timeout
        else:
            self._poll_timeout = 1

        if self._url.startswith('memory://'):
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
            # Fixup logging
            self.connection.hostname = "memory_driver"
            self.connection.port = 1234
            self._poll_timeout = 0.05
Example #38
def subscribe():
    try:
        app_id = request.json['app_id']
        target = request.json['target']
        sub_info = request.json['sub_info']

        try:
            validate_sub_info(sub_info)
        except Exception as e:
            logging.error("* %s", e.__str__())
            return e.__str__()
        ''' Flag to update pipeline cfg file '''
        config = ConfigParser.ConfigParser()
        config.read('pub_sub.conf')
        if config.get('RABBITMQ', 'UpdateConfMgmt') == "True":
            update_pipeline_conf(sub_info, target, app_id, "ADD")
        else:
            logging.warning(
                "Update Conf Mgmt flag is disabled,enable the flag to  update Conf Mgmt"
            )

        if not 'query' in request.json.keys():
            logging.info("query request is not provided by user")
            query = None
        else:
            query = request.json['query']
            for i in range(len(query)):
                if not 'field' in query[i].keys():
                    err_str = "Query field"
                    raise Exception(err_str)
                if not 'op' in query[i].keys():
                    err_str = "Query op"
                    raise Exception(err_str)
                if not 'value' in query[i].keys():
                    err_str = "Query value"
                    raise Exception(err_str)
    except Exception as e:
        err_str = "KeyError: Parsing subscription request " + e.__str__(
        ) + "\n"
        logging.error("* KeyError: Parsing subscription request :%s",
                      e.__str__())
        return err_str

    parse_target = netutils.urlsplit(target)
    if not parse_target.netloc:
        err_str = "Error:Invalid target format"
        logging.error("* Invalid target format")
        return err_str

    status = ""
    if parse_target.scheme == "udp" or parse_target.scheme == "kafka":
        host, port = netutils.parse_host_port(parse_target.netloc)
        scheme = parse_target.scheme
        app_ip = host
        app_port = port

        if host is None or port is None:
            err_str = "* Error: Invalid IP Address format"
            logging.error("* Invalid IP Address format")
            return err_str

        subscription_info = sub_info
        sub_info_filter = query
        logging.info(
            "Creating subscription for app:%s for meters:%s with filters:%s and target:%s",
            app_id, subscription_info, sub_info_filter, target)
        subscrip_obj = subinfo(scheme, app_id, app_ip, app_port,
                               subscription_info, sub_info_filter, target)
        status = subscrip_obj.update_subinfo()
        subinfo.print_subinfo()

    if parse_target.scheme == "file":
        pass
    return status
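
From the keys read above, a hedged example of the JSON body such a subscribe request might carry (all values, including the meter name and the filter, are invented):

    {
        "app_id": "app-1",
        "target": "udp://10.0.0.5:5005",
        "sub_info": ["cpu_util"],
        "query": [{"field": "resource_id", "op": "eq", "value": "vm-42"}]
    }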
Example #39
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_user``
            <SECRET_KEY> = ``s3_store_key``
            <S3_URL> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key}, self.conf)

        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=s3host, port=s3port,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)

        create_bucket_if_missing(self.conf, self.bucket, s3_conn)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)
        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exceptions.Duplicate(message=_("S3 already has an image at "
                                                 "location %s") %
                                       self._sanitize(loc.get_uri()))

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        LOG.debug(msg)
        LOG.debug("Uploading an image file to S3 for %s" %
                  self._sanitize(loc.get_uri()))

        if image_size < self.s3_store_large_object_size:
            return self.add_singlepart(image_file, bucket_obj, obj_name, loc)
        else:
            return self.add_multipart(image_file, image_size, bucket_obj,
                                      obj_name, loc)
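
A hedged illustration of the store location the docstring above describes (every value here is invented): with bucket 'glance', service URL 's3.example.com:8080' and an https scheme, the resulting URI would look roughly like

    s3+https://AKIDEXAMPLE:SECRETEXAMPLE@s3.example.com:8080/glance/<image_id>

with parse_host_port() later splitting 's3.example.com:8080' back into ('s3.example.com', 8080).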
Example #40
 def __init__(self, parsed_url):
     self.host, self.port = netutils.parse_host_port(
         parsed_url.netloc, default_port=cfg.CONF.collector.udp_port)
     self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Example #41
    def __init__(self, conf, url, purpose):
        self.consumers = []
        self.consumer_num = itertools.count(1)
        self.conf = conf
        self.driver_conf = self.conf.oslo_messaging_rabbit
        self.max_retries = self.driver_conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.driver_conf.rabbit_retry_interval
        self.interval_stepping = self.driver_conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30

        self._login_method = self.driver_conf.rabbit_login_method

        if url.virtual_host is not None:
            virtual_host = url.virtual_host
        else:
            virtual_host = self.driver_conf.rabbit_virtual_host

        self._url = ''
        if self.driver_conf.fake_rabbit:
            LOG.warn("Deprecated: fake_rabbit option is deprecated, set "
                     "rpc_backend to kombu+memory or use the fake "
                     "driver instead.")
            self._url = 'memory://%s/' % virtual_host
        elif url.hosts:
            if url.transport.startswith('kombu+'):
                LOG.warn(
                    _LW('Selecting the kombu transport through the '
                        'transport url (%s) is an experimental feature '
                        'and this is not yet supported.') % url.transport)
            for host in url.hosts:
                transport = url.transport.replace('kombu+', '')
                transport = transport.replace('rabbit', 'amqp')
                self._url += '%s%s://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '', transport,
                    parse.quote(host.username
                                or ''), parse.quote(host.password or ''),
                    self._parse_url_hostname(host.hostname)
                    or '', str(host.port or 5672), virtual_host)
        elif url.transport.startswith('kombu+'):
            # NOTE(sileht): url has a + but no hosts
            # (like kombu+memory:///), pass it to kombu as-is
            transport = url.transport.replace('kombu+', '')
            self._url = "%s://%s" % (transport, virtual_host)
        else:
            for adr in self.driver_conf.rabbit_hosts:
                hostname, port = netutils.parse_host_port(
                    adr, default_port=self.driver_conf.rabbit_port)
                self._url += '%samqp://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    parse.quote(self.driver_conf.rabbit_userid),
                    parse.quote(self.driver_conf.rabbit_password),
                    self._parse_url_hostname(hostname), port, virtual_host)

        self._initial_pid = os.getpid()

        self.do_consume = True
        self._consume_loop_stopped = False
        self.channel = None

        # NOTE(sileht): if purpose is PURPOSE_LISTEN
        # we don't need the lock because we don't
        # have a heartbeat thread
        if purpose == rpc_amqp.PURPOSE_SEND:
            self._connection_lock = ConnectionLock()
        else:
            self._connection_lock = DummyConnectionLock()

        self.connection = kombu.connection.Connection(
            self._url,
            ssl=self._fetch_ssl_params(),
            login_method=self._login_method,
            failover_strategy="shuffle",
            heartbeat=self.driver_conf.heartbeat_timeout_threshold)

        LOG.info(_LI('Connecting to AMQP server on %(hostname)s:%(port)d'),
                 self.connection.info())

        # NOTE(sileht): kombu recommends running heartbeat_check every
        # second, but we hold a lock around the kombu connection. To avoid
        # keeping that lock busy while mostly doing nothing but waiting for
        # events to drain, we run heartbeat_check and retrieve the server
        # heartbeat packet only twice as often as the minimum required for
        # the heartbeat to work
        # (heartbeat_timeout / heartbeat_rate / 2.0; the default kombu
        # heartbeat_rate is 2).
        self._heartbeat_wait_timeout = (
            float(self.driver_conf.heartbeat_timeout_threshold) /
            float(self.driver_conf.heartbeat_rate) / 2.0)
        self._heartbeat_support_log_emitted = False

        # NOTE(sileht): just ensure the connection is set up at startup
        self.ensure_connection()

        # NOTE(sileht): if purpose is PURPOSE_LISTEN
        # the consume code does the heartbeat stuff
        # we don't need a thread
        if purpose == rpc_amqp.PURPOSE_SEND:
            self._heartbeat_start()

        LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d'),
                 self.connection.info())

        # NOTE(sileht):
        # value chosen according to the best practice from kombu:
        # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
        self._poll_timeout = 1

        if self._url.startswith('memory://'):
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
            self._poll_timeout = 0.05
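The rabbit_hosts branch above concatenates one amqp:// URL per broker, separated by ';', so that kombu's failover_strategy can rotate through them. A standalone sketch of that URL-building step follows; the build_failover_url helper name and the sample broker values are illustrative, not part of oslo.messaging.

from oslo_utils import netutils
from six.moves.urllib import parse

def build_failover_url(hosts, userid, password, virtual_host, default_port=5672):
    """Join one amqp:// URL per broker with ';' for kombu failover (sketch)."""
    url = ''
    for adr in hosts:
        hostname, port = netutils.parse_host_port(adr, default_port=default_port)
        url += '%samqp://%s:%s@%s:%s/%s' % (
            ';' if url else '', parse.quote(userid), parse.quote(password),
            hostname, port, virtual_host)
    return url

# build_failover_url(['rabbit1:5671', 'rabbit2'], 'guest', 'guest', 'myvhost')
# -> 'amqp://guest:guest@rabbit1:5671/myvhost;amqp://guest:guest@rabbit2:5672/myvhost'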
Example #42
0
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_HOST>/<BUCKET>/<ID>
        where:
            <ACCESS_KEY> = the S3 access key (``self.access_key``)
            <SECRET_KEY> = the S3 secret key (``self.secret_key``)
            <S3_HOST> = the S3 service endpoint (``self.full_s3_host``)
            <BUCKET> = the configured bucket (``self.bucket``)
            <ID> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation(
            {
                'scheme': self.scheme,
                'bucket': self.bucket,
                'key': image_id,
                's3serviceurl': self.full_s3_host,
                'accesskey': self.access_key,
                'secretkey': self.secret_key
            }, self.conf)

        s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey,
                               loc.secretkey,
                               host=s3host,
                               port=s3port,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)

        create_bucket_if_missing(self.conf, self.bucket, s3_conn)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)
        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exceptions.Duplicate(message=_("S3 already has an image at "
                                                 "location %s") %
                                       self._sanitize(loc.get_uri()))

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({
                    's3_host': self.s3_host,
                    'access_key': self.access_key,
                    'bucket': self.bucket,
                    'obj_name': obj_name
                })
        LOG.debug(msg)
        LOG.debug("Uploading an image file to S3 for %s" %
                  self._sanitize(loc.get_uri()))

        if image_size < self.s3_store_large_object_size:
            return self.add_singlepart(image_file, bucket_obj, obj_name, loc)
        else:
            return self.add_multipart(image_file, image_size, bucket_obj,
                                      obj_name, loc)
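Per the docstring above, add() returns a four-element tuple of (location URI, bytes written, checksum, metadata). A hedged usage sketch follows; the store instance, image bytes and identifiers are made-up values, and the exact URI quoting is left to StoreLocation.get_uri().

import io
import uuid

image_id = str(uuid.uuid4())
data = b'raw image bytes'

# store is assumed to be an initialized instance of the S3 driver above.
location, size, checksum, metadata = store.add(image_id, io.BytesIO(data), len(data))

# location is expected to look roughly like:
#   s3+https://<access_key>:<secret_key>@s3.example.com/glance-bucket/<image_id>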
Example #43
0
def subscribe():
    try:
        app_id = request.json['app_id']
        target = request.json['target']
        sub_info = request.json['sub_info']

        try:
            validate_sub_info(sub_info)
        except Exception as e:
            logging.error("* %s", str(e))
            return str(e)

        # Flag to update the pipeline cfg file
        config = ConfigParser.ConfigParser()
        config.read('pub_sub.conf')
        if config.get('RABBITMQ', 'UpdateConfMgmt') == "True":
            update_pipeline_conf(sub_info, target, app_id, "ADD")
        else:
            logging.warning("Update Conf Mgmt flag is disabled, enable the "
                            "flag to update Conf Mgmt")

        if 'query' not in request.json:
            logging.info("query request is not provided by user")
            query = None
        else:
            query = request.json['query']
            for item in query:
                if 'field' not in item:
                    raise Exception("Query 'field' key is missing")
                if 'op' not in item:
                    raise Exception("Query 'op' key is missing")
                if 'value' not in item:
                    raise Exception("Query 'value' key is missing")
    except Exception as e:
        err_str = "KeyError: Parsing subscription request " + str(e) + "\n"
        logging.error("* KeyError: Parsing subscription request: %s", str(e))
        return err_str

    parse_target = netutils.urlsplit(target)
    if not parse_target.netloc:
        err_str = "Error: Invalid target format"
        logging.error("* Invalid target format")
        return err_str

    status = ""
    if parse_target.scheme in ("udp", "kafka"):
        host, port = netutils.parse_host_port(parse_target.netloc)
        scheme = parse_target.scheme
        app_ip = host
        app_port = port

        if host is None or port is None:
            err_str = "* Error: Invalid IP Address format"
            logging.error("* Invalid IP Address format")
            return err_str

        subscription_info = sub_info
        sub_info_filter = query
        subscrip_obj = subinfo(scheme, app_id, app_ip, app_port,
                               subscription_info, sub_info_filter, target)
        status = subscrip_obj.update_subinfo()
        subinfo.print_subinfo()

    if parse_target.scheme == "file":
        pass
    return status
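For reference, a hedged example of the JSON body subscribe() expects, as a client might post it; the key names mirror the request.json lookups above, while the endpoint path, meter name, operator and addresses are illustrative assumptions only.

import requests

subscription = {
    "app_id": "cpu_monitor",            # identifier for the subscribing app
    "target": "udp://10.0.0.5:5678",    # udp://, kafka:// or file:// target
    "sub_info": "cpu_util",             # meter(s) the app subscribes to
    "query": [                          # optional; each item needs field/op/value
        {"field": "resource_id", "op": "eq", "value": "vm-01"},
    ],
}

# The endpoint path is an assumption; adjust to wherever subscribe() is routed.
requests.post("http://pubsub-host:4455/subscribe", json=subscription)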