Code example #1
File: member.py Project: Hybrid-Cloud/conveyor
 def _tag_check(self, tag, ids):
     try:
         if self._match_servers_tag(tag, ids):
             LOG.info(_LI('Check tags success!'))
     except retrying.RetryError:
         # just log that finding the server's tag failed
         LOG.info(_LI('Check tags failed!'))
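
Note: every snippet on this page presumes a module-level LOG plus the lazy-translation markers _, _LI, _LW and _LE. The conveyor modules that define them are not reproduced here, so the following is only a minimal sketch of the usual oslo.log / oslo.i18n boilerplate (the translation domain and the exact module layout are assumptions):

import oslo_i18n
from oslo_log import log as logging

# Translation markers as used throughout the examples (domain name assumed).
_translators = oslo_i18n.TranslatorFactory(domain='conveyor')
_ = _translators.primary          # user-facing messages
_LI = _translators.log_info       # INFO-level log messages
_LW = _translators.log_warning    # WARNING-level log messages
_LE = _translators.log_error      # ERROR-level log messages

# Module-level logger used by the methods shown on this page.
LOG = logging.getLogger(__name__)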
Code example #2
File: iam.py Project: Hybrid-Cloud/conveyor
    def _get_token(self, creds, method='token', admin_token_info=None):
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }

        if method == self.ASSUME_ROLE:
            admin_token_id = self.admin_token()
            headers.update({
                "X-Auth-Token": admin_token_id
            })

        creds_json = json.dumps(creds)
        response = requests.post(self.client.auth_url + '/auth/tokens',
                                 data=creds_json,
                                 headers=headers,
                                 verify=False)

        token_id = None
        try:
            token_id = response.headers['X-Subject-Token']
            LOG.info(_LI("IAM authentication successful."))
        except (AttributeError, KeyError):
            LOG.info(_LI("IAM authentication failure."))
            raise exception.AuthorizationFailure()

        return token_id
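
The creds dict serialized above is built elsewhere and is not part of this snippet. For orientation only: a Keystone v3 password-scoped authentication body, which is what POST /auth/tokens accepts, typically looks like the sketch below (all values are placeholders, not taken from the conveyor source):

# Illustrative Keystone v3 password-auth payload; every value is a placeholder.
creds = {
    'auth': {
        'identity': {
            'methods': ['password'],
            'password': {
                'user': {
                    'name': 'demo',
                    'domain': {'name': 'Default'},
                    'password': 'secret',
                },
            },
        },
        'scope': {
            'project': {
                'name': 'demo',
                'domain': {'name': 'Default'},
            },
        },
    },
}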
Code example #3
 def _tag_check(self, tag, ids):
     try:
         if self._match_servers_tag(tag, ids):
             LOG.info(_LI('Check tags success!'))
     except retrying.RetryError:
         # just log that finding the server's tag failed
         LOG.info(_LI('Check tags failed!'))
Code example #4
    def handle_signal(self, details=None):
        # ceilometer sends details like this:
        # {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
        #  u'reason': u'...'})
        # in this policy we currently assume that this gets called
        # only when there is an alarm. But the template writer can
        # put the policy in all the alarm notifiers (nodata, and ok).
        #
        # our watchrule has upper case states so lower() them all.
        if details is None:
            alarm_state = 'alarm'
        else:
            alarm_state = details.get('current', details.get('state',
                                                             'alarm')).lower()

        LOG.info(_LI('Alarm %(name)s, new state %(state)s'), {
            'name': self.name,
            'state': alarm_state
        })

        if alarm_state != 'alarm':
            raise exception.NoActionRequired()
        if not self._is_scaling_allowed():
            LOG.info(
                _LI("%(name)s NOT performing scaling action, "
                    "cooldown %(cooldown)s"), {
                        'name': self.name,
                        'cooldown': self.properties[self.COOLDOWN]
                    })
            raise exception.NoActionRequired()

        asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
        group = self.stack.resource_by_refid(asgn_id)
        changed_size = False
        try:
            if group is None:
                raise exception.NotFound(
                    _('Alarm %(alarm)s could not find '
                      'scaling group named "%(group)s"') % {
                          'alarm': self.name,
                          'group': asgn_id
                      })

            LOG.info(
                _LI('%(name)s Alarm, adjusting Group %(group)s with id '
                    '%(asgn_id)s by %(filter)s'), {
                        'name': self.name,
                        'group': group.name,
                        'asgn_id': asgn_id,
                        'filter': self.properties[self.SCALING_ADJUSTMENT]
                    })
            changed_size = group.adjust(
                self.properties[self.SCALING_ADJUSTMENT],
                self.properties[self.ADJUSTMENT_TYPE],
                self.properties[self.MIN_ADJUSTMENT_STEP])
        finally:
            self._finished_scaling("%s : %s" %
                                   (self.properties[self.ADJUSTMENT_TYPE],
                                    self.properties[self.SCALING_ADJUSTMENT]),
                                   changed_size=changed_size)
Code example #5
File: eip.py Project: Hybrid-Cloud/conveyor
    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN]:
            ext_net = internet_gateway.InternetGateway.get_external_network_id(
                self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({'floatingip':
                                                    props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            LOG.info(_LI('ElasticIp create %s'), str(ips))
        else:
            try:
                ips = self.client().floating_ips.create()
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if self.client_plugin('nova').is_not_found(e):
                        LOG.error(
                            _LE("No default floating IP pool configured."
                                " Set 'default_floating_pool' in "
                                "nova.conf."))

            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                LOG.info(_LI('ElasticIp create %s'), str(ips))

        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.client().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())
Code example #6
File: user.py Project: Hybrid-Cloud/conveyor
    def _secret_accesskey(self):
        """Return the user's access key.

        Fetch it from keystone if necessary.
        """
        if self._secret is None:
            if not self.resource_id:
                LOG.info(_LI('could not get secret for %(username)s '
                             'Error:%(msg)s'),
                         {'username': self.properties[self.USER_NAME],
                          'msg': "resource_id not yet set"})
            else:
                # First try to retrieve the secret from resource_data, but
                # for backwards compatibility, fall back to requesting from
                # keystone
                self._secret = self.data().get('secret_key')
                if self._secret is None:
                    try:
                        user_id = self._get_user().resource_id
                        kp = self.keystone().get_ec2_keypair(
                            user_id=user_id, access=self.resource_id)
                        self._secret = kp.secret
                        # Store the key in resource_data
                        self.data_set('secret_key', kp.secret, redact=True)
                        # And the ID of the v3 credential
                        self.data_set('credential_id', kp.id, redact=True)
                    except Exception as ex:
                        LOG.info(_LI('could not get secret for %(username)s '
                                     'Error:%(msg)s'), {
                                 'username': self.properties[self.USER_NAME],
                                 'msg': ex})

        return self._secret or '000-000-000'
Code example #7
File: volume.py Project: Hybrid-Cloud/conveyor
    def _check_backup_restore_complete(self):
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'restoring-backup':
            LOG.debug("Volume %s is being restoring from backup" % vol.id)
            return False

        if vol.status != 'available':
            LOG.info(_LI("Restore failed: Volume %(vol)s is in %(status)s "
                         "state."), {'vol': vol.id, 'status': vol.status})
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume backup restore failed'))

        LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
        return True
Code example #8
def _do_ops(cinstances, opname, cnxt, stack, current_stack=None, action=None,
            is_stack_failure=None):
    success_count = 0
    failure = False
    failure_exception_message = None
    for ci in cinstances:
        op = getattr(ci, opname, None)
        if callable(op):
            try:
                if is_stack_failure is not None:
                    op(cnxt, stack, current_stack, action, is_stack_failure)
                else:
                    op(cnxt, stack, current_stack, action)
                success_count += 1
            except Exception as ex:
                LOG.exception(_LE(
                    "%(opname)s %(ci)s failed for %(a)s on %(sid)s"),
                    {'opname': opname, 'ci': type(ci),
                     'a': action, 'sid': stack.id})
                failure = True
                failure_exception_message = ex.args[0] if ex.args else str(ex)
                break
        LOG.info(_LI("done with class=%(c)s, stackid=%(sid)s, action=%(a)s"),
                 {'c': type(ci), 'sid': stack.id, 'a': action})
    return (failure, failure_exception_message, success_count)
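
A hypothetical caller of _do_ops would unpack the returned triple roughly as follows; the operation name 'pre_op' and the surrounding handling are illustrative, not taken from the source:

# Sketch of a caller; names and the chosen operation are examples only.
failure, failure_msg, success_count = _do_ops(
    cinstances, 'pre_op', cnxt, stack, current_stack, action)
if failure:
    LOG.warning(_LW('pre_op failed after %(n)d successful calls: %(msg)s'),
                {'n': success_count, 'msg': failure_msg})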
Code example #9
    def create_software_deployment(self,
                                   cnxt,
                                   server_id,
                                   config_id,
                                   input_values,
                                   action,
                                   status,
                                   status_reason,
                                   stack_user_project_id,
                                   deployment_id=None):
        if server_id and not isinstance(server_id, six.string_types):
            LOG.error(_LE('server_id %s must be a string.'), server_id)
            raise ValueError(_('server_id must be a string.'))

        if deployment_id is None:
            deployment_id = str(uuid.uuid4())
        sd = software_deployment_object.SoftwareDeployment.create(
            cnxt, {
                'id': deployment_id,
                'config_id': config_id,
                'server_id': server_id,
                'input_values': input_values,
                'tenant': cnxt.tenant_id,
                'stack_user_project_id': stack_user_project_id,
                'action': action,
                'status': status,
                'status_reason': status_reason
            })
        self._push_metadata_software_deployments(cnxt, server_id,
                                                 stack_user_project_id)
        return api.format_software_deployment(sd)
Code example #10
File: update.py Project: Hybrid-Cloud/conveyor
    def _process_new_resource_update(self, new_res):
        res_name = new_res.name

        if res_name in self.existing_stack:
            if type(self.existing_stack[res_name]) is type(new_res):
                existing_res = self.existing_stack[res_name]
                try:
                    yield self._update_in_place(existing_res,
                                                new_res)
                except exception.UpdateReplace:
                    pass
                else:
                    # Save updated resource definition to backup stack
                    # because it allows the backup stack resources to be
                    # synchronized
                    LOG.debug("Backing up updated Resource %s" % res_name)
                    definition = existing_res.t.reparse(self.previous_stack,
                                                        existing_res.stack.t)
                    self.previous_stack.t.add_resource(definition)
                    self.previous_stack.t.store(self.previous_stack.context)

                    LOG.info(_LI("Resource %(res_name)s for stack "
                                 "%(stack_name)s updated"),
                             {'res_name': res_name,
                              'stack_name': self.existing_stack.name})
                    return
            else:
                self._check_replace_restricted(new_res)

        yield self._create_resource(new_res)
Code example #11
File: environment.py Project: Hybrid-Cloud/conveyor
def read_global_environment(env, env_dir=None):
    if env_dir is None:
        cfg.CONF.import_opt('environment_dir',
                            'conveyor.conveyorheat.common.config')
        env_dir = cfg.CONF.environment_dir

    try:
        env_files = glob.glob(os.path.join(env_dir, '*'))
    except OSError as osex:
        LOG.error(_LE('Failed to read %s'), env_dir)
        LOG.exception(osex)
        return

    for file_path in env_files:
        try:
            with open(file_path) as env_fd:
                LOG.info(_LI('Loading %s'), file_path)
                env_body = env_fmt.parse(env_fd.read())
                env_fmt.default_for_missing(env_body)
                env.load(env_body)
        except ValueError as vex:
            LOG.error(_LE('Failed to parse %(file_path)s'), {
                      'file_path': file_path})
            LOG.exception(vex)
        except IOError as ioex:
            LOG.error(_LE('Failed to read %(file_path)s'), {
                      'file_path': file_path})
            LOG.exception(ioex)
Code example #12
    def adjust(self,
               adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """Adjust the size of the scaling group if the cooldown permits."""
        if not self._is_scaling_allowed():
            LOG.info(
                _LI("%(name)s NOT performing scaling adjustment, "
                    "cooldown %(cooldown)s"), {
                        'name': self.name,
                        'cooldown': self.properties[self.COOLDOWN]
                    })
            raise exception.NoActionRequired()

        capacity = grouputils.get_size(self)
        new_capacity = self._get_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step)

        changed_size = new_capacity != capacity
        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()
            },
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({
                        'suffix': 'error',
                        'message': six.text_type(resize_ex),
                        'capacity': grouputils.get_size(self),
                    })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']
                },
            })
            notification.send(**notif)
        finally:
            self._update_groupwatch()
            self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
                                   changed_size=changed_size)
        return changed_size
Code example #13
File: fault.py Project: Hybrid-Cloud/conveyor
    def _error(self, inner, req):
        if not isinstance(inner, exception.QuotaError):
            LOG.error(_LE("Caught error: %s"), inner)
        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            msg = (inner.msg if isinstance(inner, exception.V2vException)
                   else six.text_type(inner))
            params = {'exception': inner.__class__.__name__,
                      'explanation': msg}
            outer.explanation = _('%(exception)s: %(explanation)s') % params
        return wsgi.Fault(outer)
Code example #14
File: update.py Project: Hybrid-Cloud/conveyor
    def _process_new_resource_update(self, new_res):
        res_name = new_res.name

        if res_name in self.existing_stack:
            if type(self.existing_stack[res_name]) is type(new_res):
                existing_res = self.existing_stack[res_name]
                try:
                    yield self._update_in_place(existing_res, new_res)
                except exception.UpdateReplace:
                    pass
                else:
                    # Save updated resource definition to backup stack
                    # because it allows the backup stack resources to be
                    # synchronized
                    LOG.debug("Backing up updated Resource %s" % res_name)
                    definition = existing_res.t.reparse(
                        self.previous_stack, existing_res.stack.t)
                    self.previous_stack.t.add_resource(definition)
                    self.previous_stack.t.store(self.previous_stack.context)

                    LOG.info(
                        _LI("Resource %(res_name)s for stack "
                            "%(stack_name)s updated"), {
                                'res_name': res_name,
                                'stack_name': self.existing_stack.name
                            })
                    return
            else:
                self._check_replace_restricted(new_res)

        yield self._create_resource(new_res)
Code example #15
File: service.py Project: Hybrid-Cloud/conveyor
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'), {
            'topic': self.topic,
            'version_string': version_string
        })
        self.model_disconnected = False

        # 1.init host info
        # self.manager.init_host()
        # ctxt = context.get_admin_context()

        # 2.create service in db#####
        # try:
        #    service_ref = db.service_get_by_args(ctxt,
        #                                         self.host,
        #                                         self.binary)
        #    self.service_id = service_ref['id']
        # except exception.NotFound:
        #   self._create_service_ref(ctxt)

        # 3.start manager RPC service####

        LOG.debug("Creating RPC server for service %s", self.topic)
        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        self.rpcserver = rpc.get_server(target, endpoints)
        self.rpcserver.start()
Code example #16
    def prepare_ports_for_replace(self):
        if not self.is_using_neutron():
            return

        data = {'external_ports': [],
                'internal_ports': []}
        port_data = list(itertools.chain(
            [('internal_ports', port) for port in self._data_get_ports()],
            [('external_ports', port)
             for port in self._data_get_ports('external_ports')]))
        for port_type, port in port_data:
            data[port_type].append({'id': port['id']})

        # detach the ports from the server
        server_id = self.resource_id
        for port_type, port in port_data:
            try:
                self.client_plugin().interface_detach(server_id, port['id'])
            except nova_exceptions.Forbidden as exc:
                LOG.exception('Error while detaching interface: %s', str(exc))
                if 'deletion of primary port' in str(exc):
                    continue
                raise exc
            try:
                if self.client_plugin().check_interface_detach(
                        server_id, port['id']):
                    LOG.info(_LI('Detached interface %(port)s successfully '
                                 'from server %(server)s when preparing '
                                 'for replace.'),
                             {'port': port['id'],
                              'server': server_id})
            except retrying.RetryError:
                raise exception.InterfaceDetachFailed(
                    port=port['id'], server=server_id)
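
check_interface_detach itself is not shown here; the retrying.RetryError handled above is what the retrying library raises once its retry budget is exhausted. A minimal sketch of such a checker, with invented attempt counts and wait times, might look like this:

# Illustrative only: poll until the port is no longer attached, or give up.
from retrying import retry

@retry(stop_max_attempt_number=60, wait_fixed=500,
       retry_on_result=lambda detached: detached is False)
def check_interface_detach(nova_client, server_id, port_id):
    # Returning False tells retrying to try again; exhausting the
    # attempts raises retrying.RetryError to the caller.
    interfaces = nova_client.servers.interface_list(server_id)
    return all(iface.port_id != port_id for iface in interfaces)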
Code example #17
File: volume.py Project: Hybrid-Cloud/conveyor
    def _check_extend_volume_complete(self):
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'extending':
            LOG.debug("Volume %s is being extended" % vol.id)
            return False

        if vol.status != 'available':
            LOG.info(_LI("Resize failed: Volume %(vol)s "
                         "is in %(status)s state."),
                     {'vol': vol.id, 'status': vol.status})
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume resize failed'))

        LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
        return True
Code example #18
    def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
        container, object_name = urlparse.urlparse(
            deploy_signal_id).path.split('/')[-2:]
        swift_plugin = cnxt.clients.client_plugin('swift')
        swift = swift_plugin.client()

        try:
            headers = swift.head_object(container, object_name)
        except Exception as ex:
            # ignore not-found, in case swift is not consistent yet
            if swift_plugin.is_not_found(ex):
                LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
                    'c': container,
                    'o': object_name
                })
                return sd
            raise

        lm = headers.get('last-modified')

        last_modified = swift_plugin.parse_last_modified(lm)
        prev_last_modified = sd.updated_at

        if prev_last_modified:
            # assume stored as utc, convert to offset-naive datetime
            prev_last_modified = prev_last_modified.replace(tzinfo=None)

        if prev_last_modified and (last_modified <= prev_last_modified):
            return sd

        try:
            (headers, obj) = swift.get_object(container, object_name)
        except Exception as ex:
            # ignore not-found, in case swift is not consistent yet
            if swift_plugin.is_not_found(ex):
                LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
                    'c': container,
                    'o': object_name
                })
                return sd
            raise
        if obj:
            self.signal_software_deployment(cnxt, sd.id, jsonutils.loads(obj),
                                            last_modified.isoformat())

        return software_deployment_object.SoftwareDeployment.get_by_id(
            cnxt, sd.id)
Code example #19
    def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
        container, object_name = urlparse.urlparse(
            deploy_signal_id).path.split('/')[-2:]
        swift_plugin = cnxt.clients.client_plugin('swift')
        swift = swift_plugin.client()

        try:
            headers = swift.head_object(container, object_name)
        except Exception as ex:
            # ignore not-found, in case swift is not consistent yet
            if swift_plugin.is_not_found(ex):
                LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
                    'c': container, 'o': object_name})
                return sd
            raise

        lm = headers.get('last-modified')

        last_modified = swift_plugin.parse_last_modified(lm)
        prev_last_modified = sd.updated_at

        if prev_last_modified:
            # assume stored as utc, convert to offset-naive datetime
            prev_last_modified = prev_last_modified.replace(tzinfo=None)

        if prev_last_modified and (last_modified <= prev_last_modified):
            return sd

        try:
            (headers, obj) = swift.get_object(container, object_name)
        except Exception as ex:
            # ignore not-found, in case swift is not consistent yet
            if swift_plugin.is_not_found(ex):
                LOG.info(_LI(
                    'Signal object not found: %(c)s %(o)s'), {
                        'c': container, 'o': object_name})
                return sd
            raise
        if obj:
            self.signal_software_deployment(
                cnxt, sd.id, jsonutils.loads(obj),
                last_modified.isoformat())

        return software_deployment_object.SoftwareDeployment.get_by_id(
            cnxt, sd.id)
Code example #20
File: urlfetch.py Project: Hybrid-Cloud/conveyor
def get(url, allowed_schemes=('http', 'https')):
    """Get the data at the specified URL.

    The URL must use the http: or https: schemes.
    The file: scheme is also supported if you override
    the allowed_schemes argument.
    Raise an IOError if getting the data fails.
    """
    LOG.info(_LI('Fetching data from %s'), url)

    components = urllib.parse.urlparse(url)

    if components.scheme not in allowed_schemes:
        raise URLFetchError(_('Invalid URL scheme %s') % components.scheme)

    if components.scheme == 'file':
        try:
            return urllib.request.urlopen(url).read()
        except urllib.error.URLError as uex:
            raise URLFetchError(_('Failed to retrieve template: %s') % uex)

    try:
        resp = requests.get(url, stream=True)
        resp.raise_for_status()

        # We cannot use resp.text here because it would download the
        # entire file, and a large enough file would bring down the
        # engine.  The 'Content-Length' header could be faked, so it's
        # necessary to download the content in chunks until
        # max_template_size is reached.  The chunk_size we use needs
        # to balance CPU-intensive string concatenation with accuracy
        # (eg. it's possible to fetch 1000 bytes greater than
        # max_template_size with a chunk_size of 1000).
        reader = resp.iter_content(chunk_size=1000)
        result = b""
        for chunk in reader:
            result += chunk
            if len(result) > cfg.CONF.max_template_size:
                raise URLFetchError("Template exceeds maximum allowed size (%s"
                                    " bytes)" % cfg.CONF.max_template_size)
        return result

    except exceptions.RequestException as ex:
        LOG.info(_LI('Failed to retrieve template: %s') % ex)
        raise URLFetchError(_('Failed to retrieve template from %s') % url)
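
As the docstring says, file:// URLs are rejected unless allowed_schemes is overridden. A small usage sketch (both URLs are placeholders):

# Hypothetical calls to get(); the URLs are placeholders.
remote_body = get('https://example.com/templates/wordpress.yaml')
local_body = get('file:///tmp/local-template.yaml',
                 allowed_schemes=('http', 'https', 'file'))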
Code example #21
def get(url, allowed_schemes=('http', 'https')):
    """Get the data at the specified URL.

    The URL must use the http: or https: schemes.
    The file: scheme is also supported if you override
    the allowed_schemes argument.
    Raise an IOError if getting the data fails.
    """
    LOG.info(_LI('Fetching data from %s'), url)

    components = urllib.parse.urlparse(url)

    if components.scheme not in allowed_schemes:
        raise URLFetchError(_('Invalid URL scheme %s') % components.scheme)

    if components.scheme == 'file':
        try:
            return urllib.request.urlopen(url).read()
        except urllib.error.URLError as uex:
            raise URLFetchError(_('Failed to retrieve template: %s') % uex)

    try:
        resp = requests.get(url, stream=True)
        resp.raise_for_status()

        # We cannot use resp.text here because it would download the
        # entire file, and a large enough file would bring down the
        # engine.  The 'Content-Length' header could be faked, so it's
        # necessary to download the content in chunks until
        # max_template_size is reached.  The chunk_size we use needs
        # to balance CPU-intensive string concatenation with accuracy
        # (eg. it's possible to fetch 1000 bytes greater than
        # max_template_size with a chunk_size of 1000).
        reader = resp.iter_content(chunk_size=1000)
        result = b""
        for chunk in reader:
            result += chunk
            if len(result) > cfg.CONF.max_template_size:
                raise URLFetchError("Template exceeds maximum allowed size (%s"
                                    " bytes)" % cfg.CONF.max_template_size)
        return result

    except exceptions.RequestException as ex:
        LOG.info(_LI('Failed to retrieve template: %s') % ex)
        raise URLFetchError(_('Failed to retrieve template from %s') % url)
Code example #22
File: loadbalancer.py Project: Hybrid-Cloud/conveyor
 def get_parsed_template(self):
     if cfg.CONF.loadbalancer_template:
         with open(cfg.CONF.loadbalancer_template) as templ_fd:
             LOG.info(_LI('Using custom loadbalancer template %s'),
                      cfg.CONF.loadbalancer_template)
             contents = templ_fd.read()
     else:
         contents = lb_template_default
     return template_format.parse(contents)
Code example #23
File: loadbalancer.py Project: Hybrid-Cloud/conveyor
 def get_parsed_template(self):
     if cfg.CONF.loadbalancer_template:
         with open(cfg.CONF.loadbalancer_template) as templ_fd:
             LOG.info(_LI('Using custom loadbalancer template %s'),
                      cfg.CONF.loadbalancer_template)
             contents = templ_fd.read()
     else:
         contents = lb_template_default
     return template_format.parse(contents)
Code example #24
        def merge_signal_metadata(signal_data, latest_rsrc_metadata):
            signal_data = self.normalise_signal_data(signal_data,
                                                     latest_rsrc_metadata)

            if not self._metadata_format_ok(signal_data):
                LOG.info(_LI("Metadata failed validation for %s"), self.name)
                raise ValueError(_("Metadata format invalid"))

            new_entry = signal_data.copy()
            unique_id = new_entry.pop(self.UNIQUE_ID)

            new_rsrc_metadata = latest_rsrc_metadata.copy()
            if unique_id in new_rsrc_metadata:
                LOG.info(_LI("Overwriting Metadata item for id %s!"),
                         unique_id)
            new_rsrc_metadata.update({unique_id: new_entry})

            write_attempts.append(signal_data)
            return new_rsrc_metadata
Code example #25
File: instance.py Project: Hybrid-Cloud/conveyor
    def _resolve_attribute(self, name):
        res = None
        if name == self.AVAILABILITY_ZONE_ATTR:
            res = self._availability_zone()
        elif name in self.ATTRIBUTES[1:]:
            res = self._ipaddress()

        LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
                 {'name': self.name, 'attname': name, 'res': res})
        return six.text_type(res) if res else None
Code example #26
    def _wait(self, handle, started_at, timeout_in):
        if timeutils.is_older_than(started_at, timeout_in):
            exc = wc_base.WaitConditionTimeout(self, handle)
            LOG.info(_LI('%(name)s Timed out (%(timeout)s)'),
                     {'name': str(self), 'timeout': str(exc)})
            raise exc

        handle_status = handle.get_status()

        if any(s != handle.STATUS_SUCCESS for s in handle_status):
            failure = wc_base.WaitConditionFailure(self, handle)
            LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                     {'name': str(self), 'failure': str(failure)})
            raise failure

        if len(handle_status) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False
Code example #27
    def adjust(self, adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """Adjust the size of the scaling group if the cooldown permits."""
        if not self._is_scaling_allowed():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            raise exception.NoActionRequired()

        capacity = grouputils.get_size(self)
        new_capacity = self._get_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step)

        changed_size = new_capacity != capacity
        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  'capacity': grouputils.get_size(self),
                                  })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)
        finally:
            self._update_groupwatch()
            self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
                                   changed_size=changed_size)
        return changed_size
Code example #28
    def register(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.alias
        LOG.info(_LI('Loaded extension: %s'), alias)

        if alias in self.extensions:
            raise exception.Error("Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext
Code example #29
File: environment.py Project: Hybrid-Cloud/conveyor
 def log_resource_info(self, show_all=False, prefix=None):
     registry = self._registry
     prefix = '%s ' % prefix if prefix is not None else ''
     for name in registry:
         if name == 'resources':
             continue
         if show_all or isinstance(registry[name], TemplateResourceInfo):
             msg = (_LI('%(p)sRegistered: %(t)s') %
                    {'p': prefix,
                     't': six.text_type(registry[name])})
             LOG.info(msg)
Code example #30
File: extensions.py Project: Hybrid-Cloud/conveyor
    def register(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.alias
        LOG.info(_LI('Loaded extension: %s'), alias)

        if alias in self.extensions:
            raise exception.Error("Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext
Code example #31
File: worker.py Project: Hybrid-Cloud/conveyor
    def stop(self):
        # Stop rpc connection at first for preventing new requests
        LOG.info(_LI("Stopping %(topic)s in engine %(engine)s."),
                 {'topic': self.topic, 'engine': self.engine_id})
        try:
            self._rpc_server.stop()
            self._rpc_server.wait()
        except Exception as e:
            LOG.error(_LE("%(topic)s failed to stop: %(exc)s"),
                      {'topic': self.topic, 'exc': e})

        super(WorkerService, self).stop()
Code example #32
File: swiftsignal.py Project: Hybrid-Cloud/conveyor
    def check_create_complete(self, create_data):
        if timeutils.is_older_than(*create_data):
            raise SwiftSignalTimeout(self)

        statuses = self.get_status()
        if not statuses:
            return False

        for status in statuses:
            if status == self.STATUS_FAILURE:
                failure = SwiftSignalFailure(self)
                LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                         {'name': str(self), 'failure': str(failure)})
                raise failure
            elif status != self.STATUS_SUCCESS:
                raise exception.Error(_("Unknown status: %s") % status)

        if len(statuses) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False
Code example #33
File: watchrule.py Project: Hybrid-Cloud/conveyor
 def rule_actions(self, new_state):
     LOG.info(_LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
                  'new_state:%(new_state)s'), {'stack': self.stack_id,
                                               'watch_name': self.name,
                                               'new_state': new_state})
     actions = []
     if self.ACTION_MAP[new_state] not in self.rule:
         LOG.info(_LI('no action for new state %s'), new_state)
     else:
         s = stack_object.Stack.get_by_id(
             self.context,
             self.stack_id,
             eager_load=True)
         stk = stack.Stack.load(self.context, stack=s)
         if (stk.action != stk.DELETE
                 and stk.status == stk.COMPLETE):
             for refid in self.rule[self.ACTION_MAP[new_state]]:
                 actions.append(stk.resource_by_refid(refid).signal)
         else:
             LOG.warning(_LW("Could not process watch state %s for stack"),
                         new_state)
     return actions
Code example #34
File: instance.py Project: Hybrid-Cloud/conveyor
    def _resolve_attribute(self, name):
        res = None
        if name == self.AVAILABILITY_ZONE_ATTR:
            res = self._availability_zone()
        elif name in self.ATTRIBUTES[1:]:
            res = self._ipaddress()

        LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'), {
            'name': self.name,
            'attname': name,
            'res': res
        })
        return six.text_type(res) if res else None
Code example #35
File: user.py Project: Hybrid-Cloud/conveyor
    def _secret_accesskey(self):
        """Return the user's access key.

        Fetch it from keystone if necessary.
        """
        if self._secret is None:
            if not self.resource_id:
                LOG.info(
                    _LI('could not get secret for %(username)s '
                        'Error:%(msg)s'), {
                            'username': self.properties[self.USER_NAME],
                            'msg': "resource_id not yet set"
                        })
            else:
                # First try to retrieve the secret from resource_data, but
                # for backwards compatibility, fall back to requesting from
                # keystone
                self._secret = self.data().get('secret_key')
                if self._secret is None:
                    try:
                        user_id = self._get_user().resource_id
                        kp = self.keystone().get_ec2_keypair(
                            user_id=user_id, access=self.resource_id)
                        self._secret = kp.secret
                        # Store the key in resource_data
                        self.data_set('secret_key', kp.secret, redact=True)
                        # And the ID of the v3 credential
                        self.data_set('credential_id', kp.id, redact=True)
                    except Exception as ex:
                        LOG.info(
                            _LI('could not get secret for %(username)s '
                                'Error:%(msg)s'),
                            {
                                'username': self.properties[self.USER_NAME],
                                'msg': ex
                            })

        return self._secret or '000-000-000'
Code example #36
    def _wait(self, handle, started_at, timeout_in):
        if timeutils.is_older_than(started_at, timeout_in):
            exc = wc_base.WaitConditionTimeout(self, handle)
            LOG.info(_LI('%(name)s Timed out (%(timeout)s)'), {
                'name': str(self),
                'timeout': str(exc)
            })
            raise exc

        handle_status = handle.get_status()

        if any(s != handle.STATUS_SUCCESS for s in handle_status):
            failure = wc_base.WaitConditionFailure(self, handle)
            LOG.info(_LI('%(name)s Failed (%(failure)s)'), {
                'name': str(self),
                'failure': str(failure)
            })
            raise failure

        if len(handle_status) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False
Code example #37
File: swiftsignal.py Project: Hybrid-Cloud/conveyor
    def check_create_complete(self, create_data):
        if timeutils.is_older_than(*create_data):
            raise SwiftSignalTimeout(self)

        statuses = self.get_status()
        if not statuses:
            return False

        for status in statuses:
            if status == self.STATUS_FAILURE:
                failure = SwiftSignalFailure(self)
                LOG.info(_LI('%(name)s Failed (%(failure)s)'), {
                    'name': str(self),
                    'failure': str(failure)
                })
                raise failure
            elif status != self.STATUS_SUCCESS:
                raise exception.Error(_("Unknown status: %s") % status)

        if len(statuses) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False
Code example #38
 def rule_actions(self, new_state):
     LOG.info(
         _LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
             'new_state:%(new_state)s'), {
                 'stack': self.stack_id,
                 'watch_name': self.name,
                 'new_state': new_state
             })
     actions = []
     if self.ACTION_MAP[new_state] not in self.rule:
         LOG.info(_LI('no action for new state %s'), new_state)
     else:
         s = stack_object.Stack.get_by_id(self.context,
                                          self.stack_id,
                                          eager_load=True)
         stk = stack.Stack.load(self.context, stack=s)
         if (stk.action != stk.DELETE and stk.status == stk.COMPLETE):
             for refid in self.rule[self.ACTION_MAP[new_state]]:
                 actions.append(stk.resource_by_refid(refid).signal)
         else:
             LOG.warning(_LW("Could not process watch state %s for stack"),
                         new_state)
     return actions
Code example #39
    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None

        """
        LOG.info(_LI("Stopping WSGI server."))
        if self._server is not None:
            # Resize pool to stop new requests from being processed
            self._pool.resize(0)
            self._server.kill()
Code example #40
    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None

        """
        try:
            if self._server is not None:
                self._pool.waitall()
                self._server.wait()
        except greenlet.GreenletExit:
            LOG.info(_LI("WSGI server has stopped."))
Code example #41
File: ha_restarter.py Project: Hybrid-Cloud/conveyor
    def handle_signal(self, details=None):
        if details is None:
            alarm_state = 'alarm'
        else:
            alarm_state = details.get('state', 'alarm').lower()

        LOG.info(_LI('%(name)s Alarm, new state %(state)s'),
                 {'name': self.name, 'state': alarm_state})

        if alarm_state != 'alarm':
            return

        target_id = self.properties[self.INSTANCE_ID]
        victim = self.stack.resource_by_refid(target_id)
        if victim is None:
            LOG.info(_LI('%(name)s Alarm, can not find instance '
                         '%(instance)s'),
                     {'name': self.name,
                      'instance': target_id})
            return

        LOG.info(_LI('%(name)s Alarm, restarting resource: %(victim)s'),
                 {'name': self.name, 'victim': victim.name})
        self.stack.restart_resource(victim.name)
Code example #42
File: worker.py Project: Hybrid-Cloud/conveyor
    def start(self):
        target = oslo_messaging.Target(
            version=self.RPC_API_VERSION,
            server=self.host,
            topic=self.topic)
        self.target = target
        LOG.info(_LI("Starting %(topic)s (%(version)s) in engine %(engine)s."),
                 {'topic': self.topic,
                  'version': self.RPC_API_VERSION,
                  'engine': self.engine_id})

        # self._rpc_server = rpc_messaging.get_rpc_server(target, self)
        # self._rpc_server.start()

        super(WorkerService, self).start()
Code example #43
    def start(self):
        target = oslo_messaging.Target(version=self.RPC_API_VERSION,
                                       server=self.host,
                                       topic=self.topic)
        self.target = target
        LOG.info(
            _LI("Starting %(topic)s (%(version)s) in engine %(engine)s."), {
                'topic': self.topic,
                'version': self.RPC_API_VERSION,
                'engine': self.engine_id
            })

        # self._rpc_server = rpc_messaging.get_rpc_server(target, self)
        # self._rpc_server.start()

        super(WorkerService, self).start()
Code example #44
    def stop(self):
        # Stop rpc connection at first for preventing new requests
        LOG.info(_LI("Stopping %(topic)s in engine %(engine)s."), {
            'topic': self.topic,
            'engine': self.engine_id
        })
        try:
            self._rpc_server.stop()
            self._rpc_server.wait()
        except Exception as e:
            LOG.error(_LE("%(topic)s failed to stop: %(exc)s"), {
                'topic': self.topic,
                'exc': e
            })

        super(WorkerService, self).stop()
Code example #45
    def _create(self):
        con = self.context

        volume_api_version = self.get_volume_api_version()
        if cfg.CONF.FusionSphere.pubcloud:
            service_type = self.EVS
            client_version = '2'
        elif volume_api_version == 1:
            service_type = self.VOLUME
            client_version = '1'
        elif volume_api_version == 2:
            service_type = self.VOLUME_V2
            client_version = '2'
        else:
            raise exception.Error(_('No volume service available.'))
        LOG.info(_LI('Creating Cinder client with volume API version %d.'),
                 volume_api_version)

        endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
        args = {
            'service_type': service_type,
            'auth_url': con.auth_url or '',
            'project_id': con.tenant_id,
            'username': None,
            'api_key': None,
            'endpoint_type': endpoint_type,
            'http_log_debug': self._get_client_option(CLIENT_NAME,
                                                      'http_log_debug'),
            'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
            'insecure': self._get_client_option(CLIENT_NAME, 'insecure'),
            'timeout': self._get_client_option(CLIENT_NAME, 'timeout')
        }

        client = cc.Client(client_version, **args)
        management_url = self.url_for(service_type=service_type,
                                      endpoint_type=endpoint_type)
        client.client.auth_token = self.auth_token
        client.client.management_url = management_url

        if cfg.CONF.FusionSphere.pubcloud:
            client.volume_api_version = 2
        else:
            client.volume_api_version = volume_api_version

        return client
Code example #46
    def check_attach_volume_complete(self, vol_id):
        vol = self.client().volumes.get(vol_id)
        if vol.status in ('available', 'attaching'):
            LOG.debug("Volume %(id)s is being attached - "
                      "volume status: %(status)s" % {'id': vol_id,
                                                     'status': vol.status})
            return False

        if vol.status != 'in-use':
            LOG.debug("Attachment failed - volume %(vol)s is "
                      "in %(status)s status" % {"vol": vol_id,
                                                "status": vol.status})
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume attachment failed'))

        LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id})
        return True
Code example #47
    def __init__(self, ip, port, conn_timeout, login, password=None,
                 privatekey=None, *args, **kwargs):
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.conn_timeout = conn_timeout if conn_timeout else None
        self.privatekey = privatekey
        self.hosts_key_file = None

        # Validate good config setting here.
        # Paramiko handles the case where the file is inaccessible.
        if not CONF.ssh_hosts_key_file:
            raise exception.ParameterNotFound(param='ssh_hosts_key_file')
        elif not os.path.isfile(CONF.ssh_hosts_key_file):
            # If using the default path, just create the file.
            if CONF.state_path in CONF.ssh_hosts_key_file:
                open(CONF.ssh_hosts_key_file, 'a').close()
            else:
                msg = (_("Unable to find ssh_hosts_key_file: %s") %
                       CONF.ssh_hosts_key_file)
                raise exception.InvalidInput(reason=msg)

        if 'hosts_key_file' in kwargs.keys():
            self.hosts_key_file = kwargs.pop('hosts_key_file')
            LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
                         "loaded along with %(conf)s "
                         "from /etc/conveyor.conf."),
                     {'kwargs': self.hosts_key_file,
                      'conf': CONF.ssh_hosts_key_file})

        LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' "
                  "using ssh_hosts_key_file '%(key_file)s'.",
                  {'policy': CONF.strict_ssh_host_key_policy,
                   'key_file': CONF.ssh_hosts_key_file})

        self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy

        if not self.hosts_key_file:
            self.hosts_key_file = CONF.ssh_hosts_key_file
        else:
            self.hosts_key_file += ',' + CONF.ssh_hosts_key_file

        super(SSHPool, self).__init__(*args, **kwargs)
Code example #48
File: nova.py Project: Hybrid-Cloud/conveyor
    def check_detach_volume_complete(self, server_id, attach_id):
        """Check that nova server lost attachment.

        This check is needed for immediate reattachment when updating:
        there might be some time between cinder marking volume as 'available'
        and nova removing attachment from its own objects, so we
        check that nova already knows that the volume is detached.
        """
        try:
            self.client().volumes.get_server_volume(server_id, attach_id)
        except Exception as ex:
            self.ignore_not_found(ex)
            LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
                     {'vol': attach_id, 'srv': server_id})
            return True
        else:
            LOG.debug("Server %(srv)s still has attachment %(att)s." % {
                'att': attach_id, 'srv': server_id})
            return False
Code example #49
File: nova.py Project: Hybrid-Cloud/conveyor
    def check_detach_volume_complete(self, server_id, attach_id):
        """Check that nova server lost attachment.

        This check is needed for immediate reattachment when updating:
        there might be some time between cinder marking volume as 'available'
        and nova removing attachment from its own objects, so we
        check that nova already knows that the volume is detached.
        """
        try:
            self.client().volumes.get_server_volume(server_id, attach_id)
        except Exception as ex:
            self.ignore_not_found(ex)
            LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"), {
                'vol': attach_id,
                'srv': server_id
            })
            return True
        else:
            LOG.debug("Server %(srv)s still has attachment %(att)s." % {
                'att': attach_id,
                'srv': server_id
            })
            return False
Code example #50
    def create_software_deployment(self, cnxt, server_id, config_id,
                                   input_values, action, status,
                                   status_reason, stack_user_project_id,
                                   deployment_id=None):
        if server_id and not isinstance(server_id, six.string_types):
            LOG.error(_LE('server_id %s must be a string.'), server_id)
            raise ValueError(_('server_id must be a string.'))

        if deployment_id is None:
            deployment_id = str(uuid.uuid4())
        sd = software_deployment_object.SoftwareDeployment.create(cnxt, {
            'id': deployment_id,
            'config_id': config_id,
            'server_id': server_id,
            'input_values': input_values,
            'tenant': cnxt.tenant_id,
            'stack_user_project_id': stack_user_project_id,
            'action': action,
            'status': status,
            'status_reason': status_reason})
        self._push_metadata_software_deployments(
            cnxt, server_id, stack_user_project_id)
        return api.format_software_deployment(sd)
Code example #51
File: scheduler.py Project: Hybrid-Cloud/conveyor
    def step(self):
        """Run another step of the task.

        Return True if the task is complete; False otherwise.
        """
        if not self.done():
            assert self._runner is not None, "Task not started"

            if self._timeout is not None and self._timeout.expired():
                LOG.info(_LI('%s timed out'), six.text_type(self))
                self._done = True

                self._timeout.trigger(self._runner)
            else:
                LOG.debug('%s running' % six.text_type(self))

                try:
                    next(self._runner)
                except StopIteration:
                    self._done = True
                    LOG.debug('%s complete' % six.text_type(self))

        return self._done
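
A hypothetical driver of such a task, assuming it has already been started, simply keeps calling step() until it reports completion:

# Illustrative polling loop; the sleep interval is an arbitrary choice.
import eventlet

while not task.step():
    eventlet.sleep(1)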
Code example #52
File: extensions.py Project: Hybrid-Cloud/conveyor
    def __init__(self):
        LOG.info(_LI('Initializing extension manager.'))

        self.cls_list = CONF.osapi_birdie_extension
        self.extensions = {}
        self._load_extensions()