Example #1
    def pre_op(self, cluster_id, action, policy_data):
        nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
        current_size = len(nodes)

        if self.adjustment_type == self.EXACT_CAPACITY:
            count = self.adjustment_number - current_size
        elif self.adjustment_type == self.CHANGE_IN_CAPACITY:
            count = self.adjustment_number
        elif self.adjustment_type == self.CHANGE_IN_PERCENTAGE:
            count = int((self.adjustment_number * current_size) / 100.0)
            if count < self.adjustment_min_step:
                count = self.adjustment_min_step

        if current_size + count > self.max_size:
            policy_data.status = base.CHECK_ERROR
            policy_data.reason = _('Attempted scaling exceeds maximum size')
        elif current_size + count < self.min_size:
            policy_data.status = base.CHECK_ERROR
            policy_data.reason = _('Attempted scaling below minimum size')
        else:
            policy_data.status = base.CHECK_OK
            policy_data.reason = _('Scaling request validated')

        pd = {'count': count}
        if action.action == consts.CLUSTER_SCALE_OUT:
            if count < 0:
                LOG.warning(_LW('Requesting a scale out operation but scaling '
                                'policy generates a negative count.'))
            policy_data['creation'] = pd
        elif action.action == consts.CLUSTER_SCALE_IN:
            if count > 0:
                LOG.warning(_LW('Requesting a scale in operation but scaling '
                                'policy generates a positive count.'))
            policy_data['deletion'] = pd

        return policy_data
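
A minimal, self-contained sketch of the count arithmetic above; the constant names mirror the policy attributes, but the helper itself is an assumption added for illustration:

    EXACT_CAPACITY = 'EXACT_CAPACITY'
    CHANGE_IN_CAPACITY = 'CHANGE_IN_CAPACITY'
    CHANGE_IN_PERCENTAGE = 'CHANGE_IN_PERCENTAGE'

    def compute_count(adjustment_type, number, current_size, min_step=1):
        """Return the node-count delta implied by a scaling adjustment."""
        if adjustment_type == EXACT_CAPACITY:
            return number - current_size
        if adjustment_type == CHANGE_IN_CAPACITY:
            return number
        # CHANGE_IN_PERCENTAGE: truncate toward zero, then apply min_step.
        count = int((number * current_size) / 100.0)
        return max(count, min_step)

    assert compute_count(EXACT_CAPACITY, 10, 7) == 3
    assert compute_count(CHANGE_IN_CAPACITY, -2, 7) == -2
    assert compute_count(CHANGE_IN_PERCENTAGE, 10, 5, min_step=1) == 1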
Example #2
    def do_update(self, context, new_profile_id, **kwargs):
        '''Additional logic at the beginning of the cluster update process.

        Check profile and set cluster status to UPDATING.
        '''
        # Profile type checking is done here because the do_update logic can
        # be triggered from API or Webhook
        if not new_profile_id:
            raise exception.ProfileNotSpecified()

        if new_profile_id == self.profile_id:
            return True

        new_profile = db_api.get_profile(context, new_profile_id)
        if not new_profile:
            event_mod.warning(context, self, 'update',
                              _LW('Cluster cannot be updated to a profile '
                                  'that does not exist'))
            return False

        # Check if profile types match
        old_profile = db_api.get_profile(context, self.profile_id)
        if old_profile.type != new_profile.type:
            event_mod.warning(context, self, 'update',
                              _LW('Cluster cannot be updated to a different '
                                  'profile type (%(oldt)s->%(newt)s)') % {
                                      'oldt': old_profile.type,
                                      'newt': new_profile.type})
            return False

        self.set_status(self.UPDATING)
        return True
Example #3
    def _register_info(self, name, info):
        '''Place the new info in the correct location in the registry.

        :param name: the name of the plugin (a string).
        :param info: reference to a PluginInfo data structure; deregister the
                     PluginInfo if specified as None.
        '''
        registry = self._registry
        if info is None:
            # delete this entry.
            LOG.warning(_LW('Removing %(item)s from registry'), {'item': name})
            registry.pop(name, None)
            return

        if name in registry and isinstance(registry[name], PluginInfo):
            if registry[name] == info:
                return
            details = {
                'name': name,
                'old': str(registry[name].plugin),
                'new': str(info.plugin)
            }
            LOG.warning(_LW('Changing %(name)s from %(old)s to %(new)s'),
                        details)
        else:
            LOG.info(_LI('Registering %(name)s -> %(value)s'), {
                'name': name, 'value': str(info.plugin)})

        info.user_provided = not self.is_global
        registry[name] = info
Example #4
    def _register_info(self, name, info):
        '''Place the new info in the correct location in the registry.

        :param name: the name of the plugin (a string).
        :param info: reference to a PluginInfo data structure; deregister the
                     PluginInfo if specified as None.
        '''
        registry = self._registry
        if info is None:
            # delete this entry.
            msg = _LW("Removing %(item)s from registry")
            LOG.warning(msg, {'item': name})
            registry.pop(name, None)
            return

        if name in registry and isinstance(registry[name], PluginInfo):
            if registry[name] == info:
                return
            details = {
                'name': name,
                'old': str(registry[name].plugin),
                'new': str(info.plugin)
            }
            LOG.warning(_LW('Changing %(name)s from %(old)s to %(new)s'),
                        details)
        else:
            msg = _LI('Registering %(name)s -> %(value)s')
            LOG.info(msg, {'name': name, 'value': str(info.plugin)})

        info.user_provided = not self.is_global
        registry[name] = info
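
Since the two variants above differ only in how the message is bound, a self-contained sketch may help show each branch in action; the toy PluginInfo and Registry below are assumptions for illustration, not Senlin's classes:

    import logging

    LOG = logging.getLogger(__name__)

    class PluginInfo(object):
        """Toy stand-in exposing just the attributes used by _register_info."""
        def __init__(self, plugin):
            self.plugin = plugin
            self.user_provided = False

        def __eq__(self, other):
            return isinstance(other, PluginInfo) and self.plugin == other.plugin

    class Registry(object):
        def __init__(self, is_global=False):
            self._registry = {}
            self.is_global = is_global

        def _register_info(self, name, info):
            # Same branch structure as above, minus the i18n markers.
            if info is None:
                LOG.warning('Removing %(item)s from registry', {'item': name})
                self._registry.pop(name, None)
                return
            old = self._registry.get(name)
            if isinstance(old, PluginInfo):
                if old == info:
                    return
                LOG.warning('Changing %(name)s from %(old)s to %(new)s',
                            {'name': name, 'old': old.plugin,
                             'new': info.plugin})
            else:
                LOG.info('Registering %(name)s -> %(value)s',
                         {'name': name, 'value': info.plugin})
            info.user_provided = not self.is_global
            self._registry[name] = info

    reg = Registry()
    reg._register_info('web', PluginInfo('nginx'))   # registers
    reg._register_info('web', PluginInfo('nginx'))   # equal info: no-op
    reg._register_info('web', PluginInfo('apache'))  # logs the change
    reg._register_info('web', None)                  # deregisters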
Example #5
    def _start_check(self, entry):
        """Routine to call for starting the checking for a cluster.

        @param entry: A dict containing the data associated with the cluster.
        @return: An updated registry entry record.
        """
        if entry['check_type'] == consts.NODE_STATUS_POLLING:
            interval = min(entry['interval'], cfg.CONF.periodic_interval_max)
            timer = self.TG.add_timer(interval, self._poll_cluster, None,
                                      entry['cluster_id'])
            entry['timer'] = timer
        elif entry['check_type'] == consts.VM_LIFECYCLE_EVENTS:
            LOG.info(_LI("Start listening events for cluster (%s)."),
                     entry['cluster_id'])
            listener = self._add_listener(entry['cluster_id'])
            if listener:
                entry['listener'] = listener
            else:
                return None
        else:
            LOG.warning(_LW("Cluster (%(id)s) check type (%(type)s) is "
                            "invalid."),
                        {'id': entry['cluster_id'],
                         'type': entry['check_type']})
            return None

        return entry
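
The polling branch relies on oslo.service's ThreadGroup timers; a hedged sketch of that call pattern follows, with a toy callback and placeholder interval and cluster id:

    from oslo_service import threadgroup

    def poll_cluster(cluster_id):
        # Toy callback; the real _poll_cluster checks node health.
        print('polling %s' % cluster_id)

    tg = threadgroup.ThreadGroup()
    # add_timer(interval, callback, initial_delay, *args): positional args
    # after the initial delay are forwarded to the callback on every tick,
    # matching the add_timer(interval, self._poll_cluster, None,
    # entry['cluster_id']) call above.
    timer = tg.add_timer(60, poll_cluster, None, 'cluster-uuid')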
Example #6
    def do_update(self, context, new_profile_id):
        if not new_profile_id:
            raise exception.ProfileNotSpecified()

        if new_profile_id == self.profile_id:
            return True

        if not self.physical_id:
            return False

        # Check if profile types match
        old_profile = db_api.get_profile(context, self.profile_id)
        new_profile = db_api.get_profile(context, new_profile_id)
        if old_profile.type != new_profile.type:
            event_mod.warning(context, self, 'update',
                              _LW('Node cannot be updated to a different '
                                  'profile type (%(oldt)s->%(newt)s)') %
                              {'oldt': old_profile.type,
                               'newt': new_profile.type})
            return False

        res = profile_base.update_object(self, new_profile_id)
        if res:
            self.rt['profile'] = profile_base.load(context,
                                                   new_profile_id)
            self.profile_id = new_profile_id
            self.updated_time = datetime.datetime.utcnow()
            self.store()

        return res
Example #7
    def _start_check(self, entry):
        """Routine to call for starting the checking for a cluster.

        @param entry: A dict containing the data associated with the cluster.
        @return: An updated registry entry record.
        """
        if entry['check_type'] == consts.NODE_STATUS_POLLING:
            interval = min(entry['interval'], cfg.CONF.periodic_interval_max)
            timer = self.TG.add_timer(interval, self._poll_cluster, None,
                                      entry['cluster_id'])
            entry['timer'] = timer
        elif entry['check_type'] == consts.VM_LIFECYCLE_EVENTS:
            LOG.info(_LI("Start listening events for cluster (%s)."),
                     entry['cluster_id'])
            listener = self._add_listener(entry['cluster_id'])
            if listener:
                entry['listener'] = listener
            else:
                return None
        else:
            LOG.warn(_LW("Cluster (%(id)s) check type (%(type)s) is invalid."),
                     {'id': entry['cluster_id'], 'type': entry['check_type']})
            return None

        return entry
Example #8
    def _remove_children(self, pid):

        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s'), pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s'), pid)
        else:
            LOG.warning(_LW('Unrecognized child %s'), pid)
Example #9
    def _remove_children(self, pid):

        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s'), pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s'), pid)
        else:
            LOG.warning(_LW('Unrecognized child %s'), pid)
Example #10
    def post_op(self, cluster_id, action):
        """Routine to be called after an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        # TODO(Yanyanhu): Need special handling for cross-az scenario
        # which is supported by Neutron lbaas.
        if action.action == consts.NODE_CREATE:
            nodes_added = [action.node.id]
        else:
            creation = action.data.get('creation', None)
            nodes_added = creation.get('nodes', []) if creation else []
            if len(nodes_added) == 0:
                return

        db_cluster = co.Cluster.get(action.context, cluster_id)
        lb_driver = self.lbaas(db_cluster.user, db_cluster.project)
        lb_driver.lb_status_timeout = self.lb_status_timeout
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']
        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        # Add new nodes to lb pool
        for node_id in nodes_added:
            node = nm.Node.load(action.context, node_id=node_id)
            member_id = node.data.get('lb_member', None)
            if member_id:
                LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'), {
                    'n': node_id,
                    'p': pool_id
                })
                continue

            member_id = lb_driver.member_add(node, lb_id, pool_id, port,
                                             subnet)
            if member_id is None:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in adding new node(s) '
                                          'into lb pool.')
                return

            node.data.update({'lb_member': member_id})
            node.store(action.context)

        return
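
The 'creation' key consulted above is a small data contract between scaling policies (which write it, as in Example #1) and this load-balancer policy (which reads it). A sketch of the shape this code expects, with illustrative values:

    # Shape of action.data consumed by post_op above (values illustrative).
    action_data = {
        'creation': {
            'count': 2,
            'nodes': ['node-uuid-1', 'node-uuid-2'],  # new members to add
        },
        'status': 'OK',    # overwritten with base.CHECK_ERROR on failure
        'reason': '',
    }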
Example #11
    def post_op(self, cluster_id, action):
        """Routine to be called after an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """

        # TODO(Yanyanhu): Need special handling for cross-az scenario
        # which is supported by Neutron lbaas.
        if action.action == consts.NODE_CREATE:
            nodes_added = [action.node.id]
        else:
            creation = action.data.get('creation', None)
            nodes_added = creation.get('nodes', []) if creation else []
            if len(nodes_added) == 0:
                return

        db_cluster = co.Cluster.get(action.context, cluster_id)
        params = self._build_conn_params(db_cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']
        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        # Add new nodes to lb pool
        for node_id in nodes_added:
            node = nm.Node.load(action.context, node_id=node_id)
            member_id = node.data.get('lb_member', None)
            if member_id:
                LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'),
                            {'n': node_id, 'p': pool_id})
                continue

            member_id = lb_driver.member_add(node, lb_id, pool_id, port,
                                             subnet)
            if member_id is None:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in adding new node(s) '
                                          'into lb pool.')
                return

            node.data.update({'lb_member': member_id})
            node.store(action.context)

        return
Example #12
 def release(self, target_id):
     """Release a target lock."""
     # Only the engine that owns the lock will be releasing it.
     result = self.lock_release(target_id, self.engine_id)
     if result is True:
         LOG.warn(_LW("Lock was already released on %(target_type) "
                      "%(target)s!"), {'target_type': self.target_type,
                                       'target': target_id})
     else:
         LOG.debug("Engine %(engine)s released lock on %(target_type)s "
                   "%(target)s" % {'engine': self.engine_id,
                                   'target_type': self.target_type,
                                   'target': target_id})
Example #13
    def _add_listener(self, cluster_id):
        """Routine to be executed for adding cluster listener.

        @param cluster_id: The UUID of the cluster to be filtered.
        @return: The listener thread created, or None if the cluster is not
                 found.
        """
        cluster = objects.Cluster.get(self.ctx, cluster_id)
        if not cluster:
            LOG.warn(_LW("Cluster (%s) is not found."), cluster_id)
            return

        project = cluster.project
        return self.TG.add_thread(ListenerProc, 'nova', project, cluster_id)
Example #14
    def _add_listener(self, cluster_id):
        """Routine to be executed for adding cluster listener.

        @param cluster_id: The UUID of the cluster to be filtered.
        @return: The listener thread created, or None if the cluster is not
                 found.
        """
        cluster = objects.Cluster.get(self.ctx, cluster_id)
        if not cluster:
            LOG.warning(_LW("Cluster (%s) is not found."), cluster_id)
            return

        project = cluster.project
        return self.TG.add_thread(ListenerProc, 'nova', project, cluster_id)
Example #15
def warning(context, entity, action, status=None, status_reason=None,
            timestamp=None):
    timestamp = timestamp or timeutils.utcnow(True)
    event = Event(timestamp, logging.WARNING, entity,
                  action=action, status=status, status_reason=status_reason,
                  user=context.user, project=context.project)
    event.store(context)
    LOG.warning(_LW('%(name)s [%(id)s] %(action)s - %(status)s: %(reason)s'),
                {'name': event.oname,
                 'id': event.oid and event.oid[:8],
                 'action': action,
                 'status': status,
                 'reason': status_reason})
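
Note the lazy-formatting style used here: the substitution dict is passed as a separate argument to LOG.warning, so interpolation is skipped entirely when the WARNING level is disabled. A minimal sketch of the two styles:

    import logging

    LOG = logging.getLogger(__name__)

    params = {'name': 'node1', 'id': 'abcd1234'}

    # Lazy (preferred): formatted only if a handler accepts WARNING.
    LOG.warning('%(name)s [%(id)s] update failed', params)

    # Eager: the string is always built, even when the record is discarded.
    LOG.warning('%(name)s [%(id)s] update failed' % params)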
Example #16
def load_dispatcher():
    """Load dispatchers."""
    global dispatchers

    LOG.debug("Loading dispatchers")
    dispatchers = named.NamedExtensionManager(
        namespace="senlin.dispatchers",
        names=cfg.CONF.event_dispatchers,
        invoke_on_load=True,
        propagate_map_exceptions=True)
    if not list(dispatchers):
        LOG.warning(_LW("No dispatchers configured for 'senlin.dispatchers'"))
    else:
        LOG.info(_LI("Loaded dispatchers: %s"), dispatchers.names())
Example #17
    def validate_regions(self, regions):
        """Check whether the given regions are valid.

        :param regions: A list of regions for validation.
        :returns: A list of regions that are found available on keystone.
        """
        known = [r['id'] for r in self.region_list()]

        validated = []
        for r in regions:
            if r in known:
                validated.append(r)
            else:
                LOG.warning(_LW('Region %s is not found.'), r)

        return validated
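
Since membership tests against `known` are linear, a set-based variant of the same check can be cheaper for large region lists; a minor sketch assuming the same module context (LOG and _LW), not the project's code:

    def validate_regions(self, regions):
        """Order-preserving variant using a set for O(1) lookups."""
        known = {r['id'] for r in self.region_list()}
        for r in regions:
            if r not in known:
                LOG.warning(_LW('Region %s is not found.'), r)
        return [r for r in regions if r in known]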
Example #18
    def validate_regions(self, regions):
        """Check whether the given regions are valid.

        :param regions: A list of regions for validation.
        :returns: A list of regions that are found available on keystone.
        """
        known = [r['id'] for r in self.region_list()]

        validated = []
        for r in regions:
            if r in known:
                validated.append(r)
            else:
                LOG.warning(_LW('Region %s is not found.'), r)

        return validated
Example #19
    def pre_op(self, cluster_id, action):
        """Routine to be called before an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """

        candidates = self._get_delete_candidates(cluster_id, action)
        if len(candidates) == 0:
            return

        db_cluster = db_api.cluster_get(action.context, cluster_id)
        params = self._build_conn_params(db_cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']

        # Remove nodes that will be deleted from lb pool
        for node_id in candidates:
            node = node_mod.Node.load(action.context, node_id=node_id)
            member_id = node.data.get('lb_member', None)
            if member_id is None:
                LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
                            {'n': node_id, 'p': pool_id})
                continue

            res = lb_driver.member_remove(lb_id, pool_id, member_id)
            if res is not True:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in removing deleted '
                                          'node(s) from lb pool.')
                return

        deletion = action.data.get('deletion', {})
        deletion.update({'count': len(candidates), 'candidates': candidates})
        action.data.update({'deletion': deletion})

        return
Example #20
    def validate_azs(self, azs):
        """check whether availability zones provided are valid.

        :param azs: A list of availability zone names for checking.
        :returns: A list of zones that are found available on Nova.
        """
        known = self.availability_zone_list()
        names = [az['zoneName'] for az in known
                 if az['zoneState']['available']]

        found = []
        for az in azs:
            if az in names:
                found.append(az)
            else:
                LOG.warning(_LW("Availability zone '%s' is not available."),
                            az)
        return found
Example #21
    def validate_azs(self, azs):
        """check whether availability zones provided are valid.

        :param azs: A list of availability zone names for checking.
        :returns: A list of zones that are found available on Nova.
        """
        known = self.availability_zone_list()
        names = [az['zoneName'] for az in known
                 if az['zoneState']['available']]

        found = []
        for az in azs:
            if az in names:
                found.append(az)
            else:
                LOG.warning(_LW("Availability zone '%s' is not available."),
                            az)
        return found
Example #22
    def _validate_zones(self, cluster):
        """Check whether the availability zones in the spec are valid.

        :param cluster: the cluster object that policy attached to.
        :returns: A list of zones that are found available on Nova.
        """
        azs = self.nova(cluster).availability_zone_list()
        azs = [az['zoneName'] for az in azs if az['zoneState']['available']]

        avail = {}
        for name in self.zones:
            if name not in azs:
                LOG.warning(_LW('Availability zone %(az)s is not available.'),
                            {'az': name})
            else:
                avail[name] = self.zones[name]

        return avail
Example #23
def setup(binary, host):
    if cfg.CONF.profiler.enabled:
        _notifier = osprofiler.notifier.create(
            "Messaging", oslo_messaging, context.get_admin_context().to_dict(),
            messaging.TRANSPORT, "senlin", binary, host)
        osprofiler.notifier.set(_notifier)
        osprofiler.web.enable(cfg.CONF.profiler.hmac_keys)
        LOG.warning(_LW("OSProfiler is enabled.\nIt means that any person who "
                        "knows any of hmac_keys that are specified in "
                        "/etc/senlin/senlin.conf can trace his requests. \n"
                        "In real life only an operator can read this file so "
                        "there is no security issue. Note that even if any "
                        "person can trigger the profiler, only an admin user "
                        "can retrieve trace.\n"
                        "To disable OSProfiler set in senlin.conf:\n"
                        "[profiler]\nenabled=false"))
    else:
        osprofiler.web.disable()
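
As the warning text notes, profiling is driven from senlin.conf; a minimal sketch of the relevant section, where the key value is a placeholder and hmac_keys should remain readable only by the operator:

    [profiler]
    # Set to false to disable OSProfiler entirely.
    enabled = true
    # Secret key(s); only callers presenting a matching key can trigger traces.
    hmac_keys = SECRET_KEY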
Example #24
    def pre_op(self, cluster_id, action):
        """Routine to be called before an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """

        candidates = self._get_delete_candidates(cluster_id, action)
        if len(candidates) == 0:
            return

        db_cluster = co.Cluster.get(action.context, cluster_id)
        lb_driver = self.lbaas(db_cluster.user, db_cluster.project)
        lb_driver.lb_status_timeout = self.lb_status_timeout
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']

        # Remove nodes that will be deleted from lb pool
        for node_id in candidates:
            node = nm.Node.load(action.context, node_id=node_id)
            member_id = node.data.get('lb_member', None)
            if member_id is None:
                LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'), {
                    'n': node_id,
                    'p': pool_id
                })
                continue

            res = lb_driver.member_remove(lb_id, pool_id, member_id)
            if res is not True:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in removing deleted '
                                          'node(s) from lb pool.')
                return

        return
Example #25
 def do_update(self, obj, new_profile, **params):
     """For subclass to override."""
     LOG.warn(_LW("Update operation not supported."))
     return True
Example #26
 def do_check(self, obj):
     """For subclass to override."""
     LOG.warn(_LW("Check operation not supported."))
     return True
Example #27
 def do_get_details(self, obj):
     """For subclass to override."""
     LOG.warn(_LW("Get_details operation not supported."))
     return {}
Example #28
 def do_leave(self, obj):
     """For subclass to override to perform extra operations."""
     LOG.warn(_LW("Join operation not specialized."))
     return True
Example #29
    def post_op(self, cluster_id, action):
        """Routine to be called after an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        nodes_added = action.outputs.get('nodes_added', [])
        nodes_removed = action.outputs.get('nodes_removed', [])
        if ((len(nodes_added) == 0) and (len(nodes_removed) == 0)):
            return

        db_cluster = db_api.cluster_get(action.context, cluster_id)
        params = self._build_conn_params(db_cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']
        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        # Remove nodes that have been deleted from lb pool
        for node_id in nodes_removed:
            node = node_mod.Node.load(action.context, node_id=node_id,
                                      show_deleted=True)
            member_id = node.data.get('lb_member', None)
            if member_id is None:
                LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
                            {'n': node_id, 'p': pool_id})
                continue

            res = lb_driver.member_remove(lb_id, pool_id, member_id)
            if res is not True:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in removing deleted '
                                          'node(s) from lb pool.')
                return

        # Add new nodes to lb pool
        for node_id in nodes_added:
            node = node_mod.Node.load(action.context, node_id=node_id,
                                      show_deleted=True)
            member_id = node.data.get('lb_member', None)
            if member_id:
                LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'),
                            {'n': node_id, 'p': pool_id})
                continue

            member_id = lb_driver.member_add(node, lb_id, pool_id, port,
                                             subnet)
            if member_id is None:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in adding new node(s) '
                                          'into lb pool.')
                return

            node.data.update({'lb_member': member_id})
            node.store(action.context)

        return
Example #30
 def do_leave(self, obj):
     """For subclass to override to perform extra operations."""
     LOG.warn(_LW("Join operation not specialized."))
     return True
Example #31
 def do_join(self, obj, cluster_id):
     """For subclass to override to perform extra operations."""
     LOG.warning(_LW("Join operation not specialized."))
     return True
Example #32
 def do_validate(self, obj):
     """For subclass to override."""
     LOG.warning(_LW("Validate operation not supported."))
     return True
Example #33
 def do_update(self, obj, new_profile, **params):
     """For subclass to override."""
     LOG.warn(_LW("Update operation not supported."))
     return True
Example #34
 def do_check(self, obj):
     """For subclass to override."""
     LOG.warn(_LW("Check operation not supported."))
     return True
Example #35
 def do_get_details(self, obj):
     """For subclass to override."""
     LOG.warn(_LW("Get_details operation not supported."))
     return {}
Example #36
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        content_type = request.params.get("ContentType")

        try:
            deserialized_request = self.dispatch(self.deserializer,
                                                 action, request)
            action_args.update(deserialized_request)

            logging.debug('Calling %(controller)s : %(action)s',
                          {'controller': self.controller, 'action': action})

            action_result = self.dispatch(self.controller, action,
                                          request, **action_args)
        except TypeError as err:
            logging.error(_LE('Exception handling resource: %s'), err)
            msg = _('The server could not comply with the request since '
                    'it is either malformed or otherwise incorrect.')
            err = webob.exc.HTTPBadRequest(msg)
            http_exc = translate_exception(err, request.best_match_language())
            # NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
            # treated by wsgi as responses ready to be sent back and they
            # won't make it into the pipeline app that serializes errors
            raise exception.HTTPExceptionDisguise(http_exc)
        except webob.exc.HTTPException as err:
            if not isinstance(err, webob.exc.HTTPError):
                # Some HTTPException are actually not errors, they are
                # responses ready to be sent back to the users, so we don't
                # error log, disguise or translate those
                raise
            if isinstance(err, webob.exc.HTTPServerError):
                logging.error(
                    _LE("Returning %(code)s to user: %(explanation)s"),
                    {'code': err.code, 'explanation': err.explanation})
            http_exc = translate_exception(err, request.best_match_language())
            raise exception.HTTPExceptionDisguise(http_exc)
        except exception.SenlinException as err:
            raise translate_exception(err, request.best_match_language())
        except Exception as err:
            log_exception(err, sys.exc_info())
            raise translate_exception(err, request.best_match_language())

        serializer = self.serializer or serializers.JSONResponseSerializer()
        try:
            response = webob.Response(request=request)
            self.dispatch(serializer, action, response, action_result)
            return response

        # return unserializable result (typically an exception)
        except Exception:
            if content_type == "JSON":
                try:
                    err_body = action_result.get_unserialized_body()
                    serializer.default(action_result, err_body)
                except Exception:
                    logging.warning(_LW("Unable to serialize exception "
                                    "response"))

            return action_result
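
The dispatch helper used throughout resolves the action name to a method at runtime, falling back to a default handler. A minimal sketch of that pattern, which is an assumption about the implementation rather than the exact code:

    def dispatch(obj, action, *args, **kwargs):
        """Call the action-named method on obj, or its 'default' method."""
        try:
            method = getattr(obj, action)
        except AttributeError:
            method = getattr(obj, 'default')
        return method(*args, **kwargs)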
Example #37
 def do_rebuild(self, obj):
     """For subclass to override."""
     LOG.warn(_LW("Rebuild operation not specialized."))
     return True
Example #38
 def do_rebuild(self, obj):
     """For subclass to override."""
     LOG.warn(_LW("Rebuild operation not specialized."))
     return True
Example #39
    def post_op(self, cluster_id, action):
        """Routine to be called after an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        nodes = action.data.get('nodes', [])
        if len(nodes) == 0:
            return

        db_cluster = db_api.cluster_get(action.context, cluster_id)
        params = self._build_conn_params(db_cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']
        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        for node_id in nodes:
            node = node_mod.Node.load(action.context, node_id=node_id,
                                      show_deleted=True)
            member_id = node.data.get('lb_member')
            if (action.action in (consts.CLUSTER_DEL_NODES,
                                  consts.CLUSTER_SCALE_IN))\
                    or (action.action == consts.CLUSTER_RESIZE and
                        action.data.get('deletion')):

                if member_id is None:
                    LOG.warning(_LW('Node %(node)s not found in loadbalancer '
                                    'pool %(pool)s.'),
                                {'node': node_id, 'pool': pool_id})
                    continue

                # Remove nodes that have been deleted from lb pool
                res = lb_driver.member_remove(lb_id, pool_id, member_id)
                if res is not True:
                    action.data['status'] = base.CHECK_ERROR
                    action.data['reason'] = _('Failed in removing deleted '
                                              'node from lb pool')
                    return

            if (action.action in (consts.CLUSTER_ADD_NODES,
                                  consts.CLUSTER_SCALE_OUT))\
                    or (action.action == consts.CLUSTER_RESIZE and
                        action.data.get('creation')):

                if member_id:
                    LOG.warning(_LW('Node %(node)s already in loadbalancer '
                                    'pool %(pool)s.'),
                                {'node': node_id, 'pool': pool_id})
                    continue

                res = lb_driver.member_add(node, lb_id, pool_id, port, subnet)
                if res is None:
                    action.data['status'] = base.CHECK_ERROR
                    action.data['reason'] = _('Failed in adding new node '
                                              'into lb pool')
                    return

                node.data.update({'lb_member': res})
                node.store(action.context)

        return