def update_pending_roles(cls, instance, new_pending_roles):
    """Replace the pending role list of a Node.

    Logs a warning and changes nothing when the node has not been
    added to a cluster.

    :param instance: Node instance
    :param new_pending_roles: list of new pending role names
    :returns: None
    """
    if not instance.cluster_id:
        logger.warning(u"Attempting to assign pending roles to node "
                       u"'{0}' which isn't added to cluster".format(
                           instance.name or instance.id))
        return

    logger.debug(u"Updating pending roles for node {0}: {1}".format(
        instance.id, new_pending_roles))

    if new_pending_roles != []:
        # Resolve the requested role names against the roles that
        # exist in the cluster's release.
        role_query = db().query(models.Role).filter_by(
            release_id=instance.cluster.release_id,
        ).filter(models.Role.name.in_(new_pending_roles))
        instance.pending_role_list = role_query.all()
    else:
        instance.pending_role_list = []
        # TODO(enchantner): research why the hell we need this
        Cluster.clear_pending_changes(instance.cluster,
                                      node_id=instance.id)

    db().flush()
    db().refresh(instance)
def remove_from_cluster(cls, instance):
    """Detach a node from its cluster.

    Drops the node's network assignment and bond configuration and
    clears both its roles and pending roles, then resets the node's
    cluster-related attributes and default name.

    :param instance: Node instance
    :returns: None
    """
    cluster = instance.cluster
    if cluster:
        Cluster.clear_pending_changes(cluster, node_id=instance.id)
        net_manager = Cluster.get_network_manager(cluster)
        net_manager.clear_assigned_networks(instance)
        net_manager.clear_bond_configuration(instance)

    cls.update_roles(instance, [])
    cls.update_pending_roles(instance, [])
    cls.remove_replaced_params(instance)

    instance.cluster_id = None
    instance.group_id = None
    instance.kernel_params = None
    instance.reset_name_to_default()

    db().flush()
    db().refresh(instance)
def update_pending_roles(cls, instance, new_pending_roles):
    """Assign a new set of pending roles to a node.

    A warning is logged and nothing is changed when the node does not
    belong to any cluster.

    :param instance: Node instance
    :param new_pending_roles: list of new pending role names
    :returns: None
    """
    if not instance.cluster_id:
        msg = (u"Attempting to assign pending roles to node "
               u"'{0}' which isn't added to cluster")
        logger.warning(msg.format(instance.full_name))
        return

    logger.debug(
        u"Updating pending roles for node {0}: {1}".format(
            instance.full_name, new_pending_roles))

    if new_pending_roles == []:
        # An explicit empty list also clears the cluster's pending
        # changes for this node.
        # TODO(enchantner): research why the hell we need this
        Cluster.clear_pending_changes(instance.cluster,
                                      node_id=instance.id)

    instance.pending_roles = new_pending_roles
    db().flush()
def remove_from_cluster(cls, instance):
    """Remove a node from the cluster it belongs to.

    Networks assignment and bond configuration are dropped, roles and
    pending roles are cleared, and the node gets its default slave
    hostname back.

    :param instance: Node instance
    :returns: None
    """
    if instance.cluster is not None:
        Cluster.clear_pending_changes(instance.cluster,
                                      node_id=instance.id)
        net_manager = Cluster.get_network_manager(instance.cluster)
        net_manager.clear_assigned_networks(instance)
        net_manager.clear_bond_configuration(instance)

    cls.update_roles(instance, [])
    cls.update_pending_roles(instance, [])
    cls.remove_replaced_params(instance)

    instance.cluster_id = None
    instance.group_id = None
    instance.kernel_params = None
    instance.primary_roles = []
    instance.hostname = cls.default_slave_name(instance)

    # NOTE(review): local import -- presumably to avoid a circular
    # dependency at module load time; confirm before moving it.
    from nailgun.objects import OpenstackConfig
    OpenstackConfig.disable_by_nodes([instance])

    db().flush()
    db().refresh(instance)
def remove_from_cluster(cls, instance):
    """Take *instance* out of its cluster.

    Clears network assignment, bond configuration, roles, pending
    roles and replaced params, then resets cluster-related node
    attributes and restores the default slave hostname.

    :param instance: Node instance
    :returns: None
    """
    cluster = instance.cluster
    if cluster:
        Cluster.clear_pending_changes(cluster, node_id=instance.id)
        manager = Cluster.get_network_manager(cluster)
        manager.clear_assigned_networks(instance)
        manager.clear_bond_configuration(instance)

    # Drop both assigned and pending roles, plus replaced params.
    cls.update_roles(instance, [])
    cls.update_pending_roles(instance, [])
    cls.remove_replaced_params(instance)

    instance.cluster_id = None
    instance.group_id = None
    instance.kernel_params = None
    instance.primary_roles = []
    instance.hostname = cls.default_slave_name(instance)

    from nailgun.objects import OpenstackConfig
    OpenstackConfig.disable_by_nodes([instance])

    db().flush()
    db().refresh(instance)
def update_pending_roles(cls, instance, new_pending_roles):
    """Overwrite the node's pending roles with *new_pending_roles*.

    Logs a warning and returns early when the node is not part of a
    cluster.

    :param instance: Node instance
    :param new_pending_roles: list of new pending role names
    :returns: None
    """
    if not instance.cluster_id:
        logger.warning(
            u"Attempting to assign pending roles to node "
            u"'{0}' which isn't added to cluster".format(
                instance.full_name))
        return

    logger.debug(
        u"Updating pending roles for node {0}: {1}".format(
            instance.full_name, new_pending_roles))

    if new_pending_roles == []:
        # TODO(enchantner): research why the hell we need this
        Cluster.clear_pending_changes(
            instance.cluster,
            node_id=instance.id,
        )

    instance.pending_roles = new_pending_roles
    db().flush()
def update_pending_roles(cls, instance, new_pending_roles):
    """Set the pending role list of a node.

    When the node is not attached to a cluster a warning is logged
    and the call is a no-op.

    :param instance: Node instance
    :param new_pending_roles: list of new pending role names
    :returns: None
    """
    if not instance.cluster_id:
        node_label = instance.name or instance.id
        logger.warning(
            u"Attempting to assign pending roles to node "
            u"'{0}' which isn't added to cluster".format(node_label))
        return

    logger.debug(
        u"Updating pending roles for node {0}: {1}".format(
            instance.id, new_pending_roles))

    if new_pending_roles == []:
        instance.pending_role_list = []
        # research why the hell we need this
        Cluster.clear_pending_changes(
            instance.cluster, node_id=instance.id)
    else:
        release_roles = db().query(models.Role).filter_by(
            release_id=instance.cluster.release_id,
        )
        instance.pending_role_list = release_roles.filter(
            models.Role.name.in_(new_pending_roles)).all()

    db().flush()
    db().refresh(instance)
def update_pending_roles(cls, instance, new_pending_roles):
    """Update the pending roles of *instance*.

    Logs a warning and does nothing when the node doesn't belong to
    a cluster.

    :param instance: Node instance
    :param new_pending_roles: list of new pending role names
    :returns: None
    """
    if not instance.cluster_id:
        logger.warning(
            u"Attempting to assign pending roles to node "
            u"'{0}' which isn't added to cluster".format(
                instance.name or instance.id))
        return

    logger.debug(
        u"Updating pending roles for node {0}: {1}".format(
            instance.id, new_pending_roles))

    if new_pending_roles == []:
        instance.pending_role_list = []
        # TODO(enchantner): research why the hell we need this
        Cluster.clear_pending_changes(instance.cluster,
                                      node_id=instance.id)
    else:
        name_filter = models.Role.name.in_(new_pending_roles)
        instance.pending_role_list = (
            db().query(models.Role)
            .filter_by(release_id=instance.cluster.release_id)
            .filter(name_filter)
            .all())

    db().flush()
    db().refresh(instance)
def _update_cluster_data(cls, instance):
    """Propagate a finished task's state onto its cluster and nodes.

    *instance* is a Task object; depending on the task name and
    status, the cluster status is updated and, on failures, the
    affected nodes are moved to an error state.
    """
    cluster = instance.cluster
    if instance.name == "deploy":
        if instance.status == "ready":
            # If for some reason the orchestrator didn't send a
            # "ready" status for a node, set it explicitly.
            for n in cluster.nodes:
                if n.status == "deploying":
                    n.status = "ready"
                    n.progress = 100
            cls.__update_cluster_status(cluster, "operational")
            Cluster.clear_pending_changes(cluster)
        elif instance.status == "error" and \
                not TaskHelper.before_deployment_error(instance):
            # Only flag the cluster as "error" when the failure
            # happened after deployment actually started; we don't
            # want to lock settings if the cluster wasn't deployed.
            cls.__update_cluster_status(cluster, "error")
    elif instance.name == "deployment" and instance.status == "error":
        cls.__update_cluster_status(cluster, "error")
        q_nodes_to_error = TaskHelper.get_nodes_to_deployment_error(cluster)
        cls.__update_nodes_to_error(q_nodes_to_error, error_type="deploy")
    elif instance.name == "provision" and instance.status == "error":
        cls.__update_cluster_status(cluster, "error")
        q_nodes_to_error = TaskHelper.get_nodes_to_provisioning_error(cluster)
        cls.__update_nodes_to_error(q_nodes_to_error, error_type="provision")
    elif instance.name == "stop_deployment":
        if instance.status == "error":
            cls.__update_cluster_status(cluster, "error")
        else:
            cls.__update_cluster_status(cluster, "stopped")
    elif instance.name == consts.TASK_NAMES.update:
        if instance.status == consts.TASK_STATUSES.error:
            cls.__update_cluster_status(cluster,
                                        consts.CLUSTER_STATUSES.update_error)
            q_nodes_to_error = TaskHelper.get_nodes_to_deployment_error(
                cluster)
            cls.__update_nodes_to_error(q_nodes_to_error,
                                        error_type=consts.NODE_ERRORS.deploy)
        elif instance.status == consts.TASK_STATUSES.ready:
            cls.__update_cluster_status(cluster,
                                        consts.CLUSTER_STATUSES.operational)
            # The update succeeded: promote the pending release.
            cluster.release_id = cluster.pending_release_id
            cluster.pending_release_id = None
def remove_from_cluster(cls, instance):
    """Remove Node from Cluster.

    Drops the node's networks assignment, clears its roles and
    pending roles and resets its name to the default one.

    :param instance: Node instance
    :returns: None
    """
    # Guard the cluster-scoped cleanup, consistently with the other
    # remove_from_cluster variants: previously these calls were made
    # unconditionally and assumed the node always had a cluster.
    if instance.cluster:
        Cluster.clear_pending_changes(
            instance.cluster,
            node_id=instance.id
        )
        Cluster.get_network_manager(
            instance.cluster
        ).clear_assigned_networks(instance)

    instance.cluster_id = None
    instance.roles = instance.pending_roles = []
    instance.reset_name_to_default()

    db().flush()
    db().refresh(instance)
def _update_cluster_data(cls, instance):
    """Reflect a task's outcome on its cluster and nodes.

    *instance* is a Task object; based on the task name and status
    the cluster status is updated, nodes are moved to error states on
    failures, and a finished 'update' task promotes the pending
    release.
    """
    cluster = instance.cluster
    if instance.name == 'deploy':
        if instance.status == 'ready':
            # If for some reason the orchestrator didn't send a
            # 'ready' status for a node, set it explicitly.
            for n in cluster.nodes:
                if n.status == 'deploying':
                    n.status = 'ready'
                    n.progress = 100
            cls.__update_cluster_status(cluster, 'operational')
            Cluster.clear_pending_changes(cluster)
        elif instance.status == 'error' and \
                not TaskHelper.before_deployment_error(instance):
            # Don't mark the cluster as 'error' for failures that
            # happened before deployment started -- that would lock
            # the settings of a cluster that was never deployed.
            cls.__update_cluster_status(cluster, 'error')
    elif instance.name == consts.TASK_NAMES.spawn_vms:
        if instance.status == consts.TASK_STATUSES.ready:
            Cluster.mark_vms_as_created(cluster)
        elif instance.status == consts.TASK_STATUSES.error and \
                not TaskHelper.before_deployment_error(instance):
            cls.__update_cluster_status(cluster, 'error')
    elif instance.name == 'deployment' and instance.status == 'error':
        cls.__update_cluster_status(cluster, 'error')
        q_nodes_to_error = \
            TaskHelper.get_nodes_to_deployment_error(cluster)
        cls.__update_nodes_to_error(q_nodes_to_error, error_type='deploy')
    elif instance.name == 'provision' and instance.status == 'error':
        cls.__update_cluster_status(cluster, 'error')
        q_nodes_to_error = \
            TaskHelper.get_nodes_to_provisioning_error(cluster)
        cls.__update_nodes_to_error(q_nodes_to_error,
                                    error_type='provision')
    elif instance.name == 'stop_deployment':
        if instance.status == 'error':
            cls.__update_cluster_status(cluster, 'error')
        else:
            cls.__update_cluster_status(cluster, 'stopped')
    elif instance.name == consts.TASK_NAMES.update:
        if instance.status == consts.TASK_STATUSES.error:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.update_error)
            q_nodes_to_error = \
                TaskHelper.get_nodes_to_deployment_error(cluster)
            cls.__update_nodes_to_error(
                q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy)
        elif instance.status == consts.TASK_STATUSES.ready:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.operational)
            # The update succeeded: promote the pending release.
            cluster.release_id = cluster.pending_release_id
            cluster.pending_release_id = None
def _update_cluster_data(cls, instance):
    """Update the cluster and its nodes after a task finished.

    Dispatches on the task (*instance*) name and status: sets the
    cluster status, marks failed nodes with the matching error type
    and, for a successful 'update' task, swaps in the pending
    release.
    """
    cluster = instance.cluster
    if instance.name == 'deploy':
        if instance.status == 'ready':
            # If for some reason the orchestrator didn't send a
            # 'ready' status for a node, set it explicitly.
            for n in cluster.nodes:
                if n.status == 'deploying':
                    n.status = 'ready'
                    n.progress = 100
            cls.__update_cluster_status(cluster, 'operational')
            Cluster.clear_pending_changes(cluster)
        elif instance.status == 'error' and \
                not TaskHelper.before_deployment_error(instance):
            # Errors raised before deployment started must not set
            # the cluster to 'error', since that would lock settings
            # of a cluster that was never deployed.
            cls.__update_cluster_status(cluster, 'error')
    elif instance.name == consts.TASK_NAMES.spawn_vms:
        if instance.status == consts.TASK_STATUSES.ready:
            Cluster.mark_vms_as_created(cluster)
        elif instance.status == consts.TASK_STATUSES.error and \
                not TaskHelper.before_deployment_error(instance):
            cls.__update_cluster_status(cluster, 'error')
    elif instance.name == 'deployment' and instance.status == 'error':
        cls.__update_cluster_status(cluster, 'error')
        q_nodes_to_error = \
            TaskHelper.get_nodes_to_deployment_error(cluster)
        cls.__update_nodes_to_error(q_nodes_to_error, error_type='deploy')
    elif instance.name == 'provision' and instance.status == 'error':
        cls.__update_cluster_status(cluster, 'error')
        q_nodes_to_error = \
            TaskHelper.get_nodes_to_provisioning_error(cluster)
        cls.__update_nodes_to_error(q_nodes_to_error,
                                    error_type='provision')
    elif instance.name == 'stop_deployment':
        if instance.status == 'error':
            cls.__update_cluster_status(cluster, 'error')
        else:
            cls.__update_cluster_status(cluster, 'stopped')
    elif instance.name == consts.TASK_NAMES.update:
        if instance.status == consts.TASK_STATUSES.error:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.update_error
            )
            q_nodes_to_error = \
                TaskHelper.get_nodes_to_deployment_error(cluster)
            cls.__update_nodes_to_error(
                q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy)
        elif instance.status == consts.TASK_STATUSES.ready:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.operational
            )
            # Successful update: promote the pending release.
            cluster.release_id = cluster.pending_release_id
            cluster.pending_release_id = None
def _update_cluster_data(cls, instance):
    """Propagate a finished task's state to its cluster and nodes.

    *instance* is a Task object; its name and status determine the
    new cluster status and, on failures, which nodes are moved to an
    error state.
    """
    cluster = instance.cluster
    if instance.name == consts.TASK_NAMES.deployment:
        if instance.status == consts.TASK_STATUSES.ready:
            # If for some reasons orchestrator
            # didn't send ready status for node
            # we should set it explicitly
            for n in cluster.nodes:
                if n.status == consts.NODE_STATUSES.deploying:
                    n.status = consts.NODE_STATUSES.ready
                    n.progress = 100
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.operational,
                consts.NODE_STATUSES.ready
            )
            Cluster.clear_pending_changes(cluster)
        # Fixed: compare the *task* status against TASK_STATUSES, not
        # CLUSTER_STATUSES as before (the sibling variant of this
        # method already uses TASK_STATUSES here; both presumably
        # spell 'error' -- verify in consts).
        elif instance.status == consts.TASK_STATUSES.error:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.error, None
            )
            q_nodes_to_error = TaskHelper.get_nodes_to_deployment_error(
                cluster
            )
            cls.__update_nodes_to_error(
                q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy
            )
    elif instance.name == consts.TASK_NAMES.spawn_vms:
        if instance.status == consts.TASK_STATUSES.ready:
            Cluster.set_vms_created_state(cluster)
        elif (instance.status == consts.TASK_STATUSES.error and
              not TaskHelper.before_deployment_error(instance)):
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.error, None
            )
    elif instance.name == consts.TASK_NAMES.deploy and \
            instance.status == consts.TASK_STATUSES.error and \
            not TaskHelper.before_deployment_error(instance):
        # We don't want to set cluster status to
        # error because we don't want to lock
        # settings if cluster wasn't deployed
        cls.__update_cluster_status(
            cluster, consts.CLUSTER_STATUSES.error, None
        )
    elif instance.name == consts.TASK_NAMES.provision:
        if instance.status == consts.TASK_STATUSES.ready:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.partially_deployed, None
            )
        elif instance.status == consts.TASK_STATUSES.error:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.error, None
            )
            q_nodes_to_error = \
                TaskHelper.get_nodes_to_provisioning_error(cluster)
            cls.__update_nodes_to_error(
                q_nodes_to_error, error_type=consts.NODE_ERRORS.provision)
    elif instance.name == consts.TASK_NAMES.stop_deployment:
        if instance.status == consts.TASK_STATUSES.error:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.error, None
            )
        else:
            cls.__update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.stopped, None
            )
def _update_cluster_data(cls, instance):
    """Apply a finished task's result to its cluster and nodes.

    *instance* is a Task object; based on its name and status the
    cluster status is updated via ``_update_cluster_status`` and, on
    failures, the affected nodes are marked with an error type.
    """
    cluster = instance.cluster
    if instance.name == consts.TASK_NAMES.deployment:
        if instance.status == consts.TASK_STATUSES.ready:
            # If for some reason the orchestrator didn't send a
            # ready status for a node, set it explicitly.
            for n in cluster.nodes:
                if n.status == consts.NODE_STATUSES.deploying:
                    n.status = consts.NODE_STATUSES.ready
                    n.progress = 100
            cls._update_cluster_status(cluster,
                                       consts.CLUSTER_STATUSES.operational,
                                       consts.NODE_STATUSES.ready)
            Cluster.clear_pending_changes(cluster)
        elif instance.status == consts.TASK_STATUSES.error:
            cls._update_cluster_status(cluster,
                                       consts.CLUSTER_STATUSES.error,
                                       None)
            q_nodes_to_error = TaskHelper.get_nodes_to_deployment_error(
                cluster)
            cls.__update_nodes_to_error(
                q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy)
    elif instance.name == consts.TASK_NAMES.spawn_vms:
        if instance.status == consts.TASK_STATUSES.ready:
            Cluster.set_vms_created_state(cluster)
        elif (instance.status == consts.TASK_STATUSES.error and
              not TaskHelper.before_deployment_error(instance)):
            cls._update_cluster_status(cluster,
                                       consts.CLUSTER_STATUSES.error,
                                       None)
    elif instance.name == consts.TASK_NAMES.deploy and \
            instance.status == consts.TASK_STATUSES.error and \
            not TaskHelper.before_deployment_error(instance):
        # Only mark the cluster as error when the failure happened
        # after deployment started; we don't want to lock settings
        # if the cluster wasn't deployed.
        cls._update_cluster_status(cluster,
                                   consts.CLUSTER_STATUSES.error,
                                   None)
    elif instance.name == consts.TASK_NAMES.provision:
        if instance.status == consts.TASK_STATUSES.ready:
            cls._update_cluster_status(
                cluster, consts.CLUSTER_STATUSES.partially_deployed, None)
        elif instance.status == consts.TASK_STATUSES.error:
            cls._update_cluster_status(cluster,
                                       consts.CLUSTER_STATUSES.error,
                                       None)
            q_nodes_to_error = \
                TaskHelper.get_nodes_to_provisioning_error(cluster)
            cls.__update_nodes_to_error(
                q_nodes_to_error, error_type=consts.NODE_ERRORS.provision)
    elif instance.name == consts.TASK_NAMES.stop_deployment:
        if instance.status == consts.TASK_STATUSES.error:
            cls._update_cluster_status(cluster,
                                       consts.CLUSTER_STATUSES.error,
                                       None)
        else:
            cls._update_cluster_status(cluster,
                                       consts.CLUSTER_STATUSES.stopped,
                                       None)