def execute(self, nets, vlan_ids):
    self.remove_previous_task()

    task = Task(name=TASK_NAMES.check_networks, cluster=self.cluster)

    if len(self.cluster.nodes) < 2:
        task.status = TASK_STATUSES.error
        task.progress = 100
        task.message = ('At least two nodes are required to be '
                        'in the environment for network verification.')
        db().add(task)
        db().commit()
        return task

    if self.cluster.status in self._blocking_statuses:
        task.status = TASK_STATUSES.error
        task.progress = 100
        task.message = (
            "Environment is not ready to run network verification "
            "because it is in '{0}' state.".format(self.cluster.status))
        db().add(task)
        db().commit()
        return task

    db().add(task)
    db().commit()

    # Run the pre-flight parameter check; any failure is reflected
    # in the task status rather than raised.
    self._call_silently(
        task,
        tasks.CheckNetworksTask,
        data=nets,
        check_admin_untagged=True)

    db().refresh(task)

    if task.status != TASK_STATUSES.error:
        # This is connected with UI issues - we need to distinguish
        # whether an error happened inside nailgun or somewhere in the
        # orchestrator, and the UI does that by the task name.
        task.name = TASK_NAMES.verify_networks

        verify_task = tasks.VerifyNetworksTask(task, vlan_ids)

        if tasks.CheckDhcpTask.enabled(self.cluster):
            dhcp_subtask = objects.task.Task.create_subtask(
                task, name=TASK_NAMES.check_dhcp)
            verify_task.add_subtask(
                tasks.CheckDhcpTask(dhcp_subtask, vlan_ids))

        if tasks.MulticastVerificationTask.enabled(self.cluster):
            multicast = objects.task.Task.create_subtask(
                task, name=TASK_NAMES.multicast_verification)
            verify_task.add_subtask(
                tasks.MulticastVerificationTask(multicast))

        db().commit()
        self._call_silently(task, verify_task)

    return task
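
# Illustrative sketch only - all names below are hypothetical and not part of
# the nailgun API. It mirrors the composition pattern used above, where the
# top-level verification task conditionally aggregates optional subtasks
# (DHCP, multicast) before being dispatched as a single unit.
class ToyVerifyTask(object):
    def __init__(self, name):
        self.name = name
        self.subtasks = []

    def add_subtask(self, subtask):
        # Subtasks are collected first and executed together later,
        # just as VerifyNetworksTask gathers its optional checks.
        self.subtasks.append(subtask)

    def execute(self):
        return [subtask() for subtask in self.subtasks]


def build_toy_verification(dhcp_enabled, multicast_enabled):
    verify = ToyVerifyTask('verify_networks')
    if dhcp_enabled:
        verify.add_subtask(lambda: 'check_dhcp: ok')
    if multicast_enabled:
        verify.add_subtask(lambda: 'multicast_verification: ok')
    return verify


# Example: build_toy_verification(True, False).execute()
# returns ['check_dhcp: ok'].
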
def execute(self, nets, vlan_ids, **kwargs):
    self.remove_previous_task()

    task = Task(
        name=consts.TASK_NAMES.check_networks,
        cluster=self.cluster
    )

    if len([n for n in self.cluster.nodes if n.online]) < 2:
        task.status = consts.TASK_STATUSES.error
        task.progress = 100
        task.message = ('At least two online nodes are required to be '
                        'in the environment for network verification.')
        db().add(task)
        db().commit()
        return task

    if len(self.cluster.node_groups) > 1:
        task.status = consts.TASK_STATUSES.error
        task.progress = 100
        task.message = ('Network verification is disabled for '
                        'environments containing more than one node '
                        'group.')
        db().add(task)
        db().commit()
        return task

    if self.cluster.status in self._blocking_statuses:
        task.status = consts.TASK_STATUSES.error
        task.progress = 100
        task.message = (
            "Environment is not ready to run network verification "
            "because it is in '{0}' state.".format(self.cluster.status)
        )
        db().add(task)
        db().commit()
        return task

    db().add(task)
    db().commit()

    # Run the pre-flight parameter check; any failure is reflected
    # in the task status rather than raised.
    self._call_silently(
        task,
        tasks.CheckNetworksTask,
        data=nets,
        check_all_parameters=True
    )

    db().refresh(task)

    if task.status != consts.TASK_STATUSES.error:
        # This is connected with UI issues - we need to distinguish
        # whether an error happened inside nailgun or somewhere in the
        # orchestrator, and the UI does that by the task name.
        task.name = consts.TASK_NAMES.verify_networks

        verify_task = tasks.VerifyNetworksTask(task, vlan_ids)

        if tasks.CheckDhcpTask.enabled(self.cluster):
            dhcp_subtask = objects.task.Task.create_subtask(
                task, name=consts.TASK_NAMES.check_dhcp)
            verify_task.add_subtask(
                tasks.CheckDhcpTask(dhcp_subtask, vlan_ids))

        if tasks.MulticastVerificationTask.enabled(self.cluster):
            multicast = objects.task.Task.create_subtask(
                task, name=consts.TASK_NAMES.multicast_verification)
            verify_task.add_subtask(
                tasks.MulticastVerificationTask(multicast))

        # Remote connectivity checks exist since Fuel 6.1, so these
        # subtasks must not be created for older environments.
        if StrictVersion(self.cluster.release.environment_version) >= \
                StrictVersion(consts.FUEL_REMOTE_REPOS):
            # repo connectivity check via the default gateway
            repo_check_task = objects.task.Task.create_subtask(
                task, name=consts.TASK_NAMES.check_repo_availability)
            verify_task.add_subtask(
                tasks.CheckRepoAvailability(repo_check_task, vlan_ids))

            # repo connectivity check via an external gateway
            conf, errors = tasks.CheckRepoAvailabilityWithSetup.get_config(
                self.cluster)
            # If there is no conf, there are no nodes on which
            # the network needs to be set up.
            if conf:
                repo_check_task = objects.task.Task.create_subtask(
                    task,
                    consts.TASK_NAMES.check_repo_availability_with_setup)
                verify_task.add_subtask(
                    tasks.CheckRepoAvailabilityWithSetup(
                        repo_check_task, conf))
            if errors:
                notifier.notify(
                    "warning",
                    '\n'.join(errors),
                    self.cluster.id
                )

        db().commit()
        self._call_silently(task, verify_task)

    return task
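
# Illustrative sketch only - the helper below is hypothetical and the '6.1'
# constant is an assumption standing in for consts.FUEL_REMOTE_REPOS. It shows
# the release gate used above: repo-availability subtasks are added only for
# environments at or newer than the release that introduced the remote
# connectivity checks.
from distutils.version import StrictVersion

REMOTE_REPO_CHECKS_SINCE = '6.1'  # assumed value of consts.FUEL_REMOTE_REPOS


def supports_repo_checks(environment_version,
                         minimum=REMOTE_REPO_CHECKS_SINCE):
    # StrictVersion compares components numerically, so '6.10' counts as
    # newer than '6.2', which naive string comparison would get wrong.
    return StrictVersion(environment_version) >= StrictVersion(minimum)


# Example: supports_repo_checks('7.0') -> True,
#          supports_repo_checks('6.0') -> False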