Example #1
    def delete(self):
        # Delete each etcd key listed in the JSON request body
        # and collect a per-key result.
        data = request.get_json()
        resp_list = []
        etcd_manager = EtcdManagement()
        for key_path in data:
            try:
                deleted_flag = etcd_manager.remove_key(key_path)
            except Exception:
                resp_list.append({
                    "key": key_path,
                    "status": "failed",
                    "code": 1002
                })
                continue
            if deleted_flag:
                resp_list.append({"key": key_path, "status": "success"})
            else:
                resp_list.append({
                    "key": key_path,
                    "status": "failed",
                    "code": 1002
                })
        return jsonify(resp_list)
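
A minimal client-side sketch of how this endpoint might be exercised. The route path and port are assumptions; the original snippet does not show how the resource is registered:

# Hypothetical client call: DELETE a batch of etcd keys in one request.
# The handler expects a JSON list of key paths.
import requests

resp = requests.delete(
    "http://localhost:5000/etcd/keys",            # assumed route
    json=["/platform/orchestrator/thresholds"]    # key paths to remove
)
print(resp.json())  # e.g. [{"key": "...", "status": "success"}]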
Example #2
    def delete(self):
        # Delete a single etcd key passed as the 'key' argument.
        parser = reqparse.RequestParser()
        parser.add_argument('key', type=str)
        args = parser.parse_args()
        etcd_manager = EtcdManagement()
        try:
            deleted_flag = etcd_manager.remove_key(args["key"])
        except Exception:
            return {'message': "Request can't be executed", "code": 1000}, 500

        if deleted_flag:
            return {'message': 'Key was deleted successfully'}, 200
        else:
            return {
                'message': "Key wasn't deleted successfully",
                "code": 1002
            }, 409
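
The same kind of sketch for the single-key variant. Since the handler parses a 'key' argument with reqparse, a JSON body works; the route is again an assumption:

# Hypothetical client call: delete one etcd key.
import requests

resp = requests.delete(
    "http://localhost:5000/etcd/key",                  # assumed route
    json={"key": "/platform/orchestrator/thresholds"}
)
print(resp.status_code, resp.json())  # 200 on success, 409 if the key wasn't deleted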
Example #3
class DecisionMaker:
    def __init__(self):
        """
        Constructor
        """
        ###etcd way
        self.etcd_manager = EtcdManagement()
        ###etcd way

    @staticmethod
    def get_docker_api(host_ip):
        """
		Get docker api client
		Args:
			host_ip(str)
		"""
        return docker.DockerClient(base_url='tcp://{}:2375'.format(host_ip))

    @staticmethod
    def list_containers_by_host(host_ip):
        """
        List the application names of the containers running on a host.
        Args:
            host_ip(str)
        Returns:
            cont_names(list)
        """
        logger = Logger(filename="orchestrator",
                        logger_name="DecisionMaker list_containers_by_host",
                        dirname="/aux1/ocorchestrator/")
        docker_api = DecisionMaker.get_docker_api(host_ip)
        cont_names = []
        try:
            for container in docker_api.containers.list():
                app_name_search = re.search(r'(.*?)_\d+', container.name)
                if app_name_search:
                    app_name = app_name_search.group(1)
                    cont_names.append(app_name)
        except Exception:
            logger.error("Can't retrieve data from {} host!".format(host_ip))
            ### 2019.09.05
            if isinstance(host_ip, str):
                etcd_manager = EtcdManagement()
                etcd_manager.write(
                    "/platform/orchestrator/failed_nodes/{}".format(host_ip), "1")
                etcd_manager.remove_key(
                    "/platform/orchestrator/platform_nodes/{}".format(host_ip))
            logger.info(
                "Moving host {} from platform_nodes to failed_nodes".format(
                    host_ip))
            ### 2019.09.05
        logger.clear_handler()
        return cont_names

    def update_platform_status(self):
        """
        Refresh /platform/orchestrator/platform_status in etcd with the
        containers currently running on every platform node.
        Returns:
            names_by_hosts(dict)
        """
        logger_2 = Logger(filename="orchestrator",
                          logger_name="DecisionMaker update_platform_status",
                          dirname="/aux1/ocorchestrator/")
        names_by_hosts = {}
        ###etcd way
        orchestrator_conf = self.etcd_manager. \
            get_etcd_orchestrator_config()['platform']['orchestrator']
        for host in orchestrator_conf['platform_nodes']:
            ###etcd way
            names_by_hosts[host] = {}
            try:
                docker_api = DecisionMaker.get_docker_api(host)
                for container in docker_api.containers.list():
                    app_name_search = re.search(r'(.*?)_\d+', container.name)
                    if app_name_search:
                        app_name = app_name_search.group(1)
                        if app_name not in names_by_hosts[host]:
                            names_by_hosts[host][app_name] = {}
                        names_by_hosts[host][app_name].update(
                            {container.name: orchestrator_conf['types_instances'][app_name][container.name]})
            except Exception:
                logger_2.error(
                    "Can't establish connection with {} host!".format(host))
        self.etcd_manager.write("/platform/orchestrator/platform_status",
                                str(names_by_hosts))
        logger_2.clear_handler()
        return names_by_hosts

    def take_containers_by_hosts(self):
        """
        Count containers per application on every platform node.
        Returns:
            names_by_hosts(dict)
        """
        names_by_hosts = {}
        ###orchastrator.json way
        # for host in parse_config('orchastrator.json')['platform_nodes']:
        ###orchastrator.json way
        ###etcd way
        orchestrator_conf = self.etcd_manager. \
            get_etcd_orchestrator_config()['platform']['orchestrator']
        for host in orchestrator_conf['platform_nodes']:
            ###etcd way
            names_by_hosts[host] = dict(
                Counter(self.list_containers_by_host(host)))

        return names_by_hosts

    def counting_app_by_host(self, application):
        """
		Counting application by hosts
		Args:
			application(str)
		Returns:
			container_count(str)
		"""
        apps_by_hosts = self.take_containers_by_hosts()
        container_count = {}
        for host in apps_by_hosts.keys():
            if application not in apps_by_hosts[host]:
                # return host
                container_count[host] = {application: 0}
            else:
                container_count[host] = {
                    application: apps_by_hosts[host][application]
                }

        return container_count

    def calculating_app_on_hosts(self):
        """
		Args:
			None
		Returns:
			app_counts(dict)
		"""

        app_counts = {}
        for app in self.etcd_manager.get_application_instances():
            app_count = self.counting_app_by_host(app)
            number = 0
            for host in app_count:
                number += app_count[host][app]
            app_counts[app] = number

        return app_counts

    def check_for_releasing_node(self):
        """
		Check for finding a node that can 
		be released if it is not necessary
		Args:
			None
		Returns:
			host_for_release(str) or None		
		"""
        thresholds = literal_eval(
            self.etcd_manager.read_key("/platform/orchestrator/thresholds"))
        orchestrator_conf = self.etcd_manager. \
             get_etcd_orchestrator_config()['platform']['orchestrator']
        apps_count = self.calculating_app_on_hosts()
        curr_nodes_number = len(orchestrator_conf['platform_nodes'])
        validation_flag = True
        for app in apps_count.keys():
            app_count = apps_count[app]
            app_per_node = '{}_per_node'.format(app)
            if curr_nodes_number * thresholds[app_per_node] - app_count < \
                    thresholds[app_per_node]:
                validation_flag = False
        if validation_flag:
            # return "Should be released some node"
            all_app_count = 1000
            host_for_release = None
            names_by_hosts = self.take_containers_by_hosts()
            for host in names_by_hosts.keys():
                if host == orchestrator_conf['master']:
                    continue
                curr_count = 0
                for app in names_by_hosts[host].keys():
                    curr_count += names_by_hosts[host][app]
                if curr_count < all_app_count:
                    host_for_release = host
                    all_app_count = curr_count
            return host_for_release
        else:
            return None

    def making_host_decision(self, application, decision, release_node=False):
        """
        Make decision on which host to run container
        Args:
            application(str)
            decision(str)
            release_node(str or False)
        Returns:
            host(str)
        """
        orchestrator_conf = self.etcd_manager. \
            get_etcd_orchestrator_config()['platform']['orchestrator']
        thresholds = literal_eval(
            self.etcd_manager.read_key("/platform/orchestrator/thresholds"))
        # swarm_manager = SwarmManagment()
        app_per_node = "{}_per_node".format(application)
        app_by_hosts = self.counting_app_by_host(application)
        if release_node:
            del app_by_hosts[release_node]
        host_number = len(app_by_hosts.keys())
        if decision == 'up':
            application_number = 0
            for host in app_by_hosts.keys():
                if app_by_hosts[host][application] == 0:
                    return host
                else:
                    application_number += app_by_hosts[host][application]
            average_app_number = application_number / host_number
            logger_2 = Logger(filename="orchestrator",
                              logger_name="DecisionMaker making_host_decision",
                              dirname="/aux1/ocorchestrator/")
            logger_2.info("Application {} ||| Average => {}\tApp_per_node => {}".
                          format(application, average_app_number, thresholds[app_per_node]))
            logger_2.clear_handler()
            ###logic for adding node to the swarm
            if average_app_number >= float(thresholds[app_per_node]):
                if len(list(orchestrator_conf['available_nodes'].keys())) != 0:
                    available_nodes = list(
                        orchestrator_conf['available_nodes'].keys())
                    new_node = available_nodes[0]
                    self.etcd_manager.remove_key(
                        "/platform/orchestrator/available_nodes/{}".format(new_node))
                    self.etcd_manager.write(
                        "/platform/orchestrator/platform_nodes/{}".format(new_node), '1')
                    return new_node
                else:
                    logger = Logger(filename="orchestrator",
                                    logger_name="DecisionMaker making_host_decision",
                                    dirname="/aux1/ocorchestrator/")
                    logger.critical(
                        "There are no available servers; should look at "
                        "host stats and run the container on the "
                        "lowest-loaded host")
                    logger.clear_handler()
            ###logic for adding node to the swarm
            for host in app_by_hosts.keys():
                if app_by_hosts[host][application] < average_app_number and \
                        app_by_hosts[host][application] < float(thresholds[app_per_node]):
                    return host
            # Fall back to the first host
            for host in app_by_hosts.keys():
                return host
        elif decision == 'down':
            application_number = 0
            for host in app_by_hosts.keys():
                application_number += app_by_hosts[host][application]

            min_app = "{}_min".format(application)
            logger = Logger(filename="orchestrator",
                            logger_name="DecisionMaker making_host_decision",
                            dirname="/aux1/ocorchestrator/")
            logger.warning("Application => {}\tmin_apps on platform => {}\tcurrent app_num {}".
                           format(application, thresholds[min_app], application_number))
            logger.clear_handler()
            if application_number == float(thresholds[min_app]):
                return None

            average_app_number = application_number / host_number
            for host in app_by_hosts.keys():
                if app_by_hosts[host][application] > average_app_number and \
                        app_by_hosts[host][application] < thresholds[app_per_node]:
                    return host
            # Fall back to the first host
            for host in app_by_hosts.keys():
                return host

    def release_node(self, host):
        """
        Stop all containers on the passed node,
        move them to the other hosts in self.platform_nodes,
        and move the host to available_nodes
        Args:
            host(str)
        Returns:
            None
        """
        container_manager = ContainerManagement()
        apps_by_host = container_manager.get_container_names_by_host(host)
        for app in apps_by_host:
            app_name_search = re.search(r'(.*?)_\d+', app)
            if not app_name_search:
                continue
            app_name = app_name_search.group(1)
            container_manager.stop_container(name=app, host_ip=host)
            new_host = self.making_host_decision(application=app_name,
                                                 decision='up',
                                                 release_node=host)
            container_manager.run_container_name(host_ip=new_host,
                                                 application=app_name,
                                                 container_hostname=app)
        #####
        self.etcd_manager.remove_key(
            "/platform/orchestrator/platform_nodes/{}".format(host))
        self.etcd_manager.write(
            "/platform/orchestrator/available_nodes/{}".format(host), '1')
        #####
        logger = Logger(filename="orchestrator",
                        logger_name="DecisionMaker release_node",
                        dirname="/aux1/ocorchestrator/")
        logger.warning("Releasing node {} was successful!".format(host))
        logger.clear_handler()
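
A minimal driver sketch for DecisionMaker, assuming a reachable etcd instance and Docker daemons listening on tcp://<host>:2375; the application name "webapp" is a placeholder:

# Hypothetical usage of the DecisionMaker class shown above.
decision_maker = DecisionMaker()
decision_maker.update_platform_status()                     # refresh platform_status in etcd
host = decision_maker.making_host_decision("webapp", "up")  # pick a host for a new instance
print("run next container on:", host)
node = decision_maker.check_for_releasing_node()
if node:                                                    # some node is underutilized
    decision_maker.release_node(node)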
Example #4
class SwarmManagment:
	"""
	Swarm manager class
	"""

	def __init__(self):
		"""
		Constructor of swarm manager
		Attributes:
			available_nodes(list)
			platform_nodes(list)
			user(str)
			password(str)
			master_node(str)
			token(str)
		"""

		###orchastrator.json way
		# self.ssh_client = paramiko.SSHClient()
		# self.ssh_client.load_system_host_keys()
		# self.available_nodes = parse_config("orchastrator.json")["available_nodes"]
		# self.platform_nodes = parse_config("orchastrator.json")["platform_nodes"]
		# self.user = parse_config("orchastrator.json")["user"]
		# self.password = parse_config("orchastrator.json")["password"]
		# self.master_nodes = parse_config("orchastrator.json")["master_nodes"]
		# self.__master = parse_config("orchastrator.json")["master"]
		# self.__token = parse_config("orchastrator.json")["token"]
		###orchastrator.json way
		
		###etcd way
		self.etcd_manager = EtcdManagement()
		self.orchastrator_config = self.etcd_manager.get_etcd_orchestrator_config()['platform']['orchestrator']
		self.ssh_client = paramiko.SSHClient()
		self.ssh_client.load_system_host_keys()
		self.available_nodes = self.orchastrator_config["available_nodes"]
		self.platform_nodes = self.orchastrator_config["platform_nodes"]
		# self.user = self.orchastrator_config["user"]
		# self.password = self.orchastrator_config["password"]
		# self.master_nodes = self.orchastrator_config["master_nodes"]
		self.__master = self.orchastrator_config["master"]
		self.__token = self.orchastrator_config["token"]

		self.user = "******"
		self.password = "******"
		self.master_nodes = "None"
		# self.user = parse_config("orchastrator.json")["user"]
		# self.password = parse_config("orchastrator.json")["password"]
		# self.master_nodes = parse_config("orchastrator.json")["master_nodes"]
		###etcd way



	@staticmethod
	def get_docker_api(host_ip):
		"""
		Get docker api client
		Args:
			host_ip(str)
		"""
		return docker.DockerClient(base_url='tcp://{}:2375'.format(host_ip))



	def add_server(self, host_ips):
		"""
		Add server to available_nodes
		If the server consist in the self.available_nodes
	 	it won't be add
		Args:
			host_ips(list or str)
		Returns:
			Append to self.available_nodes the host_ips
		"""
		logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment add_server", dirname="/aux1/ocorchestrator/")
		if isinstance(host_ips, str):
			if host_ips not in self.available_nodes:
				self.available_nodes.append(host_ips)
###orchastrator.json way
				# update_config("orchastrator.json", "available_nodes", host_ips, state='add')
###orchastrator.json way
###etcd way
				self.etcd_manager.write("/orchastrator/available_nodes/{}".format(host_ips), "")
###etcd way

			else:
				# print("The host ip is already in the list")
				logger.info("The host ip is already in the list")
				logger.clear_handler()
		elif isinstance(host_ips, list):
			self.available_nodes = list(set(self.available_nodes + host_ips))
###orchastrator.json way
			# update_config("orchastrator.json", "available_nodes", host_ips, state='add')
###orchastrator.json way
###etcd way
			# Write one key per host; formatting the whole list into a
			# single key path would create a malformed key.
			for host_ip in host_ips:
				self.etcd_manager.write("/orchastrator/available_nodes/{}".format(host_ip), "")
###etcd way
		else:
			logger.error("Server should be list or string")
			logger.clear_handler()
			raise TypeError("Server should be list or string")


	def add_swarm_server(self, host_ip):
		"""
		Add server to platform_nodes
		If the server consist in the list it won't be add
		Args:
			host_ips(str)
		Returns:
			Append to self.platform_nodes the host_ip
		"""
		logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment add_swarm_server", dirname="/aux1/ocorchestrator/")
		if isinstance(host_ip, str):
			if host_ip not in self.platform_nodes:
				self.platform_nodes.append(host_ip)
###orchastrator.json way
				# update_config("orchastrator.json", "platform_nodes", host_ip, state='add')
###orchastrator.json way
###etcd way
				self.etcd_manager.write("/orchastrator/platform_nodes/{}".format(host_ip), "")
###etcd way
			else:
				# print("The host ip is already in the list")
				logger.info("The host ip is already in the list")
				logger.clear_handler()


	def list_available_nodes(self):
		"""
		List the available servers remain
		Returns:
			self.available_nodes(list)
		"""
###orchastrator.json way
		# return parse_config("orchastrator.json")["available_nodes"]
###orchastrator.json way
###etcd way
		return self.orchastrator_config["available_nodes"]
###etcd way


	def list_platform_nodes(self):
		"""
		List the servers in the swarm
		Returns:
			self.platform_nodes(list)
		"""
###orchastrator.json way
		# return parse_config("orchastrator.json")["platform_nodes"]
###orchastrator.json way
###etcd way
		return self.orchastrator_config["platform_nodes"]
###etcd way


	def remove_available_server(self, host_ip):
		"""
		Remove server ip from self.available_nodes
		Args:
			host_ip(str)
		"""
		self.available_nodes.remove(host_ip)
###orchastrator.json way
		# update_config("orchastrator.json", "available_nodes", host_ip, state='remove')
###orchastrator.json way
###etcd way
		self.etcd_manager.remove_key("/orchastrator/available_nodes/{}".format(host_ip))
###etcd way


	def remove_swarm_server(self, host_ip):
		"""
		Remove server ip from self.platform_nodes
		Args:
			host_ip(str)
		"""
		if host_ip in self.platform_nodes:
			self.platform_nodes.remove(host_ip)
###orchastrator.json way
			# update_config("orchastrator.json", "platform_nodes", host_ip, state='remove')
###orchastrator.json way
###etcd way
			self.etcd_manager.remove_key("/orchastrator/platform_nodes/{}".format(host_ip))
###etcd way
		else:
			logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment remove_swarm_server", dirname="/aux1/ocorchestrator/")		
			logger.error("Node {} can't be removed from platform_nodes (It is not in platform_nodes)".format(host_ip))
			logger.clear_handler()			

	def join_server_swarm(self, host_ip):
		"""
		Join server to the swarm
		Args:
			host_ip(str)
		"""
		#####First way
		# self.ssh_client.connect(host_ip, username=self.user, password=self.password)
		# _, stdout, _ = self.ssh_client.exec_command('docker swarm join --token {} {}:2377'. \
		# 											format(self.__token, self.__master))
		# stdout = '\n'.join(map(lambda x: x.rstrip(), stdout.readlines()))
		# if re.search(r'This node joined a swarm as a worker', stdout, re.I|re.S):
		# 	self.remove_available_server(host_ip)
		# 	self.add_swarm_server(host_ip)
		# else:
		# 	return "Node {} can't be joined to the swarm".format(host_ip)


		#####Second way

		logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment join_server_swarm", dirname="/aux1/ocorchestrator/")		
		docker_api = self.get_docker_api(host_ip)
		response = False
		try:
###orchastrator.json way
			# response = docker_api.swarm.join(remote_addrs= \
			# 				[parse_config("orchastrator.json")["master"]], \
			# 				join_token = parse_config("orchastrator.json")["token"])
###orchastrator.json way
###etcd way
			response = docker_api.swarm.join(remote_addrs= \
							[self.orchastrator_config["master"]], \
							join_token = self.orchastrator_config["token"])
###etcd way
		except docker.errors.APIError:
			logger.info("Exception while joining the swarm; config will be updated and corrected")
			logger.clear_handler()
			self.remove_available_server(host_ip)
			self.add_swarm_server(host_ip)
			return

		if response:
			logger.info("Node {} was successfully joined to the swarm".format(host_ip))
			logger.clear_handler()
			self.remove_available_server(host_ip)
			self.add_swarm_server(host_ip)
		else:
			logger.error("Node {} can't be joined to the swarm".format(host_ip))
			logger.clear_handler()
			return "Node {} can't be joined to the swarm".format(host_ip)

		#####Second way

	def leave_server_swarm(self, host_ip):
		"""
		Leave server from the swarm
		Args:
			host_ip(str)
		"""

		#####First way
		# if host_ip in parse_config("orchastrator.json")["master_nodes"]:
		# 	print("Demoting the node from manager")
		# 	self.demote_manager(host_ip)

		# self.ssh_client.connect(host_ip, username=self.user, password=self.password)
		# _, stdout, _ = self.ssh_client.exec_command('docker swarm leave')
		# stdout = '\n'.join(map(lambda x: x.rstrip(), stdout.readlines()))
		# print("STDOUT => {}".format(stdout))
		# stdout = "Node left the swarm"
		# hostname = self.get_hostname(host_ip)
		# if re.search(r'Node left the swarm', stdout, re.I|re.S):
		# 	print("YEEEEE")
		# 	self.ssh_client.connect(self.__master, username=self.user, password=self.password)
		# 	_, leave_stdout, _ = self.ssh_client.exec_command('docker node rm -f {}'.format(hostname))
		# 	leave_stdout = '\n'.join(map(lambda x: x.rstrip(), leave_stdout.readlines()))
		# 	self.add_server(host_ip)
		# 	self.remove_swarm_server(host_ip)						
		# else:
		# 	return "Node {} can't left the swarm for some reason".format(host_ip)

		#####Second way
		logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment leave_server_swarm", dirname="/aux1/ocorchestrator/")
		docker_api = self.get_docker_api(host_ip)
		response = docker_api.swarm.leave(force=True)
		if response:
			self.add_server(host_ip)
			self.remove_swarm_server(host_ip)		
		else:
			logger.error("Node {} couldn't leave the swarm for some reason".format(host_ip))
			logger.clear_handler()
			return "Node {} couldn't leave the swarm for some reason".format(host_ip)

	def add_master_node(self, host_ip):
		"""
		Add server ip to self.master_nodes
		Args:
			host_ip(str)
		"""
		self.master_nodes.append(host_ip)
###orchastrator.json way
		update_config("orchastrator.json", "master_nodes", host_ip, state='add')
###orchastrator.json way





	def remove_master_node(self, host_ip):
		"""
		Remove server ip to self.master_nodes
		Args:
			host_ip(str)
		"""
		self.master_nodes.remove(host_ip)
###orchastrator.json way
		update_config("orchastrator.json", "master_nodes", host_ip, state='remove')
###orchastrator.json way


	def promote_to_manager(self, host_ip):
		"""
		Promote the server to manager in the swarm
		Args:
			host_ip(str)
		"""
		logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment promote_to_manager", dirname="/aux1/ocorchestrator/")
		hostname = self.get_hostname(host_ip)
		self.ssh_client.connect(self.__master, username=self.user, password=self.password)
		_, promoted_stdout, _ = self.ssh_client.exec_command('docker node promote {}'.format(hostname))
		promoted_stdout = '\n'.join(map(lambda x: x.rstrip(), promoted_stdout.readlines()))
		if re.search(r'promoted to a manager in the swarm', promoted_stdout, re.I|re.S):
			self.add_master_node(host_ip)
		else:
			logger.error("Node {} can't be promoted to manager".format(host_ip))
			logger.clear_handler()
			return "Node {} can't be promoted to manager".format(host_ip)

	def demote_manager(self, host_ip):
		"""
		Demote the server from manager in the swarm
		Args:
			host_ip(str)
		"""
		logger = Logger(filename = "orchastrator", logger_name = "SwarmManagment demote_manager", dirname="/aux1/ocorchestrator/")
		hostname = self.get_hostname(host_ip)
		self.ssh_client.connect(self.__master, username=self.user, password=self.password)
		_, demoted_stdout, _ = self.ssh_client.exec_command('docker node demote {}'.format(hostname))
		demoted_stdout = '\n'.join(map(lambda x: x.rstrip(), demoted_stdout.readlines()))
		if re.search(r'demoted in the swarm', demoted_stdout, re.I|re.S):
			self.remove_master_node(host_ip)
		else:
			logger.error("Node {} can't be demoted from manager".format(host_ip))
			logger.clear_handler()
			return "Node {} can't be demoted from manager".format(host_ip)

	def get_hostname(self, host_ip):
		"""
		Take the hostname of the server ip
		Args:
			host_ip(str)
		Returns:
			hostname(str)
		"""
		self.ssh_client.connect(host_ip, username=self.user, password=self.password)
		_, hostname, _ = self.ssh_client.exec_command('hostname')
		hostname = '\n'.join(map(lambda x: x.rstrip(), hostname.readlines()))

		return hostname.strip()


	def change_master(self, host_ip):
		"""
		Change the self.__master
		Args:
			host_ip(str)
		"""
		self.__master = host_ip
###orchastrator.json way
		# update_config("orchastrator.json", "master", host_ip, state="add")
###orchastrator.json way
###etcd way
		self.etcd_manager.write("/orchastrator/master", host_ip)
###etcd way


	def change_token(self, token):
		"""
		Change the self.__token
		Args:
			token(str)
		"""
		self.__token = token
###orchastrator.json way
		# update_config("orchastrator.json", "token", token, state="add")
###orchastrator.json way
###etcd way
		self.etcd_manager.write("/orchastrator/token", token)
###etcd way
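
A minimal usage sketch for SwarmManagment, assuming the etcd config is populated and each node exposes the Docker API on tcp://<host>:2375 as get_docker_api expects; the IP is a placeholder:

# Hypothetical usage of the SwarmManagment class shown above.
swarm = SwarmManagment()
swarm.add_server("10.0.0.5")           # register a spare node
swarm.join_server_swarm("10.0.0.5")    # move it into the swarm
print(swarm.list_platform_nodes())
swarm.leave_server_swarm("10.0.0.5")   # release it back to available_nodes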