class NodeManager:
    """Manages the full lifecycle of nodes against the persistence store:
    registration, provisioning, assignment to users, modification,
    deactivation, deletion and bulk software updates over SSH.
    """

    # default path of the xpresso configuration file
    config_path = XprConfigParser.DEFAULT_CONFIG_PATH

    def __init__(self, persistence_manager):
        """
        Args:
            persistence_manager: data-access object exposing
                find/insert/update/delete on named collections
        """
        self.config = XprConfigParser(self.config_path)
        self.logger = XprLogger()
        self.persistence_manager = persistence_manager

    def register_node(self, node_json):
        """
            registers a new node in the database if the server is available

            checks if the node already exists and then checks if the server
            with node ip_address is available. Then adds the node to database

            Parameters:
                node_json [json]: json with node information

            Return:
                Success -> 'OK' [str] : returns 'OK' as response
                Failure -> [str] : returns appropriate failure response
        """
        self.logger.info("registering a new node")
        self.logger.debug(f"node info provided is : {node_json}")
        new_node = Node(node_json)
        self.logger.info('checking if the mandatory fields are provided')
        new_node.validate_mandatory_fields()
        self.logger.info('checking if the address of node is valid')
        new_node.validate_node_address()
        # a freshly registered node starts unprovisioned but active
        new_node.set('provisionStatus', False)
        new_node.set('activationStatus', True)

        self.logger.info("provisionStatus and activationStatus fields are set")

        self.logger.info("adding node to the database")

        self.persistence_manager.insert("nodes", new_node.data, False)
        self.logger.info("node successfully added to the persistence")

    def get_nodes(self, filter_json, apply_display_filter=True):
        """
            Gets the list of nodes in the database

            Calls the persistence with input filters to fetch the list of nodes
            After fetching, the nodes list is filtered before sending
            as output in order to send relevant information only

            Parameters:
                filter_json [json] : json with filter key & value pairs
                apply_display_filter [bool] : when True, strip each node down
                    to its display fields before returning

            Return:
                Success -> [list] : returns list of nodes
                Failure -> [str] : returns persistence failure response
        """
        self.logger.debug(f"filter_json is : {filter_json}")
        self.logger.info("getting the list of nodes from persistence")
        nodes = self.persistence_manager.find("nodes", filter_json)
        self.logger.info("filtering nodes before sending as output")
        if apply_display_filter:
            filtered_nodes = []
            for node_json in nodes:
                temp_node = Node(node_json)
                temp_node.filter_display_fields()
                filtered_nodes.append(temp_node.data)
            nodes = filtered_nodes
        # FIX: the payload was previously passed as a positional logger
        # argument instead of being formatted into the message
        self.logger.debug(f"Output of Nodes sent : {nodes}")
        return nodes

    def provision_node(self, provision_json):
        """
            provisions a node either for deployment or development

            checks if node is available and then connects to the server
            through ssh and runs appropriate packages on the server

            Parameters:
                provision_json [json] : json with node provision info
                    (must include 'address' and 'nodetype')

            Return:
                Success -> 'OK' [str] : returns OK if provision_node succeeds
                Failure -> [str] : returns appropriate failure response

            Raises:
                NodeNotFoundException: node is not registered
                ProvisionKubernetesException: kubernetes setup failed
                InvalidProvisionInfoException: provision data is invalid
        """
        self.logger.debug(f"provision_node info is: {provision_json}")
        self.logger.info("provision of a node is requested")
        new_node = Node(provision_json)
        new_node.provision_info_check()
        address = provision_json["address"]
        node_id_json = {"address": address}
        node_type = provision_json['nodetype']
        self.logger.info("checking persistence if the node is registered")
        node = self.persistence_manager.find("nodes", node_id_json)
        if not node:
            self.logger.error("Node not found")
            raise NodeNotFoundException("Node not found to provision")

        # merge the registered node record into the provision request
        for (key, val) in node[0].items():
            new_node.set(key, val)

        # FIX: replaced stray print() debugging with logger calls
        self.logger.debug(f"merged node data: {new_node.data}")
        new_node.provision_node_check(provision_json, self.persistence_manager)
        self.logger.debug("provision_node_check passed")
        # DEVVM nodes are standalone; only cluster nodes update cluster info
        if node_type != 'DEVVM':
            self.logger.debug("updating cluster")
            new_node.update_cluster(provision_json, self.persistence_manager)
        self.logger.info("provision node in progress")
        provision_status = new_node.provision_node_setup()
        if provision_status == 1:
            self.logger.info("provision of node is successful")
            update_json = {"provisionStatus": True, "nodetype": node_type}
            if node_type == 'CLUSTER_MASTER':
                update_json["cluster"] = provision_json["cluster"]
            elif node_type == 'CLUSTER_WORKER':
                update_json["masterip"] = provision_json["masterip"]
            else:
                update_json["flavor"] = provision_json["flavor"].lower()

            self.persistence_manager.update("nodes", node_id_json, update_json)
        elif provision_status == 0:
            self.logger.error("provision failed: kubernetes error")
            raise ProvisionKubernetesException("Provision Failed")
        else:
            self.logger.error('given provision node data is invalid')
            raise InvalidProvisionInfoException("Provision data is invalid")

    def deactivate_node(self, node_id):
        """
            Deactivates a node in persistence

            Deletes all the installed packages of the node on server
            then deactivates the node in database

            Parameters:
                node_id [str] : name (address) of the node

            Return:
                returns appropriate output

            Raises:
                NodeNotFoundException: node is not registered
                NodeDeactivatedException: node is already deactivated
                ProvisionKubernetesException: deprovision failed
        """
        self.logger.info(f"request received for deactivating node {node_id}")
        node_id_json = {"address": node_id}
        self.logger.info("checking persistence if node is present or not")
        nodes = self.persistence_manager.find("nodes", node_id_json)

        if not nodes:
            raise NodeNotFoundException("Node not found for deactivation")

        if 'activationStatus' in nodes[0] and not nodes[0]['activationStatus']:
            self.logger.error("This node is already deactivated")
            raise NodeDeactivatedException()

        new_node = Node(nodes[0])

        # deletes all the packages installed on the node
        self.logger.info("deleting all packages on the node")
        node_deprovision = 1
        if new_node.data["provisionStatus"]:
            # deprovision shall be called only on provisioned nodes
            node_deprovision = new_node.deprovision_node()
        if node_deprovision == 1:
            self.logger.info("deleted all of the packages on node")
            # marks the node inactive instead of deleting the record
            self.logger.info('deactivating node from persistence')
            deactivate_json = {
                "activationStatus": False,
                "provisionStatus": False
            }
            self.persistence_manager.update("nodes", node_id_json,
                                            deactivate_json)
            return XprResponse('success', '', {})
        else:
            self.logger.error('Node deletion failed: kubernetes error')
            raise ProvisionKubernetesException("Deactivation Failed")

    def assign_node(self, assign_json):
        """
            assigns a node to a user

            assigns a node with development vm type to a user

            Parameters:
                assign_json [json] : Json with assignation info
                    (requires non-empty 'user' and 'node' keys)

            Return:
                returns appropriate output

            Raises:
                IncompleteNodeInfoException, UserNotFoundException,
                NodeNotFoundException, UnProvisionedNodeException,
                NodeTypeException, NodeAlreadyAssignedException,
                NodeAssignException
        """
        if 'user' not in assign_json or 'node' not in assign_json:
            self.logger.error("Incomplete information in assign_json")
            raise IncompleteNodeInfoException("user and node info is required")
        elif not len(assign_json['user']) or not len(assign_json['node']):
            self.logger.error("Incomplete information in assign_json")
            raise IncompleteNodeInfoException(
                "user & node info shouldn't be empty")

        uid_json = {"address": assign_json['node']}
        user = assign_json['user']
        users = UserManager(self.persistence_manager).get_users({"uid": user})
        nodes = self.persistence_manager.find('nodes', uid_json)
        if len(users) == 0:
            raise UserNotFoundException("User not found")
        elif len(nodes) == 0:
            raise NodeNotFoundException("Node not found")
        else:
            if 'provisionStatus' not in nodes[0]:
                raise UnProvisionedNodeException("Node is not provisioned")
            elif not nodes[0]['provisionStatus']:
                raise UnProvisionedNodeException("Node is not provisioned")
            elif nodes[0]['nodetype'] != 'DEVVM':
                raise NodeTypeException(
                    "Assign only work form node types of devvm")

            # FIX: the user record may have no 'nodes' key yet (the update
            # branch below guards for that); .get avoids a KeyError here
            user_nodes = []
            for node_dict in users[0].get('nodes', []):
                user_nodes.append(node_dict['address'])
            if assign_json['node'] in user_nodes:
                raise NodeAlreadyAssignedException()

        new_node = Node(nodes[0])
        out, err = new_node.assign_node_to_user(user)
        try:
            # no stdout but some stderr output means the remote command failed
            if not len(out.readlines()) and len(err.readlines()):
                self.logger.error("assignation failed because of errors")
                raise NodeAssignException(
                    "Assignation failed due to internal error")
            temp_node = {'address': nodes[0]['address']}
            assigned_nodes = users[0].get('nodes', [])
            assigned_nodes.append(temp_node)
            self.persistence_manager.update('users', {"uid": user},
                                            {"nodes": assigned_nodes})
        # FIX: a bare 'except:' previously swallowed the deliberate raise
        # above and discarded the original cause
        except NodeAssignException:
            raise
        except Exception as error:
            self.logger.error(f"node assignation failed: {error}")
            raise NodeAssignException(
                "Assignation failed due to internal error") from error

    def modify_node(self, changes_json):
        """
            modify_node updates the node info in the persistence

            checks if node is available and then updates
            the info as per changes_json

            Parameters:
                changes_json [json] : json with node changes info
                    (must include 'address')

            Return:
                returns xprresponse object

            Raises:
                IncompleteNodeInfoException: address missing
                NodeNotFoundException: node is not registered
                CallDeactivateNodeException: deactivation requested here
        """
        if 'address' not in changes_json:
            raise IncompleteNodeInfoException("Node address not provided")

        uid_json = {"address": changes_json['address']}
        self.logger.info(f"Modifying node information of {uid_json}")
        self.logger.debug(f"Info provided to be modified is {changes_json}")
        # checks if the node is present in persistence
        self.logger.info("Checking if the node is present in the persistence")
        node = self.persistence_manager.find("nodes", uid_json)
        if not node:
            self.logger.error(
                f"node {uid_json['address']} not found in the persistence")
            raise NodeNotFoundException()

        # deactivation must go through deactivate_node, not modify_node
        if 'activationStatus' in changes_json and \
                not changes_json['activationStatus']:
            raise CallDeactivateNodeException()

        self.logger.info("updating the node information")
        self.persistence_manager.update("nodes", uid_json, changes_json)

    def delete_node(self, node_id):
        """
            deletes the node from persistence

            Deletes all the installed packages of the node on server
            then deletes the node from database

            Parameters:
                node_id [str] : name (address) of the node

            Return:
                returns appropriate output

            Raises:
                NodeNotFoundException: node is not registered
                NodeDeletionKubernetesException: deprovision failed
        """
        # FIX: log message said "deactivating" (copy-paste from
        # deactivate_node); this method deletes the node
        self.logger.info(f"request received for deleting node {node_id}")
        node_id_json = {"address": node_id}
        self.logger.info("checking persistence if node is present or not")
        nodes = self.persistence_manager.find("nodes", node_id_json)

        if nodes:
            self.logger.info("deleting all packages on the node")
            new_node = Node(nodes[0])
            node_deletion = new_node.deprovision_node()
            if node_deletion == 1:
                self.logger.info("deleted all of the packages on node")
                # deletes the node entry from the database
                self.logger.info('deleting node from persistence')
                self.persistence_manager.delete("nodes", node_id_json)
            else:
                self.logger.error('Node deletion failed: kubernetes error')
                raise NodeDeletionKubernetesException()
        else:
            raise NodeNotFoundException()

    def update_all_nodes(self, filter_json=None, branch_name="master"):
        """
        Update the xpresso project in all the nodes
        Args:
            filter_json: dictionary to update specific set of nodes
            branch_name: name of the branch to which xpresso project will be
                         updated

        Returns:
            (list, list): list of updated nodes and list of non updated nodes
        """

        if filter_json is None:
            filter_json = {}
        filtered_node_list = self.get_nodes(filter_json=filter_json)
        updated_list = []
        non_updated_list = []

        update_cmd = (
            f"cd {self.config['general']['package_path']} && "
            f"python3 xpresso/ai/admin/infra/clients/xpr_pkg.py "
            f"--conf config/common.json "
            f"--type install "
            f"--package UpdateLocalXpressoPackage "
            # FIX: the JSON argument was missing its opening quote, which
            # left an unbalanced "'" in the remote shell command
            f"--parameters '{{\"branch_name\": \"{branch_name}\"}}' && "
            f"cp config/common_{self.config['env']}.json "
            f"config/common.json ")
        self.logger.debug(update_cmd)
        for node in filtered_node_list:
            node_address = node["address"]
            ssh_client = SSHUtils(node_address)

            if ssh_client.client is None:
                self.logger.warning(
                    f"unable to login to server: {node_address}")
                non_updated_list.append(node_address)
                continue
            std_response = ssh_client.exec(update_cmd)
            self.logger.debug(f"\n\n STDERR : \n{std_response['stderr']}\n")
            if std_response['status'] == 0:
                updated_list.append(node_address)
            else:
                non_updated_list.append(node_address)
            ssh_client.close()
        return updated_list, non_updated_list
# --- Example #2 (separator artifact left over from source extraction) ---
class AptRepositoryPackage(AbstractPackage):
    """
    Sets up a private aptitude repository on an ubuntu VM.

    Downloads the configured packages and their dependencies, builds meta
    packages, signs everything with the configured GPG key and serves the
    result from an apache docker container on port 8500.
    """

    # Configuration Keys
    APT_SECTION = "apt-get-repo"
    NFS_PACKAGE_KEY = "nfs_package_folder"
    APT_PUBLIC_KEY = "public_key_file"
    APT_PRIVATE_KEY = "private_key_file"
    APT_HOSTED_PACKGE_KEY = "hosted_package"
    PACKAGE_LIST_KEY = "package-list"
    DOCKER_NAME = "docker-name"
    META_PACKAGE_KEY = "meta_packages_folder"
    DOCKER_FILE_PATH_KEY = "dockerfile-path"

    DOCKER_IMAGE_VERSION = 0.1

    def __init__(self,
                 config_path=XprConfigParser.DEFAULT_CONFIG_PATH,
                 executor=None):
        """
        Args:
            config_path: path to the xpresso configuration file
            executor: shell executor; defaults to a LocalShellExecutor
        """
        if not executor:
            executor = LocalShellExecutor()
        super().__init__(executor=executor)

        self.config = XprConfigParser(config_path)["packages_setup"]
        self.logger = XprLogger()

        self.apt_config = self.config[self.APT_SECTION]
        self.public_key = self.apt_config[self.APT_PUBLIC_KEY]
        self.private_key = self.apt_config[self.APT_PRIVATE_KEY]
        self.hosted_package_folder = self.apt_config[
            self.APT_HOSTED_PACKGE_KEY]
        # signing credentials are supplied later via execute(parameters=...)
        self.sign_paraphrase = None
        self.sign_key_id = None
        self.home_folder = os.getcwd()

    def execute(self, parameters=None):
        """Run the full repository setup pipeline.

        Args:
            parameters: optional dict with 'paraphrase' (GPG passphrase)
                and 'key_id' (GPG key id) used for package signing
        """
        if parameters:
            self.sign_paraphrase = parameters["paraphrase"]
            self.sign_key_id = parameters["key_id"]
        self.cleanup()
        self.pre_install()
        self.download_dependencies()
        self.setup_meta_packages()
        self.sign_packages()
        self.run_docker_container()

    def download_dependencies(self):
        """
        Generates the list of packages and all its dependencies. Download the
        packages into the directory

        install apt-rdepends to generate the list
        install apt-download to download the package
        Ignore error
        """

        with open(self.apt_config[self.PACKAGE_LIST_KEY]) as pkg_fp:
            pkg_list = pkg_fp.read().splitlines()

        if not pkg_list:
            return None
        os.chdir(self.home_folder)
        self.logger.info("Generating all dependencies")
        full_package_list = []
        for pkg in tqdm.tqdm(pkg_list):
            # grep -v "^ " keeps only the package names, not the indented
            # dependency detail lines
            script = 'apt-rdepends {} |  grep -v "^ "'.format(pkg)
            self.logger.info(script)
            try:
                (_, output, _) = self.executor.execute_with_output(script)
                dependencies = output.splitlines()
                full_package_list += [x.decode() for x in dependencies]
            except CommandExecutionFailedException:
                self.logger.warning("Package fetch failed")
        # de-duplicate before downloading
        full_package_set = set(full_package_list)
        # We have now full list. Download each of the dependencies.
        try:
            os.makedirs(self.hosted_package_folder, exist_ok=True)
            os.chdir(self.hosted_package_folder)
        except OSError:
            self.logger.info("Installation makes sense")

        self.logger.info("Download all dependencies => {}".format(os.getcwd()))
        self.logger.info(full_package_set)
        for pkg in tqdm.tqdm(list(full_package_set)):
            try:
                self.executor.execute(
                    "sudo -u xprops apt-get download {}".format(pkg))
            except CommandExecutionFailedException:
                self.logger.warning(
                    "Failed to download package {}".format(pkg))

    def setup_meta_packages(self):
        """
        Create meta package folder and build
        """
        os.chdir(self.home_folder)
        local_meta_folder = "{}/*.ns-control".format(
            self.apt_config[self.META_PACKAGE_KEY])
        self.logger.info(local_meta_folder)
        for meta_pkg in glob.iglob(local_meta_folder, recursive=True):
            try:
                abs_meta_pkg = os.path.join(os.getcwd(), meta_pkg)
                # each meta package is built in its own sub-folder of the
                # hosted package directory
                meta_pkg_folder = os.path.join(
                    self.hosted_package_folder,
                    os.path.basename(meta_pkg).split('.')[0])
                self.logger.info(meta_pkg_folder)
                os.makedirs(meta_pkg_folder, exist_ok=True)
                os.chdir(meta_pkg_folder)
                shutil.copy(abs_meta_pkg, '.')
                build_meta_pkg_script = "sudo -u xprops equivs-build {}".format(
                    os.path.basename(meta_pkg))
                self.logger.info(build_meta_pkg_script)
                self.logger.info(os.getcwd())
                self.executor.execute(build_meta_pkg_script)
            except OSError as e:
                # Ignoring
                self.logger.error(e)
                self.logger.error("Failed to create meta {}".format(meta_pkg))

    def sign_packages(self):
        """
        Sign packages using private key

        """
        os.chdir(self.home_folder)
        try:
            self.executor.execute('chmod 755 -R {}'.format(
                self.hosted_package_folder))
            self.logger.info("Importing Keys")
            self.executor.execute("gpg --import --batch {}".format(
                self.private_key))
            self.executor.execute(
                'expect -c "spawn gpg --edit-key {} '
                'trust quit; send \"5\ry\r\"; expect eof"'.format(
                    self.sign_key_id))
            os.chdir(self.hosted_package_folder)
            for deb_file in glob.iglob("{}/*.deb".format(
                    self.hosted_package_folder),
                                       recursive=True):
                self.executor.execute(
                    'dpkg-sig -g "--pinentry-mode loopback --passphrase {}" '
                    '--sign builder {}'.format(self.sign_paraphrase, deb_file))
            # regenerate the repository index files and sign the release
            self.executor.execute("apt-ftparchive packages . > Packages")
            self.executor.execute("gzip -c Packages > Packages.gz")
            self.executor.execute("apt-ftparchive release . > Release")
            self.executor.execute(
                'gpg --pinentry-mode loopback --passphrase {} '
                '--clearsign -o InRelease Release'.format(
                    self.sign_paraphrase))
            self.executor.execute(
                'gpg --pinentry-mode loopback --passphrase {} '
                '-abs -o Release.gpg Release'.format(self.sign_paraphrase))

        except OSError as error:
            # Ignoring
            # FIX: the message had a "{}" placeholder but no format call,
            # so the failure cause was never logged
            self.logger.error("Failed to sign packages: {}".format(error))

    def run_docker_container(self):
        """
        Start the docker container
        """
        self.cleanup()
        os.chdir(self.home_folder)
        self.logger.info(os.getcwd())
        # Copy public key in local
        shutil.copy(self.public_key, './public_key')

        try:
            client = docker.from_env()
            docker_image_tag = ':'.join([
                self.apt_config[self.DOCKER_NAME],
                str(self.DOCKER_IMAGE_VERSION)
            ])
            (_, build_log) = client.images.build(
                path=".",
                dockerfile=self.apt_config[self.DOCKER_FILE_PATH_KEY],
                tag=docker_image_tag,
                nocache=False)
            for line in build_log:
                self.logger.info(line)

            client.containers.run(image=docker_image_tag,
                                  name=self.apt_config[self.DOCKER_NAME],
                                  ports={"80/tcp": 8500},
                                  restart_policy={
                                      "Name": "on-failure",
                                      "MaximumRetryCount": 5
                                  },
                                  volumes={
                                      self.hosted_package_folder: {
                                          'bind':
                                          '/usr/local/apache2/htdocs/deb',
                                          'mode': 'rw'
                                      }
                                  },
                                  detach=True)

        except (docker.errors.APIError, docker.errors.NotFound) as e:
            self.logger.error(e)
            # FIX: .format() was called on a message without a placeholder,
            # so the container name was silently dropped
            self.logger.error("Could not build container {}".format(
                self.apt_config[self.DOCKER_NAME]))

    def pre_install(self):
        """
        Install required apt-get packages
        """
        try:
            self.executor.execute("apt-get update -y && "
                                  "apt-get -y install apt-rdepends "
                                  "dpkg-dev dpkg-sig expect apt-utils")
        except OSError:
            self.logger.error("Can not install the requirements")

    def cleanup(self, delete_packages=False):
        """
        Removes package and shutdown docker container

        Args:
            delete_packages: when True, also delete the hosted package folder
        """
        os.chdir(self.home_folder)
        if delete_packages:
            shutil.rmtree(self.hosted_package_folder)

        try:
            client = docker.from_env()
            apt_get_container = client.containers.get(
                self.apt_config[self.DOCKER_NAME])

            apt_get_container.stop()
            apt_get_container.remove()
        except (docker.errors.APIError, docker.errors.NotFound):
            self.logger.error("{} container failed to remove".format(
                self.apt_config[self.DOCKER_NAME]))

    def status(self):
        """Return True when the repository container is running."""
        try:
            client = docker.from_env()
            apt_get_container = client.containers.get(
                self.apt_config[self.DOCKER_NAME])

            if apt_get_container.status == "running":
                return True
        except (docker.errors.APIError, docker.errors.NotFound):
            self.logger.error("{} container not found".format(
                self.apt_config[self.DOCKER_NAME]))
            return False
        return False

    def install(self, parameters=None):
        """Alias for execute() to satisfy the AbstractPackage interface."""
        self.execute(parameters=parameters)

    def uninstall(self, **kwargs):
        """Remove the container and the hosted packages."""
        self.cleanup(delete_packages=True)

    def start(self, **kwargs):
        """Re-run the setup pipeline (rebuilds and restarts the container)."""
        self.execute()

    def stop(self, **kwargs):
        """Stop and remove the container; keep the packages on disk."""
        self.cleanup(delete_packages=False)
class KubernetesDeploy:
    """
    class containing methods to deploy a project in Kubernetes
    """
    def __init__(self, persistence_manager):
        """Set up logging, configuration and the deployment-files directory.

        Args:
            persistence_manager: data-access object for the backing store
        """
        self.persistence_manager = persistence_manager
        self.logger = XprLogger()
        self.config = XprConfigParser(XprConfigParser.DEFAULT_CONFIG_PATH)
        self.deployment_files_folder = (
            self.config['projects']['deployment_files_folder'])
        # make sure the folder for generated yaml files exists
        if not os.path.isdir(self.deployment_files_folder):
            os.makedirs(self.deployment_files_folder, 0o755)

    def deployment_yaml_generator(self, deployment):
        """
        generates yaml for creating deployment in kubernetes
        Args:
            deployment: Service Deployment
        Returns:
            str: path of yaml generated

        """
        self.logger.info('entering deployment_yaml_generator')
        # kubernetes names may not contain special characters; map them to '-'
        safe_project = project_utils.modify_string_for_deployment(
            deployment.project_name)
        safe_component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        # name used throughout the generated deployment file
        deployment_name = f'{safe_project}--{safe_component}'
        # standard xpresso deployment template
        with open("config/kubernetes-deployfile.yaml", "r") as template:
            template_text = template.read()
        yaml_content = self.populate_yaml_content(template_text, deployment,
                                                  deployment_name)

        filename = (f"{self.deployment_files_folder}"
                    f"/deployfile--{deployment_name}.yaml")
        with open(filename, "w+") as out_file:
            yaml.safe_dump(yaml_content, out_file)
        self.logger.info('exiting deployment_yaml_generator')
        return filename

    def persistent_volume_yaml_generator(self, deployment, persstence_type):
        """
        generates yaml for creating persistent volume
        Args:
            deployment: Any Deployment
            persstence_type: which persistent template variant to use
        Returns:
            str: path of yaml generated

        """
        self.logger.info('entering persistent_yaml_generator')

        # kubernetes names may not contain special characters; map them to '-'
        safe_project = project_utils.modify_string_for_deployment(
            deployment.project_name)
        safe_component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        # name used throughout the generated deployment file
        deployment_name = f'{safe_project}--{safe_component}'

        # standard xpresso persistent-volume template
        with open(f"config/kubernetes-persistent-{persstence_type}.yaml",
                  "r") as template:
            template_text = template.read()

        # fill in the template placeholders
        substitutions = [
            ("K8_XPRESSO_COMPONENT_NAME", deployment_name),
            ("K8_XPRESSO_PERSISTENT_STORAGE_SIZE", deployment.volume_size),
            ("K8_XPRESSO_PROJECT_NAME", safe_project),
        ]
        for placeholder, value in substitutions:
            template_text = template_text.replace(placeholder, str(value))
        yaml_content = yaml.safe_load(template_text)

        filename = (
            f"{self.deployment_files_folder}"
            f"/persistent-{persstence_type}-file--{deployment_name}.yaml")
        with open(filename, "w+") as out_file:
            yaml.safe_dump(yaml_content, out_file)
        self.logger.info('exiting persistent_yaml_generator')
        return filename

    def populate_yaml_content(self, content, deployment, deployment_name):
        """Fill the deployment template placeholders and parse the result.

        Replaces the K8_XPRESSO_* tokens in *content* with values from the
        deployment, then removes the volume sections when the deployment
        does not require persistence.

        Args:
            content: raw text of the deployment yaml template
            deployment: deployment object providing the substitution values
            deployment_name: kubernetes-safe name of the deployment
        Returns:
            dict: parsed yaml content
        """
        substitutions = [
            ("K8_XPRESSO_COMPONENT_NAME", deployment_name),
            ("K8_XPRESSO_COMPONENT_REPLICAS", deployment.replicas),
            ("K8_XPRESSO_COMPONENT_IMAGE_NAME", deployment.docker_image),
            ("K8_XPRESSO_COMPONENT_ENVIRONMENT_LIST", deployment.environment),
            ("K8_XPRESSO_PROJECT_LINUX_UID", deployment.project_linux_uid),
        ]
        if deployment.need_persistence():
            substitutions.append(("K8_XPRESSO_COMPONENT_VOLUME_MOUNT_PATH",
                                  deployment.volume_mount_path))
        for placeholder, value in substitutions:
            content = content.replace(placeholder, str(value))

        yaml_content = yaml.safe_load(content)

        # Remove persistence if not required
        if not deployment.need_persistence():
            try:
                pod_spec = yaml_content["spec"]["template"]["spec"]
                del pod_spec["volumes"]
                del pod_spec["containers"][0]["volumeMounts"]
            except (IndexError, KeyError):
                self.logger.warning("spec.template.spec.volumes not found")
        return yaml_content

    def service_yaml_generator(self, project_name, component, port):
        """
        generates yaml for creating service in kubernetes
        Args:
            project_name: project to be deployed
            component: component for which this yaml is generated
            port: array containing info of ports to be opened
        Returns: path of yaml generated

        """
        self.logger.info('entering service_yaml_generator')
        # standard xpresso service template
        with open("config/kubernetes-servicefile.yaml", "r") as template:
            template_text = template.read()
        # kubernetes names may not contain special characters; map them to '-'
        project_name = project_utils.modify_string_for_deployment(project_name)
        component = project_utils.modify_string_for_deployment(component)
        # normalize each port entry to valid JSON and parse it
        ports = [
            json.loads(
                project_utils.modify_string_for_deployment(
                    str(entry)).replace("'", '"'))
            for entry in port
        ]
        # name used throughout the generated service file
        service_name = f'{project_name}--{component}'
        yaml_content = yaml.safe_load(
            template_text.format(service_name, ports, service_name))
        filename = (f"{self.deployment_files_folder}"
                    f"/servicefile--{service_name}.yaml")
        with open(filename, "w+") as out_file:
            yaml.safe_dump(yaml_content, out_file)
        self.logger.info('exiting service_yaml_generator')
        return filename

    def namespace_yaml_generator(self, project_name):
        """
        generates yaml file to create a new namespace
        Args:
            project_name: name of the project to be deployed

        Returns: path of the yaml generated

        """
        self.logger.info('entering namespace_yaml_generator')
        # standard xpresso namespace template
        with open("config/kubernetes-namespacefile.yaml", "r") as template:
            template_text = template.read()
        # kubernetes names may not contain special characters; map them to '-'
        project_name = project_utils.modify_string_for_deployment(project_name)
        yaml_content = yaml.safe_load(template_text.format(project_name))
        filename = (f"{self.deployment_files_folder}"
                    f"/namespacefile--{project_name}.yaml")
        with open(filename, "w+") as out_file:
            yaml.safe_dump(yaml_content, out_file)
        self.logger.info('exiting namespace_yaml_generator')
        return filename

    def job_yaml_generator(self, deployment):
        """
        generates yaml file to create a kubernetes job

        Args:
            deployment: Any Deployment; must expose project_name,
                component_name, docker_image, environment, commands,
                replicas and (when persistence is needed) volume_mount_path

        Returns:
            str: path of yaml generated
        """
        self.logger.info('entering job_yaml_generator')
        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(
            deployment.project_name)
        component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        # this will be the name used in the job file
        job_name = '{}--{}'.format(project_name, component)

        # reading contents from the standard xpresso job yaml file
        with open("config/kubernetes-jobfile.yaml", "r") as f:
            content = f.read()

        needs_persistence = deployment.need_persistence()

        # substitute the template placeholders with the deployment values
        replacements = {
            "K8_XPRESSO_COMPONENT_NAME": job_name,
            "K8_XPRESSO_COMPONENT_IMAGE_NAME": deployment.docker_image,
            "K8_XPRESSO_COMPONENT_ENVIRONMENT_LIST": deployment.environment,
            "K8_XPRESSO_COMPONENT_COMMAND": deployment.commands,
            "K8_XPRESSO_COMPONENT_REPLICAS": deployment.replicas,
        }
        if needs_persistence:
            replacements["K8_XPRESSO_COMPONENT_VOLUME_MOUNT_PATH"] = \
                deployment.volume_mount_path
        for placeholder, value in replacements.items():
            content = content.replace(placeholder, str(value))

        yaml_content = yaml.safe_load(content)
        # Remove persistence sections if not required
        if not needs_persistence:
            try:
                del yaml_content["spec"]["template"]["spec"]["volumes"]
                del yaml_content["spec"]["template"]["spec"]["containers"][0][
                    "volumeMounts"]
            except (IndexError, KeyError):
                self.logger.warning("spec.template.spec.volumes not found")
        filename = "{}/jobfile--{}.yaml".format(self.deployment_files_folder,
                                                job_name)
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting job_yaml_generator')
        return filename

    def cronjob_yaml_generator(self, project_name, component, schedule, image,
                               environment, args):
        """
        Build the yaml file used to create a Kubernetes cronjob.

        :param environment: environment
        :param project_name: project name
        :param component: component name
        :param schedule: cron job schedule in standard cron format
        :param image: docker image
        :param args: array of args to run
        :return: path of yaml generated
        :raises InvalidCronScheduleException: when schedule is malformed
        """
        self.logger.info('entering cronjob_yaml_generator')
        if not project_utils.validate_cronjob_format(schedule):
            self.logger.error('Invalid cron schedule provided. Exiting.')
            raise InvalidCronScheduleException
        # read the standard xpresso cronjob template
        with open("config/kubernetes-cronjobfile.yaml", "r") as template:
            raw_template = template.read()
        # normalize special characters to '-'
        safe_project = project_utils.modify_string_for_deployment(project_name)
        safe_component = project_utils.modify_string_for_deployment(component)
        # name used inside the cronjob file
        cronjob_name = '{}--{}'.format(safe_project, safe_component)
        rendered = raw_template.format(cronjob_name, schedule, cronjob_name,
                                       image, environment, args)
        parsed_yaml = yaml.safe_load(rendered)
        out_path = "{}/cronjobfile--{}.yaml".format(
            self.deployment_files_folder, cronjob_name)
        with open(out_path, "w+") as out_file:
            yaml.safe_dump(parsed_yaml, out_file)
        self.logger.info('exiting cronjob_yaml_generator')
        return out_path

    def patch_deployment_client(self, path, project_name):
        """
        Patch an existing Kubernetes deployment from a yaml file via the
        Kubernetes API.

        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns:
            bool: True on success

        Raises:
            DeploymentCreationFailedException: when the API call fails
        """
        self.logger.info('entering patch_deploy_client')
        try:
            with open(path) as spec_file:
                body = yaml.safe_load(spec_file)
                namespace = project_utils.modify_string_for_deployment(
                    project_name)
                api = client.ExtensionsV1beta1Api()
                # collecting response from API
                response = api.patch_namespaced_deployment(
                    name=body['metadata']['name'],
                    body=body,
                    namespace=namespace)
                self.logger.debug("Deployment patched. Details : {}".format(
                    str(response)))
            self.logger.info('exiting patch_deploy_client')
            return True
        except ApiException as e:
            self.logger.error('Patching deployment failed. '
                              'Error info : {}.'.format(e))
            raise DeploymentCreationFailedException

    def deploy_client(self, path, project_name):
        """
        Create a deployment on Kubernetes from a yaml file via the
        Kubernetes API; falls back to patching when it already exists.

        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns:
            bool: True on success

        Raises:
            DeploymentCreationFailedException: when creation fails for a
                reason other than an already-existing deployment
        """
        self.logger.info('entering deploy_client')
        try:
            with open(path) as spec_file:
                body = yaml.safe_load(spec_file)
                namespace = project_utils.modify_string_for_deployment(
                    project_name)
                api = client.ExtensionsV1beta1Api()
                # collecting response from API
                response = api.create_namespaced_deployment(
                    body=body, namespace=namespace)
                self.logger.debug("Deployment created. Details : {}".format(
                    str(response)))
            self.logger.info('exiting deploy_client')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the deployment
                self.patch_deployment_client(path, project_name)
                return True
            self.logger.error('Creation of deployment failed. Exiting.')
            raise DeploymentCreationFailedException

    def patch_service_client(self, path, project_name):
        """
        Patch an existing Kubernetes service from a yaml file via the
        Kubernetes API.

        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns:
            bool: True on success

        Raises:
            PortPatchingAttemptedException: when the API rejects a port
                change (HTTP 422, Unprocessable Entity)
            ServiceCreationFailedException: on any other API failure
        """
        self.logger.info('entering patch_service_client')
        try:
            with open(path) as spec_file:
                body = yaml.safe_load(spec_file)
                namespace = project_utils.modify_string_for_deployment(
                    project_name)
                api = client.CoreV1Api()
                # collecting response from API
                response = api.patch_namespaced_service(
                    namespace=namespace,
                    body=body,
                    name=body['metadata']['name'])
                self.logger.debug("Service patched. Details : {}".format(
                    str(response)))
            self.logger.info('exiting patch_service_client')
            return True
        except ApiException as e:
            self.logger.error('Patching service failed. Error details : '
                              '{}'.format(e))
            if e.status == 422:  # Unprocessable Entity
                self.logger.error("Can't patch service port.")
                raise PortPatchingAttemptedException
            raise ServiceCreationFailedException

    def create_service_client(self, path, project_name):
        """
        Create a Kubernetes service from a yaml file via the Kubernetes
        API; falls back to patching when the service already exists.

        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns:
            bool: True on success

        Raises:
            ServiceCreationFailedException: when creation fails for a
                reason other than an already-existing service
        """
        self.logger.info('entering create_service_client')
        try:
            with open(path) as spec_file:
                body = yaml.safe_load(spec_file)
                namespace = project_utils.modify_string_for_deployment(
                    project_name)
                # collecting response from API
                response = client.CoreV1Api().create_namespaced_service(
                    namespace=namespace, body=body)
                self.logger.debug("Service created. Details : {}".format(
                    str(response)))
            self.logger.info('exiting create_service_client')
            return True
        except ApiException as e:
            if e.status == 409:
                self.patch_service_client(path, project_name)
                return True
            self.logger.error('Creation of service failed. Exiting.')
            raise ServiceCreationFailedException

    def create_namespace_client(self, path):
        """
        helper function to create namespace for a given yaml file on
        Kubernetes via the Kubernetes API
        Args:
            path: path of the yaml

        Returns: status of namespace creation (True/Error Code)

        Raises:
            NamespaceCreationFailedException: when reading/parsing the yaml
                or the API call fails
        """
        self.logger.info('entering create_namespace_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                r = k8s_beta.create_namespace(body=dep)
                self.logger.debug("Namespace created. Details : {}".format(
                    str(r)))
            self.logger.info('exiting create_namespace_client')
            return True
        except Exception as e:
            # previously a bare `except:` which swallowed even SystemExit /
            # KeyboardInterrupt and discarded the cause; catch Exception
            # and log the underlying error before re-raising.
            self.logger.error('Failed to create namespace. Exiting. '
                              'Error info : {}.'.format(e))
            raise NamespaceCreationFailedException

    def patch_job_client(self, path, project_name):
        """
        helper function to patch an existing job on Kubernetes via the
        Kubernetes API

        Args:
            path: path of the yaml describing the job
            project_name: project the job belongs to (needed for namespace)

        Returns: status of patching (True) on success

        Raises:
            JobCreationFailedException: when the API call fails
        """
        self.logger.info('entering patch_job_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.BatchV1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_job(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug("Job patched. Details : {}".format(str(r)))
            self.logger.info('exiting patch_job_client')
            return True
        except ApiException as e:
            self.logger.error('Patching job failed. '
                              'Error info : {}.'.format(e))
            raise JobCreationFailedException

    def create_job_client(self, path, project_name):
        """
        Create a Kubernetes job from a yaml file via the Kubernetes API;
        falls back to patching when the job already exists.

        :param path: path of the yaml file
        :param project_name: project name of which the job is a part
        :return: True on success
        :raises JobCreationFailedException: when creation fails for a
            reason other than an already-existing job
        """
        self.logger.info('Entering create_job_client')
        try:
            with open(path) as spec_file:
                body = yaml.safe_load(spec_file)
                namespace = project_utils.modify_string_for_deployment(
                    project_name)
                # collecting response from API
                response = client.BatchV1Api().create_namespaced_job(
                    namespace=namespace, body=body)
                self.logger.debug("Job created. Details : {}".format(
                    str(response)))
            self.logger.info('exiting create_job_client')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the job
                self.patch_job_client(path, project_name)
                return True
            self.logger.error('Creation of job failed. Exiting.')
            raise JobCreationFailedException

    def patch_cronjob_client(self, path, project_name):
        """
        helper function to patch an existing cronjob on Kubernetes via the
        Kubernetes API

        Args:
            path: path of the yaml describing the cronjob
            project_name: project the cronjob belongs to (needed for
                namespace)

        Returns: status of patching (True) on success

        Raises:
            CronjobCreationFailedException: when the API call fails
        """
        self.logger.info('entering patch_cronjob_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.BatchV1beta1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_cron_job(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug("CronJob patched. Details : {}".format(
                    str(r)))
            self.logger.info('exiting patch_cronjob_client')
            return True
        except ApiException as e:
            self.logger.error('Patching cronjob failed. '
                              'Error info : {}.'.format(e))
            raise CronjobCreationFailedException

    def create_cronjob_client(self, path, project_name):
        """
        Create a Kubernetes cronjob from a yaml file via the Kubernetes
        API; falls back to patching when the cronjob already exists.

        :param path: path of the yaml file
        :param project_name: project name of which the cronjob is a part
        :return: True on success
        :raises CronjobCreationFailedException: when creation fails for a
            reason other than an already-existing cronjob
        """
        self.logger.info('Entering create_cronjob_client')
        try:
            with open(path) as spec_file:
                body = yaml.safe_load(spec_file)
                namespace = project_utils.modify_string_for_deployment(
                    project_name)
                api = client.BatchV1beta1Api()
                # collecting response from API
                response = api.create_namespaced_cron_job(
                    namespace=namespace, body=body)
                self.logger.debug("Cron Job created. Details : {}".format(
                    str(response)))
            self.logger.info('exiting create_cronjob_client')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the cronjob
                self.patch_cronjob_client(path, project_name)
                return True
            self.logger.error('Creation of cron job failed. Exiting.')
            raise CronjobCreationFailedException

    def get_service_ip(self, deployment: ServiceDeployment):
        """
        Collect the reachable addresses for a component's service, one per
        exposed port.

        Args:
            deployment: Service Deployment object

        Returns:
            list: '<master_node>:<node_port>' strings for each service port
        """
        self.logger.info('Entering get_service_ip method')
        safe_project = project_utils.modify_string_for_deployment(
            deployment.project_name)
        safe_component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        service_name = '{}--{}'.format(safe_project, safe_component)
        service = client.CoreV1Api().read_namespaced_service(
            name=service_name,
            namespace=safe_project)

        service_ips = [
            '{}:{}'.format(deployment.master_node, port.node_port)
            for port in service.spec.ports
        ]
        self.logger.info('Exiting get_service_ip method')
        return service_ips

    def patch_persistence_volume(self, pv):
        """
        Helper function to patch a persistent volume via the Kubernetes API.

        Args:
            pv: path of the persistent volume yaml file

        Returns:
            bool: True on success

        Raises:
            DeploymentCreationFailedException: when the API call fails
        """
        # log messages previously said 'entering persistence' /
        # 'exiting patch_deploy_client' — fixed to match this method
        self.logger.info('entering patch_persistence_volume')
        try:
            with open(pv) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.patch_persistent_volume(
                    name=dep['metadata']['name'], body=dep)
                self.logger.debug(
                    "Persistence volume patched. Details : {}".format(str(r)))
            self.logger.info('exiting patch_persistence_volume')
            return True
        except ApiException as e:
            self.logger.error('Patching PV failed. '
                              'Error info : {}.'.format(e))
            raise DeploymentCreationFailedException

    def patch_persistence_volume_claim(self, pv, pvc, project_name):
        """
        Helper function to patch a persistent volume claim via the
        Kubernetes API.

        Args:
            pv: path of the persistent volume yaml file (kept for interface
                compatibility; not used by this method)
            pvc: path of the persistent volume claim yaml file
            project_name: project to be deployed (needed for namespace)

        Returns:
            bool: True on success

        Raises:
            DeploymentCreationFailedException: when the API call fails
        """
        # log messages previously said 'entering persistence' /
        # 'exiting patch_deploy_client' and the debug line said "volume"
        # instead of "claim" — fixed to match this method
        self.logger.info('entering patch_persistence_volume_claim')
        try:
            with open(pvc) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_persistent_volume_claim(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug(
                    "Persistence volume claim patched. "
                    "Details : {}".format(str(r)))
            self.logger.info('exiting patch_persistence_volume_claim')
            return True
        except ApiException as e:
            self.logger.error('Patching PVC failed. '
                              'Error info : {}.'.format(e))
            raise DeploymentCreationFailedException

    def create_persistence_if_required(self, deployment):
        """
        Check if persistence is required for the deployment; if so create
        the persistent volume (PV) and persistent volume claim (PVC),
        patching either one that already exists.

        Args:
            deployment: deployment object; need_persistence() decides
                whether any work is done

        Returns:
            bool: False when persistence is not needed, True once the
                PV/PVC exist (created or patched)

        Raises:
            DeploymentCreationFailedException: when PV or PVC creation
                fails for a reason other than an existing resource
        """
        self.logger.debug("Checking for persistence")
        if not deployment.need_persistence():
            self.logger.debug("Persistence not needed.")
            return False

        self.logger.info("Persistence is needed")
        # NOTE: 'persstence_type' (sic) matches the generator's declared
        # keyword argument elsewhere in this file — do not rename here
        # without renaming it at the definition as well.
        pv = self.persistent_volume_yaml_generator(deployment,
                                                   persstence_type="volume")
        pvc = self.persistent_volume_yaml_generator(
            deployment, persstence_type="volume-claim")
        try:
            with open(pv) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.create_persistent_volume(body=dep)
                self.logger.debug(
                    f"Persistence Volume created. Details : {str(r)}")
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the volume
                # NOTE(review): returning here skips PVC creation entirely
                # when the PV already exists — confirm the claim is
                # guaranteed to exist in that case.
                self.patch_persistence_volume(pv)
                return True
            self.logger.error('Creation of PV failed. Exiting.')
            raise DeploymentCreationFailedException

        try:
            with open(pvc) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.create_namespaced_persistent_volume_claim(
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        deployment.project_name))
                self.logger.debug(
                    f"Persistence Volume Claim created. Details : {str(r)}")

            # previously logged 'exiting deploy_client' — wrong method name
            self.logger.info('exiting create_persistence_if_required')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the claim
                self.patch_persistence_volume_claim(pv, pvc,
                                                    deployment.project_name)
                return True
            self.logger.error('Creation of PVC failed. Exiting.')
            raise DeploymentCreationFailedException

    def run_deployment_steps(self, deployment: ServiceDeployment):
        """
        Run the full service deployment pipeline: persistence (if needed),
        deployment creation, then service creation.

        Args:
            deployment: Service Deployment object

        Returns:
            bool: True on success

        Raises:
            ProjectDeploymentFailedException: when any step fails
        """
        try:
            self.create_persistence_if_required(deployment)
            deployment_spec = self.deployment_yaml_generator(deployment)
            self.deploy_client(deployment_spec, deployment.project_name)
            self.logger.debug(f'Deployment created for '
                              f'{deployment.component_name}. '
                              f'Now creating service.')
            service_spec = self.service_yaml_generator(
                deployment.project_name, deployment.component_name,
                deployment.ports)
            self.create_service_client(service_spec, deployment.project_name)
            self.logger.debug(f'Service created for '
                              f'{deployment.component_name}')
            return True
        except XprExceptions:
            self.logger.error('Error while running deployment steps. '
                              'Deployment failed.')
            raise ProjectDeploymentFailedException

    def run_job_steps(self, deployment: JobDeployment):
        """
        Run the job deployment pipeline: create persistence (if needed),
        generate the yaml and submit it as a job or a cronjob depending on
        the deployment type. A deployment that is neither a base job nor a
        cronjob falls through without any action.

        Args:
            deployment: Job Deployment object

        Raises:
            JobCreationFailedException: when any base-job step fails
            CronjobCreationFailedException: when any cronjob step fails
        """
        if deployment.is_base_job():
            try:
                self.create_persistence_if_required(deployment)
                job_yaml = self.job_yaml_generator(deployment)
                self.create_job_client(job_yaml, deployment.project_name)
                self.logger.debug(f'Job created for '
                                  f'{deployment.component_name}')
            except XprExceptions:
                self.logger.error('Error while running job steps. '
                                  'Job creation failed.')
                raise JobCreationFailedException
        elif deployment.is_cronjob():
            try:
                self.create_persistence_if_required(deployment)
                # NOTE(review): cronjob_yaml_generator is declared with six
                # parameters (project_name, component, schedule, image,
                # environment, args) but is called here with a single
                # deployment object — this looks like it would raise a
                # TypeError at runtime; confirm the intended signature.
                cronjob_yaml = self.cronjob_yaml_generator(deployment)
                self.create_cronjob_client(cronjob_yaml,
                                           deployment.project_name)
                self.logger.debug(f'Cronjob created for '
                                  f'{deployment.component_name}')
            except XprExceptions:
                self.logger.error('Error while running job steps. '
                                  'Cronjob creation failed.')
                raise CronjobCreationFailedException
# --- example #4 ("예제 #4" / "0"): content-extraction artifact separating
# code samples; converted to a comment so the file stays parseable ---
class LocalShellExecutor(CommandExecutor):
    """ It is used to run shell commands locally on a linux environment"""

    # default shell binary used when the caller does not override `executor`
    DEFAULT_EXECUTOR = "/bin/bash"

    def __init__(self):
        super().__init__()
        self.logger = XprLogger()

    def execute_with_output(self, command: str, executor=DEFAULT_EXECUTOR):
        """ It runs linux shell command on local server and returns the
        output

        Args:
            command(str): command for execution
            executor(str): shell binary used to interpret the command

        Returns:
            tuple: (response code: int, stdout: bytes, stderr: bytes)

        Raises:
            CommandExecutionFailedException: if the subprocess could not run
        """
        self.logger.debug("Running command {}".format(command))
        # NOTE: shell=True hands `command` to the shell verbatim — callers
        # must not pass untrusted input here.
        try:
            status = subprocess.run(command, capture_output=True, shell=True,
                                    executable=executor)
            self.logger.debug("Command successful")
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            self.logger.warning("Command failed")
            raise CommandExecutionFailedException(str(e))
        return status.returncode, status.stdout, status.stderr

    def execute(self, command: str, executor=DEFAULT_EXECUTOR):
        """ It runs linux shell command on local server

        Args:
            command(str): command for execution
            executor(str): shell binary used to interpret the command

        Returns:
            int: response code
        """
        self.logger.debug("Running command {}".format(command))
        try:
            status = subprocess.run(command, shell=True, executable=executor)
            self.logger.debug("Command successful")
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            self.logger.warning("Command failed")
            raise CommandExecutionFailedException(str(e))
        return status.returncode

    def execute_same_shell(self, command: str, executor=DEFAULT_EXECUTOR):
        """ It runs linux shell command on local server and returns the
        output

        Args:
            command(str): command for execution
            executor(str): program passed as `executable` to subprocess

        Returns:
            tuple: (response code: int, stdout: bytes, stderr: bytes)
        """
        self.logger.debug("Running command {}".format(command))
        # NOTE(review): shell=False with a string command does not tokenize
        # it into argv; combined with `executable`, the whole command string
        # becomes argv[0] of the executor — confirm this behaves as callers
        # expect.
        try:
            status = subprocess.run(command, capture_output=True, shell=False,
                                    executable=executor)
            self.logger.debug("Command successful")
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            self.logger.warning("Command failed")
            raise CommandExecutionFailedException(str(e))
        return status.returncode, status.stdout, status.stderr