    def execute(self):
        """
        installs kubeadm on the machine.
        """
        logger = XprLogger()
        if not linux_utils.check_root():
            logger.fatal("Please run this as root")
        logger.info("Installing Kubeadm...")

        try:
            swapoff = 'swapoff -a'
            self.executor.execute(swapoff)  # turns swap off
            add_key = 'curl -s ' \
                      'https://packages.cloud.google.com/apt/doc/apt-key.gpg ' \
                      '| apt-key add -'
            self.executor.execute(add_key)
            add_list_path = '/etc/apt/sources.list.d/kubernetes.list'
            add_list_content = 'deb https://apt.kubernetes.io/ ' \
                               'kubernetes-xenial main'
            linux_utils.write_to_file(add_list_content, add_list_path, "a")
            install_kubeadm = 'apt-get update && apt-get install ' \
                              '-y kubelet kubeadm kubectl'
            self.executor.execute(install_kubeadm)  # installs kubeadm
            hold_kubeadm = 'apt-mark hold kubelet kubeadm kubectl'
            self.executor.execute(hold_kubeadm)
        except CommandExecutionFailedException as e:
            logger.error("Failed to install Kubeadm. \n{}".format(str(e)))
            return False
        return True
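# The snippets in this section call `self.executor.execute(...)` and catch
# CommandExecutionFailedException. A minimal sketch of that assumed contract
# (the real executor class is not shown here; this shape is an assumption):
import subprocess


class CommandExecutionFailedException(Exception):
    """Raised when a shell command exits with a non-zero status."""


class ShellCommandExecutor:
    def execute(self, command):
        # Run the command in a shell and surface stderr on failure
        result = subprocess.run(command, shell=True,
                                capture_output=True, text=True)
        if result.returncode != 0:
            raise CommandExecutionFailedException(result.stderr)
        return result.stdout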
    def execute(self, parameters=None):
        """
        installs worker node on the machine.
        """
        logger = XprLogger()
        if not linux_utils.check_root():
            logger.fatal("Please run this as root")

        self.cleanup()
        logger.info("Initialising Kubernetes worker node...")
        try:

            if parameters and self.PARAMETER_MASTER_IP in parameters:
                master_ip = parameters[self.PARAMETER_MASTER_IP]
            else:
                master_ip = input("Enter the IP address of the master"
                                  " node you want to join:")
            path = '/mnt/nfs/data/k8/k8_clusters/{}/{}.txt'. \
                format(master_ip, master_ip)
            with open(path, "r") as f:
                join_command = f.read()  # extract join command
            self.executor.execute(
                join_command)  # run command to join the cluster
        except CommandExecutionFailedException as e:
            logger.error("Failed to setup worker node. \n{}".format(str(e)))
            return False
        return True
class MetricsAggregator:
    """ Tracks all available metrics.
    Collects the results from these metrics and returns them as a list
    """

    MONGO_SECTION = 'mongodb'
    DB_URL = 'mongo_url'
    DB_NAME = 'database'
    DB_UID = 'mongo_uid'
    DB_PWD = 'mongo_pwd'
    W = 'w'

    def __init__(self, config_path):
        self.config = XprConfigParser(config_file_path=config_path)
        self.logger = XprLogger()
        self.metrics_list = None
        self.persistence_manager = None

    def initialize(self,
                   db_type=MONGO_SECTION,
                   db_url=DB_URL,
                   db_name=DB_NAME,
                   db_uid=DB_UID,
                   db_pwd=DB_PWD,
                   db_w=W):
        self.persistence_manager = MongoPersistenceManager(
            url=self.config[db_type][db_url],
            db=self.config[db_type][db_name],
            uid=self.config[db_type][db_uid],
            pwd=self.config[db_type][db_pwd],
            w=self.config[db_type][db_w])

        self.metrics_list = [
            UserMetrics, AbstractMetrics, ClusterMetrics, ProjectMetrics
        ]

    def get_all_metrics(self):
        """ Iterate through all available metrics classes and collect the
        required metrics.
        For each metrics class, it picks every method whose name starts
        with metric_
        """
        self.logger.info("Aggregating all the metrics")
        aggregated_metrics = []
        for metrics_class in self.metrics_list:
            metric_obj = metrics_class(
                config=self.config,
                persistence_manager=self.persistence_manager)
            for metric_name, metric_func in metrics_class.__dict__.items():
                if not metric_name.startswith("metric_"):
                    continue
                aggregated_metrics.extend(metric_func(metric_obj))
        self.logger.info("Metric aggregration compleed")
        return aggregated_metrics
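# A usage sketch for MetricsAggregator; the config path is hypothetical and
# must contain the mongodb section keyed as above:
aggregator = MetricsAggregator(config_path="config/common.json")
aggregator.initialize()  # builds the MongoPersistenceManager from config
for metric in aggregator.get_all_metrics():
    print(metric)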
class User(XprObject):
    """
    This class represents a User
    """
    def __init__(self, user_json=None):
        """
        Constructor
        """
        self.logger = XprLogger()
        self.logger.debug(f"User constructor called with {user_json}")
        super().__init__(user_json)
        self.logger.info(f"user info : {self.data}")
        # These are mandatory fields that need to be provided in user_json
        self.mandatory_fields = [
            "uid", "pwd", "firstName", "lastName", "email", "primaryRole"
        ]

        # primaryRole of a user has to be one of these
        self.valid_values = {"primaryRole": ["Dev", "PM", "DH", "Admin", "Su"]}

        # fields that cannot be modified
        self.unmodifiable_fields = ["pwd"]

        # fields that should be displayed in the output
        self.display_fields = [
            "uid", "firstName", "lastName", "email", "primaryRole", "nodes",
            "activationStatus"
        ]
        self.logger.debug("User constructed successfully")

    def get_token_info(self):
        token_info = TokenInfo(self.data['token'])
        token_info.token = self.get("token")
        token_info.token_expiry = self.get("tokenExpiry")
        token_info.login_expiry = self.get("loginExpiry")
        return token_info

    @staticmethod
    def check_password(password):
        if len(password) < 6:
            raise PasswordStrengthException("Password is too short")
        reg_exp = "^(((?=.*[a-z])(?=.*[A-Z]))|((?=.*[a-z])(?=.*[0-9]))|((?=.*[A-Z])(?=.*[0-9])))(?=.{6,})"
        match = re.search(reg_exp, password)
        if not match:
            raise PasswordStrengthException(
                "Password is weak. Choose a strong password")
class GatewayManager:
    """ Manages the gateway transactions """

    CONFIG_GATEWAY_KEY = "gateway"
    CONFIG_GATEWAY_PROVIDER = "provider"
    CONFIG_GATEWAY_ADMIN = "admin_url"
    CONFIG_GATEWAY_PROXY = "proxy_url"

    KONG_GATEWAY = "kong"

    def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH):
        self.config = XprConfigParser(config_file_path=config_path)
        self.api_gateway = None
        self.initialize_gateway(
            gateway_provider=self.config[GatewayManager.CONFIG_GATEWAY_KEY][
                GatewayManager.CONFIG_GATEWAY_PROVIDER],
            admin_url=self.config[GatewayManager.CONFIG_GATEWAY_KEY][
                GatewayManager.CONFIG_GATEWAY_ADMIN],
            proxy_url=self.config[GatewayManager.CONFIG_GATEWAY_KEY][
                GatewayManager.CONFIG_GATEWAY_PROXY],
            config_path=config_path)
        self.logger = XprLogger()

    def initialize_gateway(self, gateway_provider, admin_url, proxy_url,
                           config_path):
        if gateway_provider == GatewayManager.KONG_GATEWAY:
            self.api_gateway = KongAPIGateway(admin_url=admin_url,
                                              proxy_url=proxy_url,
                                              config_path=config_path)

    def setup_external_service(self, component_name, internal_service_url):
        """ Register a service with an external service """

        update_service_url = [
            "http://" + svc if not svc.startswith("http") else svc
            for svc in internal_service_url
        ]
        self.logger.info(f"Updated svc: {update_service_url}")
        return self.api_gateway.register_new_service(
            upstream_url=update_service_url,
            service_name=component_name,
            route=f"/{component_name}")

    def delete_external_service(self, component_name):
        return self.api_gateway.delete_service(service_name=component_name)
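# Usage sketch: registering a component behind the gateway. The component name
# and internal URLs are hypothetical:
manager = GatewayManager()
manager.setup_external_service(
    component_name="inference-svc",
    internal_service_url=["10.0.0.12:8080"])  # http:// prefix added if missing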
class SSOManager:
    """
    This class provides user authentication functionality for Xpresso Controller
    """
    def __init__(self, persistence_manager):
        self.persistence_manager = persistence_manager
        self.logger = XprLogger()

        config_path = XprConfigParser.DEFAULT_CONFIG_PATH
        self.config = XprConfigParser(config_path)

    def validate_token(self, validation_token):
        """
        Validate if the current token is valid
        Returns:
        """
        self.logger.info("Validating token")
        token_data = self.persistence_manager.find(
            'sso_tokens', {"validation_token": validation_token})
        if not token_data:
            self.logger.error("Token does not exist")
            raise IncorrectTokenException("Token does not exist")
        self.logger.info("Token validated")
        return token_data[0]["login_token"]

    @staticmethod
    def generate_token():
        """
        generate a new token
        """
        return secrets.token_hex(8)

    def update_token(self, validation_token, login_token):
        """
        Store the validation token and login token pair so that they can be
        matched later
        Args:
            validation_token: sso login validation token
            login_token: login token
        """
        self.persistence_manager.insert(
            "sso_tokens",
            {
                "validation_token": validation_token,
                "login_token": login_token
            },
            duplicate_ok=True)
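# Sketch of the SSO token flow implied above: mint a validation token, store it
# against a login token, then exchange it later. The persistence settings are
# hypothetical and mirror the keys used by MetricsAggregator.initialize:
persistence_manager = MongoPersistenceManager(url="mongodb://localhost:27017",
                                              db="xprdb", uid="admin",
                                              pwd="secret", w=1)
sso = SSOManager(persistence_manager)
validation_token = SSOManager.generate_token()   # 16 hex characters
sso.update_token(validation_token, login_token=SSOManager.generate_token())
login_token = sso.validate_token(validation_token)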
    def execute(self):
        """
        installs kubernetes dashboard on the machine.
        """
        logger = XprLogger()
        if not linux_utils.check_root():
            logger.fatal("Please run this as root")
        logger.info("Setting up the Kubernetes dashboard...")
        try:
            deploy_dashboard = 'kubectl create -f https://raw.githubusercontent' \
                               '.com/kubernetes/dashboard/master/aio/deploy' \
                               '/recommended/kubernetes-dashboard.yaml'
            self.executor.execute(deploy_dashboard)  # creates deployment
            nodeport = """kubectl -n kube-system patch service \
                    kubernetes-dashboard --type='json' -p \
                    '[{"op":"replace","path":"/spec/type","value":"NodePort"}]'"""
            self.executor.execute(nodeport)  # exposes dashboard
            constant_port = """kubectl -n kube-system patch service \
                    kubernetes-dashboard --type='json' -p \
                    '[{"op":"replace","path":"/spec/ports/0/nodePort","value":30252}]'"""
            self.executor.execute(constant_port)  # sets constant port
            content_path = '/opt/xpresso.ai/config/kubernetes-dashboard-access.yaml'
            with open(content_path, "r") as f:
                content = f.read()
            path = '/etc/kubernetes/kube-dashboard-access.yaml'
            linux_utils.write_to_file(content, path, "w+")
            dashboard_access = 'kubectl create -f {}'.format(path)
            self.executor.execute(dashboard_access)  # grants permission
            skip_login = """kubectl patch deployment -n kube-system \
                    kubernetes-dashboard --type='json' -p='[{"op": "add", "path": \
                    "/spec/template/spec/containers/0/args/1", \
                    "value":"--enable-skip-login" }]'"""
            self.executor.execute(skip_login)  # enables skip login
        except CommandExecutionFailedException as e:
            logger.error("Failed to setup dashboard. \n{}".format(str(e)))
            return False
        return True
class PromotheusMetricReportGenerator:
    """ Takes a list of metrics and generates a Prometheus-compatible report """

    def __init__(self):
        self.logger = XprLogger()

    def generate_report(self, metrics):
        self.logger.info("converting metrics to promotheus report")
        report_list = []
        for metric in metrics:
            try:
                label = f"xpresso_ctrl_{metric['label']}"
                data = metric['data']
                if type(data) == list:
                    for data_point in data:
                        report_list.append(f'{label}{{value="{data_point}"}} 1')
                elif type(data) == int:
                    report_list.append(f'{label} {data}')
                elif type(data) == str:
                    report_list.append(f'{label} {data}')
            except KeyError:
                self.logger.debug("Ignoring the metric")
        self.logger.info("Report generation complete")
        return '\n'.join(report_list)
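# Usage sketch: each metric dict needs 'label' and 'data'; list-valued data is
# exploded into labelled samples with value 1, scalars become plain samples:
generator = PromotheusMetricReportGenerator()
print(generator.generate_report([
    {"label": "active_users", "data": 42},
    {"label": "clusters", "data": ["dev", "prod"]},
]))
# xpresso_ctrl_active_users 42
# xpresso_ctrl_clusters{value="dev"} 1
# xpresso_ctrl_clusters{value="prod"} 1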
class KubernetesManager:
    def __init__(self, persistence_manager):

        self.persistence_manager = persistence_manager
        self.kubernetes_deploy = KubernetesDeploy(persistence_manager)
        self.logger = XprLogger()

    def set_api_config(self, master_node):
        """
        Sets the kubernetes API config
        :param master_node: IP of master node of the cluster on which
        project is to be deployed
        :return: nothing
        """
        # get the master node of the cluster
        master_info = self.persistence_manager.find('nodes',
                                                    {"address": master_node})
        # get the kubernetes bearer token from master
        try:
            token = master_info[0]['token']
        except (IndexError, KeyError):
            self.logger.error("Cluster is not valid. No valid node exist")
            raise ClusterNotFoundException("Cluster is invalid")
        self.logger.debug('Bearer token retrieved from master node')
        # kubernetes API configurations
        configuration = client.Configuration()
        configuration.host = 'https://{}:6443'.format(master_node)
        configuration.verify_ssl = False
        configuration.debug = True
        configuration.api_key = {"authorization": "Bearer " + token}
        client.Configuration.set_default(configuration)
        self.logger.debug('API configurations set.')

    def check_for_namespace(self, project):
        """
        Check if a namespace exists for the given project.  If not, creates it.
        :param project: project to be deployed
        :return: nothing
        """
        # check if namespace exists for the project
        self.logger.debug('checking for existing namespace')
        namespaces = client.CoreV1Api().list_namespace()
        flag = False
        project_name = project['name']
        for ns in namespaces.items:
            if ns.metadata.name == \
                    project_utils.modify_string_for_deployment(project_name):
                flag = True
                self.logger.debug('Namespace for project already exists.')
        if not flag:  # if project_name not in namespaces
            self.logger.debug('creating namespace for the project')
            ns_path = self.kubernetes_deploy.namespace_yaml_generator(
                project_name)
            # create namespace for project
            self.kubernetes_deploy.create_namespace_client(ns_path)

    def kube_deploy_job(self, deployment: Deployment):
        """
        Deploys a job/cronjob component.
        Args:
            deployment: Job deployment object
        """
        if not isinstance(deployment, JobDeployment):
            raise ComponentsSpecifiedIncorrectlyException("Job component "
                                                          "is invalid")
        self.logger.info('entering kube_deploy_job')
        self.logger.debug('running job steps')
        self.kubernetes_deploy.run_job_steps(deployment)

    def kube_deploy_service(self, deployment: Deployment):
        """
        Deploys a service component.
        Args:
            deployment: Service deployment object
        Returns:
            str: IP of the hosted service
        """
        if not isinstance(deployment, ServiceDeployment):
            raise ComponentsSpecifiedIncorrectlyException("Service component "
                                                          "is invalid")
        self.logger.debug('running deployment steps')
        self.kubernetes_deploy.run_deployment_steps(deployment)
        service_ip = self.kubernetes_deploy.get_service_ip(deployment)
        return service_ip

    def kube_undeploy(self, project_name):
        """
        Undeploys a project
        :param project_name: project to be undeployed
        :return: nothing
        """
        try:
            self.logger.debug('Deleting namespace to undeploy project')
            k8s_beta = client.CoreV1Api()
            resp = k8s_beta.delete_namespace(
                project_utils.modify_string_for_deployment(project_name))
            self.logger.debug("Namespace deleted. Details : {}".format(
                str(resp)))
        except ApiException as e:
            if e.status == 404:  # Not found
                self.logger.error('Project is not deployed currently.')
                raise CurrentlyNotDeployedException
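# Usage sketch: point the kubernetes client at a cluster master and make sure
# the project namespace exists. The master IP is hypothetical and
# `persistence_manager` (as in the SSOManager sketch) must already hold the
# node's bearer token:
k8s_manager = KubernetesManager(persistence_manager)
k8s_manager.set_api_config(master_node="10.10.0.5")
k8s_manager.check_for_namespace({"name": "demo-project"})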
class KubeflowUtils:
    def __init__(self, persistence_manager):
        self.persistence_manager = persistence_manager
        self.kubernetes_manager = KubernetesManager(persistence_manager)
        self.logger = XprLogger()

    def install_kubeflow(self, master_node, namespace):
        """
        installs kubeflow on a given node, in a given namespace
        Args:
            master_node: master node of the cluster
            namespace: namespace in which kubeflow is to be installed

        Returns: nothing

        """
        pass

    def fetch_ambassador_port(self, master_node, namespace):
        """
        Fetches the port on which ambassador is running
        Args:
            master_node: master node IP of the cluster
            namespace: namespace on which ambassador is deployed

        Returns: ambassador nodePort

        """
        self.logger.info('entering fetch_ambassador_port method')
        self.kubernetes_manager.set_api_config(master_node)
        k8s_beta = client.CoreV1Api()
        try:
            s = k8s_beta.read_namespaced_service(name='ambassador',
                                                 namespace=namespace)
            ambassador_port = s.spec.ports[0].node_port
            ambassador_port = s.spec.ports[0].node_port
        except ApiException as e:
            self.logger.error(f'Ambassador port fetching failed. Details : '
                              f'{e.status, e.body}')
            raise AmbassadorPortFetchException(
                'Failed to fetch ambassador port.')
        self.logger.info('exiting fetch_ambassador_port method')
        return ambassador_port

    def set_kubeflow_api_config(self, master_node, ambassador_port):
        """
        sets the Kubeflow API config
        Args:
            ambassador_port: ambassador's service nodePort
            master_node: address of the master node

        Returns: nothing

        """
        self.logger.info('entering set_kubeflow_api_config method')
        try:
            master_info = self.persistence_manager.find(
                'nodes', {"address": master_node})
            token = master_info[0]['token']
        except (IndexError, KeyError):
            self.logger.error("Token retrieval from master node failed.")
            raise IncorrectTokenException(
                "Token retrieval from master node failed.")
        config = kfp_server_api.configuration.Configuration()
        config.verify_ssl = False
        config.debug = True
        config.host = f'http://{master_node}:{ambassador_port}/pipeline'
        config.api_key = {"authorization": "Bearer " + token}
        self.logger.info('exiting set_kubeflow_api_config method')
        return config

    def upload_pipeline_to_kubeflow(self, master_node, namespace,
                                    pipeline_zip):
        """
        uploads given kubeflow pipeline on the given cluster
        Args:
            namespace: namespace on which kubeflow is installed
            master_node: master node IP of the cluster
            pipeline_zip: zip file containing the pipeline yaml

        Returns: ambassador nodePort

        """
        self.logger.info('entering upload_pipeline_to_kubeflow method')
        ambassador_port = self.fetch_ambassador_port(master_node, namespace)
        self.logger.debug('fetched ambassador port')
        config = self.set_kubeflow_api_config(master_node, ambassador_port)
        api_client = kfp_server_api.api_client.ApiClient(config)
        try:
            upload_client = kfp_server_api.api.PipelineUploadServiceApi(
                api_client)
            upload_client.upload_pipeline(pipeline_zip)
        except KFApiException as e:
            if e.status == 500:
                self.logger.error('Trying to upload already existing pipeline')
                raise PipelineUploadFailedException(
                    'Pipeline already exists. Please choose a different name.')
            else:
                self.logger.error(f'Pipeline upload failed. Reason : {e.body}')
                raise PipelineUploadFailedException(e.body)
        return ambassador_port
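# Usage sketch: upload a compiled pipeline archive; the node address, namespace
# and file name are hypothetical, and `persistence_manager` is configured as in
# the SSOManager sketch above:
kubeflow_utils = KubeflowUtils(persistence_manager)
port = kubeflow_utils.upload_pipeline_to_kubeflow(
    master_node="10.10.0.5",
    namespace="kubeflow",
    pipeline_zip="pipeline.zip")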
class PackageManager:
    """Manages the request for package installation and setup
    """

    MANIFEST_SCRIPT_KEY = "script_path"
    MANIFEST_MULTI_ARG_KEY = "script_multi_arguments"
    MANIFEST_DEPENDENCY_KEY = "dependency_packages"
    MANIFEST_SSH_CONFIG = "ssh_config"

    def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH):
        """
        1. Generate metadata of existing VM
        2. Reads the arguments and initiates the instance variable
        """

        self.logger = XprLogger()

        self.python_version = sys.version.split('\n')
        self.system = platform.system()
        self.machine = platform.machine()
        self.platform = platform.platform()
        self.uname = platform.uname()
        self.version = platform.version()
        self.arch = platform.architecture()
        self.config_path = config_path
        self.package_dependency = PackageDependency(config_path=config_path)

    def list(self):
        """ List all supported package group name

        Returns:
            list: list of available packages
        """
        return self.package_dependency.list_all()

    def run(self,
            package_to_install: str,
            execution_type: ExecutionType,
            parameters: dict = None):
        """
        Perform provided execution type on the given package name

        Args:
            parameters(dict): Additional parameters
            package_to_install(str): name of the package to install. Must match
                                     the supported package names
            execution_type(ExecutionType): Type of execution
        """
        if not self.package_dependency.check_if_supported(package_to_install):
            self.logger.error(
                "Unsupported Package Name : {}".format(package_to_install))
            return False

        dependency_list = self.package_dependency.get_dependency(
            package_to_install)
        self.logger.info(dependency_list)
        response = self.execute_recursive(dependency_list,
                                          0,
                                          execution_type,
                                          parameters=parameters)
        if not response:
            raise PackageFailedException("Package installation failed!!")
        self.logger.info(
            "{} installed successfully".format(package_to_install))
        return True

    def execute_recursive(self,
                          dependency_list: list,
                          current_index: int,
                          execution_type: ExecutionType,
                          parameters: dict = None):
        """
        Execute recursively. If something fails then roll back
        """
        if current_index >= len(dependency_list):
            return True

        package_string = dependency_list[current_index]
        self.logger.info(package_string)
        try:
            self.execute(package_class=self.package_str_to_class(
                package_string, packages),
                         execution_type=execution_type,
                         parameters=parameters)
            current_index += 1
            return self.execute_recursive(dependency_list,
                                          current_index,
                                          execution_type,
                                          parameters=parameters)
        except PackageFailedException:
            self.logger.error("Failed to execute package {}".format(
                str(package_string)))
        return False

    def execute(self,
                package_class,
                execution_type: ExecutionType,
                parameters: dict = None):
        """
        Perform provided execution type on the given package name

        Args:
            parameters: Additional parameter required for installation
            package_class: name of the package to install.
                           Must match the supported package names.
            execution_type(ExecutionType): Type of execution
        Returns:
            bool: True,if the execution is successful
        """
        if package_class is None:
            self.logger.info(
                "{} Not Found in the hierarchy".format(package_class))
            return False
        self.logger.info(f"Running package {package_class} with parameters"
                         f"{parameters}")
        package_obj = package_class(config_path=self.config_path)
        if (execution_type == ExecutionType.INSTALL
                and package_obj.status(parameters=parameters)):
            return True
        elif execution_type == ExecutionType.INSTALL:
            return package_obj.install(parameters=parameters)
        elif execution_type == ExecutionType.UNINSTALL:
            return package_obj.uninstall(parameters=parameters)
        elif execution_type == ExecutionType.STATUS:
            return package_obj.status(parameters=parameters)
        elif execution_type == ExecutionType.START:
            return package_obj.start(parameters=parameters)
        elif execution_type == ExecutionType.STOP:
            return package_obj.stop(parameters=parameters)

        self.logger.error(str(package_obj) + " Not defined")
        return False

    def package_str_to_class(self, target_class_name: str, package_name):
        """ Converts class name into python class object. It looks for all
        classes in a package and matches the name to the class object

        Args:
            package_name(package): Find the target class name within this
                                   package name
            target_class_name(str): exact name of the class. It should match the
                                    class name as well

        Returns:
            Object: returns python class object, None otherwise
        """

        for _, modname, is_pkg in \
            pkgutil.iter_modules(package_name.__path__,
                                 package_name.__name__ + "."):
            imported_module = __import__(modname, fromlist=["dummy"])
            matched_class_name = None
            if is_pkg:
                matched_class_name = self.package_str_to_class(
                    target_class_name, imported_module)
            if matched_class_name:
                return matched_class_name
            for name, obj in inspect.getmembers(imported_module):
                if inspect.isclass(obj) and name == target_class_name:
                    return obj
        return None
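# Usage sketch for PackageManager: resolve the dependency chain for a package
# and run each entry in order. The package name and parameters are hypothetical
# and must match entries known to PackageDependency:
package_manager = PackageManager()
package_manager.run(package_to_install="KubeadmDashboardPackage",
                    execution_type=ExecutionType.INSTALL,
                    parameters={"master_ip": "10.10.0.5"})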
class NodeManager:
    config_path = XprConfigParser.DEFAULT_CONFIG_PATH

    def __init__(self, persistence_manager):
        self.config = XprConfigParser(self.config_path)
        self.logger = XprLogger()
        self.persistence_manager = persistence_manager

    def register_node(self, node_json):
        """
            registers a new node in the database if the server is available

            checks if the node already exists and then checks if the server
            with node ip_address is available. Then adds the node to database

            Parameters:
                node_json [json]: json with node information

            Return:
                Success -> 'OK' [str] : returns 'OK' as response
                Failure -> [str] : returns appropriate failure response
        """
        self.logger.info("registering a new node")
        self.logger.debug(f"node info provided is : {node_json}")
        new_node = Node(node_json)
        self.logger.info('checking if the mandatory fields are provided')
        new_node.validate_mandatory_fields()
        self.logger.info('checking if the address of node is valid')
        new_node.validate_node_address()
        new_node.set('provisionStatus', False)
        new_node.set('activationStatus', True)

        self.logger.info("provisionStatus and activationStatus fields are set")

        self.logger.info("adding node to the database")

        self.persistence_manager.insert("nodes", new_node.data, False)
        self.logger.info("node successfully added to the persistence")

    def get_nodes(self, filter_json, apply_display_filter=True):
        """
            Gets the list of nodes in the database

            Calls the persistence with input filters to fetch the list of nodes
            After fetching, the nodes list is filtered before sending
            as output in order to send relevant information only

            Parameters:
                filter_json [json] : json with filter key & value pairs

            Return:
                Success -> [list] : returns list of nodes
                Failure -> [str] : returns persistence failure response
        """
        self.logger.debug(f"filter_json is : {filter_json}")
        self.logger.info("getting the list of nodes from persistence")
        nodes = self.persistence_manager.find("nodes", filter_json)
        self.logger.info("filtering nodes before sending as output")
        if apply_display_filter:
            filtered_nodes = []
            for node_json in nodes:
                temp_node = Node(node_json)
                temp_node.filter_display_fields()
                filtered_nodes.append(temp_node.data)
            nodes = filtered_nodes
        self.logger.debug("Output of Nodes sent : ", nodes)
        return nodes

    def provision_node(self, provision_json):
        """
            provisions a node either for deployment or development

            checks if node is available and then connects to the server
            through ssh and runs appropriate packages on the server

            Parameters:
                provision_json [json] : json with node provision info

            Return:
                Success -> 'OK' [str] : returns OK if provision_node succeeds
                Failure -> [str] : returns appropriate failure response
        """
        self.logger.debug(f"provision_node info is: {provision_json}")
        self.logger.info("provision of a node is requested")
        new_node = Node(provision_json)
        new_node.provision_info_check()
        address = provision_json["address"]
        node_id_json = {"address": address}
        node_type = provision_json['nodetype']
        self.logger.info("checking persistence if the node is registered")
        node = self.persistence_manager.find("nodes", node_id_json)
        if len(node) == 0:
            self.logger.error("Node not found")
            raise NodeNotFoundException("Node not found to provision")

        for (key, val) in node[0].items():
            new_node.set(key, val)

        self.logger.debug(new_node.data)
        new_node.provision_node_check(provision_json, self.persistence_manager)
        self.logger.debug("provision_node_check passed")
        # checks if ip address of the node and its type is provided or not
        if node_type != 'DEVVM':
            self.logger.debug("updating cluster")
            new_node.update_cluster(provision_json, self.persistence_manager)
        self.logger.info("provision node in progress")
        provision_status = new_node.provision_node_setup()
        if provision_status == 1:
            self.logger.info("provision of node is successful")
            update_json = {"provisionStatus": True, "nodetype": node_type}
            if node_type == 'CLUSTER_MASTER':
                update_json["cluster"] = provision_json["cluster"]
            elif node_type == 'CLUSTER_WORKER':
                update_json["masterip"] = provision_json["masterip"]
            else:
                update_json["flavor"] = provision_json["flavor"].lower()

            self.persistence_manager.update("nodes", node_id_json, update_json)
        elif provision_status == 0:
            self.logger.error("provision failed: kubernetes error")
            raise ProvisionKubernetesException("Provision Failed")
        else:
            self.logger.error('given provision node data is invalid')
            raise InvalidProvisionInfoException("Provision data is invalid")

    def deactivate_node(self, node_id):
        """
            Deactivates a node in persistence

            Deletes all the installed packages of the node on server
            then deactivates the node in database

            Parameters:
                node_id [str] : name of the node

            Return:
                returns appropriate output
        """
        self.logger.info(f"request received for deactivating node {node_id}")
        node_id_json = {"address": node_id}
        self.logger.info("checking persistence if node is present or not")
        nodes = self.persistence_manager.find("nodes", node_id_json)

        if not len(nodes):
            raise NodeNotFoundException("Node not found for deactivation")

        if 'activationStatus' in nodes[0] and not nodes[0]['activationStatus']:
            self.logger.error("This node is already deactivated")
            raise NodeDeactivatedException()

        new_node = Node(nodes[0])

        # deletes all the packages installed on the node
        self.logger.info("deleting all packages on the node")
        node_deprovision = 1
        if new_node.data["provisionStatus"]:
            # deprovision shall be called only on provisioned nodes
            node_deprovision = new_node.deprovision_node()
        if node_deprovision == 1:
            self.logger.info("deleted all of the packages on node")
            # deletes the node entry from the database
            self.logger.info('deactivating node from persistence')
            deactivate_json = {
                "activationStatus": False,
                "provisionStatus": False
            }
            self.persistence_manager.update("nodes", node_id_json,
                                            deactivate_json)
            return XprResponse('success', '', {})
        else:
            self.logger.error('Node deletion failed: kubernetes error')
            raise ProvisionKubernetesException("Deactivation Failed")

    def assign_node(self, assign_json):
        """
            assigns a node to a user

            assigns a node with development vm type to a user

            Parameters:
                assign_json [json] : Json with assignation info

            Return:
                returns appropriate output
        """
        if 'user' not in assign_json or 'node' not in assign_json:
            self.logger.error("Incomplete information in assign_json")
            raise IncompleteNodeInfoException("user and node info is required")
        elif not len(assign_json['user']) or not len(assign_json['node']):
            self.logger.error("Incomplete information in assign_json")
            raise IncompleteNodeInfoException(
                "user & node info shouldn't be empty")

        uid_json = {"address": assign_json['node']}
        user = assign_json['user']
        users = UserManager(self.persistence_manager).get_users({"uid": user})
        nodes = self.persistence_manager.find('nodes', uid_json)
        if len(users) == 0:
            raise UserNotFoundException("User not found")
        elif len(nodes) == 0:
            raise NodeNotFoundException("Node not found")
        else:
            if 'provisionStatus' not in nodes[0]:
                raise UnProvisionedNodeException("Node is not provisioned")
            elif not nodes[0]['provisionStatus']:
                raise UnProvisionedNodeException("Node is not provisioned")
            elif nodes[0]['nodetype'] != 'DEVVM':
                raise NodeTypeException(
                    "Assign only works for nodes of type DEVVM")

            user_nodes = []
            for node_dict in users[0]['nodes']:
                user_nodes.append(node_dict['address'])
            if assign_json['node'] in user_nodes:
                raise NodeAlreadyAssignedException()

        new_node = Node(nodes[0])
        out, err = new_node.assign_node_to_user(user)
        try:
            if not len(out.readlines()) and len(err.readlines()):
                self.logger.error("Assignation failed because of errors")
                raise NodeAssignException(
                    "Assignation failed due to internal error")
            else:
                temp_node = {'address': nodes[0]['address']}
                nodes = [] if 'nodes' not in users[0] else users[0]['nodes']
                nodes.append(temp_node)
                self.persistence_manager.update('users', {"uid": user},
                                                {"nodes": nodes})
        except Exception:
            self.logger.error("caught exception while assigning the node")
            raise NodeAssignException(
                "Assignation failed due to internal error")

    def modify_node(self, changes_json):
        """
            modify_node updates the node info in the persistence

            checks if node is available and then updates
            the info as per changes_json

            Parameters:
                changes_json [json] : json with node changes info

            Return:
                returns xprresponse object
        """
        if 'address' not in changes_json:
            raise IncompleteNodeInfoException("Node address not provided")

        uid_json = {"address": changes_json['address']}
        self.logger.info(f"Modifying node information of {uid_json}")
        self.logger.debug(f"Info provided to be modified is {changes_json}")
        # checks if the user is present in persistence
        self.logger.info("Checking if the node is present in the persistence")
        node = self.persistence_manager.find("nodes", uid_json)
        if len(node) == 0:
            self.logger.error(
                f"node {uid_json['address']} not found in the persistence")
            raise NodeNotFoundException()

        if 'activationStatus' in changes_json and \
            not changes_json['activationStatus']:
            raise CallDeactivateNodeException()

        self.logger.info("updating the user information")
        self.persistence_manager.update("nodes", uid_json, changes_json)

    def delete_node(self, node_id):
        """
            deletes the node from persistence

            Deletes all the installed packages of the node on server
            then deletes the node from database

            Parameters:
                node_id [str] : name of the node

            Return:
                returns appropriate output
        """
        self.logger.info(f"request received for deactivating node {node_id}")
        node_id_json = {"address": node_id}
        self.logger.info("checking persistence if node is present or not")
        nodes = self.persistence_manager.find("nodes", node_id_json)

        if nodes and len(nodes):
            self.logger.info("deleting all packages on the node")
            new_node = Node(nodes[0])
            node_deletion = new_node.deprovision_node()
            if node_deletion == 1:
                self.logger.info("deleted all of the packages on node")
                # deletes the node entry from the database
                self.logger.info('deleting node from persistence')
                self.persistence_manager.delete("nodes", node_id_json)
            else:
                self.logger.error('Node deletion failed: kubernetes error')
                raise NodeDeletionKubernetesException()
        else:
            raise NodeNotFoundException()

    def update_all_nodes(self, filter_json=None, branch_name="master"):
        """
        Update the xpresso project in all the nodes
        Args:
            filter_json: dictionary to updated specific set of nodes
            branch_name: name of the branch to which xpresso project will be
                         updated

        Returns:
            (list, list): list of update node and list of non updated node
        """

        if filter_json is None:
            filter_json = {}
        filtered_node_list = self.get_nodes(filter_json=filter_json)
        updated_list = []
        non_updated_list = []

        update_cmd = (
            f"cd {self.config['general']['package_path']} && "
            f"python3 xpresso/ai/admin/infra/clients/xpr_pkg.py "
            f"--conf config/common.json "
            f"--type install "
            f"--package UpdateLocalXpressoPackage "
            f"--parameters '{{\"branch_name\": \"{branch_name}\"}}' && "
            f"cp config/common_{self.config['env']}.json "
            f"config/common.json ")
        self.logger.debug(update_cmd)
        for node in filtered_node_list:
            node_address = node["address"]
            ssh_client = SSHUtils(node_address)

            if ssh_client.client is None:
                self.logger.warning(
                    f"unable to login to server: {node_address}")
                non_updated_list.append(node_address)
                continue
            std_response = ssh_client.exec(update_cmd)
            self.logger.debug(f"\n\n STDERR : \n{std_response['stderr']}\n")
            if std_response['status'] == 0:
                updated_list.append(node_address)
            else:
                non_updated_list.append(node_address)
            ssh_client.close()
        return updated_list, non_updated_list
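# Usage sketch: roll an xpresso update across all registered cluster masters;
# the filter keys mirror the node documents used above, and
# `persistence_manager` is configured as in the SSOManager sketch:
node_manager = NodeManager(persistence_manager)
updated, failed = node_manager.update_all_nodes(
    filter_json={"nodetype": "CLUSTER_MASTER"},
    branch_name="master")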
class bitbucketapi():
    """
        bitbucketapi class defines methods to work with bitbucket repos

        This class is used in creating and updating repos on bitbucket.
        bitbucket standard RestAPI 2.0 has been used for creating project
        and repository. Standard git commands through subprocess are used
        in cloning & updating the repository

        ....

        Methods
        -------
            exec_command()
                executes a linux command through subprocess

            create_bitbucket_project()
                creates a project on bitbucket using RESTAPI

            create_bitbucket_repo()
                creates a repo on bitbucket using RESTAPI

            clone_bitbucket_repo()
                clones a repo on bitbucket using git command
                through subprocess

            push_bitbucket_repo()
                pushes updated code to bitbucket using git
                command through subprocess
    """
    config_path = XprConfigParser.DEFAULT_CONFIG_PATH
    logger = XprLogger()

    def __init__(self):
        self.logger = XprLogger()
        self.config = XprConfigParser(self.config_path)
        self.defaulturl = self.config['bitbucket']['restapi']
        self.teamname = self.config['bitbucket']['teamname']
        self.username = self.config['bitbucket']['username']
        self.password = self.config['bitbucket']['password']

        # Following project format provided for bitbucket RESTAPI
        self.defaultprojectbody = {
            "name": "",
            "description": "",
            "key": "",
            "is_private": False
        }
        # Following repo format provided for bitbucket RESTAPI
        self.defaultrepobody = {"scm": "git", "project": {"key": ""}}

    def exec_command(self, command: list, inputflag: bool, inputcmd: str,
                     wd: str) -> bool:
        """
            exec_command executes a input command through subprocess

            Takes command and current working directory
            to execute the command there. Also takes the argument
            for prompt input in some cases

            ....

            Parameters
            ----------
                command -> input command to be executed
                inputflag -> flag to specify if any input prompt is present
                inputcmd -> input prompt in case required
                wd -> working directory where the command needs to be
                        executed

            Returns
            -------
                returns a True or False boolean based on execution status
        """
        # executes the command at the specified working directory path
        self.logger.info(f"Execute Command :{command} @ {wd}")
        process = subprocess.Popen(command,
                                   cwd=wd,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)

        # In case an input prompt is expected and provided
        if inputflag and inputcmd:
            # subprocess Popen returns a stdin filestream.
            # The input string has to be converted to bytes before
            # writing to the stream
            inputbytes = str.encode(inputcmd + '\n')
            # Providing input, e.g. a password, to the stdin stream
            process.stdin.write(inputbytes)
            process.stdin.flush()

        # returncode will be None until the process is complete
        # provide timeout case to prevent forever loop
        while process.returncode is None:
            self.logger.info("Waiting for the command execution to end")
            process.wait()

        # once the process is complete, returncode will be 0 on success
        if process.returncode != 0:
            stderror = process.stderr.readlines()
            self.logger.error(f"\n Error in command execution: \n {stderror}")
            return False
        else:
            self.logger.info("Command successfully executed")
            return True

    def create_bitbucket_project(self, projectjson: dict) -> dict:
        """
            creates a project on bitbucket

            creates a project on bitbucket through RESTAPI.
            using bitbucket api v2.0. Project info is provided
            as body in post request

            ....
            Parameters
            ----------
                projectjson
                    information on project to be created

            Returns
            -------
                returns the status code of post request

        """
        body = deepcopy(self.defaultprojectbody)
        body['name'] = projectjson['name']
        body['description'] = projectjson['projectDescription']
        body['key'] = projectjson['name'] + '_xpr'
        # Team name should be provided.
        create_project_url = self.defaulturl + teams + self.teamname + projects
        self.logger.debug(f"New project creation url: {create_project_url}")
        self.logger.debug(f"project body : {body}")
        projectcreation = requests.post(create_project_url,
                                        json=body,
                                        auth=(self.username, self.password))
        print("projectcreation is ", projectcreation.text)
        self.logger.info(f"projectcreation response is {projectcreation.text}")
        return projectcreation.json()

    def create_bitbucket_repo(self, projectjson: dict) -> dict:
        """
            creates a repo on bitbucket

            creates a repository on bitbucket through RESTAPI.
            using bitbucket api v2.0. Project info is provided
            as body in post request

            ....
            Parameters
            ----------
                projectjson
                    same project information is used in creating
                    the repository

            Returns
            -------
                returns response json of repocreation. The json
                contain links & info to the repository.
        """
        body = deepcopy(self.defaultrepobody)
        reponame = projectjson['name'] + '_sc'
        body['project']['key'] = projectjson['name'] + '_xpr'
        create_repo_url = self.defaulturl + repos + self.teamname + reponame
        self.logger.debug(f"New repo creation url: {create_repo_url}")
        self.logger.debug(f"repo body : {body}")
        repocreation = requests.post(create_repo_url,
                                     json=body,
                                     auth=(self.username, self.password))
        print("\n repocreation is : ", repocreation.text)
        self.logger.info(f"repocreation response is {repocreation.text}")
        return repocreation.json()

    def clone_bitbucket_repo(self, clone_link: str,
                             local_clone_path: str) -> int:
        """
            Clones a repo from bitbucket to local system.

            clones a repository to the specified path in the
            input argument. uses git commands through subprocess
            to clone the repo.

            ....
            Parameters
            ----------
                clone_link
                    bitbucket link to the repository

                local_clone_path
                    path on local server where the repo
                    needs to be cloned
            Returns
            -------
                returns nothing; raises BitbucketCloneException on failure

        """
        clone_command = ['git', 'clone', clone_link, local_clone_path]
        self.logger.info(f"Cloning {clone_link} to {local_clone_path}")
        # exec_command internally calls subprocess to clone the repo
        clone_repo_status = self.exec_command(clone_command, False, None, None)
        if not clone_repo_status:
            self.logger.info("Cloning failed")
            raise BitbucketCloneException("Cloning failed.")

    def push_repo_to_bitbucket(self, remotepath: str) -> bool:
        """
            pushes the repository to bitbucket

            After updating the code, the repository is pushed
            to bitbucket using git commands

            ....
            Parameters
            ----------
                remotepath
                    path of the repository on the local server

            Returns
            -------
                returns the status of the push request

        """
        # reduce add and commit to single call
        gitaddcmd = ["git", "add", "-A"]
        gitcommitcmd = ["git", "commit", "-m", "Initial commit"]
        gitpushcmd = ["git", "push", "-u", "origin", "master"]
        for gitcmd in [gitaddcmd, gitcommitcmd, gitpushcmd]:
            gitstatus = self.exec_command(gitcmd, False, "", wd=remotepath)
            if not gitstatus:
                return False
            self.logger.info(f"{' '.join(gitcmd)} : Done")
        return True

    def delete_bitbucket_repo(self, repo):
        pass
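# Sketch of the create-clone-push flow these methods implement. The project
# fields follow the usage above; the clone path is hypothetical and the
# response shape used for the clone link is an assumption about the Bitbucket
# API 2.0 repository object:
bb = bitbucketapi()
project_json = {"name": "demo", "projectDescription": "demo project"}
bb.create_bitbucket_project(project_json)
repo = bb.create_bitbucket_repo(project_json)
clone_link = repo["links"]["clone"][0]["href"]
bb.clone_bitbucket_repo(clone_link, "/tmp/demo_sc")
bb.push_repo_to_bitbucket("/tmp/demo_sc")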
class XpressoControllerCLI:
    """
    It takes the command line arguments and processes it as needed. xprctl
    binary uses this class to serve the command
    """

    COMMAND_KEY_WORD = "command"
    USER_ARGUMENT = "user"
    FILE_ARGUMENT = "file"
    INPUT_ARGUMENT = "input"
    ALL_ARGUMENT = "all"
    BRANCH_ARGUMENT = "branch"

    def __init__(self):
        self.controller_client = ControllerClient()
        self.command = None
        self.arguments = {}
        self.SUPPORTED_COMMANDS = {}
        self.initialize_commands()
        self.logger = XprLogger()

    def initialize_commands(self):
        """
        Creates a mapping of commands to functions
        """
        try:
            self.SUPPORTED_COMMANDS = {
                "list": self.list_supported_commands,
                "login": self.login,
                "logout": self.logout,
                "get_cluster": self.get_clusters,
                "register_cluster": self.register_cluster,
                "deactivate_cluster": self.deactivate_cluster,
                "get_users": self.get_users,
                "register_user": self.register_user,
                "modify_user": self.modify_user,
                "deactivate_user": self.deactivate_user,
                "update_password": self.update_password,
                "get_nodes": self.get_nodes,
                "register_node": self.register_node,
                "deactivate_node": self.deactivate_node,
                "provision_node": self.provision_node,
                "assign_node": self.assign_node,
                "create_project": self.create_project,
                "register_project": self.create_project,
                "get_project": self.get_project,
                "deactivate_project": self.deactivate_project,
                "build_project": self.build_project,
                "get_build_version": self.get_build_version,
                "deploy_project": self.deploy_project,
                "undeploy_project": self.undeploy_project,
                "modify_project": self.modify_project,
                "version": self.get_version,
                "update": self.update,
                "create_repo": self.create_repo,
                "create_branch": self.create_branch,
                "push_dataset": self.push_dataset,
                "pull_dataset": self.pull_dataset,
                "list_dataset": self.list_dataset
            }
        except AttributeError:
            raise CLICommandFailedException("CLI issue. Contact developer to "
                                            "fix it.")

    def extract_argument(self, argument):
        if argument in self.arguments:
            return self.arguments[argument]
        return None

    def extract_json_from_file_or_input(self):
        """
        Extracts json data from either file or input
        """
        file_fs = self.extract_argument(self.FILE_ARGUMENT)
        input_json = self.extract_argument(self.INPUT_ARGUMENT)
        if input_json:
            try:
                data = json.loads(input_json)
            except json.JSONDecodeError:
                raise CLICommandFailedException("Invalid Json file")
        elif file_fs:
            try:
                data = json.load(file_fs)
            except json.JSONDecodeError:
                raise CLICommandFailedException("Invalid Json file")
        else:
            raise CLICommandFailedException("Please provide input json using "
                                            "-f/--file or -i/--input")
        return data

    def execute(self, **kwargs):
        """
        Validates the command provided and calls the relevant function for
        execution

        Args:
            kwargs: It takes kwargs as argument which should contain the
                    argument passed in command line
        """
        self.arguments = kwargs
        if self.COMMAND_KEY_WORD not in self.arguments:
            raise CLICommandFailedException("No valid command provided."
                                            "Please type xprctl list for "
                                            "complete list of commands")

        command = self.arguments[self.COMMAND_KEY_WORD]

        if command not in self.SUPPORTED_COMMANDS:
            raise CLICommandFailedException(f"{command} not supported")

        try:
            self.logger.info(f"executing command {command}"
                             f"with argument {self.arguments}")
            response = self.SUPPORTED_COMMANDS[command]()
            self.logger.info(f"Command executed with response {response}")
            return response
        except TypeError as e:
            self.logger.error(e)
            raise CLICommandFailedException(f"{command} is not executable")

    def list_supported_commands(self):
        return {"Commands": list(self.SUPPORTED_COMMANDS.keys())}

    def login(self):
        username = self.extract_argument(self.USER_ARGUMENT)
        if not username:
            username = input('Username: ')
        password = getpass('Password: ')
        response = self.controller_client.login(username, password)
        if "validation_token" in response:
            message = f"Please login at {response['url']}"
            print(CLIResponseFormatter(data=message).get_str())
            print("Waiting for login to get successful...")
            return self.controller_client.sso_validate(
                response["validation_token"])
        return response

    def logout(self):
        return self.controller_client.logout()

    def get_clusters(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.get_clusters(data)

    def deactivate_cluster(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.deactivate_cluster(data)

    def register_cluster(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.register_cluster(data)

    def get_users(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.get_users(data)

    def register_user(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.register_user(data)

    def modify_user(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.modify_user(data)

    def deactivate_user(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.deactivate_user(data)

    def register_node(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.register_node(data)

    def get_nodes(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.get_nodes(data)

    def provision_node(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.provision_node(data)

    def deactivate_node(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.deactivate_node(data)

    def assign_node(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.assign_node(data)

    def create_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.create_project(data)

    def get_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.get_project(data)

    def deactivate_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.deactivate_project(data)

    def modify_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.modify_project(data)

    def build_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.build_project(data)

    def get_build_version(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.get_build_version(data)

    def deploy_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.deploy_project(data)

    def undeploy_project(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.undeploy_project(data)

    def get_version(self):
        return self.controller_client.fetch_version()

    def update(self):
        return self.controller_client.update_xpresso()

    def update_password(self):
        username = self.extract_argument(self.USER_ARGUMENT)
        if not username:
            username = input('Username: ')
        old_password = getpass("Current Password:")
        new_password = getpass("New Password:")
        # Assumption: the controller client call is update_password();
        # the masked source obscured the exact method name
        return self.controller_client.update_password({
            "uid": username,
            "old_pwd": old_password,
            "new_pwd": new_password
        })

    def create_repo(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.create_repo(data)

    def create_branch(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.create_branch(data)

    def push_dataset(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.push_dataset(data)

    def pull_dataset(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.pull_dataset(data)

    def list_dataset(self):
        data = self.extract_json_from_file_or_input()
        return self.controller_client.list_dataset(data)

class HTTPHandler:
    """
    Manages the HTTP request and response for external queries.
    """
    def __init__(self):
        self.logger = XprLogger()
        self.empty_response = HTTPResponse(500, {}, {})

    def send_request(self, request: HTTPRequest) -> HTTPResponse:
        """
        Sends the HTTP request and gets the response

        Args:
            request(HTTPRequest): Object of HTTP Request which contains
                                  necessary request data

        Returns:
            HTTPResponse: response object
        """

        try:
            self.logger.info(f"Sending HTTP Request: {request}")
            if request.method == HTTPMethod.GET:
                response = requests.get(request.url,
                                        json=request.data,
                                        headers=request.headers)
            elif request.method == HTTPMethod.POST:
                response = requests.post(request.url,
                                         json=request.data,
                                         headers=request.headers)
            elif request.method == HTTPMethod.PUT:
                response = requests.put(request.url,
                                        json=request.data,
                                        headers=request.headers)
            elif request.method == HTTPMethod.DELETE:
                response = requests.delete(request.url,
                                           json=request.data,
                                           headers=request.headers)
            elif request.method == HTTPMethod.HEAD:
                response = requests.head(request.url,
                                         json=request.data,
                                         headers=request.headers)
            elif request.method == HTTPMethod.OPTIONS:
                response = requests.options(request.url,
                                            json=request.data,
                                            headers=request.headers)
            else:
                raise HTTPInvalidRequestException("Invalid HTTP Method")

            parsed_response = self.parse_response(response)
            self.logger.info(f"Received HTTP Request: {parsed_response}")
        except (requests.HTTPError, requests.ConnectionError,
                requests.ConnectTimeout, requests.exceptions.SSLError) as e:
            self.logger.error(e)
            raise HTTPRequestFailedException("Request Failed")
        except (requests.exceptions.InvalidHeader,
                requests.exceptions.InvalidSchema,
                requests.exceptions.InvalidURL) as e:
            self.logger.error(e)
            raise HTTPInvalidRequestException("Invalid Request Object")
        return parsed_response

    def parse_response(self, response: requests.Response) -> HTTPResponse:
        """ Convert requests.Response into HTTP Response object"""
        if not response:
            return self.empty_response
        return HTTPResponse(response_code=response.status_code,
                            response_data=response.text,
                            response_headers=response.headers)
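
A hedged usage sketch of HTTPHandler. The HTTPRequest constructor keywords below are assumptions inferred from the attributes send_request reads (method, url, data, headers), and response_code is assumed from the HTTPResponse construction above.

# Assumed HTTPRequest signature; only method/url/data/headers matter here.
request = HTTPRequest(method=HTTPMethod.GET,
                      url="https://example.com/api",  # illustrative URL
                      data=None,
                      headers={"Accept": "application/json"})
handler = HTTPHandler()
response = handler.send_request(request)
print(response.response_code)  # attribute name assumed from HTTPResponse
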
class XprProjectManager:

    LINUX_UID_KEY = "linux_uid"
    MIN_VALID_LINUX_UID = 1001

    def __init__(self, persistence_manager):
        self.logger = XprLogger()
        self.persistence_manager = persistence_manager

    def create_project(self, project_json: dict) -> object:
        """
        Creates a new project in persistence and on Bitbucket.

        Creates a new project and repo on Bitbucket, sets up the
        necessary NFS mount, then adds the project JSON to persistence.

        Parameters:
            project_json: json with project information

        Return:
            returns an XprResponse object
        """
        # checks if the project_json has the complete info required
        self.logger.info("checking the provided project information")

        # Updating next linux uid
        project_json[self.LINUX_UID_KEY] = self.get_next_linux_uid()
        new_project = Project(project_json)
        new_project.project_info_check(self.persistence_manager)
        new_project.complete_project_info()
        self.logger.debug(f"Updated project info is: {new_project.data}")

        self.logger.info("calling setup_project to complete the setup")
        setup = setup_project(new_project.data)
        setup_code = setup['status']
        self.logger.info(f"setup_project status code is {setup_code}")
        if setup_code != 200:
            self.logger.error("Project setup failed")
            return XprResponse('failure', setup_code,
                               {"message": "Project setup failed"})
        self.logger.info("project setup completed")
        self.logger.info(
            "Adding the project with complete info to persistence")
        self.persistence_manager.insert("projects", setup['project_json'],
                                        False)
        self.logger.info("project successfully added to persistence")

        # allocate required environments
        self.logger.info("Allocating project environments")
        env_manager = EnvManager()
        env_manager.allocate_env(project_json["name"],
                                 project_json["environments"])
        self.logger.info("Allocated project environments")

        new_project.filter_display_fields()
        return XprResponse("success", None, new_project.data)

    def get_next_linux_uid(self):
        """
        Checks the database and finds the next linux uid which needs to be
        assigned to a project
        Returns:
            A valid UID
        """
        all_projects = self.persistence_manager.find("projects", {})
        new_linux_uid = max([self.MIN_VALID_LINUX_UID] + [
            project[self.LINUX_UID_KEY]
            for project in all_projects if self.LINUX_UID_KEY in project
        ]) + 1
        return new_linux_uid
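
A worked example of the allocation above: with existing UIDs 1001 and 1005, and one project missing the key, the next UID is max(1001, 1001, 1005) + 1 = 1006.

MIN_VALID_LINUX_UID = 1001
all_projects = [{"linux_uid": 1001}, {"linux_uid": 1005}, {"name": "no-uid"}]
new_linux_uid = max([MIN_VALID_LINUX_UID] + [
    p["linux_uid"] for p in all_projects if "linux_uid" in p
]) + 1
assert new_linux_uid == 1006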

    def get_projects(self,
                     filter_json: dict,
                     apply_display_filter=True) -> object:
        """
            Calls the persistence with input filters to fetch the list of projects.

            Parameters:
                filter_json [json] : json with filter key & value pairs

            Return:
                returns a xprresponse object
        """
        self.logger.info("retrieving the list of projects from persistence")
        projects = self.persistence_manager.find("projects", filter_json)
        self.logger.info("calling filter_project_output")
        self.logger.debug(f"projects are: {projects}")
        if apply_display_filter:
            filtered_projects = []
            for project_json in projects:
                temp_project = Project(project_json)
                temp_project.filter_display_fields()
                filtered_projects.append(temp_project.data)
            projects = filtered_projects
        self.logger.debug(f"\n Filtered projects are: {projects}")
        # The find call retrieves full project documents from persistence;
        # the display filtering above trims them to output fields
        return projects

    def modify_project(self, changes_json: dict):
        """
            Modifies a project in persistence and on bitbucket

            Parameters:
                changes_json: project information that needs to be modified

            Return:
                returns an XprResponse object on failure, None on success
        """
        if 'name' not in changes_json:
            raise IncompleteProjectInfoException(
                "Project name needs to be provided for modify_project")

        uid_json = {'name': changes_json['name']}
        self.logger.info("checking if the project is already present")
        projects = self.persistence_manager.find("projects", uid_json)
        if len(projects) == 0:
            self.logger.error("cannot modify a project which doesn't exist.")
            raise NoProjectException("Cannot Modify unregistered project")
        self.logger.info("calling modify_info_check to validate the info")
        new_project = Project(projects[0])
        new_project.modify_info_check(changes_json, self.persistence_manager)
        self.logger.info("modify_project_locally has been called")
        modify_status = modify_project_locally(projects[0], changes_json)
        if modify_status != 200:
            self.logger.error("modify_project_locally failed")
            return XprResponse('failure', modify_status,
                               {"message": "Modify project failed"})

        self.logger.info(
            "project info is being modified before updating @persistence")
        update_json = new_project.modify_project_info(changes_json)
        self.persistence_manager.update("projects", uid_json, update_json)
        self.logger.info("Project modified successfully")

        # allocate required environments
        self.logger.info("Allocating project environments")
        env_manager = EnvManager()
        env_manager.allocate_env(changes_json["name"],
                                 changes_json["environments"])
        self.logger.info("Allocated project environments")

    def deactivate_project(self, uid_json: dict):
        """
            Deactivates a project. updates the appropriate flags in persistence

            Parameters:
                uid_json [dict] : json identifying the project

            Return:
                returns None; raises an exception on failure
        """
        # deletes the project from persistence
        self.logger.info("Checking if the project actually exists")
        projects = self.persistence_manager.find("projects", uid_json)
        if len(projects) == 0:
            raise NoProjectException()
        elif 'activationStatus' not in projects[0]:
            projects[0]['activationStatus'] = True

        if projects[0]['currentlyDeployed']:
            raise ProjectDeactivationException(
                "Project Currently deployed. Undeploy first.")
        elif not projects[0]['activationStatus']:
            raise ProjectDeactivationException("Project already deactivated")

        active_flag_json = {"activationStatus": False}
        self.persistence_manager.update("projects", uid_json, active_flag_json)
        # update_id = self.db_utils.delete("projects", uid_json)

        # remove allocated environments
        self.logger.info("Removing project environments")
        env_manager = EnvManager()
        env_manager.remove_env(uid_json["name"])
        self.logger.info("Removed project environments")
Example #17
    def execute(self):
        """
        installs kubernetes master node on the machine.
        """

        logger = XprLogger()
        if not linux_utils.check_root():
            logger.fatal("Please run this as root")
        logger.info("Initialising Kubernetes master node...")
        try:
            pod_network_cidr = self.config[self.PACKAGES][self.KUBE_SECTION][
                self.CIDR_KEY]
            init = 'kubeadm init --token-ttl=0 --pod-network-cidr={}'.format(
                pod_network_cidr)
            (_, output, _) = self.executor.execute_with_output(init)
            output = output.splitlines()
            join_command = (output[-2].decode("utf-8").rstrip('\\') +
                            output[-1].decode("utf-8"))
            # waiting time for master node to become active
            time.sleep(90)
            master_ip = linux_utils.get_ip_address()
            cluster_path = '/mnt/nfs/data/k8/k8_clusters/' \
                           '{}'.format(master_ip)
            linux_utils.create_directory(cluster_path, 0o755)
            join_filename = '{}/{}.txt'.format(cluster_path, master_ip)
            linux_utils.write_to_file(join_command, join_filename, "w+")
            if not os.path.isfile(join_filename):
                logger.error('Failed to write join command to file. Exiting.')
                raise CommandExecutionFailedException(
                    "Failed to write join command to file")
            kubeconfig = 'KUBECONFIG=/etc/kubernetes/admin.conf'
            environment_path = '/etc/environment'
            linux_utils.write_to_file(kubeconfig, environment_path, "a")
            os.environ["KUBECONFIG"] = "/etc/kubernetes/admin.conf"
            # $HOME is not expanded by Python, so resolve it explicitly
            kube_directory = os.path.expanduser('~/.kube')
            linux_utils.create_directory(kube_directory, 0o755)
            copy_config = 'sudo cp -f /etc/kubernetes/admin.conf' \
                          ' $HOME/.kube/config'
            self.executor.execute(copy_config)
            chown = 'sudo chown $(id -u):$(id -g) $HOME/.kube/config'
            self.executor.execute(chown)
            flannel = 'kubectl apply -f https://raw.githubusercontent.com' \
                      '/coreos/flannel/master/Documentation/kube-flannel.yml'
            self.executor.execute(flannel)
            generate_api_token = "kubectl get secret $(kubectl get " \
                                 "serviceaccount default -o jsonpath=" \
                                 "'{.secrets[0].name}') -o jsonpath=" \
                                 "'{.data.token}' | base64 --decode"
            status, stdout, stderr = self.executor.execute_with_output(
                generate_api_token)
            if status != 0 or len(stderr.decode('utf-8')):
                raise CommandExecutionFailedException(
                    "Token generation failed")
            token = stdout.decode("utf-8")
            self.persistence_manager.update("nodes", {"address": master_ip},
                                            {"token": token})
            api_access = 'kubectl create clusterrolebinding permissive-binding \
                                  --clusterrole=cluster-admin \
                                  --user=admin \
                                  --user=kubelet \
                                  --group=system:serviceaccounts'

            self.executor.execute(api_access)
            docker_secret = \
                'kubectl create secret docker-registry dockerkey ' \
                '--docker-server https://dockerregistry.xpresso.ai/ ' \
                '--docker-username xprdocker --docker-password Abz00ba@123'
            self.executor.execute(docker_secret)

        except CommandExecutionFailedException as e:
            logger.error("Failed to initialise master. \n{}".format(str(e)))
            return False
        return True
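
The join command spans the last two output lines of kubeadm init, the first ending in a backslash continuation; the splicing above can be checked against sample bytes (token values illustrative).

sample_output = (
    b"...earlier kubeadm init output...\n"
    b"kubeadm join 10.0.0.1:6443 --token abcdef.0123456789abcdef \\\n"
    b"    --discovery-token-ca-cert-hash sha256:1234abcd\n"
)
lines = sample_output.splitlines()
join_command = (lines[-2].decode("utf-8").rstrip('\\') +
                lines[-1].decode("utf-8"))
print(join_command)  # one joined "kubeadm join ..." command line
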
class KubernetesDeploy:
    """
    class containing methods to deploy a project in Kubernetes
    """
    def __init__(self, persistence_manager):

        self.persistence_manager = persistence_manager
        self.logger = XprLogger()
        config_path = XprConfigParser.DEFAULT_CONFIG_PATH
        self.config = XprConfigParser(config_path)
        PROJECTS_SECTION = 'projects'
        DEPLOYMENT_FILES_FOLDER = 'deployment_files_folder'
        self.deployment_files_folder = self.config[PROJECTS_SECTION][
            DEPLOYMENT_FILES_FOLDER]

        if not os.path.isdir(self.deployment_files_folder):
            os.makedirs(self.deployment_files_folder, 0o755)

    def deployment_yaml_generator(self, deployment):
        """
        generates yaml for creating deployment in kubernetes
        Args:
            deployment: Service Deployment
        Returns:
            str: path of yaml generated

        """
        self.logger.info('entering deployment_yaml_generator')
        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(
            deployment.project_name)
        component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        # this will be the name used in the deployment file
        deployment_name = '{}--{}'.format(project_name, component)
        # reading contents from the standard xpresso deployment yaml file
        with open("config/kubernetes-deployfile.yaml", "r") as f:
            content = f.read()
        yaml_content = self.populate_yaml_content(content, deployment,
                                                  deployment_name)

        filename = "{}/deployfile--{}.yaml".format(
            self.deployment_files_folder, deployment_name)
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting deployment_yaml_generator')
        return filename

    def persistent_volume_yaml_generator(self, deployment, persistence_type):
        """
        generates yaml for creating a persistent volume
        Args:
            deployment: Any Deployment
            persistence_type: either "volume" or "volume-claim"
        Returns:
            str: path of yaml generated

        """
        self.logger.info('entering persistent_yaml_generator')

        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(
            deployment.project_name)
        component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        # this will be the name used in the deployment file
        deployment_name = '{}--{}'.format(project_name, component)

        # reading contents from the standard xpresso deployment yaml file
        with open(f"config/kubernetes-persistent-{persstence_type}.yaml",
                  "r") as f:
            content = f.read()

        content = content.replace("K8_XPRESSO_COMPONENT_NAME",
                                  str(deployment_name))
        content = content.replace("K8_XPRESSO_PERSISTENT_STORAGE_SIZE",
                                  str(deployment.volume_size))
        content = content.replace("K8_XPRESSO_PROJECT_NAME", str(project_name))
        yaml_content = yaml.safe_load(content)

        filename = (
            f"{self.deployment_files_folder}"
            f"/persistent-{persstence_type}-file--{deployment_name}.yaml")
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting persistent_yaml_generator')
        return filename

    def populate_yaml_content(self, content, deployment, deployment_name):
        content = content.replace("K8_XPRESSO_COMPONENT_NAME",
                                  str(deployment_name))
        content = content.replace("K8_XPRESSO_COMPONENT_REPLICAS",
                                  str(deployment.replicas))
        content = content.replace("K8_XPRESSO_COMPONENT_IMAGE_NAME",
                                  str(deployment.docker_image))
        content = content.replace("K8_XPRESSO_COMPONENT_ENVIRONMENT_LIST",
                                  str(deployment.environment))
        content = content.replace("K8_XPRESSO_PROJECT_LINUX_UID",
                                  str(deployment.project_linux_uid))
        if deployment.need_persistence():
            content = content.replace("K8_XPRESSO_COMPONENT_VOLUME_MOUNT_PATH",
                                      str(deployment.volume_mount_path))

        # content = content.format(deployment_name, replicas, deployment_name,
        #                          image, deployment_name, environment)
        yaml_content = yaml.safe_load(content)

        # Remove persistence if not required
        if not deployment.need_persistence():
            try:
                del yaml_content["spec"]["template"]["spec"]["volumes"]
                del yaml_content["spec"]["template"]["spec"]["containers"][0][
                    "volumeMounts"]
            except (IndexError, KeyError):
                self.logger.warning("spec.template.spec.volumes not found")
        return yaml_content
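
populate_yaml_content is plain token substitution followed by a YAML parse; a minimal sketch with an illustrative two-field template:

import yaml

template = (
    "metadata:\n"
    "  name: K8_XPRESSO_COMPONENT_NAME\n"
    "spec:\n"
    "  replicas: K8_XPRESSO_COMPONENT_REPLICAS\n"
)
content = template.replace("K8_XPRESSO_COMPONENT_NAME", "demo--svc")
content = content.replace("K8_XPRESSO_COMPONENT_REPLICAS", "2")
print(yaml.safe_load(content))
# {'metadata': {'name': 'demo--svc'}, 'spec': {'replicas': 2}}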

    def service_yaml_generator(self, project_name, component, port):
        """
        generates yaml for creating service in kubernetes
        Args:
            project_name: project to be deployed
            component: component for which this yaml is generated
            port: array containing info of ports to be opened
        Returns: path of yaml generated

        """
        self.logger.info('entering service_yaml_generator')
        # reading contents from the standard xpresso service yaml file
        with open("config/kubernetes-servicefile.yaml", "r") as f:
            content = f.read()
        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(project_name)
        component = project_utils.modify_string_for_deployment(component)
        ports = []
        for i in port:
            temp = str(i)
            fixed_port = project_utils.modify_string_for_deployment(
                temp).replace("'", '"')
            ports.append(json.loads(fixed_port))
        # this will be the name used in the service file
        service_name = '{}--{}'.format(project_name, component)
        content = content.format(service_name, ports, service_name)
        yaml_content = yaml.safe_load(content)
        filename = "{}/servicefile--{}.yaml".format(
            self.deployment_files_folder, service_name)
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting service_yaml_generator')
        return filename
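
The port loop works because str() renders a dict with single quotes, which become valid JSON after the quote swap; the essential step in isolation (the real code also passes each string through modify_string_for_deployment first):

import json

port_entry = {"port": 8080, "targetPort": 8080}
as_text = str(port_entry).replace("'", '"')  # "{'port': ...}" -> '{"port": ...}'
assert json.loads(as_text) == port_entry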

    def namespace_yaml_generator(self, project_name):
        """
        generates yaml file to create a new namespace
        Args:
            project_name: name of the project to be deployed

        Returns: path of the yaml generated

        """
        self.logger.info('entering namespace_yaml_generator')
        with open("config/kubernetes-namespacefile.yaml", "r") as f:
            content = f.read()
        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(project_name)
        content = content.format(project_name)
        yaml_content = yaml.safe_load(content)
        filename = "{}/namespacefile--{}.yaml".format(
            self.deployment_files_folder, project_name)
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting namespace_yaml_generator')
        return filename

    def job_yaml_generator(self, deployment):
        """
        generates yaml file to create a job
         Args:
            deployment: Any Deployment
        Returns:
            str: path of yaml generated
        """
        self.logger.info('entering job_yaml_generator')
        # reading contents from the standard xpresso job yaml file

        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(
            deployment.project_name)
        component = project_utils.modify_string_for_deployment(
            deployment.component_name)
        # this will be the name used in the job file
        job_name = '{}--{}'.format(project_name, component)

        with open("config/kubernetes-jobfile.yaml", "r") as f:
            content = f.read()

        content = content.replace("K8_XPRESSO_COMPONENT_NAME", str(job_name))
        content = content.replace("K8_XPRESSO_COMPONENT_IMAGE_NAME",
                                  str(deployment.docker_image))
        content = content.replace("K8_XPRESSO_COMPONENT_ENVIRONMENT_LIST",
                                  str(deployment.environment))
        content = content.replace("K8_XPRESSO_COMPONENT_COMMAND",
                                  str(deployment.commands))
        content = content.replace("K8_XPRESSO_COMPONENT_REPLICAS",
                                  str(deployment.replicas))

        if deployment.need_persistence():
            content = content.replace("K8_XPRESSO_COMPONENT_VOLUME_MOUNT_PATH",
                                      str(deployment.volume_mount_path))
        # content = content.format(job_name, job_name, image, environment,
        #                          command, parallelism)
        yaml_content = yaml.safe_load(content)
        # Remove persistence if not required
        if not deployment.need_persistence():
            try:
                del yaml_content["spec"]["template"]["spec"]["volumes"]
                del yaml_content["spec"]["template"]["spec"]["containers"][0][
                    "volumeMounts"]
            except (IndexError, KeyError):
                self.logger.warning("spec.template.spec.volumes not found")
        filename = "{}/jobfile--{}.yaml".format(self.deployment_files_folder,
                                                job_name)
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting job_yaml_generator')
        return filename

    def cronjob_yaml_generator(self, project_name, component, schedule, image,
                               environment, args):
        """
        generates yaml file to create a cronjob
        :param environment: environment
        :param project_name: project name
        :param component: component name
        :param schedule: Cron Job schedule in standard Cron format
        :param image: docker image
        :param args: array of args to run
        :return: path of yaml generated
        """
        self.logger.info('entering cronjob_yaml_generator')
        if not project_utils.validate_cronjob_format(schedule):
            self.logger.error('Invalid cron schedule provided. Exiting.')
            raise InvalidCronScheduleException
        # reading contents from the standard xpresso cronjob yaml file
        with open("config/kubernetes-cronjobfile.yaml", "r") as f:
            content = f.read()
        # converting any special characters to '-'
        project_name = project_utils.modify_string_for_deployment(project_name)
        component = project_utils.modify_string_for_deployment(component)
        # this will be the name used in the job file
        cronjob_name = '{}--{}'.format(project_name, component)
        content = content.format(cronjob_name, schedule, cronjob_name, image,
                                 environment, args)
        yaml_content = yaml.safe_load(content)
        filename = "{}/cronjobfile--{}.yaml".format(
            self.deployment_files_folder, cronjob_name)
        with open(filename, "w+") as f:
            yaml.safe_dump(yaml_content, f)
        self.logger.info('exiting cronjob_yaml_generator')
        return filename

    def patch_deployment_client(self, path, project_name):
        """
        helper function to patch deployment for project as a given yaml file on
        Kubernetes via the Kubernetes API
        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)
        :return: status of patching (True/Error Code)
        """
        self.logger.info('entering patch_deploy_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.ExtensionsV1beta1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_deployment(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug("Deployment patched. Details : {}".format(
                    str(r)))
            self.logger.info('exiting patch_deploy_client')
            return True
        except ApiException as e:
            self.logger.error('Patching deployment failed. '
                              'Error info : {}.'.format(e))
            raise DeploymentCreationFailedException

    def deploy_client(self, path, project_name):
        """
        helper function to create deployment for a given yaml file on
        Kubernetes via the Kubernetes API
        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns: status of deployment (True/Error Code)

        """
        self.logger.info('entering deploy_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.ExtensionsV1beta1Api()
                # collecting response from API
                r = k8s_beta.create_namespaced_deployment(
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug("Deployment created. Details : {}".format(
                    str(r)))
            self.logger.info('exiting deploy_client')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the deployment
                self.patch_deployment_client(path, project_name)
                return True
            self.logger.error('Creation of deployment failed. Exiting.')
            raise DeploymentCreationFailedException
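
The create-then-patch-on-409 idiom above repeats for services, jobs, and cronjobs; stripped to its core it looks like the sketch below, shown against the current AppsV1Api rather than the ExtensionsV1beta1Api used above, and assuming the kubernetes client is already configured.

from kubernetes import client
from kubernetes.client.rest import ApiException

def create_or_patch_deployment(api: client.AppsV1Api, body: dict,
                               namespace: str):
    """Create the deployment; on a 409 Conflict, patch it in place."""
    try:
        return api.create_namespaced_deployment(namespace=namespace,
                                                body=body)
    except ApiException as e:
        if e.status != 409:
            raise
        return api.patch_namespaced_deployment(
            name=body["metadata"]["name"], namespace=namespace, body=body)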

    def patch_service_client(self, path, project_name):
        """
        helper function to patch service for project as a given yaml
        file on Kubernetes via the Kubernetes API
        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns: status of service patching (True/Error code)

        """
        self.logger.info('entering patch_service_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_service(
                    namespace=project_utils.modify_string_for_deployment(
                        project_name),
                    body=dep,
                    name=dep['metadata']['name'])
                self.logger.debug("Service patched. Details : {}".format(
                    str(r)))
            self.logger.info('exiting patch_service_client')
            return True
        except ApiException as e:
            self.logger.error('Patching service failed. Error details : '
                              '{}'.format(e))
            if e.status == 422:  # Unprocessable Entity
                self.logger.error("Can't patch service port.")
                raise PortPatchingAttemptedException
            raise ServiceCreationFailedException

    def create_service_client(self, path, project_name):
        """
        helper function to create service for a given yaml file on
        Kubernetes via the Kubernetes API
        Args:
            path: path of the yaml to be deployed
            project_name: project to be deployed (needed for namespace)

        Returns: status of service creation (True/Error code)

        """
        self.logger.info('entering create_service_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.create_namespaced_service(
                    namespace=project_utils.modify_string_for_deployment(
                        project_name),
                    body=dep)
                self.logger.debug("Service created. Details : {}".format(
                    str(r)))
            self.logger.info('exiting create_service_client')
            return True
        except ApiException as e:
            if e.status == 409:
                self.patch_service_client(path, project_name)
                return True
            self.logger.error('Creation of service failed. Exiting.')
            raise ServiceCreationFailedException

    def create_namespace_client(self, path):
        """
        helper function to create namespace for a given yaml file on
        Kubernetes via the Kubernetes API
        Args:
            path: path of the yaml

        Returns: status of namespace creation (True/Error Code)

        """
        self.logger.info('entering create_namespace_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                r = k8s_beta.create_namespace(body=dep)
                self.logger.debug("Namespace created. Details : {}".format(
                    str(r)))
            self.logger.info('exiting create_namespace_client')
            return True
        except ApiException as e:
            self.logger.error('Failed to create namespace. '
                              'Error info : {}.'.format(e))
            raise NamespaceCreationFailedException

    def patch_job_client(self, path, project_name):
        self.logger.info('entering patch_job_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.BatchV1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_job(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug("Job patched. Details : {}".format(str(r)))
            self.logger.info('exiting patch_job_client')
            return True
        except ApiException as e:
            self.logger.error('Patching job failed. '
                              'Error info : {}.'.format(e))
            raise JobCreationFailedException

    def create_job_client(self, path, project_name):
        """
        method to create a job in kubernetes
        :param path: path of the yaml file
        :param project_name: project name of which the job is a part
        :return: status (True/Error code)
        """
        self.logger.info('Entering create_job_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.BatchV1Api()
                # collecting response from API
                r = k8s_beta.create_namespaced_job(
                    namespace=project_utils.modify_string_for_deployment(
                        project_name),
                    body=dep)
                self.logger.debug("Job created. Details : {}".format(str(r)))
            self.logger.info('exiting create_job_client')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the job
                self.patch_job_client(path, project_name)
                return True
            self.logger.error('Creation of job failed. Exiting.')
            raise JobCreationFailedException

    def patch_cronjob_client(self, path, project_name):
        self.logger.info('entering patch_cronjob_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.BatchV1beta1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_cron_job(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug("CronJob patched. Details : {}".format(
                    str(r)))
            self.logger.info('exiting patch_cronjob_client')
            return True
        except ApiException as e:
            self.logger.error('Patching cronjob failed. '
                              'Error info : {}.'.format(e))
            raise CronjobCreationFailedException

    def create_cronjob_client(self, path, project_name):
        """
        method to create a cronjob in kubernetes
        :param path: path of the yaml file
        :param project_name: project name of which the cronjob is a part
        :return: status (True/Error code)
        """
        self.logger.info('Entering create_cronjob_client')
        try:
            with open(path) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.BatchV1beta1Api()
                # collecting response from API
                r = k8s_beta.create_namespaced_cron_job(
                    namespace=project_utils.modify_string_for_deployment(
                        project_name),
                    body=dep)
                self.logger.debug("Cron Job created. Details : {}".format(
                    str(r)))
            self.logger.info('exiting create_cronjob_client')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the cronjob
                self.patch_cronjob_client(path, project_name)
                return True
            self.logger.error('Creation of cron job failed. Exiting.')
            raise CronjobCreationFailedException

    def get_service_ip(self, deployment: ServiceDeployment):
        """
        method to get the list of IP addresses for services of a component
        Args:
            deployment: Service Deployment Object

        Returns: array of service IPs

        """
        self.logger.info('Entering get_service_ip method')
        service_name = '{}--{}'.format(
            project_utils.modify_string_for_deployment(
                deployment.project_name),
            project_utils.modify_string_for_deployment(
                deployment.component_name))
        k8s_beta = client.CoreV1Api()
        r = k8s_beta.read_namespaced_service(
            name=service_name,
            namespace=project_utils.modify_string_for_deployment(
                deployment.project_name))

        service_ips = []
        for port in r.spec.ports:
            service_ips.append('{}:{}'.format(deployment.master_node,
                                              port.node_port))
        self.logger.info('Exiting get_service_ip method')
        return service_ips
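
A worked example of the address construction above: a master node at 10.0.0.5 with NodePorts 30080 and 30443 yields two reachable endpoints.

master_node = "10.0.0.5"   # illustrative master address
node_ports = [30080, 30443]
service_ips = ['{}:{}'.format(master_node, p) for p in node_ports]
assert service_ips == ['10.0.0.5:30080', '10.0.0.5:30443']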

    def patch_persistence_volume(self, pv):
        """
        Helper function to patch persistence volume
        Args:
            pv: persistence volume yaml file
        :return: status of patching (True/Error Code)
        """
        self.logger.info('entering patch_persistence_volume')
        try:
            with open(pv) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.patch_persistent_volume(
                    name=dep['metadata']['name'], body=dep)
                self.logger.debug(
                    "Persistence volume patched. Details : {}".format(str(r)))
            self.logger.info('exiting patch_persistence_volume')
            return True
        except ApiException as e:
            self.logger.error('Patching PV failed. '
                              'Error info : {}.'.format(e))
            raise DeploymentCreationFailedException

    def patch_persistence_volume_claim(self, pv, pvc, project_name):
        """
        Helper function to patch persistence volume claim
        Args:
            pv: persistence volume yaml file
            pvc: persistence volume claim yaml file
            project_name: project to be deployed (needed for namespace)
        :return: status of patching (True/Error Code)
        """
        self.logger.info('entering patch_persistence_volume_claim')
        try:
            with open(pvc) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.patch_namespaced_persistent_volume_claim(
                    name=dep['metadata']['name'],
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        project_name))
                self.logger.debug(
                    "Persistence volume claim patched. Details : {}".format(
                        str(r)))
            self.logger.info('exiting patch_persistence_volume_claim')
            return True
        except ApiException as e:
            self.logger.error('Patching PVC failed. '
                              'Error info : {}.'.format(e))
            raise DeploymentCreationFailedException

    def create_persistence_if_required(self, deployment):
        """ Check if persistence is required, If yes then create one"""
        self.logger.debug("Checking for persistence")
        if not deployment.need_persistence():
            self.logger.debug("Persistence not needed.")
            return False

        self.logger.info("Persistence is needed")
        pv = self.persistent_volume_yaml_generator(deployment,
                                                   persistence_type="volume")
        pvc = self.persistent_volume_yaml_generator(
            deployment, persistence_type="volume-claim")
        try:
            with open(pv) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.create_persistent_volume(body=dep)
                self.logger.debug(
                    f"Persistence Volume created. Details : {str(r)}")
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the deployment
                self.patch_persistence_volume(pv)
                return True
            self.logger.error('Creation of PV failed. Exiting.')
            raise DeploymentCreationFailedException

        try:
            with open(pvc) as f:
                dep = yaml.safe_load(f)
                k8s_beta = client.CoreV1Api()
                # collecting response from API
                r = k8s_beta.create_namespaced_persistent_volume_claim(
                    body=dep,
                    namespace=project_utils.modify_string_for_deployment(
                        deployment.project_name))
                self.logger.debug(
                    f"Persistence Volume Claim created. Details : {str(r)}")

            self.logger.info('exiting create_persistence_if_required')
            return True
        except ApiException as e:
            if e.status == 409:  # in case of conflict, patch the deployment
                self.patch_persistence_volume_claim(pv, pvc,
                                                    deployment.project_name)
                return True
            self.logger.error('Creation of PVC failed. Exiting.')
            raise DeploymentCreationFailedException

    def run_deployment_steps(self, deployment: ServiceDeployment):
        try:
            self.create_persistence_if_required(deployment)
            deployment_yaml = self.deployment_yaml_generator(deployment)
            self.deploy_client(deployment_yaml, deployment.project_name)
            self.logger.debug(f'Deployment created for '
                              f'{deployment.component_name}. '
                              f'Now creating service.')
            service_yaml = self.service_yaml_generator(
                deployment.project_name, deployment.component_name,
                deployment.ports)
            self.create_service_client(service_yaml, deployment.project_name)
            self.logger.debug(f'Service created for '
                              f'{deployment.component_name}')
            return True
        except XprExceptions:
            self.logger.error('Error while running deployment steps. '
                              'Deployment failed.')
            raise ProjectDeploymentFailedException

    def run_job_steps(self, deployment: JobDeployment):
        if deployment.is_base_job():
            try:
                self.create_persistence_if_required(deployment)
                job_yaml = self.job_yaml_generator(deployment)
                self.create_job_client(job_yaml, deployment.project_name)
                self.logger.debug(f'Job created for '
                                  f'{deployment.component_name}')
            except XprExceptions:
                self.logger.error('Error while running job steps. '
                                  'Job creation failed.')
                raise JobCreationFailedException
        elif deployment.is_cronjob():
            try:
                self.create_persistence_if_required(deployment)
                # cronjob_yaml_generator takes individual fields, not the
                # deployment object; the schedule attribute name is assumed
                # from the JobDeployment contract
                cronjob_yaml = self.cronjob_yaml_generator(
                    deployment.project_name, deployment.component_name,
                    deployment.schedule, deployment.docker_image,
                    deployment.environment, deployment.commands)
                self.create_cronjob_client(cronjob_yaml,
                                           deployment.project_name)
                self.logger.debug(f'Cronjob created for '
                                  f'{deployment.component_name}')
            except XprExceptions:
                self.logger.error('Error while running job steps. '
                                  'Cronjob creation failed.')
                raise CronjobCreationFailedException
Example #19
class AptRepositoryPackage(AbstractPackage):
    """
    Sets up private aptitude repository on ubuntu VM

    """

    # Configuration Keys
    APT_SECTION = "apt-get-repo"
    NFS_PACKAGE_KEY = "nfs_package_folder"
    APT_PUBLIC_KEY = "public_key_file"
    APT_PRIVATE_KEY = "private_key_file"
    APT_HOSTED_PACKAGE_KEY = "hosted_package"
    PACKAGE_LIST_KEY = "package-list"
    DOCKER_NAME = "docker-name"
    META_PACKAGE_KEY = "meta_packages_folder"
    DOCKER_FILE_PATH_KEY = "dockerfile-path"

    DOCKER_IMAGE_VERSION = 0.1

    def __init__(self,
                 config_path=XprConfigParser.DEFAULT_CONFIG_PATH,
                 executor=None):
        if not executor:
            executor = LocalShellExecutor()
        super().__init__(executor=executor)

        self.config = XprConfigParser(config_path)["packages_setup"]
        self.logger = XprLogger()

        self.apt_config = self.config[self.APT_SECTION]
        self.public_key = self.apt_config[self.APT_PUBLIC_KEY]
        self.private_key = self.apt_config[self.APT_PRIVATE_KEY]
        self.hosted_package_folder = self.apt_config[
            self.APT_HOSTED_PACKAGE_KEY]
        self.sign_paraphrase = None
        self.sign_key_id = None
        self.home_folder = os.getcwd()

    def execute(self, parameters=None):
        if parameters:
            self.sign_paraphrase = parameters["paraphrase"]
            self.sign_key_id = parameters["key_id"]
        self.cleanup()
        self.pre_install()
        self.download_dependencies()
        self.setup_meta_packages()
        self.sign_packages()
        self.run_docker_container()

    def download_dependencies(self):
        """
        Generates the list of packages and all its dependencies. Download the
        packages into the directory

        install apt-rdepends to generate the list
        install apt-download to download the package
        Ignore error
        """

        with open(self.apt_config[self.PACKAGE_LIST_KEY]) as pkg_fp:
            pkg_list = pkg_fp.read().splitlines()

        if not pkg_list:
            return None
        os.chdir(self.home_folder)
        self.logger.info("Generating all dependencies")
        full_package_list = []
        for pkg in tqdm.tqdm(pkg_list):
            script = 'apt-rdepends {} |  grep -v "^ "'.format(pkg)
            self.logger.info(script)
            try:
                (_, output, _) = self.executor.execute_with_output(script)
                dependencies = output.splitlines()
                full_package_list += [x.decode() for x in dependencies]
            except CommandExecutionFailedException:
                self.logger.warning("Package fetch failed")
        full_package_set = set(full_package_list)
        # We have now full list. Download each of the dependencies.
        try:
            os.makedirs(self.hosted_package_folder, exist_ok=True)
            os.chdir(self.hosted_package_folder)
        except OSError:
            self.logger.warning("Could not create hosted package folder")

        self.logger.info("Download all dependencies => {}".format(os.getcwd()))
        self.logger.info(full_package_set)
        for pkg in tqdm.tqdm(list(full_package_set)):
            try:
                self.executor.execute(
                    "sudo -u xprops apt-get download {}".format(pkg))
            except CommandExecutionFailedException:
                self.logger.warning(
                    "Failed to download package {}".format(pkg))

    def setup_meta_packages(self):
        """
        Create meta package folder and build
        """
        os.chdir(self.home_folder)
        local_meta_folder = "{}/*.ns-control".format(
            self.apt_config[self.META_PACKAGE_KEY])
        self.logger.info(local_meta_folder)
        for meta_pkg in glob.iglob(local_meta_folder, recursive=True):
            try:
                abs_meta_pkg = os.path.join(os.getcwd(), meta_pkg)
                meta_pkg_folder = os.path.join(
                    self.hosted_package_folder,
                    os.path.basename(meta_pkg).split('.')[0])
                self.logger.info(meta_pkg_folder)
                os.makedirs(meta_pkg_folder, exist_ok=True)
                os.chdir(meta_pkg_folder)
                shutil.copy(abs_meta_pkg, '.')
                build_meta_pkg_script = "sudo -u xprops equivs-build {}".format(
                    os.path.basename(meta_pkg))
                self.logger.info(build_meta_pkg_script)
                self.logger.info(os.getcwd())
                self.executor.execute(build_meta_pkg_script)
            except OSError as e:
                # Ignoring
                self.logger.error(e)
                self.logger.error("Failed to create meta {}".format(meta_pkg))

    def sign_packages(self):
        """
        Sign packages using private key

        """
        os.chdir(self.home_folder)
        try:
            self.executor.execute('chmod 755 -R {}'.format(
                self.hosted_package_folder))
            self.logger.info("Importing Keys")
            self.executor.execute("gpg --import --batch {}".format(
                self.private_key))
            self.executor.execute(
                'expect -c "spawn gpg --edit-key {} '
                'trust quit; send \"5\ry\r\"; expect eof"'.format(
                    self.sign_key_id))
            os.chdir(self.hosted_package_folder)
            for deb_file in glob.iglob("{}/*.deb".format(
                    self.hosted_package_folder),
                                       recursive=True):
                self.executor.execute(
                    'dpkg-sig -g "--pinentry-mode loopback --passphrase {}" '
                    '--sign builder {}'.format(self.sign_paraphrase, deb_file))
            self.executor.execute("apt-ftparchive packages . > Packages")
            self.executor.execute("gzip -c Packages > Packages.gz")
            self.executor.execute("apt-ftparchive release . > Release")
            self.executor.execute(
                'gpg --pinentry-mode loopback --passphrase {} '
                '--clearsign -o InRelease Release'.format(
                    self.sign_paraphrase))
            self.executor.execute(
                'gpg --pinentry-mode loopback --passphrase {} '
                '-abs -o Release.gpg Release'.format(self.sign_paraphrase))

        except OSError:
            # Ignoring
            self.logger.error("Failed to sign {}")

    def run_docker_container(self):
        """
        Start the docker container
        """
        self.cleanup()
        os.chdir(self.home_folder)
        self.logger.info(os.getcwd())
        # Copy public key in local
        shutil.copy(self.public_key, './public_key')

        try:
            client = docker.from_env()
            docker_image_tag = ':'.join([
                self.apt_config[self.DOCKER_NAME],
                str(self.DOCKER_IMAGE_VERSION)
            ])
            (_, build_log) = client.images.build(
                path=".",
                dockerfile=self.apt_config[self.DOCKER_FILE_PATH_KEY],
                tag=docker_image_tag,
                nocache=False)
            for line in build_log:
                self.logger.info(line)

            client.containers.run(image=docker_image_tag,
                                  name=self.apt_config[self.DOCKER_NAME],
                                  ports={"80/tcp": 8500},
                                  restart_policy={
                                      "Name": "on-failure",
                                      "MaximumRetryCount": 5
                                  },
                                  volumes={
                                      self.hosted_package_folder: {
                                          'bind':
                                          '/usr/local/apache2/htdocs/deb',
                                          'mode': 'rw'
                                      }
                                  },
                                  detach=True)

        except (docker.errors.APIError, docker.errors.NotFound) as e:
            self.logger.error(e)
            self.logger.error("Could not build container".format(
                self.apt_config[self.DOCKER_NAME]))

    def pre_install(self):
        """
        Install required apt-get packages
        """
        try:
            self.executor.execute("apt-get update -y && "
                                  "apt-get -y install apt-rdepends "
                                  "dpkg-dev dpkg-sig expect apt-utils")
        except (OSError, CommandExecutionFailedException):
            self.logger.error("Cannot install the required packages")

    def cleanup(self, delete_packages=False):
        """
        Removes package and shutdown docker container
        """
        os.chdir(self.home_folder)
        if delete_packages:
            shutil.rmtree(self.hosted_package_folder)

        try:
            client = docker.from_env()
            apt_get_container = client.containers.get(
                self.apt_config[self.DOCKER_NAME])

            apt_get_container.stop()
            apt_get_container.remove()
        except (docker.errors.APIError, docker.errors.NotFound):
            self.logger.error("{} container failed to remove".format(
                self.apt_config[self.DOCKER_NAME]))

    def status(self):
        try:
            client = docker.from_env()
            apt_get_container = client.containers.get(
                self.apt_config[self.DOCKER_NAME])

            if apt_get_container.status == "running":
                return True
        except (docker.errors.APIError, docker.errors.NotFound):
            self.logger.error("{} container not found".format(
                self.apt_config[self.DOCKER_NAME]))
            return False
        return False

    def install(self, parameters=None):
        self.execute(parameters=parameters)

    def uninstall(self, **kwargs):
        self.cleanup(delete_packages=True)

    def start(self, **kwargs):
        self.execute()

    def stop(self, **kwargs):
        self.cleanup(delete_packages=False)
class LdapSetup:
    """
    This class is used to setup the LDAP server as a docker container.
    """

    LDAP_SECTION = "ldap"
    LDAP_IMAGE = "ldap_image"
    LDAP_CONTAINER = "ldap_container"

    def __init__(self,
                 config_path=XprConfigParser.DEFAULT_CONFIG_PATH_SETUP_LOG):
        self.config = XprConfigParser(config_path)
        self.logger = XprLogger()
        self.ldapmanager = LdapManager()
        self.client = docker.from_env()

    def install(self):
        if not check_root():
            self.logger.error("Please run the program using sudo privileges")
            raise PermissionDeniedException(
                "Permission Denied. Run with admin rights.")

        self.setup_ldap()

    def setup_ldap(self):
        if not check_root():
            self.logger.error("Please run the program using sudo privileges")
            raise PermissionDeniedException(
                "Permission Denied. Run with admin rights.")

        try:
            self.pull_image()
            self.client.containers.run(
                self.config[self.LDAP_SECTION][self.LDAP_IMAGE],
                name=self.config[self.LDAP_SECTION][self.LDAP_CONTAINER],
                environment={
                    "LDAP_ORGANISATION": "abzooba",
                    "LDAP_DOMAIN": "abzooba.com",
                    "LDAP_ADMIN_PASSWORD": "******"
                },
                ports={
                    "389": "389",
                    "636": "636"
                },
                detach=True,
                restart_policy={
                    "Name": "on-failure",
                    "MaximumRetryCount": 5
                })
            self.logger.info("Ldap docker service successfully started")
        except docker.errors.ContainerError as err:
            self.logger.error(
                "The container exits with a non-zero exit code. \n{}".format(
                    str(err)))
            raise err
        except docker.errors.ImageNotFound as err:
            self.logger.error(
                "The specified image does not exist. \n{}".format(str(err)))
            raise err
        except docker.errors.APIError as err:
            self.logger.error("The server returns an error. \n{}".format(
                str(err)))
            raise err
        except KeyError as err:
            self.logger.error("Key not present. \n{}".format(str(err)))
            raise err
        return

    def pull_image(self):
        if not check_root():
            self.logger.error("Please run the program using sudo privileges")
            raise PermissionDeniedException(
                "Permission Denied. Run with admin rights.")

        try:
            self.client.images.pull(
                self.config[self.LDAP_SECTION][self.LDAP_IMAGE])
            self.logger.info("Successfully pulled LDAP docker image.")
        except docker.errors.APIError as err:
            self.logger.error("The server returns an error. \n{}".format(
                str(err)))
            raise err

    def insert_default_users(self):
        users = {
            "admin_user": {
                "uid": "xprdb_admin",
                "pwd": 'xprdb@Abz00ba'
            },
            "superuser": {
                "uid": "superuser1",
                "pwd": "xprdb@Abz00ba"
            },
            "admin1_user": {
                "uid": "admin1",
                "pwd": "admin1"
            }
        }

        self.logger.info("Creating default users in LDAP")

        for key in users:
            try:
                self.ldapmanager.add(users[key]["uid"], users[key]["pwd"])
                self.logger.info("Successfully added {} .".format(key))
            except ldap.INVALID_CREDENTIALS as e:
                self.logger.error("Error: {} in adding {}".format(str(e), key))
            except ldap.LDAPError as e:
                self.logger.error("Error: {} in adding {}".format(str(e), key))

        self.logger.info("Exiting insert_default_users")
class ControllerClient:
    CONTROLLER_SECTION = 'controller'
    SERVER_URL = 'server_url'
    CLIENT_PATH = 'client_path'
    JENKINS_SECTION = 'jenkins'
    JENKINS_HOST = 'master_host'
    relogin_response = {
        "outcome": "failure",
        "error_code": "106",
        "results": {}
    }

    API_JSON_OUTCOME = "outcome"
    API_JSON_RESULTS = "results"
    API_JSON_ERROR_CODE = "error_code"
    API_JSON_SUCCESS = "success"
    API_JSON_FAILURE = "failure"

    def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH):
        self.logger = XprLogger()
        self.config = XprConfigParser(config_path)
        self.path = os.path.join(
            os.path.expanduser('~'),
            self.config[self.CONTROLLER_SECTION][self.CLIENT_PATH])
        self.token_file = '{}.current'.format(self.path)
        self.server_path = self.config[self.CONTROLLER_SECTION][
            self.SERVER_URL]

    def sso_login(self):
        """ It performs Single Sign-On authentication for the client.
        It follows following steps
        1. Check if token exists
        2. If exists: Send to the server for validation
            2.1 If token is validated then login is successful
            2.2 If token is not validated, assume token does not exist and go
            to point 3
        3. If no token exists:
            3.1 Print the SSO authentication url for user to login
            3.2 Send request to server every few seconds to check if user
            signed in successful. Wait for 60 seconds. Throw error if not
            logged in
            3.3 When user logged in, fetch the token and save

        """
        self.logger.info('CLIENT : Entering SSO Login Method')

        # Check if token exists:
        try:
            token = self.get_token()
        except ControllerClientResponseException:
            self.logger.info("No Token found")
            token = None

        # If a token exists, try validating it with the server first
        if token:
            url = f"{self.server_path}/sso/token_login"
            self.logger.debug('CLIENT : Making post request to server')
            data = {"token": token}
            try:
                response = self.send_http_request(url=url,
                                                  header=data,
                                                  http_method=HTTPMethod.POST,
                                                  data=data)
                return response
            except ControllerClientResponseException as e:
                self.logger.info("Assuming logging request failed")
                self.logger.info(e.message)

        url = f"{self.server_path}/sso/get_authentication_url"
        self.logger.debug('CLIENT : Making post request to server')
        response = self.send_http_request(url=url, http_method=HTTPMethod.GET)
        return response
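
    # Illustrative end-to-end flow combining sso_login() and sso_validate();
    # the response key names below are assumptions, not confirmed API fields:
    #   client = ControllerClient()
    #   response = client.sso_login()
    #   if "authentication_url" in response:
    #       print("Open in a browser:", response["authentication_url"])
    #       client.sso_validate(response["validation_token"])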

    def sso_validate(self, validation_token):
        """
        Check whether SSO authentication is completed and successful
        Args:
            validation_token: sso validation token which is used to check if
                              a user has logged in or not.
        Returns:
        """
        # Poll the SSO server until login completes or the wait times out
        interval_second = 2
        wait_second = 60
        start_time = time.time()
        while time.time() - start_time < wait_second:
            self.logger.debug('CLIENT : Making post request to server')
            url = f"{self.server_path}/sso/validate"
            data = {"validation_token": validation_token}
            try:

                response = self.send_http_request(url=url,
                                                  http_method=HTTPMethod.POST,
                                                  data=data)
                self.logger.info("Token validated")
                self.save_token(response["token"])
                return {"message": "SSO Login Successfull"}
            except ControllerClientResponseException:
                time.sleep(interval_second)
        self.logger.info('CLIENT : Exiting SSO Login Method')
        raise ControllerClientResponseException("Session over without login",
                                                error_codes.server_error)

    def login(self, username, password):
        """Sends request to Controller server and
        get the status on login request"""
        self.logger.info('CLIENT : entering login method')
        if not os.path.isdir(self.path):
            os.makedirs(self.path, 0o755)
        if os.path.isfile(self.token_file):
            os.remove(self.token_file)

        if not username:
            self.logger.error('CLIENT : Empty username passed. Exiting.')
            raise ControllerClientResponseException("Username can't be empty",
                                                    error_codes.empty_uid)
        if not password:
            self.logger.error('CLIENT : Empty password passed. Exiting.')
            raise ControllerClientResponseException("Password can't be empty",
                                                    error_codes.empty_uid)

        url = f"{self.server_path}/auth"
        credentials = {"uid": username, "pwd": password}
        self.logger.debug('CLIENT : Making post request to server')
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          data=credentials)

        self.save_token(token=response['access_token'])
        if 'relogin' in response and response['relogin']:
            self.logger.debug('CLIENT : already logged in. Saving new token.')
            return {"message": f"You are already logged in"}
        elif 'relogin' in response and not response['relogin']:
            self.logger.info(
                'CLIENT : Login successful. Writing token to file.')
            return {"message": f"Welcome, {username}!"}
        return response

    def save_token(self, token):
        """Saves the token in the local file system for subsequent requests"""
        with open(self.token_file, 'w+') as token_file:
            token_file.write(token)
        self.logger.info('CLIENT : Token written to file. Exiting.')

    def get_token(self):
        """Reads the token saved in the local file system"""
        token = None
        try:
            with open(self.token_file, "r") as f:
                token = f.read()
        except FileNotFoundError:
            self.logger.error("No Token Found. Need to Relogin")
            raise ControllerClientResponseException(
                "No Session found. Login again", error_codes.expired_token)
        return token

    def logout(self):
        self.logger.info('CLIENT : entering logout method')
        url = f'{self.server_path}/auth'
        token = self.get_token()
        headers = {'token': token}
        self.logger.debug('CLIENT : Making delete request to server')
        self.send_http_request(url=url,
                               http_method=HTTPMethod.DELETE,
                               header=headers)
        os.remove(self.token_file)
        self.logger.info('CLIENT : Logout successful. Exiting.')
        return {"message": "Successfully logged out"}

    def get_clusters(self, argument):
        self.logger.info(f'CLIENT : entering get_clusters method '
                         f'with arguments {argument}')
        url = f'{self.server_path}/clusters'
        headers = {"token": self.get_token()}
        self.logger.debug('CLIENT : Making get request to server')
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.GET,
                                          header=headers,
                                          data=argument)
        self.logger.info('CLIENT : Get request successful. Exiting.')
        return response

    def deactivate_cluster(self, argument):

        self.logger.info('CLIENT : Entering deactivate_cluster method')
        if not argument:
            self.logger.error('CLIENT : No input arguments provided. Exiting.')
            raise ControllerClientResponseException(
                "Please provide input arguments",
                error_codes.incomplete_cluster_info)
        url = f'{self.server_path}/clusters'
        headers = {"token": self.get_token()}
        self.send_http_request(url=url,
                               http_method=HTTPMethod.DELETE,
                               header=headers,
                               data=argument)
        self.logger.info('CLIENT : Deactivation successful. Exiting.')
        return {"message": "Cluster deactivated."}

    def register_cluster(self, argument):
        self.logger.info('CLIENT : Entering register_cluster '
                         'with arguments {}'.format(argument))
        if not argument:
            self.logger.error('CLIENT : No input arguments provided. Exiting.')
            raise ControllerClientResponseException(
                "Please provide input arguments",
                error_codes.incomplete_cluster_info)
        url = f'{self.server_path}/clusters'
        headers = {"token": self.get_token()}
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header=headers,
                                          data=argument)
        self.logger.info('CLIENT : Cluster registration successful. Exiting.')
        return {
            "message":
            f"Cluster successfully registered with "
            f"ID {response}"
        }

    def register_user(self, user_json):
        url = f"{self.server_path}/users"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header={"token": self.get_token()},
                                          data=user_json)
        return response

    def get_users(self, filter_json):
        url = f"{self.server_path}/users"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.GET,
                                          header={"token": self.get_token()},
                                          data=filter_json)
        return response

    def modify_user(self, changes_json):
        url = f"{self.server_path}/users"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.PUT,
                                          header={"token": self.get_token()},
                                          data=changes_json)
        return response

    def update_password(self, password_json):
        url = f"{self.server_path}/user/pwd"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.PUT,
                                          header={"token": self.get_token()},
                                          data=password_json)
        return response

    def deactivate_user(self, uid_json):
        url = f"{self.server_path}/users"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.DELETE,
                                          header={"token": self.get_token()},
                                          data=uid_json)
        return response

    def register_node(self, node_json):
        url = f"{self.server_path}/nodes"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header={"token": self.get_token()},
                                          data=node_json)
        self.logger.debug(response)
        return response

    def get_nodes(self, filter_json):
        url = f"{self.server_path}/nodes"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.GET,
                                          header={"token": self.get_token()},
                                          data=filter_json)
        return response

    def provision_node(self, changes_json):
        url = f"{self.server_path}/nodes"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.PUT,
                                          header={"token": self.get_token()},
                                          data=changes_json)
        return response

    def deactivate_node(self, node_json):
        url = f"{self.server_path}/nodes"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.DELETE,
                                          header={"token": self.get_token()},
                                          data=node_json)
        return response

    def assign_node(self, assign_json):
        url = f"{self.server_path}/assign_node"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.PUT,
                                          header={"token": self.get_token()},
                                          data=assign_json)
        return response

    def check_for_declarative_json(self, project_json):
        """
        Checks if the provided declarative json exists and replaces that field
        with the contents of declarative json.
        Args:
            project_json: input file from user

        Returns: modified project_json

        """
        for pipeline in project_json['pipelines']:
            if not os.path.isfile(pipeline['declarative_json']):
                self.logger.error("declarative json not found")
                raise FileNotFoundException('Declarative JSON not found.')
            with open(pipeline['declarative_json'], 'r') as f:
                declarative_json_data = json.load(f)
                pipeline['declarative_json'] = declarative_json_data
        return project_json
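
    # Illustrative input shape (field names taken from the code above; other
    # pipeline fields are omitted):
    #   project_json = {
    #       "pipelines": [
    #           {"declarative_json": "/path/to/pipeline.json"}
    #       ]
    #   }
    # After this call each pipeline's "declarative_json" value holds the
    # parsed JSON content instead of the file path.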

    def create_project(self, project_json):
        if 'pipelines' in project_json:
            project_json = self.check_for_declarative_json(project_json)
        url = f"{self.server_path}/projects/manage"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header={"token": self.get_token()},
                                          data=project_json)
        return response

    def get_project(self, filter_json):
        url = f"{self.server_path}/projects/manage"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.GET,
                                          header={"token": self.get_token()},
                                          data=filter_json)
        return response

    def modify_project(self, changes_json):
        if 'pipelines' in changes_json:
            changes_json = self.check_for_declarative_json(changes_json)
        url = f"{self.server_path}/projects/manage"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.PUT,
                                          header={"token": self.get_token()},
                                          data=changes_json)
        return response

    def deactivate_project(self, project_json):
        url = f"{self.server_path}/projects/manage"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.DELETE,
                                          header={"token": self.get_token()},
                                          data=project_json)
        print("response is ", response)
        return response

    def build_project(self, argument):
        self.logger.info(f'CLIENT : Entering build_project '
                         f'with arguments {argument}')
        if not argument:
            self.logger.error('CLIENT : No input arguments provided. Exiting.')
            raise ControllerClientResponseException(
                "Please provide input arguments",
                error_codes.incomplete_cluster_info)
        url = f'{self.server_path}/projects/build'
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header={"token": self.get_token()},
                                          data=argument)
        self.logger.info('CLIENT : Project build successful. Exiting.')
        return {
            "message":
            "Project build successful!",
            "Build IDS":
            response,
            "Jenkins Pipeline":
            f"{self.config[self.JENKINS_SECTION][self.JENKINS_HOST]}"
            f"/blue/pipelines"
        }

    def get_build_version(self, argument):
        self.logger.info(f'CLIENT : entering get_build_version method '
                         f'with arguments {argument}')
        url = f'{self.server_path}/projects/build'
        self.logger.debug('CLIENT : Making get request to server')
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.GET,
                                          header={"token": self.get_token()},
                                          data=argument)
        return response

    def deploy_project(self, argument):
        self.logger.info(f'CLIENT : Entering deploy_project '
                         f'with arguments {argument}')
        if not argument:
            self.logger.error('CLIENT : No input arguments provided. Exiting.')

            raise ControllerClientResponseException(
                "Please provide input arguments",
                error_codes.incomplete_cluster_info)
        url = f'{self.server_path}/projects/deploy'
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header={"token": self.get_token()},
                                          data=argument)
        self.logger.info('CLIENT : Project deployed successfully. Exiting.')
        return {
            "message": "Project deployed successfully on the below IPs!",
            "Output": response
        }

    def undeploy_project(self, argument):
        self.logger.info(f'CLIENT : Entering undeploy_project '
                         f'with arguments {argument}')
        if not argument:
            self.logger.error('CLIENT : No input arguments provided. Exiting.')
            raise ControllerClientResponseException(
                "Please provide input arguments",
                error_codes.incomplete_cluster_info)

        url = f'{self.server_path}/projects/deploy'
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.DELETE,
                                          header={"token": self.get_token()},
                                          data=argument)
        self.logger.info('CLIENT : Project undeployed successfully. Exiting.')
        return {"message": "Project undeployed successfully!"}

    def update_xpresso(self):
        """
        Update xpresso project to the latest commit
        """
        # Send request to update server
        server_update_is_success = False
        url = f'{self.server_path}/update_xpresso'
        try:
            self.send_http_request(url, HTTPMethod.POST)
            server_update_is_success = True
        except ControllerClientResponseException as e:
            self.logger.error(e)

        # Update local
        package_manager = PackageManager()
        package_manager.run(package_to_install="UpdateLocalXpressoPackage",
                            execution_type=ExecutionType.INSTALL)
        response = {"client": "Updated"}
        if server_update_is_success:
            response["server"] = "Updated"
        else:
            response["server"] = "Update Failed"
        return response

    def fetch_version(self):
        """
        Fetches server version and client version, convert to a dict and
        returns.
        """
        url = f'{self.server_path}/version'
        json_response = self.send_http_request(url, HTTPMethod.GET)
        server_version = "None"
        if "version" in json_response:
            server_version = json_response["version"]
        client_version = get_version()
        return {
            "client_version": client_version,
            "server_version": server_version
        }

    def send_http_request(self,
                          url: str,
                          http_method: HTTPMethod,
                          data=None,
                          header: dict = None):
        request = HTTPRequest(method=http_method,
                              url=url,
                              headers=header,
                              data=data)
        handler = HTTPHandler()
        try:
            response = handler.send_request(request)
            json_response = response.get_data_as_json()
            if not json_response:
                raise ControllerClientResponseException(
                    "Request Failed", error_codes.server_error)
            elif (json_response[self.API_JSON_OUTCOME] == self.API_JSON_SUCCESS
                  and self.API_JSON_RESULTS in json_response):
                return json_response[self.API_JSON_RESULTS]
            elif (json_response[self.API_JSON_OUTCOME] == self.API_JSON_SUCCESS
                  and self.API_JSON_RESULTS not in json_response):
                return {}
            elif (self.API_JSON_RESULTS in json_response
                  and self.API_JSON_ERROR_CODE in json_response):
                raise ControllerClientResponseException(
                    json_response[self.API_JSON_RESULTS],
                    json_response[self.API_JSON_ERROR_CODE])
            elif self.API_JSON_ERROR_CODE in json_response:
                raise ControllerClientResponseException(
                    "Request Failed", json_response[self.API_JSON_ERROR_CODE])
            raise ControllerClientResponseException("Request Failed", -1)
        except (HTTPRequestFailedException, HTTPInvalidRequestException) as e:
            self.logger.error(str(e))
            raise ControllerClientResponseException("Server is not accessible",
                                                    error_codes.server_error)
        except JSONDecodeError as e:
            self.logger.error(str(e))
            raise ControllerClientResponseException(
                "Invalid response from server", error_codes.server_error)

    def create_repo(self, repo_json):
        """
        creates a repo on pachyderm cluster

        :param repo_json:
            information of repo i.e. name and description
        :return:
            returns operation status
        """
        url = f"{self.server_path}/repo"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.POST,
                                          header={"token": self.get_token()},
                                          data=repo_json)
        return response

    # def get_repo(self):
    #     """
    #
    #     :return:
    #     """
    #     url = f"{self.server_path}/repo"
    #     response = self.send_http_request(url=url,
    #                                       http_method=HTTPMethod.GET,
    #                                       header={"token": self.get_token()},
    #                                       data={})
    #     return response

    def create_branch(self, branch_json):
        """
        creates a branch in a repo

        :param branch_json:
            information of branch i.e. repo and branch names
        :return:
            operation status
        """
        url = f"{self.server_path}/repo"
        response = self.send_http_request(url=url,
                                          http_method=HTTPMethod.PUT,
                                          header={"token": self.get_token()},
                                          data=branch_json)
        return response

    def push_dataset(self, dataset_json):
        """
        pushes a dataset into pachyderm cluster

        :param dataset_json:
            information of dataset
        :return:
            operation status
        """
        url = f"{self.server_path}/dataset/manage"
        self.send_http_request(url=url,
                               http_method=HTTPMethod.PUT,
                               header={"token": self.get_token()},
                               data=dataset_json)
        manager = repo_manager.PachydermRepoManager()
        try:
            commit_id = manager.push_files(dataset_json)
            return {
                "message": f"Dataset push successful. commit id: {commit_id}"
            }
        except XprExceptions as err:
            return err.message

    def pull_dataset(self, dataset_json):
        """
        pulls a dataset from pachyderm cluster

        :param dataset_json:
            info of the dataset on pachyderm cluster
        :return:
            path of the dataset on user system
        """
        url = f"{self.server_path}/dataset/manage"
        self.send_http_request(url=url,
                               http_method=HTTPMethod.GET,
                               header={"token": self.get_token()},
                               data=dataset_json)
        manager = repo_manager.PachydermRepoManager()
        try:
            dataset_path = manager.manage_xprctl_dataset('pull', dataset_json)
            return {
                "message": f"Pull Successful, find the files at {dataset_path}"
            }
        except XprExceptions as err:
            return err.message

    def list_dataset(self, filter_json):
        """
        lists datasets saved on pachyderm cluster as per filter specs

        :param filter_json:
            info to filter required dataset
        :return:
            list of all the files and their props as per filter specs
        """
        url = f"{self.server_path}/dataset/list"
        self.send_http_request(url=url,
                               http_method=HTTPMethod.GET,
                               header={"token": self.get_token()},
                               data=filter_json)
        manager = repo_manager.PachydermRepoManager()
        try:
            dataset_list = manager.manage_xprctl_dataset('list', filter_json)
            return dataset_list
        except XprExceptions as err:
            return err.message
class XprEmailSender:

    EMAIL_SECTION = "email_notification"
    PROJECT_NAME = "Project Name"
    SMTP_HOST = "smtphost"
    SMTP_PORT = "smtpport"
    SENDER_MAIL = "sender_mail"
    SENDER_PASSWD = "sender_passwd"

    def __init__(self):
        self.xpr_config = XprConfigParser(
            config_file_path=XprConfigParser.DEFAULT_CONFIG_PATH)
        self.logger = XprLogger()

    def send_single_mail(self, receiver, message, subject):
        smtp_session = None
        try:
            smtp_session = smtplib.SMTP(
                self.xpr_config[self.EMAIL_SECTION][self.SMTP_HOST],
                self.xpr_config[self.EMAIL_SECTION][self.SMTP_PORT])
            smtp_session.starttls()

            smtp_session.login(
                self.xpr_config[self.EMAIL_SECTION][self.SENDER_MAIL],
                self.xpr_config[self.EMAIL_SECTION][self.SENDER_PASSWD])

            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                self.xpr_config[self.EMAIL_SECTION][self.SENDER_MAIL],
                receiver, subject, formatdate(), message)

            smtp_session.sendmail(
                self.xpr_config[self.EMAIL_SECTION][self.SENDER_MAIL],
                receiver, msg)
            self.logger.info("{}  Successfully sent to {}".format(
                msg, receiver))

        except smtplib.SMTPServerDisconnected as e:
            err = "server unexpectedly disconnects:{}".format(e)
            self.logger.error(err)
            raise EmailException(err)

        except smtplib.SMTPSenderRefused as e:
            err = "Sender address refused : {}".format(e)
            self.logger.error(err)
            raise EmailException(err)

        except smtplib.SMTPRecipientsRefused as e:
            err = "recipient {} addresses refused : {}".format(receiver, e)
            self.logger.error(err)
            raise EmailException(err)

        except smtplib.SMTPDataError as e:
            err = "The SMTP server refused to accept the message data: {}".format(
                e)
            self.logger.error(err)
            raise EmailException(err)

        except smtplib.SMTPConnectError as e:
            err = "Error connecting to server : {}".format(e)
            self.logger.error(err)
            raise EmailException(err)

        except smtplib.SMTPAuthenticationError as e:
            err = "Unable to authenticate : {}".format(e)
            self.logger.error(err)
            raise EmailException(err)

        except smtplib.SMTPException as e:
            err = "Error sending mail :{}".format(e)
            self.logger.error(err)
            raise EmailException(err)

        finally:
            if smtp_session is not None:
                smtp_session.quit()

    def send(self, receivers, message, subject):
        success = list()
        failure = list()
        for receiver in receivers:
            try:
                self.send_single_mail(receiver, message, subject)
                success.append(receiver)
            except EmailException:
                failure.append(receiver)
        self.logger.info('Successfully sent email to {}'.format(str(success)))
        self.logger.info('Unable to send email to {}'.format(str(failure)))
        return failure
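
    # Minimal usage sketch (SMTP settings are read from the
    # email_notification section of the config; the address is illustrative):
    #   sender = XprEmailSender()
    #   failed = sender.send(["user@example.com"], "Build finished",
    #                        "xpresso build status")
    #   if failed:
    #       pass  # retry or alert for the addresses left in `failed`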
class AbstractPackage:
    """
    Abstract base class for packages. Its single responsibility is to
    execute one package
    """
    def __init__(
            self,
            executor: CommandExecutor = None,
            config_path: XprConfigParser = XprConfigParser.DEFAULT_CONFIG_PATH
    ):
        self.executor = executor
        self.config_path = config_path
        self.logger = XprLogger()

    @abstractmethod
    def install(self, **kwargs):
        """
        Run installation scripts
        """

    @abstractmethod
    def uninstall(self, **kwargs):
        """
        Removes installed libraries
        """

    @abstractmethod
    def start(self, **kwargs):
        """
        Start the service/stop if required
        """

    @abstractmethod
    def stop(self, **kwargs):
        """
        Stop the service/stop if required
        """

    @abstractmethod
    def status(self, **kwargs):
        """
        Checks if the libraries are installed and running
        Returns:
             bool: True, if libraries are setup correctly
        """

    def execute_command(self, command):
        self.logger.info(f"Running command: {command}")
        try:
            return self.executor.execute(command)
        except CommandExecutionFailedException:
            self.logger.error("Command failed {}".format(command))
            raise PackageFailedException(
                "Package command failed: {}".format(command))

    def execute_command_with_output(self, command):
        self.logger.info(f"Running command: {command}")
        try:
            return self.executor.execute_with_output(command)
        except CommandExecutionFailedException:
            self.logger.error("Command failed {}".format(command))
            raise PackageFailedException(
                "Package command failed: {}".format(command))
class LdapManager:
    """
    Creates a class to perform LDAP operations i.e. authenticating user etc.
    """
    LDAP_SECTION = 'ldap'
    URL = 'ldap_url'

    def __init__(self):
        self.config = XprConfigParser(XprConfigParser.DEFAULT_CONFIG_PATH)
        self.logger = XprLogger()
        self.adminuser = "******"
        self.adminpassword = "******"

    def authenticate(self, username, password):
        """
        Authenticates user using LDAP server

        Args:
        username(str): unique username provided
        password(str):user account password

        Returns:
            bool : returns True if user authenticated successfully,
            else raises corresponding exception

        """
        self.logger.info("Authenticating using LDAP")
        ldap_server = self.config[self.LDAP_SECTION][self.URL]

        user_dn = f'cn={username},dc=abzooba,dc=com'
        connect = ldap.initialize(ldap_server)

        try:
            connect.bind_s(user_dn, password)
            self.logger.info(
                "User:{} Successfully Authenticated".format(username))
            return True
        finally:
            connect.unbind_s()

    def add(self, username, password):
        """
        Adds a new user

        Args:
        username(str): Name of the user account to be added
        password(str): Password specified for the account

        Returns:
            bool : returns True if user added successfully,
            else raises corresponding exception

        """
        ldap_server = self.config[self.LDAP_SECTION][self.URL]
        connect = ldap.initialize(ldap_server)

        user_dn = f'cn={self.adminuser},dc=abzooba,dc=com'

        add_dn = f'cn={username},dc=abzooba,dc=com'
        attrs = {}
        attrs['objectclass'] = [b'simpleSecurityObject', b'organizationalRole']
        attrs['cn'] = [str.encode(username)]
        attrs['userPassword'] = [str.encode(password)]
        attrs['description'] = [b'Xpresso User']

        try:
            connect.bind_s(user_dn, self.adminpassword)
            connect.add_s(add_dn, modlist.addModlist(attrs))
            self.logger.info("Successfully added user {}".format(username))
            return True
        except ldap.INVALID_CREDENTIALS as e:
            self.logger.error("Invalid credentials provided : {}".format(e))
            raise e
        except ldap.LDAPError as e:
            self.logger.error("Error : {}".format(e))
            raise e
        finally:
            connect.unbind_s()

    def update_password(self, username, old_password, new_password):
        """
        Updates an already existing user account password

        Args:
        username(str): Name of the user account
        old_password(str): Already existing password
        new_password(str): New user password

        Returns:
            bool : returns True if user password updated successfully,
            else raises corresponding exception

        """

        ldap_server = self.config[self.LDAP_SECTION][self.URL]
        connect = ldap.initialize(ldap_server)
        user_dn = f'cn={username},dc=abzooba,dc=com'
        try:
            connect.bind_s(user_dn, old_password)
            add_pass = [(ldap.MOD_REPLACE, 'userPassword',
                         [str.encode(new_password)])]
            connect.modify_s(user_dn, add_pass)
            self.logger.info(
                "Successfully updated password for {}".format(username))
            return True
        except ldap.LDAPError as e:
            self.logger.error("Error : {}".format(e))
        finally:
            connect.unbind_s()
        return False
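
    # Minimal usage sketch (credentials are illustrative):
    #   manager = LdapManager()
    #   manager.add("jdoe", "initial-password")
    #   manager.authenticate("jdoe", "initial-password")
    #   manager.update_password("jdoe", "initial-password", "new-password")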
class UserManager:

    CONTROLLER_SECTION = 'controller'
    TOKEN_EXPIRY = 'soft_expiry'
    LOGIN_EXPIRY = 'hard_expiry'
    AUTHENTICATION_TYPE = "authentication_type"

    def __init__(self, persistence_manager):
        self.logger = XprLogger()
        self.config = XprConfigParser()
        self.persistence_manager = persistence_manager
        self.ldapmanager = LdapManager()

    def register_user(self, user_json):
        """
        register a new user in the persistence

        checks if the user already exists and then adds to persistence

        Parameters:
            user_json [json]: json with node information

        Return:
            Success -> 'OK' [str] : returns 'OK' as response
            Failure -> [str] : returns appropriate failure response
        """
        self.logger.debug(f"Entered register_user with {user_json}")
        # create user object
        new_user = User(user_json)
        # run validations
        new_user.validate_mandatory_fields()
        new_user.validate_field_values()
        # valid inputs - exception would have been raised in case of missing /
        # invalid info

        # now, set other fields as required
        # Password should not be saved as plain text in database.
        # Encrypting the password before saving it to database
        self.logger.info("Registering a new user")
        user_pwd = user_json["pwd"]
        new_user.set('pwd', sha512_crypt.encrypt(user_json['pwd']))
        new_user.set('loginStatus', False)
        new_user.set('activationStatus', True)
        new_user.set('nodes', [])
        self.logger.info("adding user to the database")

        self.persistence_manager.insert("users", new_user.data, False)
        self.logger.info("user successfully added to the persistence")

        try:
            self.ldapmanager.add(user_json["uid"], user_pwd)
        except Exception as e:
            self.logger.error(
                "Unable to add user to ldap server: {}".format(str(e)))
            return XprResponse("failure", None, str(e))

        # NFS User directory changes
        self.logger.info("Setting up NFS for the user")
        nfs_manager = NFSUserManager(config=self.config)
        nfs_manager.setup_user_folder(user=user_json['uid'])
        self.logger.info("NFS set up")

        return XprResponse("success", None, None)

    def modify_user(self, filter_json, changes_json):
        """
            modify_user updates the user info in the persistence

            checks if user is available and then updates
            the info as per changes_json

            Parameters:
                filter_json: filter to find user
                changes_json: json with user changes info

            Return:
                Success -> 'OK' [str] : returns OK if modify_user succeeds
                Failure -> [str] : returns appropriate failure response
        """

        self.logger.debug(
            f"Modifying user information of {filter_json} to {changes_json}")
        # checks if the user is present in database
        self.logger.info("Checking if the user is present in the database")
        users = self.persistence_manager.find("users", filter_json)
        if not users:
            self.logger.error(f"user {filter_json} not found in the database")
            raise UserNotFoundException()

        # checks if the user password is also present in changes_json
        temp_user = User(changes_json)
        temp_user.validate_field_values()
        temp_user.validate_modifiable_fields()

        self.logger.info("updating the user information")
        self.persistence_manager.update("users", filter_json, changes_json)
        return XprResponse('success', '', {})

    def deactivate_user(self, uid):
        """
            Deactivates a user in the persistence

            Marks the user as deactivated in the database

            Parameters:
                uid [str] : uid of the user

            Return:
                returns appropriate output
        """
        uid_json = {"uid": uid}
        # marks the user as deactivated in the persistence

        del_users = self.persistence_manager.find("users", uid_json)
        if del_users and len(del_users) != 0:
            self.logger.info(f"deactivating the user {uid_json['uid']}")
            if 'activationStatus' in del_users[0] and \
                    del_users[0]['activationStatus']:
                self.persistence_manager.update("users", uid_json,
                                                {"activationStatus": False})
                self.logger.info(
                    f"user {uid_json['uid']} successfully deactivated")
                return XprResponse('success', '', {})
            else:
                raise DeactivatedUserException
        else:
            raise UserNotFoundException()

    def get_users(self, filter_json, apply_display_filter=True):
        """
            Calls the persistence with input filters to fetch the list of users.
            After fetching, the users list is filtered before sending
            as output in order to send relevant information only

            Parameters:
                filter_json [json] : json with filter key & value pairs

            Return:
                Success -> [list] : returns list of users
                Failure -> [str] : returns persistence failure response
        """
        self.logger.info("getting all the users in the persistence")
        self.logger.debug(f"filter_json is : {filter_json}")
        users = self.persistence_manager.find("users", filter_json)

        # filter user fields before sending the output
        if apply_display_filter:
            new_json = []
            for user_json in users:
                user = User(user_json)
                user.filter_display_fields()
                new_json.append(user.data)
            users = new_json
        # get users call retrieves whole user info from persistence
        # Filtering the data that needs to be shown as output
        self.logger.debug(f'Output of users sent: {users}')
        return users

    def update_password(self, password_json):
        """
        Updates user password

        Checks the password and updates the password on ldap and database

        :param password_json:
            contains the uid, old password & new password
        :return:
            raises exception in case of error
        """
        # uid is mandatory
        if "uid" not in password_json:
            self.logger.info("uid not provided for update password")
            raise IncompleteUserInfoException("User 'uid' not provided")
        uid_json = {"uid": password_json["uid"]}
        # fetches the user information
        users = self.persistence_manager.find("users", uid_json)
        if not users:
            self.logger.info("User not found for updating password")
            raise UserNotFoundException()
        # creates user object
        new_user = User(users[0])
        old_password_hash = users[0]["pwd"]
        old_password = password_json["old_pwd"]
        new_password = password_json["new_pwd"]
        # checks if the old password provided is same as the one saved in db
        if not sha512_crypt.verify(old_password, old_password_hash):
            raise InvalidPasswordException("Current password is incorrect")
        # Current and new password should not be same
        if old_password == new_password:
            raise InvalidPasswordException("Current and new password is same.")
        # checks if the password is valid and secure enough
        new_user.check_password(password_json["new_pwd"])
        # updates the password on ldap server
        self.ldapmanager.update_password(password_json["uid"], old_password,
                                         new_password)
        hashed_pwd = sha512_crypt.encrypt(new_password)
        update_json = {"pwd": hashed_pwd}
        self.persistence_manager.update("users", uid_json, update_json)
class XprClusters:
    """
    This class provides methods for Xpresso cluster management.
    """
    def __init__(self, persistence_manager):
        self.logger = XprLogger()
        self.persistence_manager = persistence_manager

    def get_clusters(self, cluster):
        """
        Retrieves info about specified cluster.
        Args:
            cluster: cluster name

        Returns: dictionary object with cluster info

        """
        self.logger.info('entering get_clusters method with'
                         ' input {}'.format(cluster))
        if cluster == {}:
            self.logger.debug('getting all clusters')
            clusters = self.persistence_manager.find('clusters', {})
            all_clusters = []
            for current_cluster in clusters:
                try:
                    temp = dict(
                        name=current_cluster['name'],
                        activationStatus=current_cluster['activationStatus'],
                        master_nodes=current_cluster['master_nodes'],
                        worker_nodes=current_cluster['worker_nodes'])

                    all_clusters.append(temp)
                except KeyError:
                    self.logger.error(
                        f"Invalid cluster format {current_cluster}")
            self.logger.info(
                'exiting get_clusters method with list of all clusters')
            return all_clusters
        else:
            self.logger.debug('getting specific cluster(s)')
            info = self.persistence_manager.find('clusters', cluster)
            if not info:
                self.logger.info('exiting get_clusters method with empty list')
                return []
            for item in info:
                if "_id" in item:
                    del item["_id"]
            self.logger.info(
                'exiting get_clusters method with required cluster(s)')
            return info

    def deactivate_cluster(self, cluster):
        """
        removes specified cluster from the database
        Args:
            cluster: cluster name

        Returns: count of items deleted

        """
        self.logger.info('entering deactivate_cluster method '
                         'with input {}'.format(cluster))
        if 'name' not in cluster:
            self.logger.error('Cluster name not provided.')
            raise IncompleteClusterInfoException
        cluster_name = cluster['name']
        self.logger.debug('Checking for already existing cluster.')
        check = self.persistence_manager.find('clusters',
                                              {"name": cluster_name})
        if not check or not check[0]['activationStatus']:
            self.logger.error('Cluster does not exist')
            raise ClusterNotFoundException
        self.persistence_manager.update('clusters', {"name": cluster['name']},
                                        {"activationStatus": False})
        self.logger.info('exiting deactivate_cluster method.')

        # let environment manager know that the cluster has been deactivated
        EnvManager().deactivate_cluster(cluster)
        return True

    def register_cluster(self, cluster):
        """
        registers a new cluster in database
        Args:
            cluster: cluster to be registered

        Returns: cluster's id

        """
        self.logger.info('entering register_cluster method '
                         'with input {}'.format(cluster))
        new_cluster = Cluster(cluster)
        new_cluster.validate_mandatory_fields()
        if not cluster['name']:
            self.logger.error('Cluster name cannot be blank. Exiting.')
            raise ClusterNameBlankException
        check = self.persistence_manager.find('clusters',
                                              {"name": cluster['name']})
        if check and not check[0]["activationStatus"]:
            self.persistence_manager.update('clusters',
                                            {"name": cluster['name']},
                                            {"activationStatus": True})
            return str(check[0]['_id'])
        if 'master_nodes' in cluster:
            new_cluster.set('master_nodes', cluster['master_nodes'])
        else:
            new_cluster.set('master_nodes', [])
        if 'worker_nodes' in cluster:
            new_cluster.set('worker_nodes', cluster['worker_nodes'])
        else:
            new_cluster.set('worker_nodes', [])
        new_cluster.set('activationStatus', True)
        try:
            inserted_id = self.persistence_manager.insert(
                'clusters', cluster, False)
            self.logger.info('exiting register_cluster method '
                             'with insert ID {}'.format(str(inserted_id)))
            return str(inserted_id)
        except UnsuccessfulOperationException:
            self.logger.error('Cluster already exists. Exiting.')
            raise ClusterAlreadyExistsException('cluster already exists')
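
    # Illustrative cluster document accepted by register_cluster (field names
    # taken from the code above; the node lists are placeholders):
    #   {"name": "dev-cluster",
    #    "master_nodes": [],
    #    "worker_nodes": []}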
class DeclarativePipelineBuilder:

    # all the pipeline reference variables will be stored in this array as
    # they are discovered by the code so that we can check for any faulty
    # reference made which is not present in the reference array
    reference_array = []

    def __init__(self, persistence_manager):
        self.kubeflow_utils = KubeflowUtils(persistence_manager)
        self.logger = XprLogger()
        self.executor = LocalShellExecutor()
        config_path = XprConfigParser.DEFAULT_CONFIG_PATH
        self.config = XprConfigParser(config_path)
        self.declarative_pipeline_folder = self.config[PROJECTS_SECTION][
            DECLARATIVE_PIPELINE_FOLDER]
        self.content = self.declarative_pipeline_folder_check()

    def declarative_pipeline_folder_check(self):
        """
        checks whether declarative pipeline folder is present
        Returns: contents of template

        """
        if not os.path.isdir(self.declarative_pipeline_folder):
            os.makedirs(self.declarative_pipeline_folder, permission_755)
        kubeflow_template = self.config[PROJECTS_SECTION][KUBEFLOW_TEMPLATE]
        try:
            with open(kubeflow_template, 'r') as f:
                template_content = f.read()
                return template_content
        except FileNotFoundError:
            self.logger.error('kubeflow template file not found')

    def prevalidate_declarative_json(self, pipeline_info):
        """
        Validates (with dummy data) if the pipeline yaml file is being created
        properly before adding pipeline as a part of project.
        Args:
            pipeline_info: declarative JSON file

        Returns: nothing

        """
        self.logger.info('entering prevalidate_declarative_json')
        temp_component_images = {}
        self.logger.info('creating dict with temporary component images')
        for component in pipeline_info['components']:
            self.validate_component_keys(component.keys())
            temp_component_images[
                component['xpresso_reference']] = "temp_image"
        self.generate_pipeline_file(pipeline_info, temp_component_images, 0)
        self.logger.info('Pipeline validated.')
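
For reference, a minimal declarative JSON accepted by the validators above
might look like the sketch below. The top-level fields mirror
validate_declarative_json and the component keys mirror
validate_component_keys; every concrete value (names, paths, and the "$$"
markers, which assume "$$" is the variable_indicator used by this module)
is illustrative:

pipeline_info = {
    "name": "sample_pipeline",
    "description": "Two-step demo pipeline",
    "pvc_name": "pipeline-nfs-claim",
    "main_func_params": {"$$data_path$$": "/data/input"},
    "components": [
        {
            "name": "prepare",
            "xpresso_reference": "prepare_component",
            "description": "Prepares the dataset",
            "inputs": [],
            "input_values": {"$$data_path$$": "/data/input"},
            "implementation": {"container": {"image": ""}}
        }
    ]
}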

    def check_for_reference(self, value):
        """
        Checks if the provided value has any faulty reference.
        Args:
            value: value to be checked

        Returns: raises exception if reference is not found

        """
        self.logger.info(f'entering check_for_reference to '
                         f'validate {value}')
        if '.output' in value:
            reference = value.split('.')[0]
            self.check_for_reference(reference)
            if open_parenthesis in reference:
                # in case of typecasting
                reference = reference.split(open_parenthesis)[1]
            if reference not in self.reference_array:
                self.logger.error(f'Reference "{reference}" not found.')
                raise ReferenceNotFoundException(
                    f'Reference "{reference}" not '
                    f'found in declarative JSON')
        self.logger.info('Reference validated. Exiting.')
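
As a concrete illustration of the reference rules (assuming
open_parenthesis is "(" and builder is an already-constructed
DeclarativePipelineBuilder):

builder.reference_array.append("prepare")
# "int(prepare.output)" -> split on "." gives "int(prepare";
# stripping the typecast at "(" leaves "prepare", which is registered:
builder.check_for_reference("int(prepare.output)")  # passes
builder.check_for_reference("missing.output")  # raises ReferenceNotFoundException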

    def modify_for_function_parameters(self, func_params):
        """
        modifies a string (json key-value pair) to be used as a function's
        parameters
        Args:
            func_params: json key-value pair string (in xpresso defined format)

        Returns: modified string, fit for using as a function's parameters

        """
        self.logger.info('entering modify_for_function_parameters')
        param_list = []
        for key, value in func_params.items():
            modified_key = key.replace(variable_indicator, "")
            value = str(value)  # values may arrive as non-strings; normalise
            if variable_indicator not in value:
                if double_quote in value:
                    value = value.replace(double_quote, escape_quote)
                modified_value = f'"{value}"'
            else:
                modified_value = value.replace(variable_indicator, "")
            # check for any faulty reference
            self.check_for_reference(modified_value)
            param_list.append(f'{modified_key}={modified_value}')
            self.reference_array.append(modified_key)
        result = ', '.join(param_list)
        self.logger.info(f'exiting modify_for_function_parameters with '
                         f'output {result}')
        return result
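
A worked example of the transformation, assuming variable_indicator is
"$$" and that "prepare" was already registered as a component name; the
keys and values are illustrative:

func_params = {
    "$$epochs$$": "10",                # literal, so it gets quoted
    "$$data$$": "$$prepare.output$$",  # reference, so it is left bare
}
# modify_for_function_parameters(func_params) returns:
#   epochs="10", data=prepare.output
# and "epochs" and "data" are appended to reference_array.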

    def modify_for_function_variables(self, func_vars):
        """
        modifies a string (json key-value pair) to be used as a function's
        variables
        Args:
            func_vars: json key-value pair string (in xpresso defined format)

        Returns: modified string, fit for use as a function's variables

        """
        self.logger.info('entering modify_for_function_variables')
        result = """"""
        for key, value in func_vars.items():
            modified_key = key.replace(variable_indicator, "")
            if variable_indicator not in value:
                if double_quote in value:
                    value = value.replace(double_quote, escape_quote)
                modified_value = f'"{value}"'
            else:
                modified_value = value.replace(variable_indicator, "")
            # check for any faulty reference
            self.check_for_reference(modified_value)
            self.reference_array.append(modified_key)
            result = result + f'{modified_key} = {modified_value}\n\t'
        self.logger.info(f'exiting modify_for_function_variables with '
                         f'output {result}')
        return result

    def validate_declarative_json(self, pipeline_info):
        """
        validates the mandatory fields in the provided declarative json
        Args:
            pipeline_info: contents of the json file

        Returns: Raises exception in case of inconsistency

        """
        self.logger.info('entering validate_declarative_json method')
        if not pipeline_info:
            self.logger.error('Declarative JSON empty.')
            raise IncorrectDeclarativeJSONDefinitionException(
                'Declarative JSON empty.')
        pipeline_fields = [
            'name', 'description', 'pvc_name', 'components', 'main_func_params'
        ]
        for field in pipeline_fields:
            if field not in pipeline_info.keys():
                self.logger.error(f'Field "{field}" not present in '
                                  f'declarative JSON')
                raise IncorrectDeclarativeJSONDefinitionException(
                    f'Field "{field}" not present in declarative JSON')

    def validate_component_keys(self, component_keys):
        """
        Validates if the component has all default keys present
        Args:
            component_keys: keys present in the component

        Returns: nothing

        """
        default_keys = [
            'name', 'xpresso_reference', 'description', 'inputs',
            'input_values', 'implementation'
        ]
        for key in default_keys:
            if key not in component_keys:
                self.logger.error(f'Key "{key}" is missing from one or more '
                                  f'components in pipeline JSON')
                raise ComponentsSpecifiedIncorrectlyException(
                    f'Key "{key}" is missing from one or more components '
                    f'in pipeline JSON')

    def generate_pipeline_file(self, pipeline_info, component_images,
                               pipeline_deploy_id):
        """
        generates a python dsl pipeline file from the provided declarative
        json and executes it to produce an uploadable pipeline zip.
        Args:
            component_images: dict of pipeline components and their
            corresponding docker images
            pipeline_info: declarative json file containing info
            about pipeline
            pipeline_deploy_id : deploy version id of pipeline fetched from
                                database
        Returns: path to the generated pipeline zip file

        """
        self.logger.info('entering generate_python_file method')
        self.logger.debug('reading declarative json')

        # check for mandatory fields
        self.validate_declarative_json(pipeline_info)

        # generate code to load pipeline component objects
        components_info = self.generate_pipeline_component_objects(
            pipeline_info)

        # populate the pipeline name and description
        self.populate_name_and_description(pipeline_info)

        # populate main function's parameters
        self.populate_main_func_parameters(pipeline_info)

        # populate main function's variables, if any
        self.populate_main_func_variables(pipeline_info)

        # populate container op, if present
        self.populate_container_op(pipeline_info)

        # generate and populate component definitions with inputs
        self.populate_component_definitions(pipeline_info, components_info)

        # update pipeline yaml location
        pipeline_yaml_location = self.update_pipeline_yaml_location(
            pipeline_deploy_id, pipeline_info)

        # finally, populate and generate the python file
        self.generate_pipeline_python_file(pipeline_deploy_id, pipeline_info)

        # create yaml file for the generated python file to read components from
        self.create_pipeline_yaml(component_images, pipeline_info,
                                  pipeline_yaml_location)

        # run the generated python file to generate the zip file
        self.logger.debug('running generated python file')
        pipeline_file = f'{self.declarative_pipeline_folder}' \
            f'/{pipeline_info["name"]}--declarative_pipeline' \
            f'_{pipeline_deploy_id}.py'
        run_pipeline_python = f'python {pipeline_file}'
        status = self.executor.execute(run_pipeline_python)
        if status:
            raise IncorrectDeclarativeJSONDefinitionException(
                "Failed to run pipeline dsl file. "
                "Please re-check the declarative JSON file.")
        pipeline_zip = f'{pipeline_file}.zip'
        return pipeline_zip
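
A minimal end-to-end sketch of driving this builder; the persistence
manager, file path, image tag and deploy id are all assumptions:

import json

builder = DeclarativePipelineBuilder(persistence_manager)
with open("declarative_pipeline.json") as f:
    pipeline_info = json.load(f)

builder.prevalidate_declarative_json(pipeline_info)  # dry run, dummy images
component_images = {"prepare_component": "registry.local/prepare:42"}
pipeline_zip = builder.generate_pipeline_file(pipeline_info,
                                              component_images,
                                              pipeline_deploy_id=7)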

    def create_pipeline_yaml(self, component_images, pipeline_info,
                             pipeline_yaml_location):
        """
        creates yaml file for dsl code to read components from
        Args:
            component_images: dict of pipeline components and their
            corresponding docker images
            pipeline_info: pipeline info from declarative json
            pipeline_yaml_location: location where the file is to be generated

        Returns: nothing

        """
        self.logger.debug('creating yaml for generated python file')
        temp_pipeline = deepcopy(pipeline_info)
        modified_components = temp_pipeline['components']
        for component in modified_components:
            component['implementation']['container']['image'] \
                = component_images[component['xpresso_reference']]
            del component['xpresso_reference']
            del component['input_values']
        data_to_insert = {"components": modified_components}
        with open(pipeline_yaml_location, 'w+') as f:
            f.write(yaml.dump(data_to_insert))

    def generate_pipeline_python_file(self, pipeline_deploy_id, pipeline_info):
        """
        generates pipeline python file
        Args:
            pipeline_deploy_id: deploy version id of pipeline fetched from
                                database
            pipeline_info: pipeline info from declarative json

        Returns: nothing

        """
        self.logger.debug('generating python file')
        with open(
                f'{self.declarative_pipeline_folder}/{pipeline_info["name"]}'
                f'--declarative_pipeline_{pipeline_deploy_id}.py', 'w+') as f:
            f.write(self.content)

    def update_pipeline_yaml_location(self, pipeline_deploy_id, pipeline_info):
        """
        updates location where pipeline yaml will be generated
        Args:
            pipeline_deploy_id: deploy version id of pipeline fetched from
                                database
            pipeline_info: pipeline info from declarative json

        Returns: yaml location

        """
        pipeline_yaml_location = f"{self.declarative_pipeline_folder}" \
            f"/{pipeline_info['name']}--pipeline_components_file_" \
            f"{pipeline_deploy_id}.yaml"
        self.content = self.content.replace('%pipeline_yaml_location%',
                                            f"'{pipeline_yaml_location}'")
        return pipeline_yaml_location

    def populate_container_op(self, pipeline_info):
        """
        populates container op
        Args:
            pipeline_info: pipeline info from declarative json

        Returns: nothing

        """
        if 'container_op' not in pipeline_info.keys():
            self.logger.debug('container op not present')
            self.content = self.content.replace('%container_op%', '')
        else:
            self.logger.debug('populating container op')
            checkout = f"""\t{str(pipeline_info['container_op'][
                                      '$$name$$'])} = dsl.ContainerOp({self.modify_for_function_parameters(
                pipeline_info['container_op'])})"""
            if 'after_dependencies' in pipeline_info and \
                    'checkout' in pipeline_info['after_dependencies']:
                checkout = checkout + f"""\n\n\tcheckout.after({
                pipeline_info['after_dependencies']['checkout']})"""
            self.reference_array.append('checkout')
            self.content = self.content.replace('%container_op%', checkout)

    def populate_main_func_variables(self, pipeline_info):
        """
        populates main function variables
        Args:
            pipeline_info: pipeline info from declarative json

        Returns: nothing

        """
        if 'main_func_variables' in pipeline_info.keys():
            self.logger.debug("populating main function's variables")
            main_variables = "\t" + self.modify_for_function_variables(
                pipeline_info['main_func_variables'])
            self.content = self.content.replace('%main_function_variables%',
                                                main_variables)
        else:
            self.logger.debug('No variables found for main function')
            self.content = self.content.replace('%main_function_variables%',
                                                '')

    def generate_pipeline_component_objects(self, pipeline_info):
        """
        generates code to load pipeline component objects
        Args:
            pipeline_info: pipeline info from declarative json

        Returns: components info

        """
        self.logger.info('generating code to load pipeline component objects')
        pipeline_comps = """"""
        components_info = pipeline_info['components']
        self.reference_array.extend([comp['name'] for comp in components_info])
        for index, component in enumerate(components_info):
            self.validate_component_keys(component.keys())
            pipeline_comps = pipeline_comps + f"{component['name']}_ = " \
                f"components.load_component_from_text(str(" \
                f"component_info[{index}]))\n"
        self.content = self.content.replace('%load_components%',
                                            pipeline_comps)
        return components_info
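
For two components named "prepare" and "train", the text substituted for
%load_components% would be (reconstructed from the f-string above):

prepare_ = components.load_component_from_text(str(component_info[0]))
train_ = components.load_component_from_text(str(component_info[1]))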

    def populate_name_and_description(self, pipeline_info):
        """
        populates the pipeline name and description
        Args:
            pipeline_info: pipeline info from declarative json

        Returns: nothing

        """
        self.logger.debug('populating the pipeline name and description')
        self.content = self.content.replace("%pipeline_name%",
                                            f"'{pipeline_info['name']}'")
        self.content = self.content.replace(
            '%pipeline_description%', f"'{pipeline_info['description']}'")

    def populate_main_func_parameters(self, pipeline_info):
        """
        populates main function parameters
        Args:
            pipeline_info: pipeline info from declarative json

        Returns: nothing

        """
        self.logger.debug("populate main function's parameters")
        main_params = self.modify_for_function_parameters(
            pipeline_info['main_func_params'])
        self.content = self.content.replace('%main_function_params%',
                                            main_params)

    def populate_component_definitions(self, pipeline_info, components_info):
        """
        populates component definitions
        Args:
            pipeline_info: pipeline info from declarative json
            components_info: components info in declarative json

        Returns: nothing

        """
        self.logger.debug('populating component definitions with inputs')
        component_definitions = """"""
        for index, component in enumerate(components_info):
            if index == 0:
                add_pvc = \
                    f"add_volume(k8s_client.V1Volume(name='pipeline-nfs', " \
                    f"persistent_volume_claim=k8s_client." \
                    f"V1PersistentVolumeClaimVolumeSource(claim_name=" \
                    f"'{pipeline_info['pvc_name']}'))).add_volume_mount(" \
                    f"k8s_client.V1VolumeMount(" \
                    f"mount_path='/data', name='pipeline-nfs'))"
            else:
                add_pvc = "add_volume_mount(k8s_client.V1VolumeMount(" \
                          "mount_path='/data', name='pipeline-nfs'))"
            component_definitions = \
                component_definitions + \
                f"\t{component['name']} = {component['name']}_(" \
                f"{self.modify_for_function_parameters(component['input_values'])}).{add_pvc}\n\n"

            if 'after_dependencies' in pipeline_info.keys():
                if component['name'] in pipeline_info[
                        'after_dependencies'].keys():
                    component_definitions = \
                        component_definitions + \
                        f"\t{component['name']}.after({pipeline_info['after_dependencies'][component['name']]})\n\n"
        self.content = self.content.replace('%component_definitions%',
                                            component_definitions)
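
For the same two components, the generated definitions would look roughly
like the following (wrapped here for readability; input values and the
claim name are illustrative). Only the first component attaches the PVC
volume; later components only mount it:

prepare = prepare_(data_path="/data/input") \
    .add_volume(k8s_client.V1Volume(
        name='pipeline-nfs',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name='pipeline-nfs-claim'))) \
    .add_volume_mount(k8s_client.V1VolumeMount(
        mount_path='/data', name='pipeline-nfs'))

train = train_(model=prepare.output).add_volume_mount(
    k8s_client.V1VolumeMount(mount_path='/data', name='pipeline-nfs'))
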
Example #28
class xpruser():
    config_path = XprConfigParser.DEFAULT_CONFIG_PATH

    CONTROLLER_SECTION = 'controller'
    TOKEN_EXPIRY = 'soft_expiry'
    LOGIN_EXPIRY = 'hard_expiry'
    MONGO_SECTION = 'mongodb'
    URL = 'mongo_url'
    DB = 'database'
    UID = 'mongo_uid'
    PWD = 'mongo_pwd'
    W = 'w'

    def __init__(self):
        self.config = XprConfigParser(self.config_path)
        self.db_utils = Utils(url=self.config[self.MONGO_SECTION][self.URL],
                              db=self.config[self.MONGO_SECTION][self.DB],
                              uid=self.config[self.MONGO_SECTION][self.UID],
                              pwd=self.config[self.MONGO_SECTION][self.PWD],
                              w=self.config[self.MONGO_SECTION][self.W])
        self.logger = XprLogger()

    def registeruser(self, user_json):
        """
        register a new user in the db

        checks if the user already exists and then adds to db

        Parameters:
            user_json [json]: json with user information

        Return:
            Success -> 'OK' [str] : returns 'OK' as response
            Failure -> [str] : returns appropriate failure response
        """
        self.logger.debug(f"user info provided is {user_json}")
        info_check = userinfocheck(user_json)
        # info_check verifies that user_json carries sufficient info
        if info_check == -1:
            errcode = error_codes.incomplete_user_information
            self.logger.error("Insufficient information to create a new user")
            return xprresponse('failure', errcode, {})
        elif info_check == 0:
            errcode = error_codes.incorrect_primaryRole
            self.logger.error("Incorrect primaryRole has been provided")
            return xprresponse('failure', errcode, {})

        # Password should not be saved as plain text in the db.
        # Hashing the password before saving it to the db
        password = sha512_crypt.hash(user_json['pwd'])
        user_json['pwd'] = password
        # checks if the user is already present in the db
        self.logger.info("Registering a new user")
        uid_json = {'uid': user_json['uid']}
        self.logger.info("Checking the db if user is already present")
        user = self.db_utils.find("users", uid_json)
        if len(user) != 0:
            errcode = error_codes.user_exists
            return xprresponse('failure', errcode, {})

        user_json['loginStatus'] = False
        user_json['activationStatus'] = True
        user_json['nodes'] = []
        self.logger.info("adding user to the db")
        add_user = self.db_utils.insert("users", user_json, False)
        if add_user == -1:
            errcode = error_codes.username_already_exists
            self.logger.error("username already exists in the db")
            return xprresponse('failure', errcode, {})

        self.logger.info("user successfully added to the db")

        # NFS User directory changes
        nfs_manager = NFSUserManager(config=self.config)
        nfs_manager.setup_user_folder(user=user_json['uid'])
        return xprresponse('success', '', {})
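
A usage sketch for registering a user; the field set mirrors the default
users created by XprDbSetup below, and the uid, names, password and role
are illustrative:

user_json = {
    "uid": "jdoe",
    "firstName": "Jane",
    "lastName": "Doe",
    "pwd": "s3cret-pwd",        # hashed by registeruser before storage
    "email": "jdoe@example.com",
    "primaryRole": "Dev",       # must satisfy the userinfocheck role check
}
response = xpruser().registeruser(user_json)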

    def modifyuser(self, token, changesjson):
        """
            modify_user updates the user info in the db

            checks if user is available and then updates
            the info as per changesjson

            Parameters:
                uid [str]: uid of the user
                changesjson [json] : json with user changes info

            Return:
                Success -> 'OK' [str] : returns OK if provision_node succeeds
                Failure -> [str] : returns appropriate failure response
        """
        check = modify_user_check(changesjson)
        if check != 200:
            return xprresponse('failure', check, {})

        uidjson = {"uid": changesjson['uid']}
        self.logger.info(f"Modifying user information of {uidjson}")
        self.logger.debug(f"Info provided to be modified is {changesjson}")
        # checks if the user is present in db
        self.logger.info("Checking if the user is present in the db")
        user = self.db_utils.find("users", uidjson)
        if len(user) == 0:
            errcode = error_codes.user_not_found
            self.logger.error(f"user {uidjson['uid']} not found in the db")
            return xprresponse('failure', errcode, {})

        self.logger.info("updating the user information")
        self.db_utils.update("users", uidjson, changesjson)
        return xprresponse('success', '', {})

    def deactivateuser(self, uid):
        """
            Deactivates a user in the db

            Marks the user's activationStatus as False instead of
            removing the record from the database

            Parameters:
                uid [str] : uid of the user

            Return:
                returns appropriate output
        """
        uidjson = {"uid": uid}
        # fetches the user from the db before deactivating

        deluser = self.db_utils.find("users", uidjson)
        if len(deluser) != 0:
            self.logger.info(f"deactivating the user {uidjson['uid']}")
            if 'activationStatus' in deluser[0] and \
                deluser[0]['activationStatus']:
                self.db_utils.update("users", uidjson,
                                     {"activationStatus": False})
                self.logger.info(f"user {uidjson['uid']} successfully deleted")
                return xprresponse('success', '', {})
            else:
                errcode = error_codes.user_already_deactivated
                return xprresponse('failure', errcode, {})
        else:
            errcode = error_codes.user_not_found
            self.logger.info("user not found")
            return xprresponse('failure', errcode, {})

    def getusers(self, filterjson):
        """
            Calls the db with input filters to fetch the list of users.
            After fetching, the users list is filtered before sending
            as output in order to send relevant information only

            Parameters:
                filterjson [json] : json with filter key & value pairs

            Return:
                Success -> [list] : returns list of users
                Failure -> [str] : returns db failure response
        """
        self.logger.info("getting all the users in the db")
        self.logger.debug(f"filterjson is : {filterjson}")
        users = self.db_utils.find("users", filterjson)
        # get users call retrieves whole user info from db
        # Filtering the data that needs to be shown as output
        self.logger.info("filtering the users before sending output")
        users = filteruseroutput(users)
        self.logger.debug(f'Output of users sent: {users}')
        return xprresponse('success', '', users)
Example #29
class XprDbSetup:
    """
        Class that provides tools to set up MongoDB on a node
    """

    def __init__(self, executor=None):
        self.executor = executor or LocalShellExecutor()
        self.logger = XprLogger()
        self.service_path = '/lib/systemd/system/mongod.service'

    def install_mongo(self):
        """
        installs mongodb on the system
        """
        self.logger.info('entering install_mongo method')
        if not linux_utils.check_root():
            self.logger.fatal("Please run this as root")
        import_key = 'sudo apt-key adv --keyserver ' \
                     'hkp://keyserver.ubuntu.com:80 --recv ' \
                     '9DA31620334BD75D9DCB49F368818C72E52529D4'
        self.executor.execute(import_key)
        create_list = 'echo "deb [ arch=amd64 ] https://repo.mongodb.org/' \
                      'apt/ubuntu bionic/mongodb-org/4.0 multiverse" | ' \
                      'sudo tee /etc/apt/sources.list.d/mongodb-org-4.0.list'
        self.executor.execute(create_list)
        reload_packages = 'sudo apt-get update'
        self.executor.execute(reload_packages)
        self.logger.debug('installing mongo')
        install_mongo = 'sudo apt-get install -y mongodb-org'
        self.executor.execute(install_mongo)
        hold = """echo "mongodb-org hold" | sudo dpkg --set-selections
                  echo "mongodb-org-server hold" | sudo dpkg --set-selections
                  echo "mongodb-org-shell hold" | sudo dpkg --set-selections
                  echo "mongodb-org-mongos hold" | sudo dpkg --set-selections
                  echo "mongodb-org-tools hold" | sudo dpkg --set-selections"""
        self.executor.execute(hold)
        self.logger.info('exiting install_mongo')

    def initial_setup(self, db):
        """
        sets up the initial users and collections in the db
        :param db: database against which the setup is to be done
        :return: nothing
        """
        self.logger.info('entering initial_setup method')
        # initiate users collection
        users = db.users
        self.insert_default_users(users)
        db.users.create_index([('uid', ASCENDING)], unique=True)
        self.logger.debug('created index for users collection')

        # initiate nodes collection
        nodes = db.nodes
        self.logger.debug('setting up initial node')
        initial_node = {
            "name": "initial_node",
            "address": ""
        }
        nodes.insert_one(initial_node)
        nodes.create_index([('address', ASCENDING)], unique=True)
        self.logger.debug('created index for nodes collection')
        nodes.delete_one({"name": "initial_node"})

        # initiate clusters collection
        clusters = db.clusters
        self.logger.debug('setting up initial cluster')
        initial_cluster = {
            "name": "initial_cluster",
            "activationStatus": True,
            "master_nodes": [],
            "worker_nodes": []
        }
        clusters.insert_one(initial_cluster)
        clusters.create_index([('name', ASCENDING)], unique=True)
        self.logger.debug('created index for clusters collection')
        clusters.delete_one({"name": "initial_cluster"})

        # initiate projects collection
        projects = db.projects
        self.logger.debug('setting up initial project')
        initial_project = {
            "name": "initial_project",
            "projectDescription": "Initiates the collection",
            "owner": {},
            "developers": [],
            "components": []
        }
        projects.insert_one(initial_project)
        projects.create_index([('name', ASCENDING)], unique=True)
        self.logger.debug('created index for projects collection')
        projects.delete_one({"name": "initial_project"})

        # create xprdb_admin user in mongo
        self.logger.debug('creating xprdb user in mongo')
        db.command("createUser", "xprdb_admin", pwd="xprdb@Abz00ba",
                   roles=[{"role": "root", "db": "admin"}])
        self.logger.info('exiting initial_setup method')

    def insert_default_users(self, users):
        """
        inserts the default admin and superuser accounts
        :param users: users collection
        """
        self.logger.debug('setting up default users')
        admin_user = {
            "uid": "xprdb_admin",
            "firstName": "Xpresso",
            "lastName": "Admin",
            "pwd": sha512_crypt.hash('xprdb@Abz00ba'),
            "email": "*****@*****.**",
            "primaryRole": "Admin",
            "activationStatus": True,
            "loginStatus": False
        }
        users.insert_one(admin_user)
        superuser = {
            "uid": "superuser1",
            "firstName": "superuser1",
            "lastName": "superuser1",
            "pwd": sha512_crypt.hash('superuser1'),
            "email": "*****@*****.**",
            "primaryRole": "Su",
            "activationStatus": True,
            "loginStatus": False
        }
        users.insert_one(superuser)
        admin1_user = {
            "uid": "admin1",
            "firstName": "admin1",
            "lastName": "admin1",
            "pwd": sha512_crypt.hash('admin1'),
            "email": "*****@*****.**",
            "primaryRole": "Admin",
            "activationStatus": True,
            "loginStatus": False
        }
        users.insert_one(admin1_user)

    def enable_replication(self):
        """
        installs replica set for the database
        :return: nothing
        """
        self.logger.info('entering enable_replication method')
        path = '/srv/mongodb/rs0-0'
        linux_utils.create_directory(path, 0o777)
        self.logger.debug('created directory for replica set')
        ip = linux_utils.get_ip_address()
        start = 'mongod --replSet rs0 --port 27017 --bind_ip localhost,' \
                '{} --dbpath /srv/mongodb/rs0-0 --fork ' \
                '--logpath /var/log/mongodb/mongod.log'.format(ip)
        self.executor.execute(start)
        self.logger.debug('mongo daemon started')
        client = MongoClient('localhost', replicaset='rs0')
        db = client.xprdb
        client.admin.command("replSetInitiate")
        self.logger.debug('Replica set initiated')
        time.sleep(5)
        self.initial_setup(db)
        # stop mongo to restart with auth
        stop_mongod = 'pgrep mongod | xargs kill'
        self.executor.execute(stop_mongod)
        self.logger.debug('stopping mongo daemon to restart with auth')
        time.sleep(10)
        restart = 'mongod --replSet rs0 --port 27017 --bind_ip localhost,{} ' \
                  '--dbpath /srv/mongodb/rs0-0 --auth --fork --logpath ' \
                  '/var/log/mongodb/mongod.log'.format(ip)
        config = configparser.ConfigParser()
        config.read(self.service_path)
        config['Service']['ExecStart'] = restart
        with open(self.service_path, 'w') as f:
            config.write(f)
        restart_mongod = 'systemctl restart mongod'
        self.executor.execute(restart_mongod)
        self.logger.debug('db setup complete, exiting enable_replication')
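
A sketch of driving the whole database setup; both calls must run as root
on the target node:

setup = XprDbSetup()
setup.install_mongo()        # installs and pins the mongodb-org packages
setup.enable_replication()   # starts mongod as rs0 and seeds the collections
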
class JenkinsManager:
    """
    Jenkins wrapper class to connect to Jenkins server. It can execute any Job
    in the Jenkins, get the current status or help in creating the node.

    Args:
        cfg (XprConfigParser): config for Jenkins.
    """

    JENKINS_SECTION = "jenkins"
    JENKINS_MASTER = "master_host"
    JENKINS_USERNAME = "******"
    JENKINS_PASSWORD = "******"
    JENKINS_TEMPLATE_PIPELINE = "template_job"

    def __init__(self, cfg: XprConfigParser):
        self.config = cfg[self.JENKINS_SECTION]
        self.logger = XprLogger()

        console_handler = logging.StreamHandler()
        self.logger.addHandler(console_handler)
        self.logger.setLevel(logging.DEBUG)
        self.jenkins_server = self.init_connection()

    def init_connection(self) -> jenkins.Jenkins:
        """
        Initiates a connection to Jenkins and returns its objects
        """
        self.logger.info("Initiating connection to Jenkins {}".format(
            self.config[self.JENKINS_MASTER]))
        try:
            server = jenkins.Jenkins(
                self.config[self.JENKINS_MASTER],
                username=self.config[self.JENKINS_USERNAME],
                password=self.config[self.JENKINS_PASSWORD])
            self.logger.info("Jenkins connected successfully")
        except jenkins.JenkinsException:
            self.logger.error("Jenkins Connection Failed")
            raise JenkinsConnectionFailedException("Jenkins Connection Failed")
        return server
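
Constructing the manager is enough to establish the connection; a sketch,
assuming the jenkins section of the default config is filled in:

cfg = XprConfigParser(XprConfigParser.DEFAULT_CONFIG_PATH)
manager = JenkinsManager(cfg)  # raises JenkinsConnectionFailedException on failure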

    def create_pipeline(self, pipeline_name: str, bitbucket_repo: str):
        """
        It creates a pipeline project in the Jenkins master. It uses a template
        to create a pipeline.

        Args:
            bitbucket_repo(str): bitbucket repository from where build will
                                 work
            pipeline_name(str): Jenkins pipeline name identifier

        Returns:
            True on success, otherwise raises an exception

        Raises
            JenkinsConnectionFailedException, JenkinsInvalidInputException
        """
        self.logger.info("Creating a job named: {}".format(pipeline_name))
        try:
            self.jenkins_server.copy_job(
                self.config[self.JENKINS_TEMPLATE_PIPELINE], pipeline_name)
            pipeline_config = self.jenkins_server.get_job_config(pipeline_name)
            pipeline_root = ET.fromstring(pipeline_config)
            # Update Description
            self.update_config(
                pipeline_root=pipeline_root,
                field_path='description',
                update_value='Pipeline to perform build for {}'.format(
                    pipeline_name))
            # Update Bit bucket repository
            self.update_config(pipeline_root=pipeline_root,
                               field_path='definition/scm/userRemoteConfigs/'
                               'hudson.plugins.git.UserRemoteConfig/url',
                               update_value=bitbucket_repo)
            updated_pipeline_config = ET.tostring(pipeline_root).decode()
            self.jenkins_server.reconfig_job(pipeline_name,
                                             updated_pipeline_config)

            # We need to do this because the job does not become buildable
            # until it is disabled and re-enabled
            self.jenkins_server.disable_job(pipeline_name)
            self.jenkins_server.enable_job(pipeline_name)
        except jenkins.JenkinsException:
            self.logger.error("Job Creation Failed")
            raise JenkinsInvalidInputException("Job creation failed")
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError, requests.exceptions.HTTPError):
            self.logger.error("Jenkins Connection Issue")
            raise JenkinsConnectionFailedException("Jenkins Connection Issue")
        self.logger.info("Job Created: {}".format(pipeline_name))

    @staticmethod
    def update_config(pipeline_root: Element, field_path: str,
                      update_value: str):
        """
        Update Jenkins configuration
        Args:
            pipeline_root(Element): Root Tree of the pipeline
            field_path(str): Path to the field which needs to be updated
            update_value(str): value which needs to be updated in the field
        """
        field = pipeline_root.find(field_path)
        field.text = update_value

    def submit_build(self, pipeline_name: str, branch_name: str,
                     docker_image_name: str, component_name: str) -> str:
        """
        It executes a job in the Jenkins on the basis of the pipeline name.
        It will return a job id which can be used to fetch the status

        Args:
            docker_image_name(str): docker image name. Image name is tagged with
                                    the build number
            branch_name(str): branch to be built on
            pipeline_name(str): Jenkins pipeline name identifier

        Returns:
            str: Job ID of the pipeline

        Raises
            JenkinsConnectionFailedException, JenkinsInvalidInputException
        """
        self.logger.info("Executing Jenkins build for {} on branch {}".format(
            pipeline_name, branch_name))
        try:
            queue_number = self.jenkins_server.build_job(
                pipeline_name,
                parameters={
                    'git_branch': branch_name,
                    'docker_image_name': docker_image_name,
                    'component_name': component_name
                })

            # Check if build has submitted
            total_attempts = 10
            build_id = self.jenkins_server.get_job_info(
                pipeline_name)['nextBuildNumber']
            while total_attempts:
                total_attempts -= 1
                queue_info = self.jenkins_server.get_queue_item(queue_number)
                if "executable" not in queue_info:
                    self.logger.info("Waiting for build to get submitted")
                    time.sleep(2)
                    continue
                self.logger.debug(queue_info)
                build_id = queue_info["executable"]["number"]
                break
        except jenkins.JenkinsException:
            self.logger.error("Build ID is invalid")
            raise JenkinsInvalidInputException("Build ID is not valid")
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError, requests.exceptions.HTTPError):
            self.logger.error("Jenkins Connection Issue")
            raise JenkinsConnectionFailedException("Jenkins Host is invalid")
        self.logger.info(
            "Build submitted successfully with ID {}".format(build_id))
        return build_id

    def get_build(self, pipeline_name: str, job_id: int):
        """
        Get the status and details of the job.

        Args:
            pipeline_name(str): Jenkins pipeline name identifier
            job_id(int): Jenkins Job identifier

        Returns:
            dict: details of the jobs.

        Raises
            JenkinsConnectionFailedException, JenkinsInvalidInputException
        """
        self.logger.info("Getting a build detail for {}:{}".format(
            pipeline_name, job_id))
        try:
            build_info = self.jenkins_server.get_build_info(
                pipeline_name, job_id)
        except jenkins.JenkinsException:
            self.logger.exception(
                "Pipeline name and build {}:{} is invalid".format(
                    pipeline_name, job_id))
            raise JenkinsInvalidInputException(
                "Pipeline name and build is invalid")
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError, requests.exceptions.HTTPError):
            self.logger.error("Jenkins Connection Issue")
            raise JenkinsConnectionFailedException("Jenkins Connection Issue")
        self.logger.info("Build info received and returned")
        return build_info
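
A sketch of submitting a build and polling its details; the pipeline,
branch, image and component names are illustrative:

build_id = manager.submit_build(pipeline_name="proj-pipeline",
                                branch_name="develop",
                                docker_image_name="registry.local/proj-api",
                                component_name="api")
details = manager.get_build("proj-pipeline", int(build_id))
print(details.get("result"))    # e.g. "SUCCESS" once the run finishes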

    def start_worker_node(self, worker_type: str):
        """
        TODO Starts a worker docker container on any available node to
        perform build and deploy

        Args:
            worker_type(str): Name of the worker type. It could be python, java
                              or gpu

        Returns:
            bool: True if worker node is started
        """
        pass

    def check_active_worker_node(self, worker_type: str):
        """
        TODO Check if worker node is active.

        Args:
            worker_type(str): Name of the worker type. It could be python, java
                              or gpu

        Returns:
            bool: True if worker node is active, False Otherwise
        """
        pass

    def delete_pipeline(self, pipeline_name: str):
        """
        Permanently delete the pipeline
        Args:
            pipeline_name: Jenkins pipeline identifier

        Raises
            JenkinsConnectionFailedException, JenkinsInvalidInputException
        """
        self.logger.info("Deleting Pipeline {}".format(pipeline_name))
        try:
            self.jenkins_server.delete_job(pipeline_name)
        except jenkins.JenkinsException:
            self.logger.exception(
                "Pipeline name {} is invalid".format(pipeline_name))
            raise JenkinsInvalidInputException("Pipeline name is invalid")
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError, requests.exceptions.HTTPError):
            self.logger.error("Jenkins Connection Issue")
            raise JenkinsConnectionFailedException("Jenkins Connection Issue")
        self.logger.info("Pipeline deleted {}".format(pipeline_name))