Example #1
    def get_cluster(self, project, env):
        """
        gets the name of the cluster associated with a specific project environment
        :param project: name of project
        :param env: name of environment
        :return: name of cluster
        """
        self.logger.info(
            "Entering get_cluster with parameters project %s, env %s" %
            (project, env))
        # get record from the database
        mongo_persistence_manager = MongoPersistenceManager(url=MONGOURL,
                                                            db=MONGODB,
                                                            uid=MONGOUID,
                                                            pwd=MONGOPWD)
        mongo_persistence_manager.connect()
        results = mongo_persistence_manager.find(ENVIRONMENTS_COLLECTION, {
            PROJECT_IDENTIFIER: project,
            ENVIRONMENT_IDENTIFIER: env
        })
        self.logger.info(
            "Got record for project environment from environments collection "
            + str(results))
        if len(results) == 0:
            return None
        else:
            return results[0]["cluster"]
Example #2
    def __init__(self,
                 config_path=XprConfigParser.DEFAULT_CONFIG_PATH,
                 executor=None):
        if not executor:
            executor = LocalShellExecutor()
        super().__init__(executor)
        self.config = XprConfigParser(config_path)
        self.persistence_manager = MongoPersistenceManager(
            url=self.config[self.MONGO_SECTION][self.URL],
            db=self.config[self.MONGO_SECTION][self.DB],
            uid=self.config[self.MONGO_SECTION][self.UID],
            pwd=self.config[self.MONGO_SECTION][self.PWD],
            w=self.config[self.MONGO_SECTION][self.W])
Example #3
def connect_to_db():
    """
        connects to persistence

        connects to persistence for persistence operations to validate owner and developer
        information

        Returns
        -------
            returns a pymongo persistence client
    """
    mongo_section = "mongodb"
    url = "mongo_url"
    db = "database"
    uid = "mongo_uid"
    pwd = "mongo_pwd"
    write = 'w'

    config_path = XprConfigParser.DEFAULT_CONFIG_PATH
    config = XprConfigParser(config_path)
    # TODO: add exception handling
    db_client = MongoPersistenceManager(url=config[mongo_section][url],
                                        db=config[mongo_section][db],
                                        uid=config[mongo_section][uid],
                                        pwd=config[mongo_section][pwd],
                                        w=config[mongo_section][write])
    return db_client
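
A brief usage sketch (not part of the original listing), assuming the MongoPersistenceManager API shown in the other examples on this page (connect() and find(collection, query)); the "users" collection name and the query are illustrative placeholders only.

db_client = connect_to_db()
db_client.connect()
# "users" and the query below are hypothetical; substitute the real collection/query
owners = db_client.find("users", {"role": "owner"})
print("Found %d owner record(s)" % len(owners))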
Example #4
    def deactivate_cluster(self, cluster):
        """
        Deactivates a cluster and re-allocates the environments on it to other clusters
        :param cluster: name of the deactivated cluster
        :return: None
        """

        # 1. get a list of environments allocated to the deactivated cluster
        self.logger.info("Deactivating cluster " + cluster)
        mongo_persistence_manager = MongoPersistenceManager(url=MONGOURL,
                                                            db=MONGODB,
                                                            uid=MONGOUID,
                                                            pwd=MONGOPWD)
        mongo_persistence_manager.connect()
        results = mongo_persistence_manager.find(ENVIRONMENTS_COLLECTION,
                                                 {CLUSTER_IDENTIFIER: cluster})

        self.logger.info(
            "Cluster has " + str(len(results)) +
            " environments currently allocated - re-allocating these")

        # 2. remove current allocations
        mongo_persistence_manager.delete(ENVIRONMENTS_COLLECTION,
                                         {CLUSTER_IDENTIFIER: cluster})

        # 3. re-allocate
        for record in results:
            self.logger.info("Reallocating env " + record["env"] +
                             " for project " + record["project"])
            envs = []
            envs.append(record["env"])
            self.allocate_env(record["project"], envs)
        self.logger.info("Re-allocation complete")
Example #5
    def remove_env(self, project, env=None):
        """
        Removes an environment from a project ("DEV" environment CANNOT be removed)

        :param project: name of project
        :param env: name of environment
        :return: None
        """
        self.logger.info(
            "Entering remove_env with parameters project %s, env %s" %
            (project, env))
        # remove record from the database
        mongo_persistence_manager = MongoPersistenceManager(url=MONGOURL,
                                                            db=MONGODB,
                                                            uid=MONGOUID,
                                                            pwd=MONGOPWD)
        mongo_persistence_manager.connect()
        removal_json = {PROJECT_IDENTIFIER: project}
        if env is not None:
            removal_json[ENVIRONMENT_IDENTIFIER] = env
        mongo_persistence_manager.delete(ENVIRONMENTS_COLLECTION, removal_json)
        self.logger.info(
            "Removed record for project environment from environments collection"
        )

        return
Example #6
    def add_env(self, project, env, cluster):
        """
        Adds a single environment-cluster mapping to the "environments" collection of the database
        :param project: name of project
        :param env: name of environment
        :param cluster: name of cluster
        :return: None
        """
        self.logger.info(
            "Entering add_env with parameters project %s, env %s, cluster %s" %
            (project, env, cluster))
        # add a record to the database
        mongo_persistence_manager = MongoPersistenceManager(url=MONGOURL,
                                                            db=MONGODB,
                                                            uid=MONGOUID,
                                                            pwd=MONGOPWD)
        mongo_persistence_manager.connect()
        mongo_persistence_manager.insert(
            ENVIRONMENTS_COLLECTION, {
                PROJECT_IDENTIFIER: project,
                ENVIRONMENT_IDENTIFIER: env,
                CLUSTER_IDENTIFIER: cluster
            }, False)
        self.logger.info(
            "Created record for project environment in environments collection"
        )

        return
Example #7
def create_persistence_object(config):
    """
    Creates a persistence manager object

    Args:
        config(XprConfigParser): object of XprConfigParser

    Returns:
        object of connected persistence manager
    """
    mongo_persistence_manager = MongoPersistenceManager(
        url=config[MONGO_SECTION][URL],
        db=config[MONGO_SECTION][DB],
        uid=config[MONGO_SECTION][UID],
        pwd=config[MONGO_SECTION][PWD],
        w=config[MONGO_SECTION][W])
    return mongo_persistence_manager
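
A short usage sketch, assuming the update(collection, query, document) call seen in Example #10 below; the "nodes" collection name, the address and the token value are placeholders.

config = XprConfigParser(XprConfigParser.DEFAULT_CONFIG_PATH)
persistence = create_persistence_object(config)
# "nodes", the query and the update document below are placeholders
persistence.update("nodes", {"address": "10.0.0.1"}, {"token": "<token>"})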
Example #8
    def initialize(self,
                   db_type=MONGO_SECTION,
                   db_url=DB_URL,
                   db_name=DB_NAME,
                   db_uid=DB_UID,
                   db_pwd=DB_PWD,
                   db_w=W):
        self.persistence_manager = MongoPersistenceManager(
            url=self.config[db_type][db_url],
            db=self.config[db_type][db_name],
            uid=self.config[db_type][db_uid],
            pwd=self.config[db_type][db_pwd],
            w=self.config[db_type][db_w])

        self.metrics_list = [
            UserMetrics, AbstractMetrics, ClusterMetrics, ProjectMetrics
        ]
Example #9
    def allocate_env(self, project, envs):
        """
        Allocates clusters to the environments required for a project
        :param project: name of project
        :param envs: array containing names of (valid) environments required, e.g., "DEV", "QA", etc.
        :return: None (exception if any environment is invalid, or if no cluster is available for allocation)
        """
        # 1. check if environments specified are valid - if not raise InvalidEnvironmentException
        # 2. get list of environments already allocated to project, and do a diff between the allocated list and envs
        # to get a list of new envs required (new_envs)
        #
        # 3. get list of clusters and count of environments allocated to each from database
        # 4. Sort cluster list (least-to-highest)
        # 5. for each item in new_envs
        #    allot item to cluster at top of list (i.e., cluster with least envs allotted)
        #    re-sort cluster list

        # 1. check if environments specified are valid - if not raise InvalidEnvironmentException
        self.logger.debug("Checking environments specified for validity")
        if not envs:
            return

        if not all(env in VALID_ENVS for env in envs):
            raise InvalidEnvironmentException()

        self.logger.debug("Environments specified are valid")

        # 2. get list of environments already allocated to project, and do a diff between the allocated list and envs
        # to get a list of new envs required (new_envs)
        self.logger.debug("Checking on new environments required")
        new_envs = []
        mongo_persistence_manager = MongoPersistenceManager(url=MONGOURL,
                                                            db=MONGODB,
                                                            uid=MONGOUID,
                                                            pwd=MONGOPWD)
        mongo_persistence_manager.connect()
        allocated_envs = mongo_persistence_manager.find(
            ENVIRONMENTS_COLLECTION, {PROJECT_IDENTIFIER: project})
        for env in envs:
            if not any(record[ENVIRONMENT_IDENTIFIER] == env
                       for record in allocated_envs):
                new_envs.append(env)

        self.logger.debug(
            "Checked on new environments required. New environments are " +
            str(new_envs))

        # 3. get list of clusters and count of environments allocated to each from database
        self.logger.debug(
            "Getting list of currently allocated environments from database")
        # get a list of currently available clusters
        clusters = mongo_persistence_manager.find(CLUSTERS_COLLECTION,
                                                  {"activationStatus": True})
        if len(clusters) == 0:
            raise NoClustersPresentException()

        # get a list of environments to calculate count of each cluster
        self.logger.debug(
            "Getting allocated environment count for eqach cluster")
        current_envs = mongo_persistence_manager.find(ENVIRONMENTS_COLLECTION,
                                                      {})
        self.logger.debug("Got environments from database: " +
                          str(len(current_envs)))
        env_count = {}
        for cluster in clusters:
            cluster_name = cluster["name"]
            env_count[cluster_name] = 0
            for record in current_envs:
                env_cluster_name = record[CLUSTER_IDENTIFIER]
                if cluster_name == env_cluster_name:
                    env_count[cluster_name] = env_count[cluster_name] + 1
        self.logger.debug("Got list of allocated environments: " +
                          str(env_count))

        # 4. Sort cluster list (least-to-highest)
        self.logger.debug("Sorting list of clusters by environments allocated")
        sorted_cluster_count = OrderedDict(
            sorted(env_count.items(), key=lambda t: t[1]))
        self.logger.debug("Sorted. List: " + str(sorted_cluster_count))

        # 5. for each item in new_envs
        #    allot item to cluster at top of list (i.e., cluster with least envs allotted)
        #    re-sort cluster list
        self.logger.debug("Allocating new environments to clusters")
        for new_env in new_envs:
            cluster_name = list(sorted_cluster_count.keys())[0]
            orig_count = sorted_cluster_count[cluster_name]
            self.add_env(project, new_env, cluster_name)
            sorted_cluster_count[cluster_name] = orig_count + 1
            sorted_cluster_count = OrderedDict(
                sorted(sorted_cluster_count.items(), key=lambda t: t[1]))
        self.logger.debug("Allocated. New sorted list is " +
                          str(sorted_cluster_count))

        return
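
The loop above always assigns a new environment to the least-loaded cluster and then re-sorts the counts. A standalone, runnable sketch of that pattern, with hypothetical cluster names and counts:

from collections import OrderedDict

env_count = {"cluster-a": 3, "cluster-b": 1, "cluster-c": 2}  # hypothetical counts
new_envs = ["QA", "UAT"]

sorted_cluster_count = OrderedDict(sorted(env_count.items(), key=lambda t: t[1]))
for new_env in new_envs:
    cluster_name = next(iter(sorted_cluster_count))  # cluster with the fewest envs
    print("Allocating %s to %s" % (new_env, cluster_name))
    sorted_cluster_count[cluster_name] += 1
    sorted_cluster_count = OrderedDict(
        sorted(sorted_cluster_count.items(), key=lambda t: t[1]))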
Example #10
class KubeadmMasterPackage(AbstractPackage):
    KUBE_SECTION = 'kubernetes'
    CIDR_KEY = 'pod_network_cidr'
    CONTROLLER_SECTION = 'controller'
    MONGO_SECTION = 'mongodb'
    PROJECTS_SECTION = 'projects'
    URL = 'mongo_url'
    DB = 'database'
    UID = 'mongo_uid'
    PWD = 'mongo_pwd'
    PACKAGES = 'packages_setup'
    W = 'w'

    def __init__(self,
                 config_path=XprConfigParser.DEFAULT_CONFIG_PATH,
                 executor=None):
        if not executor:
            executor = LocalShellExecutor()
        super().__init__(executor)
        self.config = XprConfigParser(config_path)
        self.persistence_manager = MongoPersistenceManager(
            url=self.config[self.MONGO_SECTION][self.URL],
            db=self.config[self.MONGO_SECTION][self.DB],
            uid=self.config[self.MONGO_SECTION][self.UID],
            pwd=self.config[self.MONGO_SECTION][self.PWD],
            w=self.config[self.MONGO_SECTION][self.W])

    def execute(self):
        """
        Installs the Kubernetes master node on the machine.
        """

        logger = XprLogger()
        if not linux_utils.check_root():
            logger.fatal("Please run this as root")
            return False
        logger.info("Initialising Kubernetes master node...")
        try:
            pod_network_cidr = self.config[self.PACKAGES][self.KUBE_SECTION][
                self.CIDR_KEY]
            init = 'kubeadm init --token-ttl=0 --pod-network-cidr={}'.format(
                pod_network_cidr)
            (_, output, _) = self.executor.execute_with_output(init)
            output = output.splitlines()
            join_command = (output[-2].decode("utf-8").rstrip('\\') +
                            output[-1].decode("utf-8"))
            # waiting time for master node to become active
            time.sleep(90)
            master_ip = linux_utils.get_ip_address()
            cluster_path = '/mnt/nfs/data/k8/k8_clusters/' \
                           '{}'.format(master_ip)
            linux_utils.create_directory(cluster_path, 0o755)
            join_filename = '{}/{}.txt'.format(cluster_path, master_ip)
            linux_utils.write_to_file(join_command, join_filename, "w+")
            if not os.path.isfile(join_filename):
                logger.error('Failed to write join command to file. Exiting.')
                raise CommandExecutionFailedException
            kubeconfig = 'KUBECONFIG=/etc/kubernetes/admin.conf'
            environment_path = '/etc/environment'
            linux_utils.write_to_file(kubeconfig, environment_path, "a")
            os.environ["KUBECONFIG"] = "/etc/kubernetes/admin.conf"
            kube_directory = '$HOME/.kube'
            linux_utils.create_directory(kube_directory, 0o755)
            copy_config = 'sudo cp -f /etc/kubernetes/admin.conf' \
                          ' $HOME/.kube/config'
            self.executor.execute(copy_config)
            chown = 'sudo chown $(id -u):$(id -g) $HOME/.kube/config'
            self.executor.execute(chown)
            flannel = 'kubectl apply -f https://raw.githubusercontent.com' \
                      '/coreos/flannel/master/Documentation/kube-flannel.yml'
            self.executor.execute(flannel)
            generate_api_token = "kubectl get secret $(kubectl get " \
                                 "serviceaccount default -o jsonpath=" \
                                 "'{.secrets[0].name}') -o jsonpath=" \
                                 "'{.data.token}' | base64 --decode"
            status, stdout, stderr = self.executor.execute_with_output(
                generate_api_token)
            if status != 0 or len(stderr.decode('utf-8')):
                raise CommandExecutionFailedException(
                    "Token generation failed")
            token = stdout.decode("utf-8")
            self.persistence_manager.update("nodes", {"address": master_ip},
                                            {"token": token})
            api_access = 'kubectl create clusterrolebinding permissive-binding \
                                  --clusterrole=cluster-admin \
                                  --user=admin \
                                  --user=kubelet \
                                  --group=system:serviceaccounts'

            self.executor.execute(api_access)
            docker_secret = \
                'kubectl create secret docker-registry dockerkey ' \
                '--docker-server https://dockerregistry.xpresso.ai/ ' \
                '--docker-username xprdocker --docker-password Abz00ba@123'
            self.executor.execute(docker_secret)

        except CommandExecutionFailedException as e:
            logger.error("Failed to initialise master. \n{}".format(str(e)))
            return False
        return True

    def status(self, **kwargs):
        self.logger.info("Checking if kubernes master is installe "
                         "packages are installed")
        return self.execute_command("kubectl cluster-info") == 0

    def cleanup(self):
        reset = 'yes | kubeadm reset'
        self.executor.execute(reset)
        restart = 'systemctl restart kubelet'
        self.executor.execute(restart)

    def install(self, **kwargs):
        self.execute()

    def uninstall(self, **kwargs):
        self.cleanup()

    def start(self, **kwargs):
        self.execute()

    def stop(self, **kwargs):
        self.cleanup()
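
A minimal driver sketch, assuming the package is run as root on the intended master node and that the default Xpresso config path is valid; install() and status() are the methods defined above.

# hypothetical driver; must be run as root on the target machine
package = KubeadmMasterPackage()  # reads XprConfigParser.DEFAULT_CONFIG_PATH
package.install()                 # delegates to execute(): kubeadm init plus post-install steps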