예제 #1
0
파일: conftest.py 프로젝트: shaas/rookcheck
def rook_cluster(workspace):
    """Provision hardware + kubernetes and yield an installed RookCluster.

    The rook build is expensive, so when ``_USE_THREADS`` is enabled it is
    run in a background thread while the nodes are booted and kubernetes is
    installed; otherwise it runs serially after kubernetes is up.

    Yields:
        The installed ``RookCluster``; the nested context managers tear
        everything down when the generator is finalised.
    """
    with Hardware(workspace) as hardware:
        with Kubernetes(workspace, hardware) as kubernetes:
            with RookCluster(workspace, kubernetes) as rook_cluster:
                build_thread = None
                if settings.as_bool('_USE_THREADS'):
                    logger.info("Starting rook build in a thread")
                    # BUG FIX: pass the bound method itself, not its call.
                    # The original `target=rook_cluster.build()` ran the
                    # build synchronously right here and handed Thread the
                    # None return value as its target, so nothing actually
                    # ran in the background.
                    build_thread = threading.Thread(
                        target=rook_cluster.build)
                    build_thread.start()

                # Provision nodes and kubernetes while the rook build
                # (optionally) runs in the background thread.
                hardware.boot_nodes(masters=settings.NUMBER_MASTERS,
                                    workers=settings.NUMBER_WORKERS)
                hardware.prepare_nodes()
                kubernetes.bootstrap()
                kubernetes.install_kubernetes()

                # Guard on the thread object (not the setting) so we never
                # hit a NameError/AttributeError if the setting were to
                # differ between the two reads.
                if build_thread is not None:
                    logger.info("Re-joining rook build thread")
                    build_thread.join()
                else:
                    rook_cluster.build()

                # NOTE(jhesketh): The upload is very slow.. may want to
                #                 consider how to do this in a thread too but
                #                 is more complex with ansible.
                rook_cluster.preinstall()
                rook_cluster.install()

                yield rook_cluster
예제 #2
0
    def get_connection(self):
        """Get a libcloud connection object for the configured driver.

        Returns:
            An authenticated libcloud OpenStack driver instance built from
            the ``OS_*`` settings.
        """
        # TODO(jhesketh): Provide a sensible way to allow configuration
        #                 of extended options on a per-provider level.
        #                 For example, the setting of OpenStack networks.
        OpenStackDriver = get_driver(Provider.OPENSTACK)

        # Strip any path from OS_AUTH_URL to be compatible with libcloud's
        # auth_version handling.
        auth_url_parts = urlparse(settings.OS_AUTH_URL)
        auth_url = \
            "%s://%s" % (auth_url_parts.scheme, auth_url_parts.netloc)
        # NOTE: no need to pre-initialise a `connection = None` local (the
        # original did); the driver constructor either returns an instance
        # or raises.
        return OpenStackDriver(
            settings.OS_USERNAME,
            settings.OS_PASSWORD,
            ex_force_auth_url=auth_url,
            ex_force_auth_version=settings.OS_AUTH_VERSION,
            ex_domain_name=settings.OS_USER_DOMAIN_NAME,
            ex_tenant_name=settings.OS_PROJECT_NAME,
            ex_tenant_domain_id=settings.OS_PROJECT_DOMAIN_ID,
            # `x or None` is exactly `x if x else None`: fall back to None
            # when no region is configured.
            ex_force_service_region=settings.OS_REGION_NAME or None,
            secure=settings.as_bool('OS_VERIFY_SSL_CERT'),
            api_version='2.2',
        )
예제 #3
0
    def destroy(self, skip=False):
        """Tear down the workspace: kill the ssh-agent and remove the dir.

        Args:
            skip: When True, keep the workspace directory on disk (the
                  ssh-agent is still killed).
        """
        if settings.as_bool('_TEAR_DOWN_CLUSTER_CONFIRM'):
            handle_cleanup_input("pause before cleanup workspace")

        # This kills the SSH_AGENT_PID agent
        try:
            self.execute('ssh-agent -k', check=True)
        except subprocess.CalledProcessError:
            logger.warning(f'Killing ssh-agent with PID'
                           f' {self._ssh_agent_pid} failed')

        if skip:
            logger.warning("The workspace directory will not be removed!")
            logger.warning(f"Workspace left behind at {self.working_dir}")
            return

        if settings.as_bool('_REMOVE_WORKSPACE'):
            logger.info(f"Removing workspace {self.working_dir} from disk")
            # NOTE(jhesketh): go clones repos as read-only. We need to chmod
            #                 all the files back to writable (in particular,
            #                 the directories) so that we can remove them
            #                 without failures or warnings.
            for root, dirs, files in os.walk(self.working_dir):
                # Directories and files get identical treatment, so handle
                # them in a single loop instead of two duplicated ones.
                for entry in dirs + files:
                    path = os.path.join(root, entry)
                    try:
                        os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)
                    except (FileNotFoundError, PermissionError):
                        # Some paths might be broken symlinks.
                        # Some files may be owned by somebody else (eg qemu)
                        # but are still safe to remove so ignore the
                        # permissions issue.
                        pass
            shutil.rmtree(self.working_dir)
        else:
            logger.info(f"Keeping workspace on disk at {self.working_dir}")
예제 #4
0
    def destroy(self, skip=True):
        """Tear down kubernetes.

        Args:
            skip: Defaults to True because in most cases the underlying
                  nodes are destroyed instead, taking kubernetes with them.
        """
        if skip:
            # We can skip in most cases since the nodes themselves will be
            # destroyed instead.
            return

        if settings.as_bool('_TEAR_DOWN_CLUSTER_CONFIRM'):
            common.handle_cleanup_input("pause before cleanup kubernetes")

        # TODO(jhesketh): Uninstall kubernetes
        # (removed a redundant trailing `pass`; the method body already
        # contains real statements)
        logger.info(f"kube destroy on hardware {self.hardware}")
예제 #5
0
    def destroy(self, skip=False):
        """Remove every node (and its resources) unless skipping is asked.

        Args:
            skip: When True, leave all nodes in place and only log what
                  remains behind.
        """
        if skip:
            logger.warning("Hardware will not be removed!")
            logger.warning("The following nodes and their associated resources"
                           " (such as IP's and volumes) will remain:")
            for node in self.nodes.values():
                logger.warning(
                    f"Leaving node {node.name} at ip {node.get_ssh_ip()}")
                logger.warning(f".. with volumes {node._disks}")
                # TODO(jhesketh): Neaten up how disks are handled
            return

        if settings.as_bool('_TEAR_DOWN_CLUSTER_CONFIRM'):
            handle_cleanup_input("pause before cleanup hardware")

        logger.info("Remove all nodes from Hardware")
        # Snapshot the values since node_remove mutates self.nodes.
        for node in list(self.nodes.values()):
            self.node_remove(node)
예제 #6
0
 def __exit__(self, type, value, traceback):
     """On context exit: optionally gather logs, then tear down.

     Destruction is skipped when ``_TEAR_DOWN_CLUSTER`` is disabled.
     """
     log_dir = settings._GATHER_LOGS_DIR
     if log_dir:
         self.gather_logs(log_dir)
     tear_down = settings.as_bool('_TEAR_DOWN_CLUSTER')
     self.destroy(skip=not tear_down)
예제 #7
0
 def __exit__(self, type, value, traceback):
     """Tear down on context exit unless ``_TEAR_DOWN_CLUSTER`` is off."""
     tear_down = settings.as_bool('_TEAR_DOWN_CLUSTER')
     self.destroy(skip=not tear_down)
예제 #8
0
import libcloud.security
from libcloud.compute.base import NodeImage
from libcloud.compute.drivers.openstack import (OpenStackNetwork,
                                                OpenStackNodeSize,
                                                OpenStackSecurityGroup)
from libcloud.compute.types import Provider, NodeState, StorageVolumeState
from libcloud.compute.providers import get_driver
from urllib.parse import urlparse

from tests.config import settings
from tests.lib.hardware.hardware_base import HardwareBase
from tests.lib.hardware.node_base import NodeBase, NodeRole
from tests.lib.workspace import Workspace

logger = logging.getLogger(__name__)
libcloud.security.VERIFY_SSL_CERT = settings.as_bool('OS_VERIFY_SSL_CERT')


class Node(NodeBase):
    """A cloud node managed via a libcloud OpenStack driver connection.

    Holds the parameters needed to create the instance later; the actual
    libcloud node object is created lazily (``_libcloud_node`` starts None).
    """
    def __init__(self, name, role, tags, conn, size, image, networks,
                 security_groups, sshkey_name):
        super().__init__(name, role, tags)
        # Libcloud driver connection used for all cloud API calls.
        self.conn = conn
        # presumably an OpenStackNodeSize (flavor) — see module imports;
        # TODO confirm against callers
        self._size = size
        # presumably a NodeImage; TODO confirm against callers
        self._image = image
        self._networks = networks
        self._security_groups = security_groups
        # Name of the SSH keypair registered with the cloud provider.
        self._sshkey_name = sshkey_name
        # Set once the instance is actually created in the cloud.
        self._libcloud_node = None

        # Floating IPs attached to this node (managed elsewhere in class).
        self._floating_ips = []