Code Example #1
File: utils.py  Project: nimrod-becker/ocs-ci
def destroy_cluster(cluster_path, log_level="DEBUG"):
    """
    Destroy existing cluster resources in AWS.

    Args:
        cluster_path (str): filepath to cluster directory to be destroyed
        log_level (str): log level to set for the openshift-install command

    """
    # Download installer
    installer = get_openshift_installer()

    destroy_cmd = (f"{installer} destroy cluster "
                   f"--dir {cluster_path} "
                   f"--log-level {log_level}")

    try:
        cluster_path = os.path.normpath(cluster_path)

        # Retrieve cluster name and aws region from metadata
        metadata_file = os.path.join(cluster_path, "metadata.json")
        with open(metadata_file) as f:
            metadata = json.loads(f.read())
        cluster_name = metadata.get("clusterName")
        region_name = metadata.get("aws", {}).get("region")

        # Execute destroy cluster using OpenShift installer
        log.info(f"Destroying cluster defined in {cluster_path}")
        run_cmd(destroy_cmd)

        # Find and delete volumes
        aws = AWS(region_name)
        volume_pattern = f"{cluster_name}*"
        log.debug(f"Finding volumes with pattern: {volume_pattern}")
        volumes = aws.get_volumes_by_name_pattern(volume_pattern)
        log.debug(f"Found volumes: \n {volumes}")
        for volume in volumes:
            aws.detach_and_delete_volume(aws.ec2_resource.Volume(volume['id']))

        # Remove installer
        delete_file(installer)

    except Exception:
        log.error(traceback.format_exc())
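As a point of reference, here is a minimal, hypothetical call to destroy_cluster. The directory path below is an assumption, not part of the example; the function expects the directory produced by openshift-install create cluster, since that is where metadata.json (with clusterName and the AWS region) lives:

# Hypothetical usage sketch -- the cluster directory path is an assumption
cluster_dir = os.path.expanduser("~/clusters/my-ocs-cluster")
destroy_cluster(cluster_dir, log_level="INFO")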
Code Example #2
class AWSBase(Deployment):
    def __init__(self):
        """
        This would be base for both IPI and UPI deployment
        """
        super(AWSBase, self).__init__()
        self.region = config.ENV_DATA['region']
        self.aws = AWSUtil(self.region)

    def create_ebs_volumes(self, worker_pattern, size=100):
        """
        Create and attach new EBS volumes to the workers

        Args:
            worker_pattern (str):  Worker name pattern e.g.:
                cluster-55jx2-worker*
            size (int): Size in GB (default: 100)
        """
        worker_instances = self.aws.get_instances_by_name_pattern(
            worker_pattern)
        with parallel() as p:
            for worker in worker_instances:
                logger.info(f"Creating and attaching {size} GB "
                            f"volume to {worker['name']}")
                p.spawn(
                    self.aws.create_volume_and_attach,
                    availability_zone=worker['avz'],
                    instance_id=worker['id'],
                    name=f"{worker['name']}_extra_volume",
                    size=size,
                )

    def add_volume(self, size=100):
        """
        Add a new volume to all the workers

        Args:
            size (int): Size of volume in GB (default: 100)
        """
        cluster_id = get_infra_id(self.cluster_path)
        worker_pattern = f'{cluster_id}-worker*'
        logger.info(f'Worker pattern: {worker_pattern}')
        self.create_ebs_volumes(worker_pattern, size)

    def host_network_update(self):
        """
        Update security group rules for HostNetwork
        """
        cluster_id = get_infra_id(self.cluster_path)
        worker_pattern = f'{cluster_id}-worker*'
        worker_instances = self.aws.get_instances_by_name_pattern(
            worker_pattern)
        security_groups = worker_instances[0]['security_groups']
        sg_id = security_groups[0]['GroupId']
        security_group = self.aws.ec2_resource.SecurityGroup(sg_id)
        # The ports are not 100 % clear yet. Taken from doc:
        # https://docs.google.com/document/d/1c23ooTkW7cdbHNRbCTztprVU6leDqJxcvFZ1ZvK2qtU/edit#
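        # For reference (our note, not taken from the doc above): 6800-7300
        # is the usual Ceph OSD/MGR port range, 3300 and 6789 are the MON
        # msgr2/msgr1 ports, and 8443/8080 are the Ceph dashboard ports.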
        security_group.authorize_ingress(
            DryRun=False,
            IpPermissions=[
                {
                    'FromPort': 6800,
                    'ToPort': 7300,
                    'IpProtocol': 'tcp',
                    'UserIdGroupPairs': [
                        {'Description': 'Ceph OSDs', 'GroupId': sg_id},
                    ],
                },
                {
                    'FromPort': 3300,
                    'ToPort': 3300,
                    'IpProtocol': 'tcp',
                    'UserIdGroupPairs': [
                        {'Description': 'Ceph MONs rule1', 'GroupId': sg_id},
                    ],
                },
                {
                    'FromPort': 6789,
                    'ToPort': 6789,
                    'IpProtocol': 'tcp',
                    'UserIdGroupPairs': [
                        {'Description': 'Ceph MONs rule2', 'GroupId': sg_id},
                    ],
                },
                {
                    'FromPort': 8443,
                    'ToPort': 8443,
                    'IpProtocol': 'tcp',
                    'UserIdGroupPairs': [
                        {'Description': 'Ceph Dashboard rule1', 'GroupId': sg_id},
                    ],
                },
                {
                    'FromPort': 8080,
                    'ToPort': 8080,
                    'IpProtocol': 'tcp',
                    'UserIdGroupPairs': [
                        {'Description': 'Ceph Dashboard rule2', 'GroupId': sg_id},
                    ],
                },
            ])

    def add_node(self):
        # TODO: Implement later
        super(AWSBase, self).add_node()

    def check_cluster_existence(self, cluster_name_prefix):
        """
        Check cluster existence according to cluster name prefix

        Args:
            cluster_name_prefix (str): The cluster name prefix to look for

        Returns:
            bool: True if a cluster with the same name prefix already exists,
                False otherwise

        """
        instances = self.aws.get_instances_by_name_pattern(cluster_name_prefix)
        instance_objs = [
            self.aws.get_ec2_instance(ins.get('id')) for ins in instances
        ]
        non_terminated_instances = [
            ins for ins in instance_objs
            if ins.state.get('Code') != constants.INSTANCE_TERMINATED
        ]
        if non_terminated_instances:
            logger.error(
                f"Non terminated EC2 instances with the same name prefix were"
                f" found: {[ins.id for ins in non_terminated_instances]}")
            return True
        return False

    def destroy_volumes(self):
        """
        Find and delete the EBS volumes left behind by this cluster
        """
        try:
            # Retrieve cluster name and AWS region from metadata
            cluster_name = self.ocp_deployment.metadata.get('clusterName')
            # Find and delete volumes
            volume_pattern = f"{cluster_name}*"
            logger.debug(f"Finding volumes with pattern: {volume_pattern}")
            volumes = self.aws.get_volumes_by_name_pattern(volume_pattern)
            logger.debug(f"Found volumes: \n {volumes}")
            for volume in volumes:
                self.aws.detach_and_delete_volume(
                    self.aws.ec2_resource.Volume(volume['id']))
        except Exception:
            logger.error(traceback.format_exc())
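As a rough usage sketch (not from the project itself): AWSBase is meant to be subclassed by the concrete IPI/UPI deployment classes rather than instantiated directly. The subclass name and values below are assumptions for illustration; the class relies on config.ENV_DATA['region'] being set and on a cluster_path attribute coming from the Deployment base class.

# Hypothetical subclass and flow -- names and sizes are assumptions
class AWSIPI(AWSBase):
    def __init__(self):
        super(AWSIPI, self).__init__()

deployment = AWSIPI()
# Bail out if instances with the same name prefix are still running
if not deployment.check_cluster_existence('my-cluster'):
    deployment.add_volume(size=200)   # attach a 200 GB EBS volume to each worker
    deployment.host_network_update()  # open the Ceph and dashboard ports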