Example #1
0
    def extract(self):
        """Extract the vmdk disk out of the zip archive."""
        LOGGER.debug("Extracting '.vmdk' disk file from [%s].",
                     self.input_disk_path)
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path,
                                                  '.vmdk', self.working_dir)
        LOGGER.info("AWS disk_to_upload = '%s'", self.disk_to_upload)
Example #2
0
def get_list_from_config_yaml(key):
    """Retrieves a value from the config system and returns its YAML file or JSON string contents
    as a list.
    Returns an empty list for an empty YAML content."""

    # Retrieve string value for key
    value_string = get_config_value(key)
    if not value_string:
        return []

    # Convert YAML file or JSON string to a list
    if value_string.endswith('.yml') or value_string.endswith('.yaml'):
        try:
            with open(value_string, "r") as value_file:
                value_list = yaml.safe_load(value_file)
        except YAMLError:
            LOGGER.error("Unable to parse YAML from file [%s]!", value_string)
            raise
    else:
        try:
            value_list = json.loads(value_string)
        except ValueError:
            LOGGER.error("Unable to parse JSON from string [%s]!",
                         value_string)
            raise

    # Return the list
    return value_list
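# Usage sketch for get_list_from_config_yaml(); the key and values below are
# hypothetical. The configured value may be a YAML file path or an inline
# JSON string, and both forms yield a Python list:
#
#   IMAGE_TAGS = '[{"owner": "team-a"}, {"env": "dev"}]'
#   tags = get_list_from_config_yaml('IMAGE_TAGS')
#   # => [{'owner': 'team-a'}, {'env': 'dev'}]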
Example #3
0
    def set_uploaded_disk_name(self, disk_name):
        """Set the uploaded disk name."""
        # Azure disks take their names from the image name (unlike other clouds,
        # where disk names are auto-generated during disk extraction), so append
        # the disk extension to the uploaded disk name.
        self.uploaded_disk_name = disk_name + '.vhd'
        LOGGER.info("The uploaded disk name is '%s'.", self.uploaded_disk_name)
Example #4
0
    def extract(self):
        """
        The input disk is already a tar.gz archive of disk.tar.
        Just copy the path.
        """
        self.disk_to_upload = self.input_disk_path
        LOGGER.info("Google disk_to_upload is '%s'.", self.disk_to_upload)
Example #5
0
    def __init__(self):
        """
        Class that other information gathering classes inherit from

        All information gathering from this class means that all other
        information gathering classes also need that information
        """

        LOGGER.info(
            "Collecting information about installed software on the build machine"
        )

        self.json_info = {}
        self.json_info["modules"] = telemetry.operation_info.get_module()
        self.json_info["uuid"] = uuid.uuid1().hex
        self.json_info["image_name"] = get_image_name()
        self.json_info["image_id"] = get_image_id()
        self.json_info["platform"] = telemetry.operation_info.get_platform()
        self.json_info["boot_locations"] = \
            telemetry.operation_info.get_boot_locations()
        self.json_info["result"] = telemetry.operation_info.get_result()
        self.json_info["start_time"] = telemetry.operation_info.get_start_time()
        self.json_info["end_time"] = telemetry.operation_info.get_end_time()
        self.get_platform_specific_info()
        self.output_file_path = "./"  # default output to the main folder
Example #6
0
    def delete_snapshot(self):
        """Delete the AWS snapshot created by this object."""
        if self.snapshot_id is not None:
            LOGGER.info("Deleting the snapshot '%s'.", self.snapshot_id)
            self.ec2_client.delete_snapshot(SnapshotId=self.snapshot_id)
            LOGGER.info("Successfully deleted snapshot '%s'.",
                        self.snapshot_id)
Example #7
0
    def __init__(self, metadata):
        # Create list of metadata config files for parent
        if metadata is None:
            raise ValueError('metadata is required.')

        context = 'tag'
        tag_config_files = MetadataConfigFileUtil(metadata.artifacts_dir, context). \
            get_all_config_filenames(['VersionFile'])

        # Init metadata with metadata/config files
        MetadataFilter.__init__(self, metadata, tag_config_files)
        self.filter()

        # Add user defined tags to the config/metadata
        user_tags = get_list_from_config_yaml('IMAGE_TAGS')
        for user_tag in user_tags:
            for key, value in user_tag.items():
                self.metadata[key] = value

        # Remove tags user requested to exclude
        user_exclude_tags = get_list_from_config_yaml('IMAGE_TAGS_EXCLUDE')
        for key in user_exclude_tags:
            if key in self.metadata:
                del self.metadata[key]
                LOGGER.info('Excluded key [%s] from image tags.', key)
            else:
                LOGGER.info(
                    'Key [%s] does not exist in image tags and cannot be excluded.',
                    key)
Example #8
0
    def delete_blob(self):
        """
        Delete the blob corresponding to self.uploaded_disk_name if it exists.
        """
        try:
            # Populate the bucket if not already.
            if self.bucket is None:
                self.init_bucket()

            if self.uploaded_disk_name is None:
                raise RuntimeError(
                    "Trying to delete a non-existent uploaded disk.")

            blob = self.get_blob(self.uploaded_disk_name)
            if blob is not None:
                LOGGER.info("Deleting blob '%s'.", self.uploaded_disk_name)
                self.bucket.delete_blob(self.uploaded_disk_name)

                blob = self.get_blob(self.uploaded_disk_name)
                if blob is not None:
                    raise RuntimeError(
                        "Deleting blob '{}' silently failed as it still exists."
                        .format(self.uploaded_disk_name))
        except google.cloud.exceptions.NotFound as exception:
            LOGGER.exception(exception)
            raise
Example #9
0
    def transform_values(self,
                         to_lower=False,
                         disallowed_regex='[^a-zA-Z0-9-]',
                         replacement_char='-'):
        """Transform data values"""
        LOGGER.debug('Transform metadata values')

        if disallowed_regex is not None and disallowed_regex != '':
            try:
                re.compile(disallowed_regex)
            except re.error as exc:
                raise ValueError('disallowed_regex is invalid: {}'.format(
                    str(exc))) from exc

            if replacement_char is None or replacement_char == '':
                raise ValueError(
                    'Replacement character is required for disallowed_regex')

        for key, val in self.metadata.items():
            # convert values to lower as requested
            if to_lower:
                val = val.lower()

            # substitute replacement character as requested
            if disallowed_regex is not None and disallowed_regex != '':
                val = re.sub(disallowed_regex, replacement_char, val)

            self.metadata[key] = val

        LOGGER.trace('metadata:%s',
                     json.dumps(self.metadata, indent=4, sort_keys=True))
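    # Usage sketch for transform_values(); the metadata contents below are
    # hypothetical. With the defaults above, any character outside
    # [a-zA-Z0-9-] is replaced:
    #
    #   self.metadata = {'image_name': 'BIG-IP 15.1_build3'}
    #   self.transform_values(to_lower=True)
    #   # => {'image_name': 'big-ip-15-1-build3'}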
Example #10
0
    def clean_up(self):
        """Clean-up cloud objects created by this class and its members."""
        LOGGER.info("Cleaning-up GoogleImage artifacts.")

        if self.disk is not None:
            self.disk.clean_up()
        LOGGER.info("Completed GoogleImage clean-up.")
Example #11
0
    def is_image_ready(self, image_name):
        """Checks if the given image is ready."""
        def _is_image_ready():
            """Checks if an image with image_name exists and status is READY"""
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
            if not result or result['status'] == 'FAILED':
                raise RuntimeError(
                    "Creation of image [{}] failed!".format(image_name))
            return result['status'] == 'READY'

        retrier = Retrier(_is_image_ready)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for image [%s] to be ready.", image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] is ready.", image_name)
                self.metadata.set(self.__class__.__name__, 'image_id',
                                  image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            return False
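# The Retrier used above is not shown in these examples. Below is a minimal
# sketch of the contract the callers appear to assume (names and defaults are
# guesses): call the wrapped function up to `tries` times, sleeping `delay`
# seconds between attempts, and report whether it ever returned True.
import time


class Retrier:
    """Hypothetical sketch of the retry helper assumed by is_image_ready()."""

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.tries = 3
        self.delay = 10

    def execute(self):
        """Return True as soon as the wrapped function does; False otherwise."""
        for attempt in range(self.tries):
            if self.func(*self.args, **self.kwargs):
                return True
            if attempt < self.tries - 1:
                time.sleep(self.delay)
        return False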
Example #12
0
    def insert_image(self, image_name):
        """Create image in GCE and then check for status = READY"""
        bucket_name = get_config_value('GCE_BUCKET')
        image_body = {
            "name": image_name,
            "rawDisk": {
                # The source URL must include both the bucket name and the blob name
                "source":
                "https://storage.googleapis.com/{}/{}".format(
                    bucket_name, self.disk.uploaded_disk_name)
            }
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().insert(
                project=self.gce_project_id, body=image_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.exception(exp)
            raise

        if not result:
            return False

        LOGGER.debug("Image creation response: '%s'", result)
        return self.is_image_ready(image_name)
Example #13
0
def get_packages():
    """Collect version information for installed packages"""
    LOGGER.debug("Collecting version information for installed packages")
    distro_data = distro_info.linux_distribution(full_distribution_name=False)
    distro_command = BuildInfo.package_manager_commands[distro_data[0]]
    packages = _command_key_values_to_dict(*distro_command)
    return packages
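# _command_key_values_to_dict() is referenced but not shown. A minimal sketch
# of what it might do, assuming each package-manager command prints
# "name<separator>version" lines (the signature is a guess):
import subprocess


def _command_key_values_to_dict(command, separator=' '):
    """Hypothetical sketch: map command output lines to a {name: version} dict."""
    output = subprocess.check_output(command, shell=True).decode('utf-8')
    versions = {}
    for line in output.splitlines():
        parts = line.split(separator, 1)
        if len(parts) == 2:
            versions[parts[0].strip()] = parts[1].strip()
    return versions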
Example #14
0
    def is_share_image_succeeded(self, share_account_ids):
        """Helper utility for share_image() that goes through the list of share_account_ids
        and confirms that the image was shared with all accounts. The function logs any
        error during its execution without propagating it up."""
        try:
            LOGGER.info("Checking which accounts were added for sharing this AMI")
            image_launch_perms = self.ec2_client.describe_image_attribute(
                ImageId=self.image_id,
                Attribute='launchPermission',
                DryRun=False
            )
            LOGGER.trace("image.describe_attribute() response => %s", image_launch_perms)
        except ClientError as client_error:
            # Simply log the exception without propagating it.
            LOGGER.exception(client_error)
            return False

        # Create a list of account IDs that have launch permission
        launch_permission_accounts = []
        for each in image_launch_perms['LaunchPermissions']:
            launch_permission_accounts.append(each['UserId'])

        counter = 0
        # Check which accounts were added for sharing this AMI
        for account_id in share_account_ids:
            if str(account_id) in launch_permission_accounts:
                LOGGER.info("The AMI was successfully shared with account: %s", account_id)
                counter += 1
            else:
                LOGGER.warning("The AMI was not shared with account: %s", account_id)

        # Confirm that the number of accounts in share_account_ids and image's
        # 'LaunchPermissions' are matching.
        return counter == len(share_account_ids)
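    # For context, the sharing step that is_share_image_succeeded() verifies
    # presumably resembles the following sketch. modify_image_attribute() and
    # the LaunchPermission payload shape are standard boto3; the method itself
    # is an assumption, not code from the original source.
    def share_image(self, share_account_ids):
        """Hypothetical sketch: grant launch permission to each account."""
        for account_id in share_account_ids:
            self.ec2_client.modify_image_attribute(
                ImageId=self.image_id,
                LaunchPermission={'Add': [{'UserId': str(account_id)}]})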
Example #15
0
    def delete_uploaded_disk(self, disk_name):
        """Deletes the given disk_name from the self.bucket_name S3 bucket."""
        if self.is_disk_exist(disk_name):
            self.s3_client.delete_object(Bucket=self.bucket_name,
                                         Key=disk_name)
            LOGGER.info("Deleted '%s' from bucket '%s'.", disk_name,
                        self.bucket_name)
Example #16
0
    def wait_for_image_availability(self):
        """ Wait for image to be created and available """
        def _wait_for_image_availability():
            """Awaits the describe_images() to successfully acknowledge availability
            of the given image."""
            try:
                response = self.ec2_client.describe_images(ImageIds=[self.image_id])
            except (ClientError, ParamValidationError) as botocore_exception:
                LOGGER.exception(botocore_exception)
                raise RuntimeError('EC2.Client.describe_images() failed for {}!'.
                                   format(self.image_id)) from botocore_exception
            if not response:
                raise RuntimeError('EC2.Client.describe_images() returned an empty response!')
            try:
                if response['Images'][0]['State'] == 'available':
                    return True
                return False
            except (KeyError, IndexError) as image_describe_exception:
                LOGGER.exception(image_describe_exception)
                raise RuntimeError('EC2.Client.describe_images() did not have ' +
                                   '[\'Images\'][0][\'State\'] in its response: response \'{}\''.
                                   format(response)) from image_describe_exception

        retrier = Retrier(_wait_for_image_availability)
        retrier.tries = int(get_config_value('AWS_CREATE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_CREATE_IMAGE_RETRY_DELAY'))
        LOGGER.info('Waiting for the image %s to become available.', self.image_id)

        if retrier.execute():
            LOGGER.info('Image [%s] is created in AWS.', self.image_id)
        else:
            raise RuntimeError('Exhausted all \'{}\' retries for image {} to become available.'.
                               format(retrier.tries, self.image_id))
Example #17
0
    def cancel_import_task(self):
        """Cancel an in-progress import task represented by self.import_task_id.
        Per AWS, this only works on "pending" import tasks; for a completed task
        it is essentially a no-op."""
        if self.import_task_id is not None:
            LOGGER.info("Cancelling pending import task '%s'.", self.import_task_id)
            self.ec2_client.cancel_import_task(ImportTaskId=self.import_task_id)
            LOGGER.info("Successfully cancelled pending import task '%s'.", self.import_task_id)
Example #18
0
    def delete_image(self, image_id):
        """Delete (deregister) the image with the given image ID."""
        try:
            self.ec2_client.deregister_image(ImageId=image_id)
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError('deregister_image failed for image \'{}\'!'.format(image_id)) \
                from botocore_exception
Example #19
0
def get_distro():
    """Collect Linux distro name and version"""
    LOGGER.debug("Collecting Linux distro name and version")
    distro = {}
    distro_data = distro_info.linux_distribution(full_distribution_name=False)
    distro["name"] = distro_data[0]
    distro["version"] = distro_data[1]
    return distro
Example #20
0
    def delete_file_from_storage(self):
        """Delete the uploaded file from storage if it exists."""
        if self.bucket.object_exists(self.uploaded_disk_name):
            LOGGER.info('Storage file %s exists, deleting it', self.uploaded_disk_name)
            self.bucket.delete_object(self.uploaded_disk_name)
        else:
            LOGGER.info('Storage file %s does not exist, no need to delete it',
                        self.uploaded_disk_name)
Example #21
0
    def is_bucket_exist(self):
        """Checks if a bucket with self.bucket_name exists in S3."""
        try:
            return self.s3_resource.Bucket(
                self.bucket_name).creation_date is not None
        except ClientError as client_error:
            LOGGER.exception(client_error)
        return False
Example #22
0
    def extract(self):
        """Extract the vhd disk out of the tar.gz archive."""
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path,
                                                  '.vhd', self.working_dir)
        LOGGER.info("Azure disk_to_upload = '%s'", self.disk_to_upload)
Example #23
0
    def _progress_cb(self, byte_up, byte_total):
        """Log upload progress, at most once every 10 seconds."""
        sec = int(time())

        # Throttle updates to at most one per 10-second interval
        if sec - self.progress_cb_lu > 10:
            self.progress_cb_lu = sec
            byte_up //= (1 << 20)
            byte_total //= (1 << 20)
            LOGGER.info('Uploaded %d MB of total %d MB', byte_up, byte_total)
Example #24
0
    @staticmethod
    def set_number_of_threads():
        """The thread count must not exceed oss2.defaults.connection_pool_size."""
        if int(get_config_value('ALIBABA_THREAD_COUNT')) > int(oss2.defaults.connection_pool_size):
            number_of_threads_message = 'Will use only ' + \
                '{} threads for the image upload, '.format(oss2.defaults.connection_pool_size) + \
                'the limit is imposed by oss2.defaults.connection_pool_size'
            LOGGER.warning(number_of_threads_message)
            return int(oss2.defaults.connection_pool_size)
        return int(get_config_value('ALIBABA_THREAD_COUNT'))
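    # Sketch of how set_number_of_threads() and the throttled _progress_cb()
    # from Example #23 might be wired into an Alibaba OSS upload with
    # oss2.resumable_upload(); the method name, attributes, and call site are
    # assumptions, not code from the original source.
    def upload_disk(self, local_disk_path):
        """Hypothetical sketch: multi-threaded resumable upload to OSS."""
        oss2.resumable_upload(
            self.bucket,                          # oss2.Bucket instance
            self.uploaded_disk_name,              # destination object key
            local_disk_path,                      # local disk image path
            num_threads=self.set_number_of_threads(),
            progress_callback=self._progress_cb)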
Example #25
0
    def __init__(self,
                 artifacts_dir,
                 cloud_type,
                 image_disk_path,
                 should_clean=True):
        self.start_time = time.time()
        self.artifacts_dir = artifacts_dir
        self.cloud_type = cloud_type
        self.image_name = None
        self.image_disk_path = image_disk_path
        self.metadata = None
        self.status = 'failure'
        self.transformed_image_name = None
        self.working_dir = None
        self.should_clean = should_clean
        self.cloud_image = None

        if not os.path.isdir(artifacts_dir):
            raise ValueError(
                "Missing or invalid artifacts directory '{}'.".format(
                    artifacts_dir))
        if not os.path.isfile(image_disk_path):
            raise ValueError(
                "Missing image disk '{}'.".format(image_disk_path))

        # Create a working directory under the artifacts dir to temporarily store
        # various build constructs and files.
        self.create_working_dir(artifacts_dir)

        try:
            # Factory (could be a separate object)
            # pylint: disable=import-outside-toplevel
            if cloud_type == 'alibaba':
                from image.alibaba_image import AlibabaImage
                self.cloud_image = AlibabaImage(self.working_dir,
                                                self.image_disk_path)
            elif cloud_type == 'aws':
                from image.aws_image import AWSImage
                self.cloud_image = AWSImage(self.working_dir,
                                            self.image_disk_path)
            elif cloud_type == 'azure':
                from image.azure_image import AzureImage
                self.cloud_image = AzureImage(self.working_dir,
                                              self.image_disk_path)
            elif cloud_type == 'gce':
                from image.google_image import GoogleImage
                self.cloud_image = GoogleImage(self.working_dir,
                                               self.image_disk_path)
            else:
                raise ValueError(
                    'Unexpected cloud type: {}'.format(cloud_type))
            # pylint: enable=import-outside-toplevel
            self.cloud_image_name = self.image_name_factory(cloud_type)
        except BaseException as base_exception:
            LOGGER.exception(base_exception)
            raise
Example #26
0
def get_standalone_software():
    """Collect version information for standalone software"""
    LOGGER.debug("Collecting version information for standalone software")
    standalone_software = {}

    # ovftool
    ovftool_command = BuildInfo.package_manager_commands["ovftool"]
    standalone_software.update(_command_key_values_to_dict(*ovftool_command))

    return standalone_software
Example #27
0
    def prep_disk(self):
        """Performs the legwork to convert the S3 disk represented by self.disk
        into a snapshot from which an AWSImage can be created."""
        LOGGER.info("Prepare the uploaded s3 disk for image generation.")

        # Convert the s3Disk into an AWS Snapshot.
        self.snapshot = AWSSnapshot(self.ec2_client, self.disk.bucket_name,
                                    self.disk.uploaded_disk_name)
        self.snapshot.create_snapshot()
        LOGGER.info("AWS Disk preparation is complete for image creation.")
Example #28
0
    def is_disk_exist(self, disk_name):
        """Checks if the given disk_name exists in the S3 bucket."""
        try:
            bucket = self.get_bucket()
            if bucket is not None:
                bucket_objs = list(bucket.objects.filter(Prefix=disk_name))
                return bool(bucket_objs) and bucket_objs[0].key == disk_name
        except ClientError as client_error:
            LOGGER.exception(client_error)
        return False
Example #29
0
    def clean_up(self):
        """Clean-up."""
        try:
            if self.bucket_name is not None and self.uploaded_disk_name is not None:
                LOGGER.debug("Deleting '%s' from the bucket '%s'.",
                             self.uploaded_disk_name, self.bucket_name)
                self.delete_uploaded_disk(self.uploaded_disk_name)
        except ClientError as client_error:
            # Log the exception without propagating it further.
            LOGGER.exception(client_error)
Example #30
0
    def get_blob(self, disk_path):
        """
        Gets the GCS blob representing the given disk_path.
        """
        blob = None
        try:
            blob = self.bucket.get_blob(disk_path)
        except google.cloud.exceptions.NotFound as exception:
            LOGGER.exception(exception)
            raise
        return blob