def delete_blob(self):
    """
    Delete the blob corresponding to self.uploaded_disk_name if it exists.

    Raises:
        RuntimeError: if no uploaded disk name is set, or if the blob still
            exists after the deletion request (silent failure).
        google.cloud.exceptions.NotFound: if the lookup/deletion reports the
            resource as missing; logged and re-raised.
    """
    try:
        # Populate the bucket if not already.
        if self.bucket is None:
            self.init_bucket()

        if self.uploaded_disk_name is None:
            raise RuntimeError(
                "Trying to delete a non-existent uploaded disk.")

        blob = self.get_blob(self.uploaded_disk_name)
        if blob is not None:
            LOGGER.info("Deleting blob '%s'.", self.uploaded_disk_name)
            self.bucket.delete_blob(self.uploaded_disk_name)

            # delete_blob() can fail without raising, so verify the blob
            # is really gone before declaring success.
            if self.get_blob(self.uploaded_disk_name) is not None:
                raise RuntimeError(
                    "Deleting blob '{}' silently failed as it still exists."
                    .format(self.uploaded_disk_name))
    except google.cloud.exceptions.NotFound as exception:
        LOGGER.exception(exception)
        # Bare raise preserves the original traceback.  The previous
        # `except RuntimeError: raise` handler was a no-op and was removed.
        raise
# --- Example #2 (scraper snippet separator) ---
    def is_image_ready(self, image_name):
        """Poll GCE until the image named image_name reports READY status.

        Returns True once READY (recording the name in metadata under
        'image_id'), False on retry exhaustion or on HTTP/runtime errors.
        """
        def _check_status():
            """True when the image status is READY; raise if it FAILED."""
            # pylint: disable=no-member
            resp = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name).execute()
            if not resp or resp['status'] == 'FAILED':
                raise RuntimeError(
                    "Creation of image [{}] failed!".format(image_name))
            return resp['status'] == 'READY'

        retrier = Retrier(_check_status)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for image [%s] to be ready.", image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] is ready.", image_name)
                self.metadata.set(self.__class__.__name__, 'image_id',
                                  image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as http_error:
            LOGGER.exception(http_error)
            return False
        except RuntimeError as runtime_error:
            LOGGER.exception(runtime_error)
            return False
    def delete_old_image(self, image_name):
        """Delete any pre-existing image(s) with the given name.

        Unlikely unless the image name was pinned in the configuration; at
        most two duplicates are tolerated (AWS replicability quirk).
        """
        response = self.find_image(image_name)
        images = response['Images']
        num_images = len(images)
        if num_images not in (0, 1, 2):
            raise RuntimeError('Number of images named {} '.format(image_name) +
                               'expected to be 0 or 1 (maybe 2, due to AWS replicability issues),' +
                               ' but found {}. '.format(num_images) +
                               '(Should have received InvalidAMIName.Duplicate error during ' +
                               'the previous image creation). Please delete them manually.')

        if num_images == 0:
            return

        # Extract every image id up front so a malformed response fails
        # before any deletion is attempted.
        try:
            image_ids = [image['ImageId'] for image in images]
        except KeyError as key_error:
            LOGGER.exception(key_error)
            raise RuntimeError(
                'could not find ImageId key for image {} '.format(image_name) +
                'in describe_images response: {}'.format(response)) from key_error

        for image_id in image_ids:
            LOGGER.info('There is an old image %s named %s, deleting it.', image_id,
                        image_name)
            self.delete_image(image_id)
    def share_image(self):
        """Reads a list of AWS accounts and shares the AMI with each of those accounts."""
        share_account_ids = get_list_from_config_yaml('AWS_IMAGE_SHARE_ACCOUNT_IDS')
        if not share_account_ids:
            LOGGER.info("No account IDs found for sharing AMI")
            return

        LOGGER.info("Share the AMI with multiple AWS accounts.")
        for dest_account_id in share_account_ids:
            try:
                LOGGER.info('Sharing image with account-id: %s', dest_account_id)
                # Grant launch permission on the AMI to the destination account.
                response = self.ec2_client.modify_image_attribute(
                    ImageId=self.image_id,
                    Attribute='launchPermission',
                    OperationType='add',
                    UserIds=[str(dest_account_id)])
                LOGGER.trace("image.modify_attribute response => %s", response)
            except ClientError as client_error:
                LOGGER.exception(client_error)
                if client_error.response['Error']['Code'] == 'InvalidAMIAttributeItemValue':
                    # A malformed account id is logged and skipped.
                    LOGGER.error('Malformed account-id: %s', dest_account_id)
                else:
                    # Any other failure may be irrecoverable and might
                    # point to a deeper malaise.
                    raise RuntimeError('aws IMAGE was not shared with other accounts') \
                        from client_error

        # Acknowledge all the account-ids that the image was shared with.
        self.is_share_image_succeeded(share_account_ids)
    def is_share_image_succeeded(self, share_account_ids):
        """Verify the image's share permissions cover every requested account.

        Returns True only when all ids in share_account_ids are present in
        the image's share permissions; per-account mismatches are logged.
        """
        response_json = None
        try:
            LOGGER.info("Checking which accounts were added for sharing this image")
            response_json = self.client.describe_image_share_permission(self.image_id)
        except ServerException as exc:
            LOGGER.exception(exc)
            if exc.get_error_code() == 'InvalidImageId.NotFound' and \
               exc.get_error_msg().startswith('The specified ImageId does not exist'):
                raise RuntimeError('InvalidImageId.NotFound: Check if the Image ID exists')
            raise exc

        # Flatten the nested response into a plain list of account ids.
        shared_accounts = [
            int(entry['AliyunId'])
            for entry in response_json['Accounts']['Account']
        ]

        matched = 0
        for an_account in share_account_ids:
            if an_account in shared_accounts:
                LOGGER.info("The image was successfully shared with account: %s", an_account)
                matched += 1
            else:
                LOGGER.warning("The image was not shared with account: %s", an_account)

        # Every requested account must appear in the share permissions.
        return matched == len(share_account_ids)
    def is_share_image_succeeded(self, share_account_ids):
        """Confirm the AMI's launch permissions include every requested account.

        Returns True only when all ids in share_account_ids appear in the
        image's 'LaunchPermissions'; errors are logged, never propagated.
        """
        try:
            LOGGER.info("Checking which accounts were added for sharing this AMI")
            image_launch_perms = self.ec2_client.describe_image_attribute(
                ImageId=self.image_id,
                Attribute='launchPermission',
                DryRun=False)
            LOGGER.trace("image.describe_attribute() response => %s", image_launch_perms)
        except ClientError as client_error:
            # Best-effort check: swallow the error and report failure.
            LOGGER.exception(client_error)
            return False

        # Account ids that currently hold launch permission on the AMI.
        granted = [perm['UserId'] for perm in image_launch_perms['LaunchPermissions']]

        shared_count = 0
        for account_id in share_account_ids:
            if str(account_id) in granted:
                LOGGER.info("The AMI was successfully shared with account: %s", account_id)
                shared_count += 1
            else:
                LOGGER.warning("The AMI was not shared with account: %s", account_id)

        # All requested accounts must have been granted launch permission.
        return shared_count == len(share_account_ids)
# --- Example #7 (scraper snippet separator) ---
    def upload(self):
        """Upload the tar.gz at self.disk_to_upload to Google storage.

        Raises RuntimeError (after logging) if the blob cannot be created
        or does not exist after the upload completes.
        """
        try:
            # Lazily initialize the destination bucket.
            if self.bucket is None:
                self.init_bucket()

            # Blob name: date prefix plus the decorated disk name.
            date_prefix = datetime.datetime.now().strftime('%Y%m%d') + '/'
            self.uploaded_disk_name = date_prefix + \
                BaseDisk.decorate_disk_name(self.disk_to_upload)

            # Remove any previous blob with the same name.
            self.delete_blob()

            blob = self.bucket.blob(self.uploaded_disk_name)
            if blob is None:
                raise RuntimeError("Factory constructor for blob '{}' failed."
                                   .format(self.uploaded_disk_name))

            LOGGER.info("Started to upload '%s' at '%s'.", self.uploaded_disk_name,
                        datetime.datetime.now().strftime('%H:%M:%S'))
            blob.upload_from_filename(self.disk_to_upload)
            LOGGER.info("Finished to upload '%s' at '%s'.", self.uploaded_disk_name,
                        datetime.datetime.now().strftime('%H:%M:%S'))

            # Double-check the blob actually landed in the bucket.
            if not blob.exists():
                raise RuntimeError("Uploading blob '{}' failed.".format(self.uploaded_disk_name))
        except RuntimeError as upload_error:
            LOGGER.exception(upload_error)
            raise upload_error
# --- Example #8 (scraper snippet separator) ---
    def create_snapshot(self):
        """Creates a snapshot from the uploaded s3_disk."""
        try:
            timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            description = timestamp + '--BIGIP-Volume-From-' + self.s3_disk
            LOGGER.info("Importing the disk [s3://%s/%s] as a snapshot in AWS.",
                        self.s3_bucket, self.s3_disk)
            disk_container = {
                "Description": description,
                "Format": "vmdk",
                "UserBucket": {
                    "S3Bucket": self.s3_bucket,
                    "S3Key": self.s3_disk
                }
            }
            response = self.ec2_client.import_snapshot(Description=description,
                                                       DiskContainer=disk_container)
            LOGGER.trace("import_snapshot() Response => '%s'", response)
            self.import_task_id = response['ImportTaskId']
            LOGGER.info("TaskId for the import_snapshot() operation  => [%s]",
                        self.import_task_id)
            # Block until AWS reports the snapshot import as complete.
            self.is_snapshot_ready(self.import_task_id)

            # Completed successfully: clear the task id so clean-up will not
            # try to cancel an already-finished import-task.
            self.import_task_id = None

            # Tag the snapshot.
            self.create_tags()
        except RuntimeError as runtime_error:
            LOGGER.exception(runtime_error)
            raise
    def prepare(self, seed_image_name='', user_image_name=''):
        """Main controller: run the full prepare-image pipeline end to end."""
        try:
            self.set_image_name(seed_image_name, user_image_name)
            LOGGER.info("Starting prepare cloud image '%s'.", self.image_name)
            self.cloud_image.set_uploaded_disk_name(self.image_name)

            # A 'CI' environment variable signals a pipeline build.
            is_pipeline = os.getenv('CI') is not None
            self.initialize_image_metadata(self.artifacts_dir, is_pipeline)

            self.cloud_image.extract_disk()
            self.cloud_image.upload_disk()
            self.cloud_image.prep_disk()

            self.metadata.set(self.__class__.__name__, 'build_operation',
                              'create')
            self.cloud_image.create_image(self.image_name)

            elapsed = time.time() - self.start_time
            self.metadata.set(self.__class__.__name__, 'build_time',
                              str(timedelta(seconds=elapsed)))
            self.status = 'success'
            self.metadata.set(self.__class__.__name__, 'status', self.status)

            self.cloud_image.share_image()
            self.create_metadata()
            self.register_image()
            self.create_report()
            LOGGER.info("Finished prepare cloud image '%s'.", self.image_name)
        except BaseException as base_exception:
            # Log every failure (including interrupts) before re-raising.
            LOGGER.exception(base_exception)
            raise base_exception
def download_file(url, dest_file):
    """ Download from url to a local file.
        Throws exceptions with wording specific to the file injection.
        Assumes that the directory containing the destination file already exists. """
    # TLS verification is on unless IGNORE_DOWNLOAD_URL_TLS is configured.
    verify_tls = get_config_value("IGNORE_DOWNLOAD_URL_TLS") is None
    try:
        remote_file = requests.get(url, verify=verify_tls, timeout=60)
    except requests.exceptions.SSLError as exc:
        LOGGER.exception(exc)
        raise RuntimeError(
            'Cannot access \'{}\' due to TLS problems! '.format(url) +
            'Consider abandoning TLS verification by usage of ' +
            '\'IGNORE_DOWNLOAD_URL_TLS\' parameter.') from exc
    except requests.exceptions.RequestException as exc:
        LOGGER.exception(exc)
        raise RuntimeError(
            '\'{}\' is neither a file nor a directory nor a valid url, cannot inject it!'
            .format(url)) from exc
    if remote_file.status_code != 200:
        LOGGER.info('requests.get response status: %s',
                    remote_file.status_code)
        LOGGER.info('requests.get response headers: %s', remote_file.headers)
        raise RuntimeError(
            'URL \'{}\' did not return content, cannot inject it!'.format(url))
    # Context manager guarantees the file handle is closed even if the
    # write fails (the original leaked the handle).
    with open(dest_file, 'wb') as dest:
        dest.write(remote_file.content)
# --- Example #11 (scraper snippet separator) ---
        def _is_snapshot_ready():
            """Return True when the import task reported by import_task_id
            has reached 'completed' status.

            Raises RuntimeError on an empty response, on task status
            'error', or when the describe call itself fails.
            """
            try:
                LOGGER.trace("Querying the status of import-task [%s].",
                             import_task_id)
                response = self.ec2_client.describe_import_snapshot_tasks(
                    ImportTaskIds=[import_task_id])
                if not response:
                    raise RuntimeError(
                        "describe_import_snapshot_tasks() returned none response!"
                    )

                LOGGER.trace(
                    "Response from describe_import_snapshot_tasks => '%s'",
                    response)
                task = response['ImportSnapshotTasks'][0]
                task_status = task['SnapshotTaskDetail']['Status']
                if task_status == 'error':
                    # Dump the whole response to aid debugging before failing.
                    LOGGER.debug(
                        "describe_import_snapshot_tasks() response for [%s] => [%s]",
                        import_task_id, response)
                    raise RuntimeError(
                        "import-snapshot task [{}] in unrecoverable 'error' state."
                        .format(import_task_id))

                return task_status == 'completed'
            except ClientError as client_error:
                LOGGER.exception(client_error)
                raise RuntimeError(
                    "describe_import_snapshot_tasks() failed for [{}]!".format(
                        import_task_id)) from client_error
# --- Example #12 (scraper snippet separator) ---
    def insert_image(self, image_name):
        """Create image in GCE and then check for status = READY"""
        bucket_name = get_config_value('GCE_BUCKET')
        # The raw-disk source must reference the uploaded blob in the bucket.
        source_url = "https://storage.googleapis.com/{}/{}".format(
            bucket_name, self.disk.uploaded_disk_name)
        image_body = {
            "name": image_name,
            "rawDisk": {"source": source_url}
        }

        try:
            # pylint: disable=no-member
            result = self.gce_service.images().insert(
                project=self.gce_project_id, body=image_body).execute()
        except HttpError as exp:
            LOGGER.exception(exp)
            raise exp

        if not result:
            return False

        LOGGER.debug("Image creation response: '%s'", result)
        return self.is_image_ready(image_name)
# --- Example #13 (scraper snippet separator) ---
        def _resumable_upload():
            """One attempt at a resumable OSS upload; True on success."""
            # Unique blob name: bakery-<basename>-<6 random digits>.
            random_suffix = ''.join(random.choices(string.digits, k=6))
            self.uploaded_disk_name = 'bakery-' + \
                os.path.basename(self.disk_to_upload) + '-' + random_suffix
            AlibabaDisk.iter += 1
            LOGGER.info('Upload iteration number %d', AlibabaDisk.iter)
            LOGGER.info('Uploading %s as %s', self.disk_to_upload,
                        self.uploaded_disk_name)
            start_time = time.time()
            time.sleep(1)
            succeeded = False
            try:
                resumable_store = oss2.resumable.ResumableStore(
                    root=self.working_dir)
                oss2.resumable_upload(self.bucket,
                                      self.uploaded_disk_name,
                                      self.disk_to_upload,
                                      store=resumable_store,
                                      num_threads=number_of_threads)
                succeeded = True
            except FileNotFoundError as exc:
                LOGGER.exception(exc)
                raise RuntimeError('Could not find file to upload: {}'.format(
                    self.disk_to_upload))
            except oss2.exceptions.NoSuchUpload as exc:
                # Broken upload session: log it and fall through to clean-up.
                LOGGER.error('Upload failed. UploadId: %s',
                             exc.details['UploadId'])
                LOGGER.exception(exc)

            LOGGER.info('Iteration %d of upload took %d seconds',
                        AlibabaDisk.iter,
                        time.time() - start_time)
            if not succeeded:
                self.upload_cleanup()
            return succeeded
    def share_image(self):
        """Reads a list of account IDs and shares the image with each of those accounts."""
        share_account_ids = get_list_from_config_yaml('ALIBABA_IMAGE_SHARE_ACCOUNT_IDS')
        if not share_account_ids:
            LOGGER.info("No account IDs found for sharing the image")
            return

        try:
            LOGGER.info("Share the image with multiple accounts.")
            self.client.share_image_with_accounts(self.image_id, share_account_ids)
        except ServerException as exc:
            LOGGER.exception(exc)
            bad_account = exc.get_error_code() == 'InvalidAccount.NotFound' and \
                exc.get_error_msg().startswith(
                    'The specified parameter "AddAccount.n" or ' +
                    '"RemoveAccount.n"  does not exist.')
            if bad_account:
                raise RuntimeError('InvalidAccount.NotFound: Check if the account IDs are ' +
                                   'correct')
            if exc.get_error_code() == 'InvalidImageId.NotFound' and \
               exc.get_error_msg().startswith('The specified ImageId does not exist'):
                raise RuntimeError('InvalidImageId.NotFound: Check if the Image ID exists')
            raise exc

        # Acknowledge all the account-ids that the image was shared with.
        if self.is_share_image_succeeded(share_account_ids):
            LOGGER.info("Image sharing with other accounts was successful")
 def is_bucket_exist(self):
     """Checks if a bucket with self.bucket_name exists in S3."""
     try:
         # A bucket without a creation date does not exist in S3.
         bucket = self.s3_resource.Bucket(self.bucket_name)
         return bucket.creation_date is not None
     except ClientError as client_error:
         LOGGER.exception(client_error)
     return False
 def delete_image(self, image_id):
     """Deregister (delete) the AMI identified by image_id."""
     try:
         self.ec2_client.deregister_image(ImageId=image_id)
     except (ClientError, ParamValidationError) as error:
         LOGGER.exception(error)
         raise RuntimeError(
             'deregister_image failed for image \'{}\' !'.format(image_id)) from error
    def create_image(self, image_name):
        """Create image implementation for AWS"""
        # Image names must be unique, so drop any stale image first.
        self.delete_old_image(image_name)

        LOGGER.info('Started creation of image %s at %s', image_name,
                    datetime.datetime.now().strftime('%H:%M:%S'))
        start_time = time()

        # Root volume backed by the previously imported snapshot.
        root_volume = {
            "DeviceName": AWSImage.AWS_IMAGE_ROOT_VOLUME,
            "Ebs": {
                "DeleteOnTermination": True,
                "SnapshotId": self.snapshot.snapshot_id,
                "VolumeType": "gp2"
            }
        }
        try:
            response = self.ec2_client.register_image(
                Architecture="x86_64",
                BlockDeviceMappings=[root_volume],
                EnaSupport=True,
                Description=image_name,
                Name=image_name,
                RootDeviceName=AWSImage.AWS_IMAGE_ROOT_VOLUME,
                SriovNetSupport="simple",
                VirtualizationType="hvm")
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError('register_image failed for image\'{}\'!'.format(image_name)) \
                from botocore_exception

        # Pull the new image id out of the response.
        try:
            LOGGER.trace("register_image() response: %s", response)
            self.image_id = response['ImageId']
        except KeyError as key_error:
            LOGGER.exception(key_error)
            raise RuntimeError('could not find \'ImageId\' key for image {} '.format(image_name) +
                               'in create_image response: {}'.format(response)) from key_error
        LOGGER.info('Image id: %s', self.image_id)

        # Persist the image id in the artifacts dir for later stages.
        save_image_id(self.image_id)

        # Block until AWS reports the image as available.
        self.wait_for_image_availability()
        LOGGER.info('Creation of %s image took %d seconds', self.image_id, time() - start_time)

        LOGGER.info('Tagging %s as the image_id.', self.image_id)
        self.metadata.set(self.__class__.__name__, 'image_id', self.image_id)

        # Add tags to the image itself.
        self.create_tags()
    def __init__(self,
                 artifacts_dir,
                 cloud_type,
                 image_disk_path,
                 should_clean=True):
        """Validate inputs and instantiate the cloud-specific image backend."""
        self.start_time = time.time()
        self.artifacts_dir = artifacts_dir
        self.cloud_type = cloud_type
        self.image_name = None
        self.image_disk_path = image_disk_path
        self.metadata = None
        self.status = 'failure'
        self.transformed_image_name = None
        self.working_dir = None
        self.should_clean = should_clean
        self.cloud_image = None

        if not os.path.isdir(artifacts_dir):
            raise ValueError(
                "Missing or invalid artifacts directory '{}'.".format(
                    artifacts_dir))
        if not os.path.isfile(image_disk_path):
            raise ValueError(
                "Missing image disk '{}'.".format(image_disk_path))

        # Scratch space under the artifacts dir for intermediate build
        # constructs and files.
        self.create_working_dir(artifacts_dir)

        try:
            # Cloud-specific factory; imports are deferred so only the
            # selected provider's dependencies are loaded.
            # pylint: disable=import-outside-toplevel
            if cloud_type == 'alibaba':
                from image.alibaba_image import AlibabaImage
                image_cls = AlibabaImage
            elif cloud_type == 'aws':
                from image.aws_image import AWSImage
                image_cls = AWSImage
            elif cloud_type == 'azure':
                from image.azure_image import AzureImage
                image_cls = AzureImage
            elif cloud_type == 'gce':
                from image.google_image import GoogleImage
                image_cls = GoogleImage
            else:
                raise ValueError(
                    'Unexpected cloud type: {}'.format(cloud_type))
            # pylint: enable=import-outside-toplevel
            self.cloud_image = image_cls(self.working_dir, self.image_disk_path)
            self.cloud_image_name = self.image_name_factory(cloud_type)
        except BaseException as base_exception:
            LOGGER.exception(base_exception)
            raise base_exception
 def is_disk_exist(self, disk_name):
     """Return True if an object named disk_name exists in the S3 bucket.

     S3 ClientError is logged and treated as "does not exist". The original
     could return an empty list (falsy but not False); bool() normalizes the
     return to a real boolean while preserving truthiness for all callers.
     """
     try:
         bucket = self.get_bucket()
         if bucket is not None:
             bucket_objs = list(bucket.objects.filter(Prefix=disk_name))
             # Prefix-filtering may match longer keys; require an exact match.
             return bool(bucket_objs) and bucket_objs[0].key == disk_name
     except ClientError as client_error:
         LOGGER.exception(client_error)
     return False
 def clean_up(self):
     """Best-effort removal of the uploaded disk; errors are only logged."""
     if self.bucket_name is None or self.uploaded_disk_name is None:
         return
     try:
         LOGGER.debug("Deleting '%s' from the bucket '%s'.",
                      self.uploaded_disk_name, self.bucket_name)
         self.delete_uploaded_disk(self.uploaded_disk_name)
     except ClientError as client_error:
         # Swallow clean-up failures; they must not mask the build result.
         LOGGER.exception(client_error)
def main():
    """main command handler"""

    parser = argparse.ArgumentParser(description='Prepare a cloud image from a virtual disk')
    parser.add_argument('-a', '--artifacts-dir', required=True,
                        help='Absolute path to the artifacts directory')
    parser.add_argument('-c', '--check-name', action="store_true",
                        help='Check cloud image name')
    parser.add_argument('-i', '--input', required=True,
                        help='Absolute path to the input virtual disk')
    parser.add_argument('-p', '--platform', required=True,
                        help='The cloud type (i.e. aws, gce, azure, alibaba)')
    parser.add_argument('-s', '--seed-image-name', default='',
                        help='Use supplied autogenerated seed cloud image name')
    parser.add_argument('-u', '--user-image-name', default='',
                        help='Use user-supplied cloud image name')

    args = parser.parse_args()

    # Check either seed or user cloud image name was provided
    if (args.seed_image_name == '' and args.user_image_name == '') or \
       (args.seed_image_name != '' and args.user_image_name != ''):
        raise Exception('You must provide either --seed-image-name or --user-image-name')

    # create log handler for the global LOGGER
    create_log_handler()

    if args.check_name:
        # Check name
        if args.user_image_name == '':
            raise Exception('--check-name can only be used with --user-image-name')

        ImageController.check_valid_name(args.platform, args.user_image_name)
    else:
        result = False
        # Pre-bind so the finally block can safely test whether construction
        # ever succeeded; otherwise a failing ImageController() raised a
        # NameError from the finally block that masked the original error.
        image_controller = None
        try:
            # Prepare image
            image_controller = ImageController(args.artifacts_dir, args.platform,
                                               args.input)
            image_controller.prepare(args.seed_image_name, args.user_image_name)
            # If execution came so far, all is well.
            result = True
        except RuntimeError as runtime_exce:
            LOGGER.exception(runtime_exce)
        finally:
            # Clean-up image controller and other internal constructs it created.
            if image_controller is not None:
                image_controller.clean_up()
            if result is True:
                LOGGER.info("SUCCESS: Image generation completed.")
            else:
                LOGGER.warning("FAILURE: Check the log file '%s' and fix the problem "
                               "before re-running.", get_config_value('LOG_FILE'))
                sys.exit(1)
    sys.exit(0)
示例#22
0
    def tag_image(self, image_name):
        """Associate image tags (GCE labels) with the named image.

        Returns True on success, False on any failure.
        """
        LOGGER.info('Set image labels.')

        # Get current labels fingerprint.  To avoid/detect conflicts, you must
        # provide the current label fingerprint (reference) when you request to
        # set image labels.  This fingerprint value is updated whenever labels
        # are updated and the set labels request will fail if the labels were
        # updated out of band.
        try:
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception setting image labels:")
            LOGGER.exception(exp)
            return False

        # Validate the response before indexing into it; previously
        # 'labelFingerprint' was read first, so an empty response raised an
        # uncaught KeyError instead of returning False.
        if not result:
            return False

        label_fingerprint = result.get('labelFingerprint')
        if label_fingerprint is None or label_fingerprint == '':
            LOGGER.info('Label fingerprint was empty.')
            return False

        # GCE label values are restricted to lowercase alphanumerics and '-'.
        cloud_image_tags = CloudImageTags(self.metadata)
        cloud_image_tags.transform_values(to_lower=True,
                                          disallowed_regex='[^a-z0-9-]')
        image_labels = cloud_image_tags.get()

        set_labels_body = {
            "labels": image_labels,
            "labelFingerprint": label_fingerprint
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().setLabels(
                project=self.gce_project_id,
                resource=image_name,
                body=set_labels_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception setting image labels:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        LOGGER.debug("Image set labels response: %s", result)
        return True
 def find_image(self, image_name):
     """Look up an EC2 image by exact name and return the raw
     describe_images response.

     Raises RuntimeError (chained to the botocore error) on API failure.
     """
     name_filter = [{'Name': 'name', 'Values': [image_name]}]
     try:
         response = self.ec2_client.describe_images(Filters=name_filter)
     except (ClientError, ParamValidationError) as botocore_exception:
         LOGGER.exception(botocore_exception)
         raise RuntimeError('describe_images failed for image \'{}\' !'.format(image_name)) \
             from botocore_exception
     LOGGER.trace('describe_images response for image %s: %s', image_name, response)
     return response
 def get_blob(self, disk_path):
     """
     Return the GCE storage blob for disk_path, or None when it is absent.

     A NotFound error from the storage client is logged and re-raised.
     """
     try:
         return self.bucket.get_blob(disk_path)
     except google.cloud.exceptions.NotFound as exception:
         LOGGER.exception(exception)
         raise
    def __init__(self,
                 artifacts_dir,
                 cloud_type,
                 image_disk_path,
                 should_clean=True):
        """Validate inputs, set up working state, and build the cloud image
        object matching cloud_type."""
        self.start_time = time.time()
        self.artifacts_dir = artifacts_dir
        self.cloud_type = cloud_type
        self.image_name = None
        self.image_disk_path = image_disk_path
        self.metadata = None
        self.status = 'failure'
        self.transformed_image_name = None
        self.working_dir = None
        self.should_clean = should_clean
        self.cloud_image = None

        # Fail fast on bad inputs before touching the filesystem.
        if not os.path.isdir(artifacts_dir):
            raise ValueError(
                "Missing or invalid artifacts directory '{}'.".format(
                    artifacts_dir))
        if not is_supported_cloud(cloud_type):
            raise ValueError("Unexpected cloud '{}'.".format(cloud_type))
        if not os.path.isfile(image_disk_path):
            raise ValueError(
                "Missing image disk '{}'.".format(image_disk_path))

        # Create a working directory under the artifacts dir to temporarily store
        # various build constructs and files.
        self.create_working_dir(artifacts_dir)

        try:
            # Factory (could be a separate object)
            if cloud_type == 'alibaba':
                LOGGER.warning("Unimplemented cloud '%s'.", cloud_type)
                raise SystemExit(-1)
            if cloud_type == 'aws':
                image_class = AWSImage
            elif cloud_type == 'azure':
                image_class = AzureImage
            elif cloud_type == 'gce':
                image_class = GoogleImage
            else:
                raise ValueError('Unexpected cloud type')
            self.cloud_image = image_class(self.working_dir,
                                           self.image_disk_path)

            self.cloud_image_name = self.image_name_factory(cloud_type)
        except BaseException as base_exception:
            LOGGER.exception(base_exception)
            raise base_exception
示例#26
0
    def create_tags(self):
        """Create tags for the snapshot. Tags are fetched from metadata.

        Raises RuntimeError (chained to the botocore error) when tagging fails.
        """
        snapshot_tags = self.get_snapshot_tag_metadata()
        # EC2 expects tags as a list of {'Key': ..., 'Value': ...} dicts.
        tags_to_add = [{'Key': key, 'Value': value}
                       for key, value in snapshot_tags.items()]

        try:
            response = self.ec2_client.create_tags(Resources=[self.snapshot_id], Tags=tags_to_add)
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            # Chain the underlying botocore error for easier debugging, in the
            # same style as find_image().
            raise RuntimeError('create_tags failed for snapshot \'{}\'!\n'.format(
                self.snapshot_id)) from botocore_exception
        LOGGER.trace('create_tags response for snapshot %s: %s', self.snapshot_id, response)
示例#27
0
    def is_snapshot_ready(self, import_task_id):
        """Checks if a snapshot with the given import_task_id exists and its
        status is 'completed'.

        On success, stores the resulting snapshot id in self.snapshot_id and
        returns True; returns False if the task never completed within the
        configured retries. Raises RuntimeError on unrecoverable task errors.
        """
        def _is_snapshot_ready():
            """Awaits the import operation represented by the import_task_id to reach
            'completed' status."""
            try:
                LOGGER.trace("Querying the status of import-task [%s].", import_task_id)
                response = \
                    self.ec2_client.describe_import_snapshot_tasks(
                        ImportTaskIds=[import_task_id])
                if not response:
                    raise RuntimeError("describe_import_snapshot_tasks() returned none response!")

                LOGGER.trace("Response from describe_import_snapshot_tasks => '%s'",
                             response)
                task_status = response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['Status']
                if task_status == 'error':
                    # Print the response before raising an exception.
                    LOGGER.debug("describe_import_snapshot_tasks() response for [%s] => [%s]",
                                 import_task_id, response)
                    raise RuntimeError("import-snapshot task [{}] in unrecoverable 'error' state.".
                                       format(import_task_id))

                return task_status == 'completed'
            except ClientError as client_error:
                LOGGER.exception(client_error)
                # Chain the botocore error so the root cause isn't lost.
                raise RuntimeError("describe_import_snapshot_tasks() failed for [{}]!".
                                   format(import_task_id)) from client_error

        retrier = Retrier(_is_snapshot_ready)
        retrier.tries = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_DELAY'))
        LOGGER.info("Waiting for the import snapshot task [%s] to complete.", import_task_id)
        try:
            if retrier.execute():
                LOGGER.info("import_snapshot_task [%s] is completed.", import_task_id)
                # Call it one last time to get the snapshot_id.
                response = \
                    self.ec2_client.describe_import_snapshot_tasks(
                        ImportTaskIds=[import_task_id])
                self.snapshot_id = \
                    response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['SnapshotId']
                LOGGER.info("SnapshotID = [%s].", self.snapshot_id)
                return True
            LOGGER.warning("import_snapshot_task [%s] didn't complete after checking [%d] times!",
                           import_task_id, retrier.tries)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            raise
    def set_bucket(self):
        """Set self.bucket to the Alibaba OSS bucket for uploaded files and
        verify it is reachable with the configured credentials.

        Raises RuntimeError (chained to the oss2 error) on bad credentials,
        an invalid bucket name, server errors, or request/region errors.
        """
        access_key = get_config_value('ALIBABA_ACCESS_KEY_ID')
        secret_key = get_config_value('ALIBABA_ACCESS_KEY_SECRET')
        auth = oss2.Auth(access_key, secret_key)

        region = get_config_value('ALIBABA_REGION')
        bucket_name = get_config_value('ALIBABA_BUCKET')
        self.bucket = oss2.Bucket(auth,
                                  'https://oss-' + region + '.aliyuncs.com',
                                  bucket_name)

        # Probe the bucket so credential/name/region problems surface here
        # rather than later during upload.
        try:
            self.bucket.get_bucket_info()
        except oss2.exceptions.SignatureDoesNotMatch as exc:
            LOGGER.exception(exc)
            raise RuntimeError('Bad credentials to get bucket info') from exc
        except oss2.exceptions.ServerError as exc:
            if exc.details['Code'] == 'InvalidBucketName':
                LOGGER.exception(exc)
                raise RuntimeError('Invalid bucket name: ' +
                                   exc.details['BucketName']) from exc
            LOGGER.exception(exc)
            raise RuntimeError('Unexpected Alibaba oss server error. ' +
                               'One of possible errors: invalid credentials.') from exc
        except oss2.exceptions.RequestError as exc:
            LOGGER.exception(exc)
            raise RuntimeError(
                'Alibaba oss request error. ' +
                'One of possible errors: invalid Alibaba region.') from exc
 def clean_up(self):
     """Clean-up the uploaded disk after image generation."""
     # The uploaded disk no longer needs to be retained once the image exists.
     if not self.bucket or not self.uploaded_disk_name:
         return
     try:
         self.delete_blob()
     except google.cloud.exceptions.NotFound as not_found_error:
         # Report the exception without propagating it up to ensure this
         # doesn't stop the rest of the clean-up.
         LOGGER.error("Caught exception during '%s' disk deletion.",
                      self.uploaded_disk_name)
         LOGGER.exception(not_found_error)
     except RuntimeError as runtime_error:
         LOGGER.error("Caught runtime exception during '%s' disk deletion.",
                      self.uploaded_disk_name)
         LOGGER.exception(runtime_error)
def main():
    """Wrapper to read user defined values for LV sizes."""
    # create log handler for the global LOGGER
    create_log_handler()

    argument_count = len(sys.argv) - 1
    if argument_count != 1:
        LOGGER.error('%s received %s arguments, expected 1', basename(__file__), argument_count)
        sys.exit(1)

    try:
        read_lv_sizes(sys.argv[1])
    except RuntimeError as runtime_exception:
        LOGGER.exception(runtime_exception)
        sys.exit(1)

    sys.exit(0)