class GoogleImage(BaseImage):
    """
    Google class for all platform-specific derivations
    """
    def __init__(self, working_dir, input_disk_path):
        super().__init__(working_dir, input_disk_path)
        self.disk = GoogleDisk(input_disk_path)

        # Retrieve credentials dictionary
        creds_dict = get_dict_from_config_json(
            "GOOGLE_APPLICATION_CREDENTIALS")

        # Obtain project ID from the credentials dictionary
        self.gce_project_id = ensure_value_from_dict(creds_dict, "project_id")
        LOGGER.info("Using project_id: '%s'", self.gce_project_id)

        # Record project ID in metadata
        self.metadata = CloudImageMetadata()
        self.metadata.set(self.__class__.__name__, 'gce_project',
                          self.gce_project_id)

        # Create a service object from the credentials dictionary
        self.gce_credentials = service_account.Credentials.from_service_account_info(
            creds_dict)
        self.gce_service = discovery.build('compute',
                                           'v1',
                                           credentials=self.gce_credentials)

    def clean_up(self):
        """Clean-up cloud objects created by this class and its members."""
        LOGGER.info("Cleaning-up GoogleImage artifacts.")

        if self.disk is not None:
            self.disk.clean_up()
        LOGGER.info("Completed GoogleImage clean-up.")

    def extract_disk(self):
        """Extract disk for upload"""
        BaseImage.extract_disk(self)

    def image_exists(self, image_name):
        """Check whether the image already exists in GCE. Returns True if it does."""
        try:
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = bool(request.execute())
        except HttpError as exp:
            if exp.resp.status == 404:
                result = False
            else:
                raise
        return result

    def is_image_deleted(self, image_name):
        """Waits for the image to be deleted."""

        retrier = Retrier(lambda s: not self.image_exists(s), image_name)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_DELETE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_DELETE_COMPLETED_RETRY_DELAY'))
        LOGGER.info('Waiting for image [%s] to be deleted.', image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] was deleted.", image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not deleted after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False

    def delete_image(self, image_name):
        """Delete the image from GCE, then wait for the deletion to complete.
        Returns True if the deletion succeeded or the image does not exist."""
        image_deleted = False
        try:
            # pylint: disable=no-member
            request = self.gce_service.images().delete(
                project=self.gce_project_id, image=image_name)
            request.execute()
        except HttpError as exp:
            if exp.resp.status == 404:
                LOGGER.info("Image doesn't exist")
                image_deleted = True
            else:
                LOGGER.exception(exp)
                raise exp
            return image_deleted

        return self.is_image_deleted(image_name)

    def insert_image(self, image_name):
        """Create image in GCE and then check for status = READY"""
        bucket_name = get_config_value('GCE_BUCKET')
        image_body = {
            "name": image_name,
            "rawDisk": {
                # The source URL must include both the bucket name and the blob name.
                "source":
                "https://storage.googleapis.com/{}/{}".format(
                    bucket_name, self.disk.uploaded_disk_name)
            }
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().insert(
                project=self.gce_project_id, body=image_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.exception(exp)
            raise

        if not result:
            return False

        LOGGER.debug("Image creation response: '%s'", result)
        return self.is_image_ready(image_name)

    def is_image_ready(self, image_name):
        """Checks if the given image is ready."""
        def _is_image_ready():
            """Checks if an image with image_name exists and status is READY"""
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
            if not result or result['status'] == 'FAILED':
                raise RuntimeError(
                    "Creation of image [{}] failed!".format(image_name))
            return result['status'] == 'READY'

        retrier = Retrier(_is_image_ready)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for image [%s] to be ready.", image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] is ready.", image_name)
                self.metadata.set(self.__class__.__name__, 'image_id',
                                  image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            return False

    def tag_image(self, image_name):
        """Associate image tags with image"""
        LOGGER.info('Set image labels.')

        # Get current labels fingerprint.  To avoid/detect conflicts, you must
        # provide the current label fingerprint (reference) when you request to
        # set image labels.  This fingerprint value is updated whenever labels
        # are updated and the set labels request will fail if the labels were
        # updated out of band.
        try:
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception getting image label fingerprint:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        label_fingerprint = result.get('labelFingerprint')
        if not label_fingerprint:
            LOGGER.info('Label fingerprint was empty.')
            return False

        cloud_image_tags = CloudImageTags(self.metadata)
        cloud_image_tags.transform_values(to_lower=True,
                                          disallowed_regex='[^a-z0-9-]')
        image_labels = cloud_image_tags.get()

        set_labels_body = {
            "labels": image_labels,
            "labelFingerprint": label_fingerprint
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().setLabels(
                project=self.gce_project_id,
                resource=image_name,
                body=set_labels_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception setting image labels:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        LOGGER.debug("Image set labels response: %s", result)
        return True

    def create_image(self, image_name):
        """Create the image in GCE: delete any stale image with the same name,
        insert the new image, and tag it."""
        LOGGER.info("Checking if the image '%s' already exists.", image_name)

        # Check if an image with image_name already exists. If so, delete the image
        result = self.image_exists(image_name)
        if not result:
            LOGGER.info("The image '%s' does not exist.", image_name)
        else:
            LOGGER.info("The image '%s' exists.", image_name)
            result = self.delete_image(image_name)
            if not result:
                LOGGER.error("Could not delete the image '%s', exiting.",
                             image_name)
                raise SystemExit(-1)

        LOGGER.info("Attempting to create an image '%s'.", image_name)

        result = self.insert_image(image_name)
        if not result:
            LOGGER.error("The image '%s' was not created successfully.",
                         image_name)
            raise SystemExit(-1)

        result = self.tag_image(image_name)
        if not result:
            LOGGER.error("The image '%s' was not tagged successfully.",
                         image_name)
            raise SystemExit(-1)

        LOGGER.info("Image '%s' creation succeeded.", image_name)
class AWSImage(BaseImage):
    """Class for handling AWS image related actions"""

    AWS_IMAGE_ROOT_VOLUME = '/dev/xvda'

    def __init__(self, working_dir, input_disk_path):
        super().__init__(working_dir, input_disk_path)
        self.session = Session(
            aws_access_key_id=get_config_value('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=get_config_value('AWS_SECRET_ACCESS_KEY'),
            aws_session_token=get_config_value('AWS_SESSION_TOKEN'),
            region_name=get_config_value('AWS_REGION')
        )

        self.disk = AWSDisk(input_disk_path, working_dir, self.session)

        # Create ec2 client object for performing low-level image actions.
        self.ec2_client = self.session.client('ec2')

        # Record REGION in the metadata.
        self.metadata = CloudImageMetadata()
        self.metadata.set(self.__class__.__name__, 'location', self.session.region_name)
        self.snapshot = None
        self.image_id = None

    def clean_up(self):
        """Clean-up cloud objects created by this class and its members."""
        LOGGER.info("Cleaning-up AWSImage artifacts.")

        # Clean-up the snapshot only if image generation didn't succeed. In case of a
        # successful image generation, the snapshot is associated with the image and
        # stays in-use. Trying to delete it in that case would always fail.
        if self.image_id is None and self.snapshot is not None:
            self.snapshot.clean_up()
        if self.disk is not None:
            self.disk.clean_up()
        LOGGER.info("Completed AWSImage clean-up.")

    def create_image(self, image_name):
        """Create image implementation for AWS"""
        # image name must be unique
        self.delete_old_image(image_name)

        # Start image creation.
        LOGGER.info('Started creation of image %s at %s', image_name,
                    datetime.datetime.now().strftime('%H:%M:%S'))
        start_time = time()
        try:
            response = self.ec2_client.register_image(
                Architecture="x86_64",
                BlockDeviceMappings=[
                    {
                        "DeviceName": AWSImage.AWS_IMAGE_ROOT_VOLUME,
                        "Ebs":
                        {
                            "DeleteOnTermination": True,
                            "SnapshotId": self.snapshot.snapshot_id,
                            "VolumeType": "gp2"
                        }
                    }
                ],
                EnaSupport=True,
                Description=image_name,
                Name=image_name,
                RootDeviceName=AWSImage.AWS_IMAGE_ROOT_VOLUME,
                SriovNetSupport="simple",
                VirtualizationType="hvm"
                )
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError(
                "register_image failed for image '{}'!".format(image_name)
            ) from botocore_exception

        # get image id
        try:
            LOGGER.trace("register_image() response: %s", response)
            self.image_id = response['ImageId']
        except KeyError as key_error:
            LOGGER.exception(key_error)
            raise RuntimeError(
                "could not find 'ImageId' key for image {} in register_image "
                "response: {}".format(image_name, response)) from key_error
        LOGGER.info('Image id: %s', self.image_id)

        # save image id in artifacts dir json file
        save_image_id(self.image_id)

        # wait till the end of the image creation
        self.wait_for_image_availability()
        LOGGER.info('Creation of %s image took %d seconds', self.image_id, time() - start_time)

        LOGGER.info('Tagging %s as the image_id.', self.image_id)
        self.metadata.set(self.__class__.__name__, 'image_id', self.image_id)

        # add tags to the image
        self.create_tags()

    def delete_old_image(self, image_name):
        """ Check if an image with the same name already exists and delete it.
            This is unlikely to happen unless the image name is specified in the configuration."""

        response = self.find_image(image_name)
        num_images = len(response['Images'])
        if num_images not in (0, 1, 2):
            raise RuntimeError(
                'Expected to find 0 or 1 images named {} (possibly 2, due to AWS '
                'replication delays), but found {}. (An InvalidAMIName.Duplicate '
                'error should have been raised during the previous image creation.) '
                'Please delete them manually.'.format(image_name, num_images))

        if num_images in (1, 2):
            try:
                first_image_id = response['Images'][0]['ImageId']
                if num_images == 2:
                    second_image_id = response['Images'][1]['ImageId']
            except KeyError as key_error:
                LOGGER.exception(key_error)
                raise RuntimeError(
                    'could not find ImageId key for image {} '.format(image_name) +
                    'in describe_images response: {}'.format(response)) from key_error

            LOGGER.info('There is an old image %s named %s, deleting it.', first_image_id,
                        image_name)
            self.delete_image(first_image_id)
            if num_images == 2:
                LOGGER.info('There is an old image %s named %s, deleting it.', second_image_id,
                            image_name)
                self.delete_image(second_image_id)

    def find_image(self, image_name):
        """ Find image by name. Return response"""
        try:
            response = self.ec2_client.describe_images(
                Filters=[{'Name': 'name', 'Values':[image_name]}])
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError(
                "describe_images failed for image '{}'!".format(image_name)
            ) from botocore_exception
        LOGGER.trace('describe_images response for image %s: %s', image_name, response)
        return response

    def delete_image(self, image_id):
        """ Delete image by image id. Return response"""
        try:
            self.ec2_client.deregister_image(ImageId=image_id)
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError(
                "deregister_image failed for image '{}'!".format(image_id)
            ) from botocore_exception

    def wait_for_image_availability(self):
        """ Wait for image to be created and available """
        def _wait_for_image_availability():
            """Awaits the describe_images() to successfully acknowledge availability
            of the given image."""
            try:
                response = self.ec2_client.describe_images(ImageIds=[self.image_id])
            except (ClientError, ParamValidationError) as botocore_exception:
                LOGGER.exception(botocore_exception)
                raise RuntimeError('EC2.Client.describe_images() failed for {}!'.
                                   format(self.image_id)) from botocore_exception
            if not response:
                raise RuntimeError('EC2.Client.describe_images() returned an empty response!')
            try:
                if response['Images'][0]['State'] == 'available':
                    return True
                return False
            except (KeyError, IndexError) as image_describe_exception:
                LOGGER.exception(image_describe_exception)
                raise RuntimeError('EC2.Client.describe_images() did not have ' +
                                   '[\'Images\'][0][\'State\'] in its response: response \'{}\''.
                                   format(response)) from image_describe_exception

        retrier = Retrier(_wait_for_image_availability)
        retrier.tries = int(get_config_value('AWS_CREATE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_CREATE_IMAGE_RETRY_DELAY'))
        LOGGER.info('Waiting for the image %s to become available.', self.image_id)

        if retrier.execute():
            LOGGER.info('Image [%s] is created in AWS.', self.image_id)
        else:
            raise RuntimeError('Exhausted all {} retries for image {} to become available.'.
                               format(retrier.tries, self.image_id))

    def get_image_tag_metadata(self):
        """Returns associated image metadata tags through the member variable metadata."""
        metadata_tags = CloudImageTags(self.metadata)
        return metadata_tags.get()

    def create_tags(self):
        """ Create tags for image. Tags are fetched from metadata. """
        image_tags = self.get_image_tag_metadata()
        tags_to_add = []
        for tag in image_tags:
            tags_to_add.append({'Key': tag, 'Value': image_tags[tag]})

        try:
            response = self.ec2_client.create_tags(Resources=[self.image_id], Tags=tags_to_add)
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError(
                "create_tags failed for image '{}'!".format(self.image_id)
            ) from botocore_exception
        LOGGER.trace('create_tags response for image %s: %s', self.image_id, response)

    def prep_disk(self):
        """Performs the leg work to convert the S3 Disk represented by self.disk into
        a snapshot from which an AWSImage can be created."""
        LOGGER.info("Prepare the uploaded s3 disk for image generation.")

        # Convert the s3Disk into an AWS Snapshot.
        self.snapshot = AWSSnapshot(self.ec2_client, self.disk.bucket_name,
                                    self.disk.uploaded_disk_name)
        self.snapshot.create_snapshot()
        LOGGER.info("AWS Disk preparation is complete for image creation.")

    def share_image(self):
        """Reads a list of AWS accounts and shares the AMI with each of those accounts."""
        share_account_ids = get_list_from_config_yaml('AWS_IMAGE_SHARE_ACCOUNT_IDS')
        if share_account_ids:
            LOGGER.info("Share the AMI with multiple AWS accounts.")
            for dest_account_id in share_account_ids:
                try:
                    LOGGER.info('Sharing image with account-id: %s', dest_account_id)

                    # Share the image with the destination account
                    response = self.ec2_client.modify_image_attribute(
                        ImageId=self.image_id,
                        Attribute='launchPermission',
                        OperationType='add',
                        UserIds=[str(dest_account_id)]
                    )
                    LOGGER.trace("image.modify_attribute response => %s", response)
                except ClientError as client_error:
                    LOGGER.exception(client_error)
                    # Log the error around malformed Account-id and move on.
                    if client_error.response['Error']['Code'] == 'InvalidAMIAttributeItemValue':
                        LOGGER.error('Malformed account-id: %s', dest_account_id)
                    else:
                        # Any other type of error can be irrecoverable and might
                        # point to a deeper malaise.
                        raise RuntimeError('AWS image was not shared with other accounts') \
                            from client_error

            # Acknowledge all the account-ids that the image was shared with.
            self.is_share_image_succeeded(share_account_ids)
        else:
            LOGGER.info("No account IDs found for sharing AMI")

    def is_share_image_succeeded(self, share_account_ids):
        """Helper utility for share_image() that goes through the list of share_account_ids
        and confirms that the image was shared with all accounts. The function logs any
        error during its execution without propagating it up."""
        try:
            LOGGER.info("Checking which accounts were added for sharing this AMI")
            image_launch_perms = self.ec2_client.describe_image_attribute(
                ImageId=self.image_id,
                Attribute='launchPermission',
                DryRun=False
            )
            LOGGER.trace("image.describe_attribute() response => %s", image_launch_perms)
        except ClientError as client_error:
            # Simply log the exception without propagating it.
            LOGGER.exception(client_error)
            return False

        # Create a list of account IDs that has launch permission
        launch_permission_accounts = []
        for each in image_launch_perms['LaunchPermissions']:
            launch_permission_accounts.append(each['UserId'])

        counter = 0
        # Check which accounts were added for sharing this AMI
        for account_id in share_account_ids:
            if str(account_id) in launch_permission_accounts:
                LOGGER.info("The AMI was successfully shared with account: %s", account_id)
                counter += 1
            else:
                LOGGER.warning("The AMI was not shared with account: %s", account_id)

        # Confirm that every account in share_account_ids now appears in the
        # image's 'LaunchPermissions'.
        return counter == len(share_account_ids)
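
# A hypothetical end-to-end driver for AWSImage, mirroring the call order that
# ImageController.prepare() (further below) uses. upload_disk() is assumed to
# be a BaseImage helper that pushes the extracted disk to S3; the directory,
# disk path, and image name are illustrative placeholders.
def build_aws_image(working_dir, disk_path):
    """Sketch: create and share an AMI from a local disk archive."""
    image = AWSImage(working_dir, disk_path)
    try:
        image.extract_disk()                # unpack the input disk archive
        image.upload_disk()                 # assumed helper: upload to S3
        image.prep_disk()                   # S3 object -> EBS snapshot
        image.create_image('my-bigip-ami')  # register AMI, wait, tag
        image.share_image()                 # optional cross-account sharing
    finally:
        image.clean_up()
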
class AlibabaImage(BaseImage):
    """Class for handling Alibaba image related actions"""

    def create_image(self, image_name):
        """Create image implementation for Alibaba"""
        images_json = self.client.describe_images(None, image_name)
        if int(images_json['TotalCount']) == 0:
            LOGGER.debug('No old images named \'%s\' were found', image_name)
        else:
            # image names are unique, delete only one image
            image_id = images_json['Images']['Image'][0]['ImageId']
            LOGGER.info('Image \'%s\' already exists, its id is \'%s\', deleting it', image_name,
                        image_id)
            self.client.delete_image(image_id)

        # start image creation
        LOGGER.info('Started creation of image \'%s\' at %s', image_name,
                    datetime.datetime.now().strftime('%H:%M:%S'))
        start_time = time()
        imported_image = self.client.import_image(get_config_value('ALIBABA_BUCKET'),
                                                  self.disk.uploaded_disk_name, image_name)
        if 'Code' in imported_image.keys():
            if imported_image['Code'] == 'InvalidOSSObject.NotFound':
                raise RuntimeError('ImportImageRequest could not find uploaded disk \'' +
                                   image_name + '\'')
            if imported_image['Code'] == 'InvalidImageName.Duplicated':
                raise RuntimeError('Image \'' + image_name + '\' still exists, ' +
                                   'should have been removed by this point')
            if imported_image['Code'] == 'ImageIsImporting':
                raise RuntimeError('Another image named \'' + image_name + '\' is in the ' +
                                   'process of importing, probably from the previous run. ' +
                                   'Delete it first.')

        if 'ImageId' not in imported_image.keys() or 'TaskId' not in imported_image.keys():
            LOGGER.info('Alibaba response to ImportImageRequest:')
            LOGGER.info(json.dumps(imported_image, sort_keys=True, indent=4,
                                   separators=(',', ': ')))
            raise RuntimeError('ImageId and/or TaskId were not found in the response; ' +
                               'cannot monitor the image import')
        self.image_id = imported_image['ImageId']
        task_id = imported_image['TaskId']
        LOGGER.info('Started image import with image id \'%s\' and task id \'%s\'', self.image_id,
                    task_id)

        task_status_count = int(get_config_value('ALIBABA_IMAGE_IMPORT_MONITOR_RETRY_COUNT'))
        task_status_delay = int(get_config_value('ALIBABA_IMAGE_IMPORT_MONITOR_RETRY_DELAY'))
        if self.monitor_task(task_id, task_status_count, task_status_delay):
            LOGGER.info('Image \'%s\' imported after %d seconds',
                        self.image_id, time() - start_time)
        else:
            canceled_task_msg = 'Image import failed or took too long, ' + \
                                'canceling task \'{}\' and '.format(task_id) + \
                                'deleting image \'{}\''.format(self.image_id)
            LOGGER.info(canceled_task_msg)
            self.client.cancel_task(task_id)
            self.client.delete_image(self.image_id)
            raise RuntimeError('Failed to import image \'{}\' after monitoring it for {} retries'.
                               format(self.image_id, task_status_count))

        # Add image_id and location (region) to the metadata used for image registration
        metadata = CloudImageMetadata()
        metadata.set(self.__class__.__name__, 'image_id', self.image_id)
        metadata.set(self.__class__.__name__, 'location', get_config_value('ALIBABA_REGION'))

        # Add tags to image
        LOGGER.info('Add tags to image \'%s\'', self.image_id)
        self.client.add_tags(self.image_id, 'image', CloudImageTags(metadata).get())

        # Add tags to associated snapshot
        images_json = self.client.describe_images(self.image_id, None)
        if 'Images' not in images_json:
            LOGGER.error('No image data found for image \'%s\'', self.image_id)
            LOGGER.error('Unable to tag snapshot.')
        else:
            snapshot_id = (images_json['Images']['Image'][0]
                           ['DiskDeviceMappings']['DiskDeviceMapping'][0]['SnapshotId'])
            LOGGER.info('Add tags to snapshot \'%s\'', snapshot_id)
            self.client.add_tags(snapshot_id, 'snapshot', CloudImageTags(metadata).get())
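
# The Alibaba responses above are deeply nested (see the SnapshotId lookup
# just above). A small hypothetical helper, not part of the source, can make
# such probes fail soft instead of raising KeyError or IndexError:
def dig(doc, *path, default=None):
    """Walk dict keys and list indices, returning `default` on any miss."""
    for step in path:
        try:
            doc = doc[step]
        except (KeyError, IndexError, TypeError):
            return default
    return doc

# Usage sketch:
# snapshot_id = dig(images_json, 'Images', 'Image', 0,
#                   'DiskDeviceMappings', 'DiskDeviceMapping', 0, 'SnapshotId')
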
class AzureDisk(BaseDisk):
    """
    Manage Azure disk
    """

    # pylint: disable=too-many-instance-attributes
    def __init__(self, input_disk_path, working_dir):
        """Initialize azure disk object."""
        # First initialize the super class.
        super().__init__(input_disk_path, working_dir)
        self.uploaded_disk_url = None

        self.connection_string = get_config_value(
            'AZURE_STORAGE_CONNECTION_STRING')
        self.container_name = get_config_value('AZURE_STORAGE_CONTAINER_NAME')

        try:
            self.svc = PageBlobService(
                connection_string=self.connection_string)
        except ValueError as value_error:
            LOGGER.error(
                "Could not create a PageBlobService with connection_string=%s",
                self.connection_string)
            raise RuntimeError(
                "Runtime error instantiating the Azure blob service") from value_error

        self.progress_cb_lu = 0
        self.metadata = CloudImageMetadata()

    def clean_up(self):
        """Clean-up the uploaded disk after image generation."""

    def set_uploaded_disk_name(self, disk_name):
        """Set the uploaded disk name"""
        # As the Azure disk takes its name from the image name (unlike other clouds,
        # where disk names are auto-generated during disk extraction), append the
        # disk extension to the uploaded disk name.
        self.uploaded_disk_name = disk_name + '.vhd'
        LOGGER.info("The uploaded disk name is '%s'.", self.uploaded_disk_name)

    def extract(self):
        """Extract the vhd disk out of tar.gz."""
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path, '.vhd',
                                                  self.working_dir)
        LOGGER.info("Azure disk_to_upload = '%s'", self.disk_to_upload)

    def _get_tags(self):
        tags = CloudImageTags(self.metadata)
        tags.title_case_keys()
        return tags.get()

    def _progress_cb(self, byte_up, byte_total):
        sec = int(time())

        # Log at most once per 10-second interval.
        if sec - self.progress_cb_lu > 10:
            self.progress_cb_lu = sec
            byte_up //= (1 << 20)
            byte_total //= (1 << 20)
            LOGGER.info('Uploaded %d MB of total %d MB', byte_up, byte_total)

    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(
                    self.container_name, self.uploaded_disk_name,
                    self.disk_to_upload, max_connections=cnum,
                    metadata=self._get_tags(),
                    progress_callback=self._progress_cb,
                    timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(
                    self.container_name, self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureException:
                # AzureMissingResourceHttpError is a subclass of AzureException,
                # so a single handler covers both cases.
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json",
                      "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # Record the VHD URL and image id in the metadata.
            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error(
            "blob [%s] was still not ready after checking [%d] times!",
            self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")
class ImageController():  # pylint: disable=too-many-instance-attributes
    """Controller to prepare cloud image"""
    def __init__(self,
                 artifacts_dir,
                 cloud_type,
                 image_disk_path,
                 should_clean=True):
        self.start_time = time.time()
        self.artifacts_dir = artifacts_dir
        self.cloud_type = cloud_type
        self.image_name = None
        self.image_disk_path = image_disk_path
        self.metadata = None
        self.status = 'failure'
        self.transformed_image_name = None
        self.working_dir = None
        self.should_clean = should_clean
        self.cloud_image = None

        if not os.path.isdir(artifacts_dir):
            raise ValueError(
                "Missing or invalid artifacts directory '{}'.".format(
                    artifacts_dir))
        if not os.path.isfile(image_disk_path):
            raise ValueError(
                "Missing image disk '{}'.".format(image_disk_path))

        # Create a working directory under the artifacts dir to temporarily store
        # various build constructs and files.
        self.create_working_dir(artifacts_dir)

        try:
            # Factory (could be a separate object)
            # pylint: disable=import-outside-toplevel
            if cloud_type == 'alibaba':
                from image.alibaba_image import AlibabaImage
                self.cloud_image = AlibabaImage(self.working_dir,
                                                self.image_disk_path)
            elif cloud_type == 'aws':
                from image.aws_image import AWSImage
                self.cloud_image = AWSImage(self.working_dir,
                                            self.image_disk_path)
            elif cloud_type == 'azure':
                from image.azure_image import AzureImage
                self.cloud_image = AzureImage(self.working_dir,
                                              self.image_disk_path)
            elif cloud_type == 'gce':
                from image.google_image import GoogleImage
                self.cloud_image = GoogleImage(self.working_dir,
                                               self.image_disk_path)
            else:
                raise ValueError(
                    'Unexpected cloud type: {}'.format(cloud_type))
            # pylint: enable=import-outside-toplevel
            self.cloud_image_name = self.image_name_factory(cloud_type)
        except BaseException as base_exception:
            LOGGER.exception(base_exception)
            raise

    def clean_up(self):
        """Cleans-up the cloud and local artifacts created by this object."""
        try:
            if self.should_clean:
                if self.cloud_image is not None:
                    LOGGER.info("Cleaning up image controller constructs.")
                    self.cloud_image.clean_up()
                    self.cloud_image = None
                if self.working_dir is not None and os.path.isdir(
                        self.working_dir):
                    LOGGER.debug("Removing working dir '%s'.",
                                 self.working_dir)
                    shutil.rmtree(self.working_dir)
                    self.working_dir = None
            else:
                LOGGER.debug("Skipping removal of working dir '%s'.",
                             self.working_dir)
        except OSError as os_exception:
            # Simply log the exception without propagating.
            LOGGER.error(os_exception)

    @staticmethod
    def image_name_factory(cloud_type):
        """Factory pattern for ImageName"""
        # pylint: disable=import-outside-toplevel
        if cloud_type == 'alibaba':
            from image.alibaba_image_name import AlibabaImageName
            return AlibabaImageName()
        if cloud_type == 'aws':
            from image.aws_image_name import AWSImageName
            return AWSImageName()
        if cloud_type == 'azure':
            from image.azure_image_name import AzureImageName
            return AzureImageName()
        if cloud_type == 'gce':
            from image.google_image_name import GoogleImageName
            return GoogleImageName()
        raise ValueError('Unexpected cloud type: {}'.format(cloud_type))
        # pylint: enable=import-outside-toplevel

    @staticmethod
    def check_valid_name(cloud_type, user_image_name):
        """Check if user-supplied image name is valid"""
        cloud_image_name = ImageController.image_name_factory(cloud_type)
        cloud_image_name.check_valid_name(user_image_name)

    def set_image_name(self, seed_image_name='', user_image_name=''):
        """Set/Transform image name"""
        if user_image_name != '':
            user_image_name = user_image_name.strip()
            self.cloud_image_name.check_valid_name(user_image_name)
            self.image_name = user_image_name
        else:
            if seed_image_name == '':
                raise ValueError(
                    'seed_image_name or user_image_name is required')
            self.image_name = \
                self.cloud_image_name.apply_transform(seed_image_name.strip())[0]

    def initialize_image_metadata(self, artifacts_dir, pipeline_build=False):
        """Initialize image metadata"""
        self.metadata = CloudImageMetadata()
        self.metadata.load_artifact_files(artifacts_dir)

        # Set common metadata values
        self.metadata.set(self.__class__.__name__, 'input',
                          self.image_disk_path)
        self.metadata.set(self.__class__.__name__, 'image_name',
                          self.image_name)
        if pipeline_build:
            self.metadata.set(self.__class__.__name__, 'build_type',
                              'pipeline')
        else:
            self.metadata.set(self.__class__.__name__, 'build_type', 'local')

        # License model is currently hardwired
        self.metadata.set(self.__class__.__name__, 'license_model', 'byol')

    def prepare(self, seed_image_name='', user_image_name=''):
        """Main controller"""
        try:
            self.set_image_name(seed_image_name, user_image_name)
            LOGGER.info("Starting prepare cloud image '%s'.", self.image_name)
            self.cloud_image.set_uploaded_disk_name(self.image_name)

            pipeline_build = os.getenv('CI') is not None
            self.initialize_image_metadata(self.artifacts_dir, pipeline_build)

            self.cloud_image.extract_disk()
            self.cloud_image.upload_disk()
            self.cloud_image.prep_disk()

            self.metadata.set(self.__class__.__name__, 'build_operation',
                              'create')
            self.cloud_image.create_image(self.image_name)
            build_time = time.time() - self.start_time
            self.metadata.set(self.__class__.__name__, 'build_time',
                              str(timedelta(seconds=build_time)))
            self.status = 'success'
            self.metadata.set(self.__class__.__name__, 'status', self.status)

            self.cloud_image.share_image()
            self.create_metadata()
            self.register_image()
            self.create_report()
            LOGGER.info("Finished prepare cloud image '%s'.", self.image_name)

        except BaseException as base_exception:
            LOGGER.exception(base_exception)
            raise

    def create_working_dir(self, artifacts_dir):
        """Create temporary directory"""
        self.working_dir = tempfile.mkdtemp('',
                                            "image_" + self.cloud_type + "_",
                                            artifacts_dir)
        LOGGER.debug("Working directory = '%s'.", self.working_dir)

    def register_image(self):
        """Register image with CIR"""
        CloudImageRegister(self.metadata).register_image()

    def create_metadata(self):
        """Create metadata file"""
        # initialize input/output
        input_metadata = self.metadata.get()
        output_metadata = {'status': self.status}

        # Place metadata from self and cloud image class in output metadata
        build_sources = [
            self.__class__.__name__, self.cloud_image.__class__.__name__
        ]
        for build_source in build_sources:
            if build_source in input_metadata:
                output_metadata.update(input_metadata[build_source])

        # Write metadata file
        metadata_file = '{}/prepare_cloud_image.json'.format(
            self.artifacts_dir)
        with open(metadata_file, 'w') as metadata_fp:
            json.dump(output_metadata, metadata_fp, indent=4, sort_keys=True)

    def create_report(self):
        """Create report of created images"""

    def dry_run(self):
        """Perform environment checks"""
        self.cloud_image.dry_run()
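
# A hypothetical end-to-end invocation of ImageController; the artifacts
# directory, cloud type, disk path, and image name are illustrative
# placeholders, not values from the source.
def prepare_cloud_image_example():
    """Sketch: build a cloud image from a local disk archive."""
    controller = ImageController('./artifacts', 'azure',
                                 './artifacts/disk.tar.gz')
    try:
        controller.prepare(user_image_name='my-bigip-image')
    finally:
        controller.clean_up()
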
class AzureDisk(BaseDisk):
    """
    Manage Azure disk
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, input_disk_path, working_dir):
        """Initialize azure disk object."""
        # First initialize the super class.
        super().__init__(input_disk_path, working_dir)
        self.uploaded_disk_url = None

        self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
        self.container_name = get_config_value('AZURE_STORAGE_CONTAINER_NAME')
        self.blob = None
        self.progress_cb_lu = 0
        self.metadata = CloudImageMetadata()

    def clean_up(self):
        """Clean-up the uploaded disk after image generation."""

    def set_uploaded_disk_name(self, disk_name):
        """Set the uploaded disk name"""
        # As the Azure disk takes its name from the image name (unlike other clouds,
        # where disk names are auto-generated during disk extraction), append the
        # disk extension to the uploaded disk name.
        self.uploaded_disk_name = disk_name + '.vhd'
        LOGGER.info("The uploaded disk name is '%s'.", self.uploaded_disk_name)

    def extract(self):
        """Extract the vhd disk out of tar.gz."""
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path, '.vhd', self.working_dir)
        LOGGER.info("Azure disk_to_upload = '%s'", self.disk_to_upload)

    def _get_tags(self):
        tags = CloudImageTags(self.metadata)
        tags.title_case_keys()
        return tags.get()

    def _progress_cb(self, byte_up, byte_total):
        sec = int(time())

        # Log at most once per 10-second interval.
        if sec - self.progress_cb_lu > 10:
            self.progress_cb_lu = sec
            byte_up //= (1 << 20)
            byte_total //= (1 << 20)
            LOGGER.info('Uploaded %d MB of total %d MB', byte_up, byte_total)

    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """

        def upload_azure():
            # Note: this reads the entire VHD into memory before uploading it
            # as a single page blob.
            with open(self.disk_to_upload, 'rb') as vhd_file:
                self.blob.upload_blob(
                    vhd_file.read(),
                    blob_type="PageBlob",
                    metadata=self._get_tags()
                )

        def _upload_impl():
            """ Azure blob upload implementation """
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
                LOGGER.info("create blob client")
                self.blob = BlobClient.from_connection_string(
                    conn_str=self.connection_string,
                    container_name=self.container_name,
                    blob_name=self.uploaded_disk_name,
                    connection_timeout=timeout
                    )

                LOGGER.info(self._get_tags())
                upload_azure_p = Process(target=upload_azure)
                upload_azure_p.start()
                # Poll the upload process every 10 seconds until it exits or
                # the configured timeout elapses.
                limit = int(timeout / 10)
                for _ in range(limit):
                    if not upload_azure_p.is_alive():
                        break
                    sleep(10)
                    os.write(1, b".")
                else:
                    # Still running after the timeout: stop the upload process
                    # before reporting the failure.
                    upload_azure_p.terminate()
                    upload_azure_p.join()
                    raise TimeoutError

                LOGGER.info(self.blob.get_blob_properties())
                local_blob_size = os.stat(self.disk_to_upload).st_size

                uploaded_blob_size = self.blob.get_blob_properties().get("size")

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))
                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureException:
                # AzureMissingResourceHttpError is a subclass of AzureException,
                # so a single handler covers both cases.
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except TimeoutError:
                LOGGER.error("Timeout while uploading")
                return False

            self.uploaded_disk_url = self.blob.url
            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json", "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # Record the VHD URL and image id in the metadata.
            self.metadata.set(self.__class__.__name__, 'vhd_url', self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error("blob [%s] was still not ready after checking [%d] times!",
                     self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")