def is_share_image_succeeded(self, share_account_ids):
        """Helper utility for share_image() that goes through the list of share_account_ids
           and confirms that the image was shared with all accounts. The function logs any
           error during its execution without propagating it up."""
        response_json = None
        try:
            LOGGER.info("Checking which accounts were added for sharing this image")
            response_json = self.client.describe_image_share_permission(self.image_id)

        except ServerException as exc:
            LOGGER.exception(exc)
            if exc.get_error_code() == 'InvalidImageId.NotFound' and \
                exc.get_error_msg().startswith('The specified ImageId does not exist'):
                raise RuntimeError('InvalidImageId.NotFound: Check if the Image ID exists')
            raise exc

        shared_accounts = []
        for account in response_json['Accounts']['Account']:
            shared_accounts.append(int(account['AliyunId']))

        counter = 0
        for an_account in share_account_ids:
            if an_account in shared_accounts:
                LOGGER.info("The image was successfully shared with account: %s", an_account)
                counter += 1
            else:
                LOGGER.warning("The image was not shared with account: %s", an_account)

        # Confirm that the number of accounts in share_account_ids and image's
        # 'LaunchPermissions' are matching.
        return counter == len(share_account_ids)
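The loop above assumes the describe_image_share_permission() response carries account
IDs under ['Accounts']['Account'][i]['AliyunId']. A minimal illustrative sample of that
shape (hypothetical account IDs, not captured from a live call):

response_json = {
    "Accounts": {
        "Account": [
            {"AliyunId": "1234567890123456"},
            {"AliyunId": "6543210987654321"},
        ]
    }
}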
Example #2
    def __init__(self):
        """
        Class that other information gathering classes inherit from

        All information gathering from this class means that all other
        information gathering classes also need that information
        """

        LOGGER.info(
            "Collecting information about installed software on the build machine"
        )

        self.json_info = {}
        self.json_info["modules"] = telemetry.operation_info.get_module()
        self.json_info["uuid"] = uuid.uuid1().hex
        self.json_info["image_name"] = get_image_name()
        self.json_info["image_id"] = get_image_id()
        self.json_info["platform"] = telemetry.operation_info.get_platform()
        self.json_info["boot_locations"] = telemetry.operation_info.get_boot_locations()
        self.json_info["result"] = telemetry.operation_info.get_result()
        self.json_info["start_time"] = telemetry.operation_info.get_start_time()
        self.json_info["end_time"] = telemetry.operation_info.get_end_time()
        self.get_platform_specific_info()
        self.output_file_path = "./"  # set default output to main folder
Example #3
    def create_snapshot(self):
        """Creates a snapshot from the uploaded s3_disk."""
        try:
            description = datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '--BIGIP-Volume-From-'
            description += self.s3_disk
            LOGGER.info("Importing the disk [s3://%s/%s] as a snapshot in AWS.",
                        self.s3_bucket, self.s3_disk)
            response = self.ec2_client.import_snapshot(Description=description,
                                                       DiskContainer={
                                                           "Description": description,
                                                           "Format": "vmdk",
                                                           "UserBucket": {
                                                               "S3Bucket": self.s3_bucket,
                                                               "S3Key": self.s3_disk
                                                           }
                                                       })
            LOGGER.trace("import_snapshot() Response => '%s'", response)
            self.import_task_id = response['ImportTaskId']
            LOGGER.info("TaskId for the import_snapshot() operation  => [%s]",
                        self.import_task_id)
            # Wait for the snapshot import to complete.
            self.is_snapshot_ready(self.import_task_id)

            # As the import operation completed successfully, reset the task ID to None
            # to avoid trying to cancel a completed import-task during clean-up.
            self.import_task_id = None

            # Tag the snapshot
            self.create_tags()

        except RuntimeError as runtime_error:
            LOGGER.exception(runtime_error)
            raise
    def delete_blob(self):
        """
        Delete the blob corresponding to self.uploaded_disk_name if it exists.
        """
        try:
            # Populate the bucket if not already.
            if self.bucket is None:
                self.init_bucket()

            if self.uploaded_disk_name is None:
                raise RuntimeError(
                    "Trying to delete a non-existent uploaded disk.")

            blob = self.get_blob(self.uploaded_disk_name)
            if blob is not None:
                LOGGER.info("Deleting blob '%s'.", self.uploaded_disk_name)
                self.bucket.delete_blob(self.uploaded_disk_name)

                blob = self.get_blob(self.uploaded_disk_name)
                if blob is not None:
                    raise RuntimeError(
                        "Deleting blob '{}' silently failed as it still exists."
                        .format(self.uploaded_disk_name))
        except google.cloud.exceptions.NotFound as exception:
            LOGGER.exception(exception)
            # A bare raise preserves the original traceback.
            raise
Example #5
    def clean_up(self):
        """Clean-up cloud objects created by this class and its members."""
        LOGGER.info("Cleaning-up GoogleImage artifacts.")

        if self.disk is not None:
            self.disk.clean_up()
        LOGGER.info("Completed GoogleImage clean-up.")
Example #6
    def create_image(self, image_name):
        LOGGER.info("Checking if the image '%s' already exists.", image_name)

        # Check if an image with image_name already exists. If so, delete the image
        result = self.image_exists(image_name)
        if not result:
            LOGGER.info("The image '%s' does not exist.", image_name)
        else:
            LOGGER.info("The image '%s' exists.", image_name)
            result = self.delete_image(image_name)
            if not result:
                LOGGER.error("Could not delete the image '%s', exiting.",
                             image_name)
                raise SystemExit(-1)

        LOGGER.info("Attempting to create an image '%s'.", image_name)

        result = self.insert_image(image_name)
        if not result:
            LOGGER.error("The image '%s' was not created successfully.",
                         image_name)
            raise SystemExit(-1)

        result = self.tag_image(image_name)
        if not result:
            LOGGER.error("The image '%s' was not tagged successfully.",
                         image_name)
            raise SystemExit(-1)

        LOGGER.info("Image '%s' creation succeeded.", image_name)
Example #7
    def post_to_url(self, cir_url, timeout):
        """Post data to URL with retries"""
        def _post_to_url():
            try:
                # Note: Total retry time is timeout (in the requests.post call) + retrier.delay
                LOGGER.debug('Post to URL:%s', cir_url)
                response = requests.post(url=cir_url,
                                         data=self.registration_data,
                                         timeout=timeout)
                LOGGER.debug('Response: %s:%s', response, response.text)
            except (requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError) as exception:
                LOGGER.debug('Caught exception:%s', exception)
                return False
            return True

        retrier = Retrier(_post_to_url)
        retrier.tries = int(get_config_value('IMAGE_REGISTRATION_RETRY_COUNT'))
        retrier.delay = int(get_config_value('IMAGE_REGISTRATION_RETRY_DELAY'))
        LOGGER.info('Attempt to register cloud image.')
        LOGGER.debug('Register cloud image detail: %s', self.registration_data)

        if retrier.execute():
            LOGGER.info('Cloud image was registered')
        else:
            raise RuntimeError(
                'Exhausted all [{}] retries for image registration.'.format(
                    retrier.tries))
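Several of these snippets rely on a Retrier helper that is not shown. The sketch below
is a minimal reconstruction assuming only the API visible at the call sites (a callable
passed to the constructor, tries and delay attributes, and an execute() method reporting
whether the callable eventually returned True); the real implementation may differ:

import time

class Retrier:
    """Minimal sketch of the retry helper assumed by these snippets."""

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.tries = 3    # callers override this from a config value
        self.delay = 10   # seconds to sleep between attempts

    def execute(self):
        """Call func until it returns truthy or the tries are exhausted.
        Exceptions raised by func propagate to the caller."""
        for _ in range(self.tries):
            if self.func(*self.args, **self.kwargs):
                return True
            time.sleep(self.delay)
        return False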
    def prepare(self, seed_image_name='', user_image_name=''):
        """Main controller"""
        try:
            self.set_image_name(seed_image_name, user_image_name)
            LOGGER.info("Starting prepare cloud image '%s'.", self.image_name)
            self.cloud_image.set_uploaded_disk_name(self.image_name)

            pipeline_build = os.getenv('CI') is not None
            self.initialize_image_metadata(self.artifacts_dir, pipeline_build)

            self.cloud_image.extract_disk()
            self.cloud_image.upload_disk()
            self.cloud_image.prep_disk()

            self.metadata.set(self.__class__.__name__, 'build_operation',
                              'create')
            self.cloud_image.create_image(self.image_name)
            build_time = time.time() - self.start_time
            self.metadata.set(self.__class__.__name__, 'build_time',
                              str(timedelta(seconds=build_time)))
            self.status = 'success'
            self.metadata.set(self.__class__.__name__, 'status', self.status)

            self.cloud_image.share_image()
            self.create_metadata()
            self.register_image()
            self.create_report()
            LOGGER.info("Finished prepare cloud image '%s'.", self.image_name)

        except BaseException as base_exception:
            LOGGER.exception(base_exception)
            # A bare raise preserves the original traceback.
            raise
Example #9
    def set_uploaded_disk_name(self, disk_name):
        """Set the uploaded disk name"""
        # As the Azure disk takes its name from the image name (unlike other clouds,
        # where disk names are auto-generated during disk extraction), append the
        # disk extension to the uploaded disk name.
        self.uploaded_disk_name = disk_name + '.vhd'
        LOGGER.info("The uploaded disk name is '%s'.", self.uploaded_disk_name)
Example #10
    def delete_snapshot(self):
        """Delete the AWS snapshot created by this object."""
        if self.snapshot_id is not None:
            LOGGER.info("Deleting the snapshot '%s'.", self.snapshot_id)
            self.ec2_client.delete_snapshot(SnapshotId=self.snapshot_id)
            LOGGER.info("Successfully deleted snapshot '%s'.",
                        self.snapshot_id)
def download_file(url, dest_file):
    """ Download from url to a local file.
        Throws exceptions with wording specific to the file injection.
        Assumes that the directory containing the destination file already exists. """
    verify_tls = get_config_value("IGNORE_DOWNLOAD_URL_TLS") is None
    try:
        remote_file = requests.get(url, verify=verify_tls, timeout=60)
    except requests.exceptions.SSLError as exc:
        LOGGER.exception(exc)
        raise RuntimeError(
            'Cannot access \'{}\' due to TLS problems! '.format(url) +
            'Consider disabling TLS verification with the ' +
            '\'IGNORE_DOWNLOAD_URL_TLS\' parameter.')
    except requests.exceptions.RequestException as exc:
        LOGGER.exception(exc)
        raise RuntimeError(
            '\'{}\' is neither a file nor a directory nor a valid url, cannot inject it!'
            .format(url))
    if remote_file.status_code != 200:
        LOGGER.info('requests.get response status: %s',
                    remote_file.status_code)
        LOGGER.info('requests.get response headers: %s', remote_file.headers)
        raise RuntimeError(
            'URL \'{}\' did not return content, cannot inject it!'.format(url))
    # Use a context manager so the file handle is always closed.
    with open(dest_file, 'wb') as local_file:
        local_file.write(remote_file.content)
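A minimal usage sketch (hypothetical URL and destination path):

download_file('https://example.com/startup-script.sh', '/tmp/startup-script.sh')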
    def extract(self):
        """
        The input disk is already a tar.gz file of disk.tar.
        Just copy the path.
        """
        self.disk_to_upload = self.input_disk_path
        LOGGER.info("Google disk_to_upload is '%s'.", self.disk_to_upload)
Example #13
    def __init__(self, metadata):
        # Create list of metadata config files for parent
        if metadata is None:
            raise ValueError('metadata is required.')

        context = 'tag'
        tag_config_files = MetadataConfigFileUtil(metadata.artifacts_dir, context). \
            get_all_config_filenames(['VersionFile'])

        # Init metadata with metadata/config files
        MetadataFilter.__init__(self, metadata, tag_config_files)
        self.filter()

        # Add user-defined tags to the config/metadata
        user_tags = get_list_from_config_yaml('IMAGE_TAGS')
        for user_tag in user_tags:
            for key, value in user_tag.items():
                self.metadata[key] = value

        # Remove tags the user requested to exclude
        user_exclude_tags = get_list_from_config_yaml('IMAGE_TAGS_EXCLUDE')
        for key in user_exclude_tags:
            if key in self.metadata:
                del self.metadata[key]
                LOGGER.info('Excluded key [%s] from image tags.', key)
            else:
                LOGGER.info(
                    'Key [%s] does not exist in image tags and cannot be excluded.',
                    key)
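The two loops above expect get_list_from_config_yaml('IMAGE_TAGS') to return a list of
single-entry mappings and 'IMAGE_TAGS_EXCLUDE' a list of keys, e.g. (hypothetical values):

user_tags = [{'owner': 'jdoe'}, {'project': 'cloud-image-build'}]   # IMAGE_TAGS
user_exclude_tags = ['build_host']                                  # IMAGE_TAGS_EXCLUDE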
Example #14
    def upload(self):
        """
        Upload tar.gz stored at self.disk_to_upload to Google storage
        """
        try:
            # Populate the bucket if not already.
            if self.bucket is None:
                self.init_bucket()

            # form blob name
            prefix = datetime.datetime.now().strftime('%Y%m%d') + '/'
            self.uploaded_disk_name = prefix + BaseDisk.decorate_disk_name(self.disk_to_upload)

            # delete the blob if it exists
            self.delete_blob()

            # create blob
            blob = self.bucket.blob(self.uploaded_disk_name)
            if blob is None:
                raise RuntimeError("Factory constructor for blob '{}' failed."
                                   .format(self.uploaded_disk_name))

            # upload blob
            LOGGER.info("Started to upload '%s' at '%s'.", self.uploaded_disk_name,
                        datetime.datetime.now().strftime('%H:%M:%S'))
            blob.upload_from_filename(self.disk_to_upload)
            LOGGER.info("Finished to upload '%s' at '%s'.", self.uploaded_disk_name,
                        datetime.datetime.now().strftime('%H:%M:%S'))
            if not blob.exists():
                raise RuntimeError("Uploading blob '{}' failed.".format(self.uploaded_disk_name))
        except RuntimeError as exception:
            LOGGER.exception(exception)
            raise
    def delete_old_image(self, image_name):
        """ Check if an image with the same name already exists and delete it.
            This is unlikely to happen unless the image name is specified in the configuration."""

        response = self.find_image(image_name)
        num_images = len(response['Images'])
        if num_images not in (0, 1, 2):
            raise RuntimeError('Number of images named {} '.format(image_name) +
                               'was expected to be 0 or 1 (possibly 2, due to AWS replication issues),' +
                               ' but found {}. '.format(num_images) +
                               '(An InvalidAMIName.Duplicate error should have been raised during ' +
                               'the previous image creation.) Please delete them manually.')

        if num_images in (1, 2):
            try:
                first_image_id = response['Images'][0]['ImageId']
                if num_images == 2:
                    second_image_id = response['Images'][1]['ImageId']
            except KeyError as key_error:
                LOGGER.exception(key_error)
                raise RuntimeError(
                    'could not find ImageId key for image {} '.format(image_name) +
                    'in describe_images response: {}'.format(response)) from key_error

            LOGGER.info('There is an old image %s named %s, deleting it.', first_image_id,
                        image_name)
            self.delete_image(first_image_id)
            if num_images == 2:
                LOGGER.info('There is an old image %s named %s, deleting it.', second_image_id,
                            image_name)
                self.delete_image(second_image_id)
Example #16
    def extract(self):
        """Extract the vmdk disk out of zip."""
        LOGGER.debug("Extracting '.vmdk' disk file from [%s].",
                     self.input_disk_path)
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path,
                                                  '.vmdk', self.working_dir)
        LOGGER.info("AWS disk_to_upload = '%s'", self.disk_to_upload)
Example #17
    def is_image_ready(self, image_name):
        """Checks if the given image is ready."""
        def _is_image_ready():
            """Checks if an image with image_name exists and status is READY"""
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
            if not result or result['status'] == 'FAILED':
                raise RuntimeError(
                    "Creation of image [{}] failed!".format(image_name))
            return result['status'] == 'READY'

        retrier = Retrier(_is_image_ready)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for image [%s] to be ready.", image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] is ready.", image_name)
                self.metadata.set(self.__class__.__name__, 'image_id',
                                  image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            return False
    def is_share_image_succeeded(self, share_account_ids):
        """Helper utility for share_image() that goes through the list of share_account_ids
        and confirms that the image was shared with all accounts. The function logs any
        error during its execution without propagating it up."""
        try:
            LOGGER.info("Checking which accounts were added for sharing this AMI")
            image_launch_perms = self.ec2_client.describe_image_attribute(
                ImageId=self.image_id,
                Attribute='launchPermission',
                DryRun=False
            )
            LOGGER.trace("image.describe_attribute() response => %s", image_launch_perms)
        except ClientError as client_error:
            # Simply log the exception without propagating it.
            LOGGER.exception(client_error)
            return False

        # Create a list of account IDs that have launch permission
        launch_permission_accounts = []
        for each in image_launch_perms['LaunchPermissions']:
            launch_permission_accounts.append(each['UserId'])

        counter = 0
        # Check which accounts were added for sharing this AMI
        for account_id in share_account_ids:
            if str(account_id) in launch_permission_accounts:
                LOGGER.info("The AMI was successfully shared with account: %s", account_id)
                counter += 1
            else:
                LOGGER.warning("The AMI was not shared with account: %s", account_id)

        # Confirm that the number of accounts in share_account_ids and image's
        # 'LaunchPermissions' are matching.
        return counter == len(share_account_ids)
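For reference, boto3's describe_image_attribute() returns launch permissions in this
shape (illustrative sample with made-up IDs):

image_launch_perms = {
    'ImageId': 'ami-0123456789abcdef0',
    'LaunchPermissions': [
        {'UserId': '123456789012'},
        {'UserId': '210987654321'},
    ]
}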
    def delete_uploaded_disk(self, disk_name):
        """Deletes the given disk_name from the self.bucket_name S3 bucket."""
        if self.is_disk_exist(disk_name):
            self.s3_client.delete_object(Bucket=self.bucket_name,
                                         Key=disk_name)
            LOGGER.info("Deleted '%s' from bucket '%s'.", disk_name,
                        self.bucket_name)
    def wait_for_image_availability(self):
        """ Wait for image to be created and available """
        def _wait_for_image_availability():
            """Awaits the describe_images() to successfully acknowledge availability
            of the given image."""
            try:
                response = self.ec2_client.describe_images(ImageIds=[self.image_id])
            except (ClientError, ParamValidationError) as botocore_exception:
                LOGGER.exception(botocore_exception)
                raise RuntimeError('EC2.Client.describe_images() failed for {} !'.
                                   format(self.image_id)) from botocore_exception
            if not response:
                raise RuntimeError('EC2.Client.describe_images() returned an empty response!')
            try:
                return response['Images'][0]['State'] == 'available'
            except (KeyError, IndexError) as image_describe_exception:
                LOGGER.exception(image_describe_exception)
                raise RuntimeError('EC2.Client.describe_images() did not have ' +
                                   '[\'Images\'][0][\'State\'] in its response: response \'{}\''.
                                   format(response)) from image_describe_exception

        retrier = Retrier(_wait_for_image_availability)
        retrier.tries = int(get_config_value('AWS_CREATE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_CREATE_IMAGE_RETRY_DELAY'))
        LOGGER.info('Waiting for the image %s to become available.', self.image_id)

        if retrier.execute():
            LOGGER.info('Image [%s] is created in AWS.', self.image_id)
        else:
            raise RuntimeError('Exhausted all [{}] retries waiting for image {} to become available.'.
                               format(retrier.tries, self.image_id))
        def _upload_impl():
            """ Azure blob upload implementation """
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
                LOGGER.info("create blob client")
                self.blob = BlobClient.from_connection_string(
                    conn_str=self.connection_string,
                    container_name=self.container_name,
                    blob_name=self.uploaded_disk_name,
                    connection_timeout=timeout
                    )

                LOGGER.info(self._get_tags())
                nonlocal upload_azure
                upload_azure_p = Process(target=upload_azure)
                upload_azure_p.start()
                limit = int(timeout/10)
                for _ in range(limit):
                    if not upload_azure_p.is_alive():
                        break
                    sleep(10)
                    os.write(1, b".")
                else:
                    raise TimeoutError

                LOGGER.info(self.blob.get_blob_properties())
                local_blob_size = os.stat(self.disk_to_upload).st_size

                uploaded_blob_size = self.blob.get_blob_properties().get("size")

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))
                if uploaded_blob_size != local_blob_size:
                    return False

            except (AzureMissingResourceHttpError, AzureException):
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except TimeoutError:
                LOGGER.error("Timeout while uploading")
                return False

            self.uploaded_disk_url = self.blob.url
            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json", "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url', self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True
    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(
                    self.container_name, self.uploaded_disk_name, self.disk_to_upload,
                    max_connections=cnum, metadata=self._get_tags(),
                    progress_callback=self._progress_cb, timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name,
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s",
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except (AzureMissingResourceHttpError, AzureException):
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("Blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error(
            "Blob [%s] was still not ready after checking [%d] times!",
            self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime error occurred during Azure disk upload.")
Example #23
    def cancel_import_task(self):
        """Cancel an ongoing import task represented by self.import_task_id.
        As per AWS, this only works on "pending" import tasks; for a completed task
        it is essentially a no-op."""
        if self.import_task_id is not None:
            LOGGER.info("Cancelling pending import task '%s'.", self.import_task_id)
            self.ec2_client.cancel_import_task(ImportTaskId=self.import_task_id)
            LOGGER.info("Successfully cancelled pending import task '%s'.", self.import_task_id)
Example #24
    def delete_file_from_storage(self):
        """Delete file from storage"""
        if self.bucket.object_exists(self.uploaded_disk_name):
            LOGGER.info('Storage file %s exists, deleting it', self.uploaded_disk_name)
            self.bucket.delete_object(self.uploaded_disk_name)
        else:
            LOGGER.info('Storage file %s does not exist, no need to delete it',
                        self.uploaded_disk_name)
    def extract(self):
        """Extract the vhd disk out of tar.gz."""
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path,
                                                  '.vhd',
                                                  self.working_dir)
        LOGGER.info("Azure disk_to_upload = '%s'", self.disk_to_upload)
Example #26
    def _progress_cb(self, byte_up, byte_total):
        sec = int(time())

        # Throttle updates: log at most once per 10-second interval.
        if sec - self.progress_cb_lu > 10:
            self.progress_cb_lu = sec
            byte_up //= (1 << 20)
            byte_total //= (1 << 20)
            LOGGER.info('Uploaded %d MB of total %d MB', byte_up, byte_total)
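The shifts above convert bytes to MiB before logging: 1 << 20 == 1048576, so an upload
of 52428800 bytes is reported as 50 MB.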
    def prep_disk(self):
        """Performs the leg work to convert the S3 Disk represented by self.disk into
        a snapshot from which an AWSImage can be created."""
        LOGGER.info("Prepare the uploaded s3 disk for image generation.")

        # Convert the s3Disk into an AWS Snapshot.
        self.snapshot = AWSSnapshot(self.ec2_client, self.disk.bucket_name,
                                    self.disk.uploaded_disk_name)
        self.snapshot.create_snapshot()
        LOGGER.info("AWS Disk preparation is complete for image creation.")
def get_git_version():
    """Returns the git version."""
    try:
        with subprocess.Popen(['git', '--version'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT) as proc:
            out, _ = proc.communicate()
            return out.decode("UTF-8").strip()
    except OSError as error:
        LOGGER.info(error)
        return None
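Example of what the helper returns (exact text depends on the installed git):

version = get_git_version()   # e.g. 'git version 2.39.2', or None if git is unavailable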
    def extract(self):
        """Extract the vmdk disk out of zip."""
        LOGGER.debug("Extracting '.vmdk' disk file from [%s].",
                     self.input_disk_path)
        self.disk_to_upload = BaseDisk.decompress(self.input_disk_path,
                                                  '.vmdk',
                                                  self.working_dir)
        LOGGER.info("AWS disk_to_upload = '%s'", self.disk_to_upload)
Example #30
    def tag_image(self, image_name):
        """Associate image tags with image"""
        LOGGER.info('Set image labels.')

        # Get current labels fingerprint.  To avoid/detect conflicts, you must
        # provide the current label fingerprint (reference) when you request to
        # set image labels.  This fingerprint value is updated whenever labels
        # are updated and the set labels request will fail if the labels were
        # updated out of band.
        try:
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception getting the image label fingerprint:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        # Check the result before indexing into it so an empty response
        # cannot raise a KeyError ahead of the emptiness check.
        label_fingerprint = result.get('labelFingerprint')
        if not label_fingerprint:
            LOGGER.info('Label fingerprint was empty.')
            return False

        cloud_image_tags = CloudImageTags(self.metadata)
        cloud_image_tags.transform_values(to_lower=True,
                                          disallowed_regex='[^a-z0-9-]')
        image_labels = cloud_image_tags.get()

        set_labels_body = {
            "labels": image_labels,
            "labelFingerprint": label_fingerprint
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().setLabels(
                project=self.gce_project_id,
                resource=image_name,
                body=set_labels_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception setting image labels:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        LOGGER.debug("Image set labels response: %s", result)
        return True