Code example #1
def get_image_name():
    """Retrieves the image name from config system."""
    plat = get_config_value("PLATFORM")
    if is_hypervisor_image(plat):
        return get_config_value("HYPERVISOR_IMAGE_NAME")

    return get_config_value("CLOUD_IMAGE_NAME")
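Every snippet in this collection calls a shared get_config_value helper that is not itself shown. As a non-authoritative stand-in for reading these examples, assume it resolves a named setting to a string or None; the sketch below simplifies it to an environment lookup (the real helper's lookup chain and signature are assumptions):

import os

def get_config_value(key, default=None):
    """Hypothetical stand-in: resolve a named setting from the environment.

    The real helper likely layers config files over environment variables;
    this sketch only captures the 'name in, string-or-None out' contract
    that the snippets below rely on.
    """
    return os.environ.get(key, default)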
Code example #2
def __get_acs_client():
    """ Set up and return a client for the Alibaba cloud service.
        Requires ALIBABA_ACCESS_KEY_ID and ALIBABA_ACCESS_KEY_SECRET to be set. """
    client = AcsClient(get_config_value('ALIBABA_ACCESS_KEY_ID'),
                       get_config_value('ALIBABA_ACCESS_KEY_SECRET'),
                       get_config_value('ALIBABA_REGION'))
    return client
Code example #3
    def issue_image_creation_commands(self):
        """ Initiate image creation and wait for image to be ready """

        # start image creation
        LOGGER.info('Started creation of image \'%s\' at %s.', self.image_name,
                    datetime.now().strftime('%H:%M:%S'))
        start_time = time()
        async_create_image = self.compute_client.images.create_or_update(
            get_config_value('AZURE_RESOURCE_GROUP'),
            self.image_name,
            {
                'location': get_config_value('AZURE_REGION'),
                'storage_profile': {
                    'os_disk': {
                        'os_type': 'Linux',
                        'os_state': "Generalized",
                        'blob_uri': self.disk.uploaded_disk_url,
                        'caching': "ReadWrite"
                    }
                }
            }
        )

        # result() blocks until the asynchronous image creation completes
        LOGGER.info('Created image %s', async_create_image.result())
        LOGGER.info('Creation of image \'%s\' took %d seconds.', self.image_name,
                    time() - start_time)
Code example #4
    def set_bucket(self):
        """Return bucket for uploaded files"""
        access_key = get_config_value('ALIBABA_ACCESS_KEY_ID')
        secret_key = get_config_value('ALIBABA_ACCESS_KEY_SECRET')
        auth = oss2.Auth(access_key, secret_key)

        region = get_config_value('ALIBABA_REGION')
        bucket_name = get_config_value('ALIBABA_BUCKET')
        self.bucket = oss2.Bucket(auth,
                                  'https://oss-' + region + '.aliyuncs.com',
                                  bucket_name)

        try:
            self.bucket.get_bucket_info()
        except oss2.exceptions.SignatureDoesNotMatch as exc:
            LOGGER.exception(exc)
            raise RuntimeError('Bad credentials; unable to get bucket info')
        except oss2.exceptions.ServerError as exc:
            if exc.details['Code'] == 'InvalidBucketName':
                LOGGER.exception(exc)
                raise RuntimeError('Invalid bucket name: ' +
                                   exc.details['BucketName'])
            LOGGER.exception(exc)
            raise RuntimeError('Unexpected Alibaba OSS server error. ' +
                               'One possible cause: invalid credentials.')
        except oss2.exceptions.RequestError as exc:
            LOGGER.exception(exc)
            raise RuntimeError(
                'Alibaba OSS request error. ' +
                'One possible cause: an invalid Alibaba region.')
Code example #5
def get_azure_info(self):
    """Gets Azure-specific info."""
    self.json_info["azure_resource_group"] = get_config_value(
        'AZURE_RESOURCE_GROUP')
    self.json_info["azure_region"] = get_config_value('AZURE_REGION')
    self.json_info["azure_location"] = read_file_value(
        "vhd_url.json", "vhd_url")
Code example #6
    def post_to_url(self, cir_url, timeout):
        """Post data to URL with retries"""
        def _post_to_url():
            try:
                # Note: Total retry time is timeout (in the requests.post call) + retrier.delay
                LOGGER.debug('Post to URL:%s', cir_url)
                response = requests.post(url=cir_url,
                                         data=self.registration_data,
                                         timeout=timeout)
                LOGGER.debug('Response: %s:%s', response, response.text)
            except (requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError) as exception:
                LOGGER.debug('Caught exception:%s', exception)
                return False
            return True

        retrier = Retrier(_post_to_url)
        retrier.tries = int(get_config_value('IMAGE_REGISTRATION_RETRY_COUNT'))
        retrier.delay = int(get_config_value('IMAGE_REGISTRATION_RETRY_DELAY'))
        LOGGER.info('Attempt to register cloud image.')
        LOGGER.debug('Register cloud image detail: %s', self.registration_data)

        if retrier.execute():
            LOGGER.info('Cloud image was registered')
        else:
            raise RuntimeError(
                'Exhausted all [{}] retries for image registration.'.format(
                    retrier.tries))
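Several of these examples (#6, #7, #9, #11, #14, #16, #22) drive their polling through a Retrier object configured with tries and delay and run via execute(). The class itself is not included in this collection; below is a minimal sketch consistent with that usage, with the caveat that the real class's exact behavior is an assumption:

from time import sleep

class Retrier:
    """Minimal sketch: call func until it returns True or tries run out."""

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.tries = 3   # callers override these two attributes
        self.delay = 1

    def execute(self):
        """Return True on the first truthy result, False once retries are exhausted.

        Exceptions raised by func propagate to the caller, matching how
        example #9 wraps execute() in try/except.
        """
        for attempt in range(self.tries):
            if self.func(*self.args, **self.kwargs):
                return True
            if attempt < self.tries - 1:
                sleep(self.delay)
        return False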
Code example #7
    def wait_for_image_availability(self):
        """ Wait for image to be created and available """
        def _wait_for_image_availability():
            """Awaits the describe_images() to successfully acknowledge availability
            of the given image."""
            try:
                response = self.ec2_client.describe_images(ImageIds=[self.image_id])
            except (ClientError, ParamValidationError) as botocore_exception:
                LOGGER.exception(botocore_exception)
                raise RuntimeError('EC2.Client.describe_images() failed for {}!'.
                                   format(self.image_id)) from botocore_exception
            if not response:
                raise RuntimeError('EC2.Client.describe_images() returned an empty response!')
            try:
                if response['Images'][0]['State'] == 'available':
                    return True
                return False
            except (KeyError, IndexError) as image_describe_exception:
                LOGGER.exception(image_describe_exception)
                raise RuntimeError('EC2.Client.describe_images() did not have ' +
                                   '[\'Images\'][0][\'State\'] in its response: response \'{}\''.
                                   format(response)) from image_describe_exception

        retrier = Retrier(_wait_for_image_availability)
        retrier.tries = int(get_config_value('AWS_CREATE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_CREATE_IMAGE_RETRY_DELAY'))
        LOGGER.info('Waiting for the image %s to become available.', self.image_id)

        if retrier.execute():
            LOGGER.info('Image [%s] is created in AWS.', self.image_id)
        else:
            raise RuntimeError('Exhausted all \'{}\' retries for image {} to become available.'.
                               format(retrier.tries, self.image_id))
Code example #8
    def open_compute_client(self):
        """ Open compute management client """
        try:
            credentials = ServicePrincipalCredentials(
                tenant=get_config_value('AZURE_TENANT_ID'),
                client_id=get_config_value('AZURE_APPLICATION_ID'),
                secret=get_config_value('AZURE_APPLICATION_SECRET'))
        except exceptions.AuthenticationError as exc:
            # check if more specific message can be provided
            error_key = 'error'
            if hasattr(exc, 'inner_exception') and hasattr(exc.inner_exception, 'error_response') \
                and error_key in exc.inner_exception.error_response:
                error_dict = exc.inner_exception.error_response
                bad_parameter = None
                if error_dict[error_key] == 'invalid_request':
                    bad_parameter = 'AZURE_TENANT_ID'
                if error_dict[error_key] == 'unauthorized_client':
                    bad_parameter = 'AZURE_APPLICATION_ID'
                if error_dict[error_key] == 'invalid_client':
                    bad_parameter = 'AZURE_APPLICATION_SECRET'

                if bad_parameter:
                    azure_failure_msg = 'Azure did not accept the request. Possible fix:'
                    raise RuntimeError('{} verify that \'{}\' is correct.'.format(
                        azure_failure_msg, bad_parameter))
            raise
        self.compute_client = ComputeManagementClient(credentials,
                                                      get_config_value('AZURE_SUBSCRIPTION_ID'))
Code example #9
    def is_image_ready(self, image_name):
        """Checks if the given image is ready."""
        def _is_image_ready():
            """Checks if an image with image_name exists and status is READY"""
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
            if not result or result['status'] == 'FAILED':
                raise RuntimeError(
                    "Creation of image [{}] failed!".format(image_name))
            return result['status'] == 'READY'

        retrier = Retrier(_is_image_ready)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for image [%s] to be ready.", image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] is ready.", image_name)
                self.metadata.set(self.__class__.__name__, 'image_id',
                                  image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            return False
Code example #10
        def _upload_impl():
            """ Azure blob upload implementation """
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
                LOGGER.info("create blob client")
                self.blob = BlobClient.from_connection_string(
                    conn_str=self.connection_string,
                    container_name=self.container_name,
                    blob_name=self.uploaded_disk_name,
                    connection_timeout=timeout
                    )

                LOGGER.info(self._get_tags())
                nonlocal upload_azure
                upload_azure_p = Process(target=upload_azure)
                upload_azure_p.start()
                limit = int(timeout/10)
                for _ in range(limit):
                    if not upload_azure_p.is_alive():
                        break
                    sleep(10)
                    os.write(1, b".")
                else:
                    raise TimeoutError

                LOGGER.info(self.blob.get_blob_properties())
                local_blob_size = os.stat(self.disk_to_upload).st_size

                uploaded_blob_size = self.blob.get_blob_properties().get("size")

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))
                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except TimeoutError:
                LOGGER.error("Timeout while uploading")
                return False

            self.uploaded_disk_url = self.blob.url
            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json", "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url', self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True
Code example #11
    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(self.container_name, self.uploaded_disk_name, \
                         self.disk_to_upload, max_connections=cnum, \
                         metadata=self._get_tags(), progress_callback=self._progress_cb, \
                         timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name, \
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error(
            "blob [%s] was still not ready after checking [%d] times!",
            self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")
Code example #12
def set_number_of_threads():
    """Number of threads must not exceed oss2.defaults.connection_pool_size."""
    if int(get_config_value('ALIBABA_THREAD_COUNT')) > int(oss2.defaults.connection_pool_size):
        number_of_threads_message = 'Will use only ' + \
            '{} threads for the image upload; '.format(oss2.defaults.connection_pool_size) + \
            'the limit is imposed by oss2.defaults.connection_pool_size.'
        LOGGER.warning(number_of_threads_message)
        return int(oss2.defaults.connection_pool_size)
    return int(get_config_value('ALIBABA_THREAD_COUNT'))
Code example #13
def __init__(self):
    """Set rules and transformations to be used later"""
    min_chars = int(get_config_value('AWS_IMAGE_NAME_LENGTH_MIN'))
    max_chars = int(get_config_value('AWS_IMAGE_NAME_LENGTH_MAX'))
    rules = ImageNameRules(min_chars,
                           max_chars,
                           match_regex=r'^[a-zA-Z0-9\(\).\-\/_]+$')
    transform = ImageNameTransform(
        disallowed_regex=r'[^a-zA-Z0-9\(\).\-\/_]')
    super().__init__(rules, transform)
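Examples #13, #18, #26, and #28 all pass ImageNameRules and ImageNameTransform objects to a common base class. Neither class is shown in this collection; the sketch below mirrors only the constructor arguments visible in the snippets, and the check/apply method names are invented for illustration:

import re

class ImageNameRules:
    """Hypothetical: length bounds plus an allowed-name pattern."""

    def __init__(self, min_chars, max_chars, match_regex):
        self.min_chars = min_chars
        self.max_chars = max_chars
        self.match_regex = match_regex

    def check(self, name):
        # Invented helper: validate length and pattern in one call.
        return (self.min_chars <= len(name) <= self.max_chars
                and re.match(self.match_regex, name) is not None)

class ImageNameTransform:
    """Hypothetical: rewrite a candidate name so it satisfies the rules."""

    def __init__(self, disallowed_regex, replacement_char='-', to_lower=False):
        self.disallowed_regex = disallowed_regex
        self.replacement_char = replacement_char
        self.to_lower = to_lower

    def apply(self, name):
        # Invented helper: lower-case if requested, then replace every
        # disallowed character with the replacement character.
        if self.to_lower:
            name = name.lower()
        return re.sub(self.disallowed_regex, self.replacement_char, name)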
Code example #14
    def upload(self):
        """Upload disk with OSS2 (Alibaba Python SDK)
           resumable_upload is used to upload large size files"""
        number_of_threads = self.set_number_of_threads()
        self.set_bucket()
        AlibabaDisk.iter = 0

        def _resumable_upload():
            self.uploaded_disk_name = 'bakery-' + os.path.basename(self.disk_to_upload) + '-' + \
                                      ''.join(random.choices(string.digits, k=6))
            AlibabaDisk.iter += 1
            LOGGER.info('Upload iteration number %d', AlibabaDisk.iter)
            LOGGER.info('Uploading %s as %s', self.disk_to_upload,
                        self.uploaded_disk_name)
            start_time = time.time()
            time.sleep(1)
            result = False
            try:
                resumable_store = oss2.resumable.ResumableStore(
                    root=self.working_dir)
                oss2.resumable_upload(self.bucket,
                                      self.uploaded_disk_name,
                                      self.disk_to_upload,
                                      store=resumable_store,
                                      num_threads=number_of_threads)
                result = True
            except FileNotFoundError as exc:
                LOGGER.exception(exc)
                raise RuntimeError('Could not find file to upload: {}'.format(
                    self.disk_to_upload))
            except oss2.exceptions.NoSuchUpload as exc:
                LOGGER.error('Upload failed. UploadId: %s',
                             exc.details['UploadId'])
                LOGGER.exception(exc)

            LOGGER.info('Iteration %d of upload took %d seconds',
                        AlibabaDisk.iter,
                        time.time() - start_time)
            if not result:
                self.upload_cleanup()
            return result

        retrier = Retrier(_resumable_upload)
        retrier.tries = int(
            get_config_value('ALIBABA_UPLOAD_FILE_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('ALIBABA_UPLOAD_FILE_RETRY_DELAY'))

        if retrier.execute():
            LOGGER.info('Finished upload of %s', self.disk_to_upload)
        else:
            raise RuntimeError(
                'Exhausted all {} retries for file {} to upload.'.format(
                    retrier.tries, self.uploaded_disk_name))
Code example #15
    def __init__(self, input_disk_path, working_dir):
        """Initialize azure disk object."""
        # First initialize the super class.
        super().__init__(input_disk_path, working_dir)
        self.uploaded_disk_url = None

        self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
        self.container_name = get_config_value('AZURE_STORAGE_CONTAINER_NAME')
        self.blob = None
        self.progress_cb_lu = 0
        self.metadata = CloudImageMetadata()
Code example #16
    def is_snapshot_ready(self, import_task_id):
        """Checks if a snapshot with the given import_task_id exists and its
        status is 'completed'."""
        def _is_snapshot_ready():
            """Awaits the import operation represented by the import_task_id to reach
            'completed' status."""
            try:
                LOGGER.trace("Querying the status of import-task [%s].", import_task_id)
                response = \
                    self.ec2_client.describe_import_snapshot_tasks(
                        ImportTaskIds=[import_task_id])
                if not response:
                    raise RuntimeError("describe_import_snapshot_tasks() returned none response!")

                LOGGER.trace("Response from describe_import_snapshot_tasks => '%s'",
                             response)
                task_status = response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['Status']
                if task_status == 'error':
                    # Print the response before raising an exception.
                    LOGGER.debug("describe_import_snapshot_tasks() response for [%s] => [%s]",
                                 import_task_id, response)
                    raise RuntimeError("import-snapshot task [{}] in unrecoverable 'error' state.".
                                       format(import_task_id))

                return task_status == 'completed'
            except ClientError as client_error:
                LOGGER.exception(client_error)
                raise RuntimeError("describe_import_snapshot_tasks() failed for [{}]!".
                                   format(import_task_id))

        retrier = Retrier(_is_snapshot_ready)
        retrier.tries = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_DELAY'))
        LOGGER.info("Waiting for the import snapshot task [%s] to complete.", import_task_id)
        try:
            if retrier.execute():
                LOGGER.info("import_snapshot_task [%s] is completed.", import_task_id)
                # Call it one last time to get the snapshot_id.
                response = self.ec2_client.describe_import_snapshot_tasks(
                    ImportTaskIds=[import_task_id])
                self.snapshot_id = \
                    response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['SnapshotId']
                LOGGER.info("SnapshotID = [%s].", self.snapshot_id)
                return True
            LOGGER.warning("import_snapshot_task [%s] didn't complete after checking [%d] times!",
                           import_task_id, retrier.tries)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            raise
Code example #17
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(self.container_name, self.uploaded_disk_name, \
                         self.disk_to_upload, max_connections=cnum, \
                         metadata=self._get_tags(), progress_callback=self._progress_cb, \
                         timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name, \
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json",
                      "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True
Code example #18
    def __init__(self):
        # 1-63 chars.  First char is lower alpha, middle chars lower alpha, number,
        # or dash.  Last char is lower alpha or number (no dash).  One character
        # is acceptable.
        min_chars = int(get_config_value('GCE_IMAGE_NAME_LENGTH_MIN'))
        max_chars = int(get_config_value('GCE_IMAGE_NAME_LENGTH_MAX'))
        rules = ImageNameRules(min_chars,
                               max_chars,
                               match_regex='^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$')

        # Use default replacement char ('-') and padding (10 chars)
        transform = ImageNameTransform(disallowed_regex='[^a-z0-9-]',
                                       to_lower=True)

        super().__init__(rules, transform)
Code example #19
def read_lv_sizes(lv_sizes_patch_json):
    """ Read user defined values for LV sizes, validate them and store in a json file """
    if not get_config_value('UPDATE_LV_SIZES'):
        # LV sizes are not overridden
        return

    modifiable_size_lvs = {'appdata', 'config', 'log', 'shared', 'var'}
    lv_sizes_dict = get_dict_from_config_json('UPDATE_LV_SIZES')
    filtered_dict = {}
    for lv_name in lv_sizes_dict:
        lv_size = lv_sizes_dict[lv_name]
        lv_name = lv_name.lower()
        if not isinstance(lv_size, int):
            raise RuntimeError(
                'LV size for \'{}\' must be an integer, '.format(lv_name) +
                'denoting the size in MiBs (without quotation marks).')
        if lv_name not in modifiable_size_lvs:
            raise RuntimeError(
                '\'{}\' is not one of the modifiable-size LVs: {}. '.format(
                    lv_name, modifiable_size_lvs) +
                'Either it is not an LV or its size cannot be changed!')
        filtered_dict[lv_name] = lv_size

    with open(lv_sizes_patch_json, 'w') as patch_file:
        json.dump(filtered_dict, patch_file, indent=2)
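For illustration, assume UPDATE_LV_SIZES is set to the JSON string {"LOG": 3000, "var": 5000} and that get_dict_from_config_json (not shown here) parses it into a dict. read_lv_sizes('lv_sizes_patch.json') then lower-cases the names, checks that each is in the modifiable set and that each size is an integer, and writes {"log": 3000, "var": 5000} to lv_sizes_patch.json. A name outside the set, or a non-integer size such as "3000", raises RuntimeError instead.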
Code example #20
    def insert_image(self, image_name):
        """Create image in GCE and then check for status = READY"""
        bucket_name = get_config_value('GCE_BUCKET')
        image_body = {
            "name": image_name,
            "rawDisk": {
                # The source URL must include both the bucket name and the blob name
                "source":
                "https://storage.googleapis.com/{}/{}".format(
                    bucket_name, self.disk.uploaded_disk_name)
            }
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().insert(
                project=self.gce_project_id, body=image_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.exception(exp)
            raise exp

        if not result:
            return False

        LOGGER.debug("Image creation response: '%s'", result)
        return self.is_image_ready(image_name)
Code example #21
def get_image_id():
    """Retrieves image id."""
    plat = get_config_value("PLATFORM")
    if plat in ("aws", "alibaba"):
        return telemetry.operation_info.read_file_value(
            "prepare_cloud_image.json", "image_id")
    return get_image_name()
Code example #22
    def wait_for_image_deletion(self):
        """ Wait for image to be deleted """

        def _wait_for_image_deletion():
            """ Check if image does not exist """
            return not self.does_image_exist()

        retrier = Retrier(_wait_for_image_deletion)
        retrier.tries = int(get_config_value('AZURE_DELETE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AZURE_DELETE_IMAGE_RETRY_DELAY'))

        if retrier.execute():
            LOGGER.info('Preexisting image \'%s\' has been deleted.', self.image_name)
        else:
            raise RuntimeError('Exhausted all {} retries for image \'{}\' to be deleted.'.
                               format(retrier.tries, self.image_name))
Code example #23
def does_image_exist(self):
    """ Wrap the get method to determine whether the image exists.
        Expects an msrestazure.azure_exceptions.CloudError exception
        with error.error equal to 'ResourceNotFound' when the image does not exist,
        or error.error equal to 'NotFound' when the image is in the last stage
        of its existence. Also handles the subscription-not-found case specifically,
        since this is likely the first Azure compute call to be issued.
        Returns True or False. """
    try:
        self.compute_client.images.get(get_config_value('AZURE_RESOURCE_GROUP'),
                                       self.image_name)
    except azure_exceptions.CloudError as exc:
        if hasattr(exc, 'error') and hasattr(exc.error, 'error'):
            if exc.error.error == 'ResourceNotFound':
                return False
            if exc.error.error == 'NotFound':
                # the image is in the last stage of its existence
                return True
            if exc.error.error == 'SubscriptionNotFound':
                raise RuntimeError(('Azure could not find the subscription, '
                                    'check the value of \'AZURE_SUBSCRIPTION_ID\'.'))
            raise RuntimeError(('Unexpected CloudError type: \'{}\', '
                                'while checking for image \'{}\'.').
                               format(exc.error.error, self.image_name))
        raise
    return True
Code example #24
def download_file(url, dest_file):
    """ Download from url to a local file.
        Throws exceptions with wording specific to the file injection.
        Assumes that the directory containing the destination file already exists. """
    verify_tls = get_config_value("IGNORE_DOWNLOAD_URL_TLS") is None
    try:
        remote_file = requests.get(url, verify=verify_tls, timeout=60)
    except requests.exceptions.SSLError as exc:
        LOGGER.exception(exc)
        raise RuntimeError(
            'Cannot access \'{}\' due to TLS problems! '.format(url) +
            'Consider disabling TLS verification with the ' +
            '\'IGNORE_DOWNLOAD_URL_TLS\' parameter.')
    except requests.exceptions.RequestException as exc:
        LOGGER.exception(exc)
        raise RuntimeError(
            '\'{}\' is neither a file nor a directory nor a valid url, cannot inject it!'
            .format(url))
    if remote_file.status_code != 200:
        LOGGER.info('requests.get response status: %s',
                    remote_file.status_code)
        LOGGER.info('requests.get response headers: %s', remote_file.headers)
        raise RuntimeError(
            'URL \'{}\' did not return content, cannot inject it!'.format(url))
    with open(dest_file, 'wb') as local_file:
        local_file.write(remote_file.content)
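A hypothetical invocation (the URL and destination path are placeholders, not values from the original project):

download_file('https://example.com/artifacts/startup.sh', '/tmp/build/startup.sh')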
Code example #25
def get_alibaba_info(self):
    """Gets Alibaba-specific info."""
    self.json_info["alibaba_image_id"] = read_file_value(
        "image_id.json", "image_id")
    self.json_info["alibaba_region"] = get_config_value('ALIBABA_REGION')
    self.json_info["alibaba_location"] = read_file_value(
        "alibaba_location.json", "alibaba_location")
Code example #26
    def __init__(self):

        # Valid length: 1 to 80 characters (verified against the Azure Portal).
        # The name must begin with a letter or number, end with a letter, number,
        # or underscore, and may contain only letters, numbers, underscores,
        # periods, or hyphens.

        min_chars = int(get_config_value('AZURE_IMAGE_NAME_LENGTH_MIN'))
        max_chars = int(get_config_value('AZURE_IMAGE_NAME_LENGTH_MAX'))

        rules = ImageNameRules(
            min_chars, max_chars,
            match_regex=r'^[a-zA-Z0-9]([a-zA-Z0-9\-\.\_]{0,78}[a-zA-Z0-9\_])?$')
        # Use default replacement char ('-') and padding (10 chars)
        transform = ImageNameTransform(disallowed_regex=r'[^a-zA-Z0-9\-\.\_]',
                                       to_lower=False)
        super().__init__(rules, transform)
Code example #27
def read_file_value(file_name, value_name):
    """Reads a file from the artifacts directory and returns a value."""
    artifacts_dir = get_config_value("ARTIFACTS_DIR")
    if not os.path.exists(artifacts_dir + "/" + file_name):
        return None
    with open(artifacts_dir + "/" + file_name, "r") as art_file:
        info = json.load(art_file)
        return info[value_name]
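This is the reading half of the artifacts-directory handoff: examples #10 and #17 write vhd_url.json after an upload, and example #5 retrieves the value. A hedged equivalent call:

vhd_url = read_file_value('vhd_url.json', 'vhd_url')  # returns None if the file is absent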
Code example #28
    def __init__(self):
        """Set rules and transformations for alibaba image name"""
        # The name of the user-defined image, [2, 128] English or Chinese characters.
        # It must begin with an uppercase/lowercase letter or a Chinese character,
        # and may contain numbers, _ or -. It cannot begin with http:// or https://.

        min_chars = int(get_config_value('ALIBABA_IMAGE_NAME_LENGTH_MIN'))
        max_chars = int(get_config_value('ALIBABA_IMAGE_NAME_LENGTH_MAX'))
        rules = ImageNameRules(min_chars,
                               max_chars,
                               match_regex=r'^[a-zA-Z][a-zA-Z0-9\.\-_]+$')

        transform = ImageNameTransform(disallowed_regex=r'[^a-zA-Z0-9\-_]',
                                       replacement_char='-',
                                       to_lower=False)

        super().__init__(rules, transform)
Code example #29
    def __init__(self, input_disk_path, working_dir):
        """Initialize azure disk object."""
        # First initialize the super class.
        super().__init__(input_disk_path, working_dir)

        self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
        self.container_name = get_config_value('AZURE_STORAGE_CONTAINER_NAME')

        try:
            self.svc = PageBlobService(connection_string=self.connection_string)
        except ValueError:
            LOGGER.error("Could not create a PageBlobService with connection_string=%s",
                         self.connection_string)
            raise RuntimeError("Runtime Error during Instantiating Azure Blob Service")

        self.progress_cb_lu = 0
        self.metadata = CloudImageMetadata()
Code example #30
    def __init__(self, working_dir, input_disk_path):
        super().__init__(working_dir, input_disk_path)
        self.session = Session(
            aws_access_key_id=get_config_value('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=get_config_value('AWS_SECRET_ACCESS_KEY'),
            region_name=get_config_value('AWS_REGION'))

        self.disk = AWSDisk(input_disk_path, working_dir, self.session)

        # Create ec2 client object for performing low-level image actions.
        self.ec2_client = self.session.client('ec2')

        # Record REGION in the metadata.
        self.metadata = CloudImageMetadata()
        self.metadata.set(self.__class__.__name__, 'location',
                          self.session.region_name)
        self.snapshot = None
        self.image_id = None
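This constructor supplies the Session and ec2_client that examples #7 and #16 poll through, as well as the CloudImageMetadata record the upload examples also write to; the retry settings those examples read (AWS_CREATE_IMAGE_* and AWS_IMPORT_SNAPSHOT_TASK_*) come through the same get_config_value helper sketched after example #1.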