Example #1
    def is_image_ready(self, image_name):
        """Checks if the given image is ready."""
        def _is_image_ready():
            """Checks if an image with image_name exists and status is READY"""
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
            if not result or result['status'] == 'FAILED':
                raise RuntimeError(
                    "Creation of image [{}] failed!".format(image_name))
            return result['status'] == 'READY'

        retrier = Retrier(_is_image_ready)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for image [%s] to be ready.", image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] is ready.", image_name)
                self.metadata.set(self.__class__.__name__, 'image_id',
                                  image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            return False
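
All of these examples lean on a `Retrier` helper that the page never shows. Below is a minimal sketch of what it plausibly looks like, assuming the wrapped callable returns a truthy value on success, extra constructor arguments are forwarded on every attempt, and exceptions propagate to the caller (all consistent with how the examples use it):

import time

class Retrier:
    """Hypothetical sketch of the retry helper assumed by these examples."""

    def __init__(self, func, *args):
        self.func = func        # callable returning True on success
        self.args = args        # forwarded to func on every attempt
        self.tries = 3          # callers override this from config
        self.delay = 1          # seconds between attempts

    def execute(self):
        """Return True on the first successful attempt, False when exhausted."""
        for _ in range(self.tries):
            if self.func(*self.args):
                return True
            time.sleep(self.delay)
        return False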
Example #2
    def post_to_url(self, cir_url, timeout):
        """Post data to URL with retries"""
        def _post_to_url():
            try:
                # Note: Total retry time is timeout (in the requests.post call) + retrier.delay
                LOGGER.debug('Post to URL:%s', cir_url)
                response = requests.post(url=cir_url,
                                         data=self.registration_data,
                                         timeout=timeout)
                LOGGER.debug('Response: %s:%s', response, response.text)
            except (requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError) as exception:
                LOGGER.debug('Caught exception:%s', exception)
                return False
            return True

        retrier = Retrier(_post_to_url)
        retrier.tries = int(get_config_value('IMAGE_REGISTRATION_RETRY_COUNT'))
        retrier.delay = int(get_config_value('IMAGE_REGISTRATION_RETRY_DELAY'))
        LOGGER.info('Attempt to register cloud image.')
        LOGGER.debug('Register cloud image detail: %s', self.registration_data)

        if retrier.execute():
            LOGGER.info('Cloud image was registered')
        else:
            raise RuntimeError(
                'Exhausted all [{}] retries for image registration.'.format(
                    retrier.tries))
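
As the comment in `_post_to_url` notes, each failed attempt can block for up to `timeout` seconds inside `requests.post()` before the retrier sleeps for `delay` seconds, so the worst case for registration is roughly `tries * (timeout + delay)` seconds of wall time.

Example #3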
    def wait_for_image_availability(self):
        """ Wait for image to be created and available """
        def _wait_for_image_availability():
            """Awaits the describe_images() to successfully acknowledge availability
            of the given image."""
            try:
                response = self.ec2_client.describe_images(ImageIds=[self.image_id])
            except (ClientError, ParamValidationError) as botocore_exception:
                LOGGER.exception(botocore_exception)
                raise RuntimeError('EC2.Client.describe_images() failed for {} !'.
                                   format(self.image_id)) from botocore_exception
            if not response:
                raise RuntimeError('EC2.Client.describe_images() returned an empty response!')
            try:
                if response['Images'][0]['State'] == 'available':
                    return True
                return False
            except (KeyError, IndexError) as image_describe_exception:
                LOGGER.exception(image_describe_exception)
                raise RuntimeError('EC2.Client.describe_images() did not have ' +
                                   '[\'Images\'][0][\'State\'] in its response: response \'{}\''.
                                   format(response)) from image_describe_exception

        retrier = Retrier(_wait_for_image_availability)
        retrier.tries = int(get_config_value('AWS_CREATE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_CREATE_IMAGE_RETRY_DELAY'))
        LOGGER.info('Waiting for the image %s to become available.', self.image_id)

        if retrier.execute():
            LOGGER.info('Image [%s] is created in AWS.', self.image_id)
        else:
            raise RuntimeError('Exhausted all \'{}\' retries for image {} to become available.'.
                               format(retrier.tries, self.image_id))
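
boto3 also ships a built-in waiter that implements the same polling loop; a sketch of an equivalent call, assuming the configured retry count and delay map onto `WaiterConfig` (the timing values shown are placeholders):

        # Hypothetical alternative to the hand-rolled Retrier loop above.
        waiter = self.ec2_client.get_waiter('image_available')
        waiter.wait(
            ImageIds=[self.image_id],
            WaiterConfig={'Delay': 15, 'MaxAttempts': 40})  # assumed timing

Example #4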
    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(
                    self.container_name, self.uploaded_disk_name, self.disk_to_upload,
                    max_connections=cnum, metadata=self._get_tags(),
                    progress_callback=self._progress_cb, timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name,
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s",
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            # AzureMissingResourceHttpError is a subclass of AzureException, and the
            # two original handlers were identical, so one handler covers both.
            except (AzureMissingResourceHttpError, AzureException):
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error(
            "blob [%s] was still not ready after checking [%d] times!",
            self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")
Example #5
    def upload(self):
        """Upload disk with OSS2 (Alibaba Python SDK)
           resumable_upload is used to upload large size files"""
        number_of_threads = self.set_number_of_threads()
        self.set_bucket()
        AlibabaDisk.iter = 0

        def _resumable_upload():
            self.uploaded_disk_name = 'bakery-' + os.path.basename(self.disk_to_upload) + '-' + \
                                      ''.join(random.choices(string.digits, k=6))
            AlibabaDisk.iter += 1
            LOGGER.info('Upload iteration number %d', AlibabaDisk.iter)
            LOGGER.info('Uploading %s as %s', self.disk_to_upload,
                        self.uploaded_disk_name)
            start_time = time.time()
            time.sleep(1)
            result = False
            try:
                resumable_store = oss2.resumable.ResumableStore(
                    root=self.working_dir)
                oss2.resumable_upload(self.bucket,
                                      self.uploaded_disk_name,
                                      self.disk_to_upload,
                                      store=resumable_store,
                                      num_threads=number_of_threads)
                result = True
            except FileNotFoundError as exc:
                LOGGER.exception(exc)
                raise RuntimeError('Could not find file to upload: {}'.format(
                    self.disk_to_upload)) from exc
            except oss2.exceptions.NoSuchUpload as exc:
                LOGGER.error('Upload failed. UploadId: %s',
                             exc.details['UploadId'])
                LOGGER.exception(exc)

            LOGGER.info('Iteration %d of upload took %d seconds',
                        AlibabaDisk.iter,
                        time.time() - start_time)
            if not result:
                self.upload_cleanup()
            return result

        retrier = Retrier(_resumable_upload)
        retrier.tries = int(
            get_config_value('ALIBABA_UPLOAD_FILE_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('ALIBABA_UPLOAD_FILE_RETRY_DELAY'))

        if retrier.execute():
            LOGGER.info('Finished upload of %s', self.disk_to_upload)
        else:
            raise RuntimeError(
                'Exhausted all {} retries for file {} to upload.'.format(
                    retrier.tries, self.uploaded_disk_name))
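
`oss2.resumable_upload()` needs a `Bucket` handle; a sketch of how `set_bucket()` might prepare `self.bucket`, where the credentials, endpoint, and bucket name are placeholders, not taken from this page:

    def set_bucket(self):
        """Hypothetical sketch; fill in real credentials and region."""
        auth = oss2.Auth('<access-key-id>', '<access-key-secret>')
        self.bucket = oss2.Bucket(auth,
                                  'https://oss-cn-hangzhou.aliyuncs.com',  # assumed endpoint
                                  '<bucket-name>')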
Example #6
    def is_snapshot_ready(self, import_task_id):
        """Checks if a snapshot with the given import_task_id exists and its
        status is 'completed'."""
        def _is_snapshot_ready():
            """Awaits the import operation represented by the import_task_id to reach
            'completed' status."""
            try:
                LOGGER.trace("Querying the status of import-task [%s].", import_task_id)
                response = \
                    self.ec2_client.describe_import_snapshot_tasks(
                        ImportTaskIds=[import_task_id])
                if not response:
                    raise RuntimeError("describe_import_snapshot_tasks() returned none response!")

                LOGGER.trace("Response from describe_import_snapshot_tasks => '%s'",
                             response)
                task_status = response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['Status']
                if task_status == 'error':
                    # Print the response before raising an exception.
                    LOGGER.debug("describe_import_snapshot_tasks() response for [%s] => [%s]",
                                 import_task_id, response)
                    raise RuntimeError("import-snapshot task [{}] in unrecoverable 'error' state.".
                                       format(import_task_id))

                return task_status == 'completed'
            except ClientError as client_error:
                LOGGER.exception(client_error)
                raise RuntimeError("describe_import_snapshot_tasks() failed for [{}]!".
                                   format(import_task_id)) from client_error

        retrier = Retrier(_is_snapshot_ready)
        retrier.tries = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_DELAY'))
        LOGGER.info("Waiting for the import snapshot task [%s] to complete.", import_task_id)
        try:
            if retrier.execute():
                LOGGER.info("import_snapshot_task [%s] is completed.", import_task_id)
                # Call it one last time to get the snapshot_id.
                response = self.ec2_client.describe_import_snapshot_tasks(
                    ImportTaskIds=[import_task_id])
                self.snapshot_id = \
                    response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['SnapshotId']
                LOGGER.info("SnapshotID = [%s].", self.snapshot_id)
                return True
            LOGGER.warning("import_snapshot_task [%s] didn't complete after checking [%d] times!",
                           import_task_id, retrier.tries)
            return False
        except RuntimeError as runtime_exception:
            LOGGER.exception(runtime_exception)
            raise
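
The `import_task_id` being polled here would come from an earlier `import_snapshot()` call; a sketch of that step, with the S3 location and disk format as placeholder assumptions:

        # Hypothetical origin of import_task_id.
        response = self.ec2_client.import_snapshot(
            Description='image import',            # assumed description
            DiskContainer={
                'Format': 'vmdk',                  # assumed source format
                'UserBucket': {'S3Bucket': '<bucket>', 'S3Key': '<key>'}})
        import_task_id = response['ImportTaskId']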
Example #7
    def wait_for_image_deletion(self):
        """ Wait for image to be deleted """

        def _wait_for_image_deletion():
            """ Check if image does not exist """
            return not self.does_image_exist()

        retrier = Retrier(_wait_for_image_deletion)
        retrier.tries = int(get_config_value('AZURE_DELETE_IMAGE_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AZURE_DELETE_IMAGE_RETRY_DELAY'))

        if retrier.execute():
            LOGGER.info('Preexisting image \'%s\' has been deleted.', self.image_name)
        else:
            raise RuntimeError('Exhausted all {} retries for image \'{}\' to be deleted.'.
                               format(retrier.tries, self.image_name))
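
A sketch of what the `does_image_exist()` helper might look like with azure-mgmt-compute; `self.compute_client`, `self.resource_group`, and the SDK vintage are assumptions:

    def does_image_exist(self):
        """Hypothetical: treat a successful GET on the image as existence."""
        try:
            self.compute_client.images.get(self.resource_group, self.image_name)
            return True
        except CloudError:  # msrestazure.azure_exceptions.CloudError, assumed SDK
            return False

Example #8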
    def monitor_task(self, task_id, task_status_count, task_status_delay):
        """ Monitor task progress by issuing DescribeTaskAttributeRequest requests.
            task_status_count - maximum number of requests
            task_status_delay - delay between requests
            Returns True if the task succeeded, False otherwise. """
        self.prev_progress = None
        unsuccessful_finish_msg = 'Task finished unsuccessfully, note \'TaskProcess\' value'
        def _monitor_task():
            task = self.client.describe_task_attribute(task_id)
            if 'TaskProcess' not in task or 'TaskStatus' not in task:
                LOGGER.info('Alibaba response to DescribeTaskAttributeRequest:')
                LOGGER.info(json.dumps(task, sort_keys=True, indent=4, separators=(',', ': ')))
                raise RuntimeError('TaskStatus and/or TaskProcess were not found in the '
                                   'response; cannot monitor task')

            if task['TaskStatus'] not in ('Processing', 'Waiting', 'Finished'):
                LOGGER.info('Alibaba response to DescribeTaskAttributeRequest:')
                LOGGER.info(json.dumps(task, sort_keys=True, indent=4, separators=(',', ': ')))
                raise RuntimeError('Unexpected TaskStatus \'{}\' for task \'{}\''.
                                   format(task['TaskStatus'], task_id))

            if task['TaskProcess'] != self.prev_progress:
                self.prev_progress = task['TaskProcess']
                LOGGER.info('Task progress: \'%s\'', task['TaskProcess'])
            if task['TaskStatus'] == 'Finished':
                if task['TaskProcess'] == '100%':
                    return True
                LOGGER.info(unsuccessful_finish_msg)
                LOGGER.info('Alibaba response to DescribeTaskAttributeRequest:')
                LOGGER.info(json.dumps(task, sort_keys=True, indent=4, separators=(',', ': ')))
                raise RuntimeError(unsuccessful_finish_msg)
            return False

        retrier = Retrier(_monitor_task)
        retrier.tries = task_status_count
        retrier.delay = task_status_delay
        try:
            return retrier.execute()
        except RuntimeError as exp:
            if exp.args[0] == unsuccessful_finish_msg:
                return False
            raise
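
A hypothetical call site, with the polling parameters pulled from assumed config keys (the real key names are not shown on this page):

        succeeded = self.monitor_task(
            task_id,
            int(get_config_value('ALIBABA_TASK_STATUS_COUNT')),   # assumed key
            int(get_config_value('ALIBABA_TASK_STATUS_DELAY')))   # assumed key
        if not succeeded:
            raise RuntimeError('Task {} did not finish successfully'.format(task_id))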
Example #9
    def is_image_deleted(self, image_name):
        """Waits for the image to be deleted."""

        retrier = Retrier(lambda s: not self.image_exists(s), image_name)
        retrier.tries = int(
            get_config_value('GCE_IMAGE_DELETE_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('GCE_IMAGE_DELETE_COMPLETED_RETRY_DELAY'))
        LOGGER.info('Waiting for image [%s] to be deleted.', image_name)
        try:
            if retrier.execute():
                LOGGER.info("Image [%s] was deleted.", image_name)
                return True
            LOGGER.warning(
                "Image [%s] was still not deleted after checking [%d] times!",
                image_name, retrier.tries)
            return False
        except HttpError as exp:
            LOGGER.exception(exp)
            return False
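
Note how this variant forwards `image_name` through the retrier into the lambda, matching the `Retrier(func, *args)` shape sketched under Example #1.

Example #10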
def main():
    """main publish telemetry information function"""
    # create log handler for the global LOGGER
    create_log_handler()

    # gather telemetry info
    build_info_telemetry = BuildInfoTelemetry()
    LOGGER.debug("telemetry info:")
    LOGGER.debug(build_info_telemetry.build_info)

    version = build_info_telemetry.build_info['product']['version']

    # Check if specific api key is set, if not use default
    if environ.get("F5_TEEM_API_KEY") is not None:
        environ['F5_TEEM_API_ENVIRONMENT'] = "staging"
        f5_api_key = environ.get("F5_TEEM_API_KEY")
    else:
        f5_api_key = 'mmhJU2sCd63BznXAXDh4kxLIyfIMm3Ar'

    generated_uuid = str(uuid.uuid4())
    LOGGER.debug("telemetry UUID: %s", generated_uuid)
    client_info = {
        'name': 'f5-image-generator',
        'version': str(version),
        'id': generated_uuid
    }
    telemetry_client = AnonymousDeviceClient(client_info, api_key=f5_api_key)

    retrier = Retrier(_publish_telemetry_database, build_info_telemetry,
                      telemetry_client)
    retrier.tries = int(get_config_value('PUBLISH_TELEMETRY_TASK_RETRY_COUNT'))
    retrier.delay = int(get_config_value('PUBLISH_TELEMETRY_TASK_RETRY_DELAY'))
    if retrier.execute():
        LOGGER.info("Publishing to telemetry success.")
        return True
    LOGGER.info("Publishing to telemetry did not succeed.")
    sys.exit(0)
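
The `_publish_telemetry_database` helper wrapped by the retrier is not shown on this page; below is a sketch assuming the f5-teem client's `report()` API, where the type label and version are guesses:

def _publish_telemetry_database(build_info_telemetry, telemetry_client):
    """Hypothetical: report the build info and signal success to the Retrier."""
    telemetry_client.report(
        build_info_telemetry.build_info,
        telemetry_type='Image Generator',   # assumed label
        telemetry_type_version='1')         # assumed version
    return True

Example #11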
    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """

        def upload_azure():
            with open(self.disk_to_upload, 'rb') as vhd_file:
                self.blob.upload_blob(
                    vhd_file.read(),
                    blob_type="PageBlob",
                    metadata=self._get_tags()
                    )

        def _upload_impl():
            """ Azure blob upload implementation """
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
                LOGGER.info("create blob client")
                self.blob = BlobClient.from_connection_string(
                    conn_str=self.connection_string,
                    container_name=self.container_name,
                    blob_name=self.uploaded_disk_name,
                    connection_timeout=timeout
                    )

                LOGGER.info(self._get_tags())
                upload_azure_p = Process(target=upload_azure)
                upload_azure_p.start()
                # Poll the child every 10 seconds, for up to `timeout` seconds.
                limit = int(timeout / 10)
                for _ in range(limit):
                    if not upload_azure_p.is_alive():
                        break
                    sleep(10)
                    os.write(1, b".")
                else:
                    # Don't leak a still-running child on timeout.
                    upload_azure_p.terminate()
                    raise TimeoutError

                LOGGER.info(self.blob.get_blob_properties())
                local_blob_size = os.stat(self.disk_to_upload).st_size

                uploaded_blob_size = self.blob.get_blob_properties().get("size")

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))
                if uploaded_blob_size != local_blob_size:
                    return False

            except (AzureMissingResourceHttpError, AzureException):
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except TimeoutError:
                LOGGER.error("Timeout while uploading")
                return False

            self.uploaded_disk_url = self.blob.url
            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json", "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url', self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error("blob [%s] was still not ready after checking [%d] times!",
                     self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")