Example #1
def get_linux_version(package, operating_system):
    """Returns Ubuntu version number of package."""
    if operating_system == "Ubuntu":
        with subprocess.Popen(["dpkg", "-s", package],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as process:
            output = process.communicate()
            LOGGER.debug("dpkg response:")
            LOGGER.debug(output[0])
            LOGGER.debug(output[1])
            if b"not installed" in output[1]:
                return "not installed"
            # Get Version from output
            version = str(output[0])
            if "Version: " in version:
                version = str(output[0]).split("Version: ")[1]
                version = version.split('\\n', 1)[0]
            return version
    elif operating_system == "Alpine":
        with subprocess.Popen(["sudo", "apk", "search", "-v", "-x", package],
                              stdout=subprocess.PIPE) as process:
            output = str(process.communicate()[0])
            if output == "b''":
                return "not installed"
            if output.startswith("b'"):
                output = output.split("b'")[1]
            return output.split(" ")[0]
    else:
        LOGGER.error("operating system %s not supported", operating_system)
        return ""
Example #2
def get_list_from_config_yaml(key):
    """Retrieves a value from the config system and returns its YAML file or JSON string contents
    as a list.
    Returns an empty list if the config value is empty."""

    # Retrieve string value for key
    value_string = get_config_value(key)
    if not value_string:
        return []

    # Convert YAML file or JSON string to a list
    if value_string.endswith('.yml') or value_string.endswith('.yaml'):
        try:
            with open(value_string, "r") as value_file:
                value_list = yaml.safe_load(value_file)
        except YAMLError:
            LOGGER.error("Unable to parse YAML from file [%s]!", value_string)
            raise
    else:
        try:
            value_list = json.loads(value_string)
        except ValueError:
            LOGGER.error("Unable to parse JSON from string [%s]!",
                         value_string)
            raise

    # Return the list
    return value_list
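
A minimal standalone sketch of the dispatch above, with the config and logging plumbing stripped out (the helper name and sample value below are illustrative, not part of the original module):

# Illustrative only: the same YAML-file-or-JSON-string dispatch in isolation.
import json
import yaml

def _parse_list(value_string):
    if value_string.endswith(('.yml', '.yaml')):
        with open(value_string, "r") as value_file:
            return yaml.safe_load(value_file)
    return json.loads(value_string)

print(_parse_list('["111111111111", "222222222222"]'))   # ['111111111111', '222222222222']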
Example #3
        def _resumable_upload():
            self.uploaded_disk_name = 'bakery-' + os.path.basename(self.disk_to_upload) + '-' + \
                                      ''.join(random.choices(string.digits, k=6))
            AlibabaDisk.iter += 1
            LOGGER.info('Upload iteration number %d', AlibabaDisk.iter)
            LOGGER.info('Uploading %s as %s', self.disk_to_upload,
                        self.uploaded_disk_name)
            start_time = time.time()
            time.sleep(1)
            result = False
            try:
                resumable_store = oss2.resumable.ResumableStore(
                    root=self.working_dir)
                oss2.resumable_upload(self.bucket,
                                      self.uploaded_disk_name,
                                      self.disk_to_upload,
                                      store=resumable_store,
                                      num_threads=number_of_threads)
                result = True
            except FileNotFoundError as exc:
                LOGGER.exception(exc)
                raise RuntimeError('Could not find file to upload: {}'.format(
                    self.disk_to_upload))
            except oss2.exceptions.NoSuchUpload as exc:
                LOGGER.error('Upload failed. UploadId: %s',
                             exc.details['UploadId'])
                LOGGER.exception(exc)

            LOGGER.info('Iteration %d of upload took %d seconds',
                        AlibabaDisk.iter,
                        time.time() - start_time)
            if not result:
                self.upload_cleanup()
            return result
    def share_image(self):
        """Reads a list of AWS accounts and shares the AMI with each of those accounts."""
        share_account_ids = get_list_from_config_yaml('AWS_IMAGE_SHARE_ACCOUNT_IDS')
        if share_account_ids:
            LOGGER.info("Share the AMI with multiple AWS accounts.")
            for dest_account_id in share_account_ids:
                try:
                    LOGGER.info('Sharing image with account-id: %s', dest_account_id)

                    # Share the image with the destination account
                    response = self.ec2_client.modify_image_attribute(
                        ImageId=self.image_id,
                        Attribute='launchPermission',
                        OperationType='add',
                        UserIds=[str(dest_account_id)]
                    )
                    LOGGER.trace("image.modify_attribute response => %s", response)
                except ClientError as client_error:
                    LOGGER.exception(client_error)
                    # Log the error around malformed Account-id and move on.
                    if client_error.response['Error']['Code'] == 'InvalidAMIAttributeItemValue':
                        LOGGER.error('Malformed account-id: %s', dest_account_id)
                    else:
                        # Any other type of error can be irrecoverable and might
                        # point to a deeper malaise.
                        raise RuntimeError('aws IMAGE was not shared with other accounts') \
                            from client_error

            # Acknowledge all the account-ids that the image was shared with.
            self.is_share_image_succeeded(share_account_ids)
        else:
            LOGGER.info("No account IDs found for sharing AMI")
    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(self.container_name, self.uploaded_disk_name, \
                         self.disk_to_upload, max_connections=cnum, \
                         metadata=self._get_tags(), progress_callback=self._progress_cb, \
                         timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name, \
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(
            get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error(
            "blob [%s] was still not ready after checking [%d] times!",
            self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")
Example #6
    def tag_image(self, image_name):
        """Associate image tags with image"""
        LOGGER.info('Set image labels.')

        # Get current labels fingerprint.  To avoid/detect conflicts, you must
        # provide the current label fingerprint (reference) when you request to
        # set image labels.  This fingerprint value is updated whenever labels
        # are updated and the set labels request will fail if the labels were
        # updated out of band.
        try:
            # pylint: disable=no-member
            request = self.gce_service.images().get(
                project=self.gce_project_id, image=image_name)
            result = request.execute()
            label_fingerprint = result['labelFingerprint']
        except HttpError as exp:
            LOGGER.error("Exception setting image labels:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        if label_fingerprint is None or label_fingerprint == '':
            LOGGER.info('Label fingerprint was empty.')
            return False

        cloud_image_tags = CloudImageTags(self.metadata)
        cloud_image_tags.transform_values(to_lower=True,
                                          disallowed_regex='[^a-z0-9-]')
        image_labels = cloud_image_tags.get()

        set_labels_body = {
            "labels": image_labels,
            "labelFingerprint": label_fingerprint
        }

        try:
            # pylint: disable=no-member
            request = self.gce_service.images().setLabels(
                project=self.gce_project_id,
                resource=image_name,
                body=set_labels_body)
            result = request.execute()
        except HttpError as exp:
            LOGGER.error("Exception setting image labels:")
            LOGGER.exception(exp)
            return False

        if not result:
            return False

        LOGGER.debug("Image set labels response: %s", result)
        return True
def _publish_telemetry_database(build_info_telemetry, telemetry_client):
    """Retry function for publishing to telemetry servers."""
    LOGGER.info('Attempt to post telemetry data.')
    try:
        telemetry_client.report(build_info_telemetry.build_info,
                                telemetry_type='Installation Usage',
                                telemetry_type_version='1')
    except ReadTimeoutError:
        LOGGER.error(
            "ReadTimeoutError occured during publishing to telemetry database")
        return False
    return True
Example #8
def ensure_value_from_dict(value_dict, key):
    """Attempts to retrieve a value from a dictionary.  Throws an exception if it's empty."""
    try:
        value = value_dict[key]
    except KeyError:
        LOGGER.error("Unable to retrieve key [%s] from dictionary!", key)
        raise
    if not value:
        error_message = "Dictionary contained an empty value for key [{}]!".format(key)
        LOGGER.error(error_message)
        raise ValueError(error_message)
    return value
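
A short hypothetical usage example (the dictionary contents are illustrative): a present, non-empty key returns its value, an empty value raises ValueError, and a missing key re-raises the KeyError.

settings = {"region": "us-west-2", "bucket": ""}
print(ensure_value_from_dict(settings, "region"))    # us-west-2
# ensure_value_from_dict(settings, "bucket")         # raises ValueError
# ensure_value_from_dict(settings, "missing")        # raises KeyError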
Example #9
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(
                get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(self.container_name, self.uploaded_disk_name, \
                         self.disk_to_upload, max_connections=cnum, \
                         metadata=self._get_tags(), progress_callback=self._progress_cb, \
                         timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name, \
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s",
                             self.disk_to_upload)
                return False

            self.uploaded_disk_url = self.svc.make_blob_url(
                self.container_name, self.uploaded_disk_name)

            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json",
                      "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url',
                              self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id',
                              self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True
    def clean_up(self):
        """Clean-up the uploaded disk after image generation."""
        # Delete the uploaded disk as it no longer needs to be retained.
        try:
            if self.bucket and self.uploaded_disk_name:
                self.delete_blob()
        except google.cloud.exceptions.NotFound as exception:
            # Report the exception without propagating it up to ensure this
            # doesn't stop the rest of the clean-up.
            LOGGER.error("Caught exception during '%s' disk deletion.",
                         self.uploaded_disk_name)
            LOGGER.exception(exception)
        except RuntimeError as runtime_exception:
            LOGGER.error("Caught runtime exception during '%s' disk deletion.",
                         self.uploaded_disk_name)
            LOGGER.exception(runtime_exception)
def main():
    """ Wrapper to read user defined values for LV sizes """
    # create log handler for the global LOGGER
    create_log_handler()

    if len(sys.argv) != 2:
        LOGGER.error('%s received %s arguments, expected 1', basename(__file__), len(sys.argv) - 1)
        sys.exit(1)

    try:
        read_lv_sizes(sys.argv[1])
    except RuntimeError as runtime_exception:
        LOGGER.exception(runtime_exception)
        sys.exit(1)

    sys.exit(0)
    def clean_up(self):
        """Cleans-up the cloud and local artifacts created by this object."""
        try:
            if self.should_clean is True:
                if self.cloud_image is not None:
                    LOGGER.info("Cleaning up image controller constructs.")
                    self.cloud_image.clean_up()
                    self.cloud_image = None
                if self.working_dir is not None and os.path.isdir(self.working_dir):
                    LOGGER.debug("Removing working dir '%s'.", self.working_dir)
                    shutil.rmtree(self.working_dir)
                    self.working_dir = None
            else:
                LOGGER.debug("Skipping removal of working dir '%s'.", self.working_dir)
        except OSError as os_exception:
            # Simply log the exception without propagating.
            LOGGER.error(os_exception)
    def __init__(self, input_disk_path, working_dir):
        """Initialize azure disk object."""
        # First initialize the super class.
        super().__init__(input_disk_path, working_dir)

        self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
        self.container_name = get_config_value('AZURE_STORAGE_CONTAINER_NAME')

        try:
            self.svc = PageBlobService(connection_string=self.connection_string)
        except ValueError:
            LOGGER.error("Could not create a PageBlobService with connection_string=%s",
                         self.connection_string)
            raise RuntimeError("Runtime Error during Instantiating Azure Blob Service")

        self.progress_cb_lu = 0
        self.metadata = CloudImageMetadata()
    def create_bucket(self):
        """Creates a bucket self.bucket_name in S3"""
        try:
            self.s3_resource.create_bucket(Bucket=self.bucket_name,
                                           CreateBucketConfiguration={
                                               'LocationConstraint':
                                               self.session.region_name
                                           })
        except ClientError as client_error:
            # Suppress the error around trying to create an already existing bucket.
            if client_error.response['Error']['Code'] == 'BucketAlreadyExists':
                LOGGER.error(
                    "Tried to create an already existing bucket '%s'.",
                    self.bucket_name)
            else:
                LOGGER.exception(client_error)
                raise
    def get_bucket(self):
        """Get the S3 bucket object for the self.bucket_name"""
        bucket = None
        try:
            if self.is_bucket_exist() is True:
                LOGGER.debug("'%s' bucket exists.", self.bucket_name)
                bucket = self.s3_resource.Bucket(self.bucket_name)
        except ClientError as client_error:
            LOGGER.exception(client_error)
            # Suppress the error around non-existent bucket.
            if client_error.response['Error']['Code'] == 'NoSuchBucket':
                LOGGER.error("Bucket '%s' doesn't exist.", self.bucket_name)
            else:
                LOGGER.exception(client_error)
                # Re-raise the exception to force the caller to handle it.
                raise

        return bucket
def get_linux_version(package, operating_system):
    """Returns Ubuntu version number of package."""
    if operating_system == "Ubuntu":
        with subprocess.Popen(["apt", "show", package],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as process:
            output = str(process.communicate()[0])
            version = output.split("\\n")[1].split(" ")[1]
            return version
    elif operating_system == "Alpine":
        process = subprocess.Popen(
            ["sudo", "apk", "search", "-v", "-x", package],
            stdout=subprocess.PIPE)
        output = str(process.communicate()[0])
        return output.split(" ")[0]
    else:
        LOGGER.error("operating system %s not supported", operating_system)
        return ""
def main():
    """main read injected iles function"""
    # create log handler for the global LOGGER
    create_log_handler()

    if len(sys.argv) != 3:
        LOGGER.error('%s received %s arguments, expected 2',
                     basename(__file__),
                     len(sys.argv) - 1)
        sys.exit(1)

    try:
        read_injected_files(sys.argv[1], sys.argv[2])
    except RuntimeError as runtime_exception:
        LOGGER.exception(runtime_exception)
        sys.exit(1)

    sys.exit(0)
def main():
    """main read function"""
    # Add a file handler to the global LOGGER
    log_file = get_config_value('LOG_FILE')
    log_level = get_config_value('LOG_LEVEL').upper()
    create_file_handler(LOGGER, log_file, log_level)

    if len(sys.argv) != 2:
        LOGGER.error('%s received %s arguments, expected 1',
                     basename(__file__), len(sys.argv) - 1)
        sys.exit(1)

    try:
        read_injected_files(sys.argv[1])
    except RuntimeError as runtime_exception:
        LOGGER.exception(runtime_exception)
        sys.exit(1)

    sys.exit(0)
Example #19
def get_dict_from_config_json(key):
    """Retrieves a value from the config system and returns its JSON file or JSON string contents
    as a dictionary."""

    # Retrieve string value for key
    value_string = get_config_value(key)
    if not value_string:
        error_message = "Value for key [{}] is missing.  Unable to proceed!".format(
            key)
        LOGGER.error(error_message)
        raise ValueError(error_message)

    # Convert JSON file or JSON string to a dictionary
    if value_string.endswith('.json'):
        try:
            with open(value_string, "r") as value_file:
                value_dict = json.load(value_file)
        except ValueError:
            LOGGER.error("Unable to parse JSON from file [%s]!", value_string)
            raise
    else:
        try:
            value_dict = json.loads(value_string)
        except ValueError:
            LOGGER.error("Unable to parse JSON from string [%s]!",
                         value_string)
            raise

    # Return the dictionary
    return value_dict
Example #20
    def create_image(self, image_name):
        """Create the image, deleting any existing image with the same name first."""
        LOGGER.info("Checking if the image '%s' already exists.", image_name)

        # Check if an image with image_name already exists. If so, delete the image
        result = self.image_exists(image_name)
        if not result:
            LOGGER.info("The image '%s' does not exist.", image_name)
        else:
            LOGGER.info("The image '%s' exists.", image_name)
            result = self.delete_image(image_name)
            if not result:
                LOGGER.error("Could not delete the image '%s', exiting.",
                             image_name)
                raise SystemExit(-1)

        LOGGER.info("Attempting to create an image '%s'.", image_name)

        result = self.insert_image(image_name)
        if not result:
            LOGGER.error("The image '%s' was not created successfully.",
                         image_name)
            raise SystemExit(-1)

        result = self.tag_image(image_name)
        if not result:
            LOGGER.error("The image '%s' was not tagged successfully.",
                         image_name)
            raise SystemExit(-1)

        LOGGER.info("Image '%s' creation succeeded.", image_name)
        def _upload_impl():
            """ Azure blob upload implementation """
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
                LOGGER.info("create blob client")
                self.blob = BlobClient.from_connection_string(
                    conn_str=self.connection_string,
                    container_name=self.container_name,
                    blob_name=self.uploaded_disk_name,
                    connection_timeout=timeout
                    )

                LOGGER.info(self._get_tags())
                nonlocal upload_azure
                upload_azure_p = Process(target=upload_azure)
                upload_azure_p.start()
                limit = int(timeout/10)
                for _ in range(limit):
                    if not upload_azure_p.is_alive():
                        break
                    sleep(10)
                    os.write(1, b".")
                else:
                    raise TimeoutError

                LOGGER.info(self.blob.get_blob_properties())
                local_blob_size = os.stat(self.disk_to_upload).st_size

                uploaded_blob_size = self.blob.get_blob_properties().get("size")

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))
                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except TimeoutError:
                LOGGER.error("Timeout while uploading")
                return False

            self.uploaded_disk_url = self.blob.url
            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json", "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url', self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True
        def _upload_impl():
            """ Azure blob upload implementation """
            cnum = int(get_config_value('AZURE_BLOB_UPLOAD_CONCURRENT_THREAD_COUNT'))
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.svc.create_blob_from_path(self.container_name, self.uploaded_disk_name, \
                         self.disk_to_upload, max_connections=cnum, \
                         metadata=self._get_tags(), progress_callback=self._progress_cb, \
                         timeout=timeout)

                uploaded_blob = self.svc.get_blob_properties(self.container_name, \
                                                             self.uploaded_disk_name)

                uploaded_blob_size = uploaded_blob.properties.content_length
                local_blob_size = getsize(self.disk_to_upload)

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))

                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False

            vhd_url = self.svc.make_blob_url(self.container_name, self.uploaded_disk_name)

            self.metadata.set(self.__class__.__name__, 'vhd_url', vhd_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded vhd is: %s', vhd_url)
            return True
Example #23
    def __init__(self, input_disk_path, working_dir=None):
        if not os.path.isfile(input_disk_path):
            self.input_disk_path = None
            LOGGER.error(
                "'%s' (input_disk_path to BaseDisk.__init__) is not a file.",
                input_disk_path)
            LOGGER.error('BaseDisk cannot be properly initialized.')
            raise RuntimeError(
                'Invalid input disk path {}'.format(input_disk_path))
        self.input_disk_path = input_disk_path

        # If a working directory is given, make sure it exists.
        if working_dir is not None:
            if not os.path.isdir(working_dir):
                LOGGER.error(
                    "'%s' (working_dir to BaseDisk.__init__) is not a directory.",
                    working_dir)
                raise RuntimeError(
                    'Invalid working dir path {}'.format(working_dir))

        self.working_dir = working_dir
        LOGGER.debug("BaseDisk.input_disk_path is '%s'.", self.input_disk_path)
        self.disk_to_upload = None
        self.uploaded_disk_name = None
Example #24
def get_installed_components(operating_system):
    """returns installed programs related to image generator.

    Abreviated yaml input looks like this:
    _________________________________________
    installComponents:
    alibaba:
        - oss2: python
        - aliyun-python-sdk-ecs: python
    aws:
        - boto3: python
        - moto: python
    _________________________________________
    components = the whole doc except first line
    component  = like alibaba or aws
    package    = like oss2 or boto3
    tool       = like python or linux

    For the return it looks like this:
    ______________________________________________
    "installedComponents": {
            "alibaba": {
                "aliyun-python-sdk-ecs": "4.17.6",
                "oss2": "2.8.0"
            },
            "aws": {
                "boto3": "1.10.10",
                "moto": "1.3.13"
            }
    }
    ______________________________________________
    component_return = like alibaba or aws

    """
    script_dir = os.path.dirname(__file__)
    if operating_system == "Ubuntu":
        rel_path = "../../../resource/telemetry/product_ubuntu.yml"
    elif operating_system == "Alpine":
        rel_path = "../../../resource/telemetry/product_alpine.yml"
    else:
        LOGGER.error("unknown operating system")
        return "error getting operating system"
    abs_additional_file_path = os.path.join(script_dir, rel_path)
    main_file_path = os.path.join(script_dir,
                                  "../../../resource/telemetry/product.yml")
    install_components = {}
    for abs_file_path in [abs_additional_file_path, main_file_path]:
        with open(abs_file_path) as file:
            yaml_output = yaml.load(file, Loader=yaml.FullLoader)
            components = yaml_output['installComponents']
            for component in components:
                component_return = {}
                for package_info in components[component]:
                    package = list(package_info.keys())[0]
                    tool = list(package_info.values())[0]
                    if tool == "python":
                        component_return[package] = get_python_version(package)
                    elif tool == "linux":
                        component_return[package] = get_linux_version(
                            package, operating_system)
                    component_return[package] = component_return[
                        package].split('\n')[0]
                install_components[component] = component_return
    if not install_components:
        return " "
    return install_components
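
A minimal sketch of how the installComponents YAML shape from the docstring maps onto the nested loops above, using the abbreviated sample document and skipping the version lookups:

# Illustrative only: parse the docstring's sample structure and walk it the
# same way the function does (component -> package -> tool).
import yaml

sample = """
installComponents:
  alibaba:
    - oss2: python
    - aliyun-python-sdk-ecs: python
"""
doc = yaml.safe_load(sample)
for component, packages in doc['installComponents'].items():
    for package_info in packages:
        package, tool = next(iter(package_info.items()))
        print(component, package, tool)   # e.g. "alibaba oss2 python"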
    def upload(self):
        """ Upload a F5 BIG-IP VE image to provided container """

        def upload_azure():
            with open(self.disk_to_upload,'rb') as vhd_file:
                self.blob.upload_blob(
                    vhd_file.read(),
                    blob_type="PageBlob",
                    metadata=self._get_tags()
                    )

        def _upload_impl():
            """ Azure blob upload implementation """
            timeout = int(get_config_value('AZURE_BLOB_UPLOAD_TIMEOUT'))

            try:
                self.connection_string = get_config_value('AZURE_STORAGE_CONNECTION_STRING')
                LOGGER.info("create blob client")
                self.blob = BlobClient.from_connection_string(
                    conn_str=self.connection_string,
                    container_name=self.container_name,
                    blob_name=self.uploaded_disk_name,
                    connection_timeout=timeout
                    )

                LOGGER.info(self._get_tags())
                nonlocal upload_azure
                upload_azure_p = Process(target=upload_azure)
                upload_azure_p.start()
                limit = int(timeout/10)
                for _ in range(limit):
                    if not upload_azure_p.is_alive():
                        break
                    sleep(10)
                    os.write(1, b".")
                else:
                    raise TimeoutError

                LOGGER.info(self.blob.get_blob_properties())
                local_blob_size = os.stat(self.disk_to_upload).st_size

                uploaded_blob_size = self.blob.get_blob_properties().get("size")

                LOGGER.info("uploaded blob size: %s and local blob_size: %s", \
                            str(uploaded_blob_size), str(local_blob_size))
                if uploaded_blob_size != local_blob_size:
                    return False

            except AzureMissingResourceHttpError:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except AzureException:
                LOGGER.error("Exception during uploading %s", self.disk_to_upload)
                return False
            except TimeoutError:
                LOGGER.error("Timeout while uploading")
                return False

            self.uploaded_disk_url = self.blob.url
            # save uploaded disk in artifacts dir json file
            vhd_url_json = {"vhd_url": self.uploaded_disk_url}
            artifacts_dir = get_config_value("ARTIFACTS_DIR")
            with open(artifacts_dir + "/vhd_url.json", "w") as vhd_url_json_file:
                json.dump(vhd_url_json, vhd_url_json_file)

            # insert file with vhd url
            self.metadata.set(self.__class__.__name__, 'vhd_url', self.uploaded_disk_url)
            self.metadata.set(self.__class__.__name__, 'image_id', self.uploaded_disk_name)
            LOGGER.info('Uploaded disk url is: %s', self.uploaded_disk_url)
            return True

        retrier = Retrier(_upload_impl)
        retrier.tries = int(get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_COUNT'))
        retrier.delay = int(get_config_value('AZURE_BLOB_UPLOAD_COMPLETED_RETRY_DELAY'))
        LOGGER.info("Waiting for blob %s to be uploaded.", self.disk_to_upload)

        if retrier.execute():
            LOGGER.info("blob [%s] is ready.", self.disk_to_upload)
            return True
        LOGGER.error("blob [%s] was still not ready after checking [%d] times!",
                     self.disk_to_upload, retrier.tries)
        raise RuntimeError("Runtime Error Occured during Azure Disk Upload")
Example #26
def call_subprocess(command, input_data=None, timeout_millis=-1, check_return_code=True,
                    input_encoding="utf-8", output_encoding="utf-8"):
    """Calls a subprocess, records progress to console, performs error handling, and returns the
    output.
    ----
    command: The command and arguments to execute. This must either be a string or a list. String
    formatting is more convenient for simple cases while list formatting provides more control over
    escaped characters and whitespace within arguments. If list formatting is used then the first
    item in the list will be executed as a subprocess and the remaining commands will be treated as
    arguments to that subprocess.
    ----
    input_data: The data to send to the subprocess' STDIN. This is used for processes which
    ordinarily read data from pipes instead of arguments. This may either be a bytes-like-object or
    a string.
    ----
    timeout_millis: The number of milliseconds to wait for the subprocess to return before killing
    it. A negative number means that no timeout will occur.
    ----
    check_return_code: Raises a ReturnCodeError if the subprocess returns a non-zero exit status.
    ----
    input_encoding: Encoding type to use when passing data to STDIN as a string. This is ignored for
    bytes-like-objects.
    ----
    output_encoding: Encoding type to use when decoding output from the subprocess. Set this to None
    to receive raw binary output.
    """
    if isinstance(command, str):
        # Popen will only accept a list
        command = command.split()
    if input_data:
        if isinstance(input_data, str):
            # Popen.communicate will only accept a bytes-like-object
            input_data = input_data.encode(input_encoding)
        elif not isinstance(input_data, bytes):
            message = "input_data was not a string or bytes-like-object! " \
                      "Unable to send to command [{}]!".format(" ".join(command))
            LOGGER.error(message)
            raise TypeError(message)
    poll_millis = int(config.get_config_value("SUBPROCESS_POLL_MILLIS"))
    progress_update_delay_millis = \
        int(config.get_config_value("CONSOLE_PROGRESS_BAR_UPDATE_DELAY")) * 1000
    start_time_millis = time.time() * 1000
    next_progress_update_millis = start_time_millis + progress_update_delay_millis
    # We create the output buffer as a list so that we can pass it by reference to the
    # communications thread. Once that thread has joined we'll be able to safely unwrap the output
    # string from position 0 of this list.
    output = []
    LOGGER.info("Calling: %s", " ".join(command))
    child = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    # If the child process produces enough output to fill the STDOUT buffer then it will block until
    # another process frees up space in the buffer by reading from it. Unfortunately, Python's
    # subprocess.read function is a blocking call which will only return once the child process
    # exits and the pipe is closed. Since we need the main thread to make polling and progress calls
    # we're not able to call the blocking read function until after the child process has already
    # terminated. This leads to a deadlock where nothing is reading from the STDOUT buffer because
    # the child process hasn't terminated yet and the child process hasn't terminated yet because
    # it's waiting for the STDOUT buffer to be freed up by a read call.
    #     A popular solution here is to use subprocess.readline instead, as this will only block
    # until a newline character is detected in the output. However, this is still unreliable since
    # not all data sent to STDOUT is guaranteed to terminate in a newline.
    #     A better solution is to start a separate communications thread where we can begin reading
    # without blocking the main thread from polling the child process. Since writing is affected by
    # a similar issue (STDIN can fill up and cause the main thread to block) we use the
    # Popen.communicate method to perform both reading and writing asynchronously on the
    # communications thread.
    comm = threading.Thread(target=lambda p, i, o: o.append(p.communicate(i)[0]),
                            args=(child, input_data, output))
    comm.start()
    wrote_progress = False
    while child.poll() is None:
        current_time_millis = time.time() * 1000
        if current_time_millis > next_progress_update_millis:
            sys.stdout.write('.')
            sys.stdout.flush()
            wrote_progress = True
            next_progress_update_millis = current_time_millis + progress_update_delay_millis
        if timeout_millis > -1 and current_time_millis >= start_time_millis + timeout_millis:
            message = "Command [{}] has timed out!".format(" ".join(command))
            LOGGER.warning(message)
            child.kill()
            comm.join()
            if output_encoding == "utf-8":
                LOGGER.warning("Command output was: %s", output[0].decode(output_encoding).rstrip())
            raise TimeoutError(message)
        time.sleep(poll_millis / 1000)
    comm.join()
    if wrote_progress:
        sys.stdout.write('\n')
        sys.stdout.flush()
    if check_return_code and child.returncode != 0:
        message = "Command [{}] returned with error code [{}]!".format(" ".join(command),
                                                                       child.returncode)
        LOGGER.warning(message)
        if output_encoding == "utf-8":
            LOGGER.warning("Command output was: %s", output[0].decode(output_encoding).rstrip())
        raise exceptions.ReturnCodeError(child.returncode, message)
    if output_encoding:
        return output[0].decode(output_encoding).rstrip()
    return output[0]
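
A hypothetical usage sketch (the command and timeout are illustrative, and the SUBPROCESS_POLL_MILLIS and CONSOLE_PROGRESS_BAR_UPDATE_DELAY config values are assumed to be set); string commands are split on whitespace, so the two calls below are equivalent:

# Run a short command with a 5-second timeout and capture its decoded output.
listing = call_subprocess(["ls", "-l", "/tmp"], timeout_millis=5000)
listing = call_subprocess("ls -l /tmp", timeout_millis=5000)
print(listing)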
    def create_image(self, image_name):
        """ Create image implementation for Alibaba """
        images_json = self.client.describe_images(None, image_name)
        if int(images_json['TotalCount']) == 0:
            LOGGER.debug('No old images named \'%s\' were found', image_name)
        else:
            # image names are unique, delete only one image
            image_id = images_json['Images']['Image'][0]['ImageId']
            LOGGER.info('Image \'%s\' already exists, its id is \'%s\', deleting it', image_name,
                        image_id)
            self.client.delete_image(image_id)

        # start image creation
        LOGGER.info('Started creation of image \'%s\' at %s', image_name,
                    datetime.datetime.now().strftime('%H:%M:%S'))
        start_time = time()
        imported_image = self.client.import_image(get_config_value('ALIBABA_BUCKET'),
                                                  self.disk.uploaded_disk_name, image_name)
        if 'Code' in imported_image.keys():
            if imported_image['Code'] == 'InvalidOSSObject.NotFound':
                raise RuntimeError('ImportImageRequest could not find uploaded disk \'' +
                                   image_name + '\'')
            if imported_image['Code'] == 'InvalidImageName.Duplicated':
                raise RuntimeError('Image \'' + image_name + '\' still exists, ' +
                                   'should have been removed by this point')
            if imported_image['Code'] == 'ImageIsImporting':
                raise RuntimeError('Another image named \'' + image_name + '\' is in the ' +
                                   'process of importing, probably from the previous run. ' +
                                   'Delete it first.')

        if 'ImageId' not in imported_image.keys() or 'TaskId' not in imported_image.keys():
            LOGGER.info('Alibaba response to ImportImageRequest:')
            LOGGER.info(json.dumps(imported_image, sort_keys=True, indent=4,
                                   separators=(',', ': ')))
            raise RuntimeError('ImageId and/or TaskId were not found in the response, ' +
                               'cannot initiate image import')
        self.image_id = imported_image['ImageId']
        task_id = imported_image['TaskId']
        LOGGER.info('Started image import with image id \'%s\' and task id \'%s\'', self.image_id,
                    task_id)

        task_status_count = int(get_config_value('ALIBABA_IMAGE_IMPORT_MONITOR_RETRY_COUNT'))
        task_status_delay = int(get_config_value('ALIBABA_IMAGE_IMPORT_MONITOR_RETRY_DELAY'))
        if self.monitor_task(task_id, task_status_count, task_status_delay):
            LOGGER.info('Image \'%s\' imported after %d seconds',
                        self.image_id, time() - start_time)
        else:
            canceled_task_msg = 'Image import failed or took too long, ' + \
                                'canceling task \'{}\' and '.format(task_id) + \
                                'deleting image \'{}\''.format(self.image_id)
            LOGGER.info(canceled_task_msg)
            self.client.cancel_task(task_id)
            self.client.delete_image(self.image_id)
            raise RuntimeError('Failed to import image \'{}\' after monitoring it for {} retries'.
                               format(self.image_id, task_status_count))

        # Add image_id and location (region) to the metadata used for image registration
        metadata = CloudImageMetadata()
        metadata.set(self.__class__.__name__, 'image_id', self.image_id)
        metadata.set(self.__class__.__name__, 'location', get_config_value('ALIBABA_REGION'))

        # Add tags to image
        LOGGER.info('Add tags to image \'%s\'', self.image_id)
        self.client.add_tags(self.image_id, 'image', CloudImageTags(metadata).get())

        # Add tags to associated snapshot
        images_json = self.client.describe_images(self.image_id, None)
        if not 'Images' in images_json.keys():
            LOGGER.error('No image data found for image \'%s\'', self.image_id)
            LOGGER.error('Unable to tag snapshot.')
        else:
            snapshot_id = images_json['Images']['Image'][0] \
                              ['DiskDeviceMappings']['DiskDeviceMapping'][0]['SnapshotId']
            LOGGER.info('Add tags to snapshot \'%s\'', snapshot_id)
            self.client.add_tags(snapshot_id, 'snapshot', CloudImageTags(metadata).get())