def load_files(self, metadata_files):
        """Load metadata from files"""
        LOGGER.debug('Load metadata files...')
        for metadata_file in metadata_files:
            LOGGER.debug('Loading %s...', metadata_file)
            if not os.path.exists(metadata_file):
                raise ValueError(
                    'No such file or directory: {}'.format(metadata_file))
            if os.stat(metadata_file).st_size == 0:
                raise ValueError(
                    'Empty metadata file: {}'.format(metadata_file))

            with open(metadata_file) as metadata_contents:
                new_metadata = json.load(metadata_contents)
                if 'build_source' not in new_metadata:
                    raise ValueError(
                        'build_source field not found in {}'.format(
                            metadata_file))

                # Each build_source can have only one set of metadata
                build_source = new_metadata['build_source']
                if build_source in self.metadata:
                    raise ValueError(
                        'Source {} already loaded'.format(build_source))

                # Remove any empty strings
                self.metadata[build_source] = \
                    {k: v for k, v in new_metadata.items() if v != u''}

        LOGGER.trace('metadata:%s',
                     json.dumps(self.metadata, indent=4, sort_keys=True))
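
A minimal sketch of the load contract above, on a made-up metadata payload: one set of metadata per build_source, with empty-string values dropped on load.

import json

# Hypothetical payload; only the 'build_source' field is required.
new_metadata = json.loads(
    '{"build_source": "prepare_vm", "build_time": "2021-01-01", "unused": ""}')
metadata = {}
build_source = new_metadata['build_source']
assert build_source not in metadata  # each source may only be loaded once
metadata[build_source] = {k: v for k, v in new_metadata.items() if v != ''}
print(metadata)
# {'prepare_vm': {'build_source': 'prepare_vm', 'build_time': '2021-01-01'}}
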
    def transform_values(self,
                         to_lower=False,
                         disallowed_regex='[^a-zA-Z0-9-]',
                         replacement_char='-'):
        """Transform data values"""
        LOGGER.debug('Transform metadata values')

        if disallowed_regex is not None and disallowed_regex != '':
            try:
                re.compile(disallowed_regex)
            except re.error as exc:
                raise ValueError('disallowed_regex is invalid: {}'.format(
                    str(exc))) from exc

            if replacement_char is None or replacement_char == '':
                raise ValueError(
                    'Replacement character is required for disallowed_regex')

        for key, val in self.metadata.items():
            # convert values to lower as requested
            if to_lower:
                val = val.lower()

            # substitute replacement character as requested
            if disallowed_regex is not None and disallowed_regex != '':
                val = re.sub(disallowed_regex, replacement_char, val)

            self.metadata[key] = val

        LOGGER.trace('metadata:%s',
                     json.dumps(self.metadata, indent=4, sort_keys=True))
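
A standalone sketch of the same transformation on made-up values, using the default disallowed_regex and replacement_char from the signature above:

import re

metadata = {'image_name': 'BIGIP 15.1.0_Build3', 'platform': 'AWS'}
for key, val in metadata.items():
    val = val.lower()                        # to_lower=True
    val = re.sub('[^a-zA-Z0-9-]', '-', val)  # default disallowed_regex
    metadata[key] = val
print(metadata)
# {'image_name': 'bigip-15-1-0-build3', 'platform': 'aws'}
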
        def _is_snapshot_ready():
            """Awaits the import operation represented by the import_task_id to reach
            'completed' status."""
            try:
                LOGGER.trace("Querying the status of import-task [%s].",
                             import_task_id)
                response = \
                    self.ec2_client.describe_import_snapshot_tasks(
                        ImportTaskIds=[import_task_id])
                if not response:
                    raise RuntimeError(
                        "describe_import_snapshot_tasks() returned an empty response!"
                    )

                LOGGER.trace(
                    "Response from describe_import_snapshot_tasks => '%s'",
                    response)
                task_status = response['ImportSnapshotTasks'][0][
                    'SnapshotTaskDetail']['Status']
                if task_status == 'error':
                    # Print the response before raising an exception.
                    LOGGER.debug(
                        "describe_import_snapshot_tasks() response for [%s] => [%s]",
                        import_task_id, response)
                    raise RuntimeError(
                        "import-snapshot task [{}] in unrecoverable 'error' state."
                        .format(import_task_id))

                return task_status == 'completed'
            except ClientError as client_error:
                LOGGER.exception(client_error)
                raise RuntimeError(
                    "describe_import_snapshot_tasks() failed for [{}]!".format(
                        import_task_id)) from client_error
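
Because _is_snapshot_ready() is a boolean predicate that raises on unrecoverable errors, the enclosing method (not shown in this snippet) presumably hands it to a poller; a minimal sketch of such a poller, with hypothetical timing values:

import time

def wait_for(predicate, attempts=120, interval_seconds=10):
    """Poll a boolean predicate until it returns True or attempts run out."""
    for _ in range(attempts):
        if predicate():
            return
        time.sleep(interval_seconds)
    raise RuntimeError('Timed out waiting for the import-snapshot task.')

# wait_for(_is_snapshot_ready)
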
    def share_image(self):
        """Reads a list of AWS accounts and shares the AMI with each of those accounts."""
        share_account_ids = get_list_from_config_yaml('AWS_IMAGE_SHARE_ACCOUNT_IDS')
        if share_account_ids:
            LOGGER.info("Share the AMI with multiple AWS accounts.")
            for dest_account_id in share_account_ids:
                try:
                    LOGGER.info('Sharing image with account-id: %s', dest_account_id)

                    # Share the image with the destination account
                    response = self.ec2_client.modify_image_attribute(
                        ImageId=self.image_id,
                        Attribute='launchPermission',
                        OperationType='add',
                        UserIds=[str(dest_account_id)]
                    )
                    LOGGER.trace("image.modify_attribute response => %s", response)
                except ClientError as client_error:
                    LOGGER.exception(client_error)
                    # Log the error around malformed Account-id and move on.
                    if client_error.response['Error']['Code'] == 'InvalidAMIAttributeItemValue':
                        LOGGER.error('Malformed account-id: %s', dest_account_id)
                    else:
                        # Any other type of error can be irrecoverable and might
                        # point to a deeper malaise.
                        raise RuntimeError('AWS image was not shared with other accounts') \
                            from client_error

            # Acknowledge all the account-ids that the image was shared with.
            self.is_share_image_succeeded(share_account_ids)
        else:
            LOGGER.info("No account IDs found for sharing AMI")
def _command_key_values_to_dict(command, regex):
    """Runs a command in a subprocess, searches the output of the command for key/value pairs
    using the specified regex, and returns a dictionary containing those pairs"""
    dictionary = {}
    LOGGER.debug("Searching for version information using command: %s",
                 command)
    try:
        lines = check_output(command.split(),
                             universal_newlines=True,
                             stderr=STDOUT).split('\n')
    except FileNotFoundError:
        LOGGER.warning(
            "Command [%s] not found on system.  Unable to check version!",
            command)
        return dictionary
    except CalledProcessError as error:
        LOGGER.warning(
            "Skipping version information since command [%s] returned with error: %s",
            command, error.output)
        return dictionary

    for line in lines:
        LOGGER.trace("Regex search string: %s", regex)
        LOGGER.trace("Regex search line: %s", line)
        search = re.search(regex, line)
        if search:
            LOGGER.trace("Regex succeeded")
            dictionary[search.group(1)] = search.group(2)
        else:
            LOGGER.trace("Regex failed")
    LOGGER.trace("Completed dictionary: %s", dictionary)
    return dictionary
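
A hypothetical call illustrating the contract: the regex needs two capture groups, group(1) becoming the key and group(2) the value. Assuming lsb_release is installed:

versions = _command_key_values_to_dict(
    'lsb_release -a', r'^\s*(\S[^:]*):\s+(.*)$')
# e.g. {'Distributor ID': 'Ubuntu', 'Release': '20.04', ...}
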
    def is_share_image_succeeded(self, share_account_ids):
        """Helper utility for share_image() that goes through the list of share_account_ids
        and confirms that the image was shared with all accounts. The function logs any
        error during its execution without propagating it up."""
        try:
            LOGGER.info("Checking which accounts were added for sharing this AMI")
            image_launch_perms = self.ec2_client.describe_image_attribute(
                ImageId=self.image_id,
                Attribute='launchPermission',
                DryRun=False
            )
            LOGGER.trace("image.describe_attribute() response => %s", image_launch_perms)
        except ClientError as client_error:
            # Simply log the exception without propagating it.
            LOGGER.exception(client_error)
            return False

        # Create a list of account IDs that have launch permission
        launch_permission_accounts = []
        for each in image_launch_perms['LaunchPermissions']:
            launch_permission_accounts.append(each['UserId'])

        counter = 0
        # Check which accounts were added for sharing this AMI
        for account_id in share_account_ids:
            if str(account_id) in launch_permission_accounts:
                LOGGER.info("The AMI was successfully shared with account: %s", account_id)
                counter += 1
            else:
                LOGGER.warning("The AMI was not shared with account: %s", account_id)

        # Confirm that the number of accounts in share_account_ids and image's
        # 'LaunchPermissions' are matching.
        return counter == len(share_account_ids)
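
The check reduces to comparing the requested account list against the 'UserId' entries under 'LaunchPermissions'; a self-contained sketch on a canned response (account IDs are made up):

image_launch_perms = {'LaunchPermissions': [{'UserId': '111111111111'},
                                            {'UserId': '222222222222'}]}
share_account_ids = [111111111111, 333333333333]

granted = [each['UserId'] for each in image_launch_perms['LaunchPermissions']]
print(all(str(account_id) in granted for account_id in share_account_ids))
# False: 333333333333 was not granted launch permission
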
    def create_snapshot(self):
        """Creates a snapshot from the uploaded s3_disk."""
        try:
            description = datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '--BIGIP-Volume-From-'
            description += self.s3_disk
            LOGGER.info("Importing the disk [s3://%s/%s] as a snapshot in AWS.",
                        self.s3_bucket, self.s3_disk)
            response = self.ec2_client.import_snapshot(Description=description,
                                                       DiskContainer={
                                                           "Description": description,
                                                           "Format": "vmdk",
                                                           "UserBucket": {
                                                               "S3Bucket": self.s3_bucket,
                                                               "S3Key": self.s3_disk
                                                           }
                                                       })
            LOGGER.trace("import_snapshot() Response => '%s'", response)
            self.import_task_id = response['ImportTaskId']
            LOGGER.info("TaskId for the import_snapshot() operation  => [%s]",
                        self.import_task_id)
            # Wait for the snapshot import to complete.
            self.is_snapshot_ready(self.import_task_id)

            # As the import operation successfully completed, reset it back to None
            # to avoid trying to cancel a completed import-task during clean-up.
            self.import_task_id = None

            # Tag the snapshot
            self.create_tags()

        except RuntimeError as runtime_error:
            LOGGER.exception(runtime_error)
            raise
    def filter(self, platform=None):
        """Filter metadata using config files."""
        self.__load_config_attribute_keys(platform)

        # Build metadata by walking config data and grabbing appropriate metadata
        self.metadata = {}
        LOGGER.debug('Filter metadata using config attribute keys')
        for source_key, source_attribute_keys in self.config_attribute_keys.items():
            LOGGER.debug('Add attributes for source %s', source_key)
            if source_key not in self.all_metadata:
                raise ValueError(
                    'Metadata source_key:{} not found'.format(source_key))

            for attribute_key in source_attribute_keys:
                LOGGER.trace('  Add attribute for key %s:%s', source_key,
                             attribute_key)
                if attribute_key not in self.all_metadata[source_key]:
                    raise ValueError('Metadata for key {}:{} not found'.format(
                        source_key, attribute_key))
                self.metadata[attribute_key] = self.all_metadata[source_key][
                    attribute_key]

        LOGGER.trace('metadata:%s',
                     json.dumps(self.metadata, indent=4, sort_keys=True))
    def create_image(self, image_name):
        """Create image implementation for AWS"""
        # image name must be unique
        self.delete_old_image(image_name)

        # start image creation
        LOGGER.info('Started creation of image %s at %s', image_name,
                    datetime.datetime.now().strftime('%H:%M:%S'))
        start_time = time()
        try:
            response = self.ec2_client.register_image(
                Architecture="x86_64",
                BlockDeviceMappings=[
                    {
                        "DeviceName": AWSImage.AWS_IMAGE_ROOT_VOLUME,
                        "Ebs":
                        {
                            "DeleteOnTermination": True,
                            "SnapshotId": self.snapshot.snapshot_id,
                            "VolumeType": "gp2"
                        }
                    }
                ],
                EnaSupport=True,
                Description=image_name,
                Name=image_name,
                RootDeviceName=AWSImage.AWS_IMAGE_ROOT_VOLUME,
                SriovNetSupport="simple",
                VirtualizationType="hvm"
                )
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError('register_image failed for image \'{}\'!'.format(image_name)) \
                from botocore_exception

        # get image id
        try:
            LOGGER.trace("register_image() response: %s", response)
            self.image_id = response['ImageId']
        except KeyError as key_error:
            LOGGER.exception(key_error)
            raise RuntimeError('could not find \'ImageId\' key for image {} '.format(image_name) +
                               'in create_image response: {}'.format(response)) from key_error
        LOGGER.info('Image id: %s', self.image_id)

        # save image id in artifacts dir json file
        save_image_id(self.image_id)

        # wait till the end of the image creation
        self.wait_for_image_availability()
        LOGGER.info('Creation of %s image took %d seconds', self.image_id, time() - start_time)

        LOGGER.info('Tagging %s as the image_id.', self.image_id)
        self.metadata.set(self.__class__.__name__, 'image_id', self.image_id)

        # add tags to the image
        self.create_tags()
    def to_file(self, build_info_file_path):
        """Output build info as pre-formatted JSON string to file at specified path"""
        LOGGER.debug("Writing build info to specified file as a JSON string")
        output = self.to_json()
        LOGGER.trace("build_info: %s", output)
        Path(dirname(build_info_file_path)).mkdir(parents=True, exist_ok=True)
        with open(build_info_file_path, 'w') as output_file:
            LOGGER.trace("output_file: %s", build_info_file_path)
            output_file.writelines(output)
        LOGGER.debug("Wrote build info to [%s]", build_info_file_path)
    def find_image(self, image_name):
        """ Find image by name. Return response"""
        try:
            response = self.ec2_client.describe_images(
                Filters=[{'Name': 'name', 'Values': [image_name]}])
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError('describe_images failed for image \'{}\'!'.format(image_name)) \
                from botocore_exception
        LOGGER.trace('describe_images response for image %s: %s', image_name, response)
        return response
    def create_tags(self):
        """ Create tags for snapshot. Tags are fetched from metadata. """
        snapshot_tags = self.get_snapshot_tag_metadata()
        tags_to_add = []
        for tag in snapshot_tags:
            tags_to_add.append({'Key': tag, 'Value': snapshot_tags[tag]})

        try:
            response = self.ec2_client.create_tags(Resources=[self.snapshot_id], Tags=tags_to_add)
        except (ClientError, ParamValidationError) as botocore_exception:
            LOGGER.exception(botocore_exception)
            raise RuntimeError('create_tags failed for snapshot \'{}\'!'.format(
                self.snapshot_id)) from botocore_exception
        LOGGER.trace('create_tags response for snapshot %s: %s', self.snapshot_id, response)
    def __init__(self, ec2_client, s3_bucket, s3_disk):
        self.s3_bucket = s3_bucket
        self.s3_disk = s3_disk

        # ec2_client object to perform various EC2 operations.
        self.ec2_client = ec2_client
        LOGGER.trace("self.s3_disk = '%s'", self.s3_disk)
        LOGGER.trace("self.s3_bucket = '%s'", self.s3_bucket)

        # Snapshot created from the uploaded 's3_disk'.
        self.snapshot_id = None

        # Import-task-id for the disk import operation.
        self.import_task_id = None
    def register_image(self, skip_post=False, timeout=60.0):
        """Register image."""

        # Check for URL
        cir_url = get_config_value('IMAGE_REGISTRATION_URL')
        if (cir_url is None) and (not skip_post):
            LOGGER.trace(
                'IMAGE_REGISTRATION_URL is not defined. Skip image registration.'
            )
            return

        # Format data
        metadata = copy.deepcopy(self.metadata)
        self.registration_data = {}

        # Azure supports both ASM and ARM models.  We are using ARM now.
        if ('platform' in metadata) and (metadata['platform'] == 'azure'):
            metadata['platform'] = 'azurerm'

        # These metadata attributes are used as keys in the registry
        for key in ['platform', 'image_id', 'image_name']:
            # special case mapping for register API platform -> cloud
            if key not in metadata:
                raise ValueError(
                    '{} attribute missing from metadata'.format(key))
            if key == 'platform':
                self.registration_data['cloud'] = str(metadata[key])
            else:
                self.registration_data[key] = str(metadata[key])
            del metadata[key]

        # Add bundle attribute to support legacy cloud BVT
        metadata['bundle'] = self.get_bundle(metadata)

        # All other metadata attributes are considered registry attributes
        self.registration_data['attributes'] = json.dumps(metadata,
                                                          sort_keys=True)

        if skip_post:
            LOGGER.info('skip_post flag is set. Skip image registration.')
            LOGGER.trace('Registration data:%s', self.registration_data)
            return

        # Register image
        self.post_to_url(cir_url, timeout)
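
Assuming metadata like the sample below, the payload splits into 'cloud', 'image_id', and 'image_name' keys plus a JSON-encoded 'attributes' blob; a standalone sketch of that reshaping (values are made up, and the get_bundle() step is omitted):

import json

metadata = {'platform': 'azure', 'image_id': 'img-123',
            'image_name': 'my-image', 'build_time': '2021-01-01'}
if metadata.get('platform') == 'azure':
    metadata['platform'] = 'azurerm'  # ARM, not classic ASM

registration_data = {}
for key in ['platform', 'image_id', 'image_name']:
    # the register API calls the platform key 'cloud'
    target = 'cloud' if key == 'platform' else key
    registration_data[target] = str(metadata.pop(key))
registration_data['attributes'] = json.dumps(metadata, sort_keys=True)
print(registration_data)
# {'cloud': 'azurerm', 'image_id': 'img-123', 'image_name': 'my-image',
#  'attributes': '{"build_time": "2021-01-01"}'}
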
    def __load_config_attribute_keys(self, platform=None):
        """Load config attributes"""
        # Load attributes to include in metadata
        LOGGER.debug('Load config files.')

        # Always use 'all' and add optional platform
        self.config_attribute_keys = {}
        platforms = ['all']
        if platform is not None:
            platforms.append(platform)

        # Load config files
        for config_file in self.config_files:
            LOGGER.debug('Loading %s...', config_file)

            # Check for empty file
            if os.stat(config_file).st_size == 0:
                raise ValueError('Empty config file: {}'.format(config_file))

            with open(config_file) as config:
                new_config = yaml.safe_load(config)

                # Get build sources for all platforms
                for config_platform in platforms:
                    # If config file has platform def, add build sources
                    if config_platform in new_config:
                        for build_source in new_config[config_platform]:
                            # Create empty list before adding attribute keys
                            if build_source not in self.config_attribute_keys:
                                self.config_attribute_keys[build_source] = []
                            self.config_attribute_keys[build_source] += \
                                new_config[config_platform][build_source]

            # Attributes must be unique.  Check if attributes already exist for any source
            check_keys = []
            for build_source in list(self.config_attribute_keys.keys()):
                for attribute in self.config_attribute_keys[build_source]:
                    if attribute not in check_keys:
                        check_keys.append(attribute)
                    else:
                        raise ValueError(
                            'Duplicate attribute {} found in config'.format(
                                attribute))
        LOGGER.trace('config_attribute_keys:%s', self.config_attribute_keys)
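
A hypothetical config file matching the walk above: top-level platform sections ('all' plus an optional platform name) map build sources to lists of attribute keys, and every attribute key must be unique across sources.

import yaml

sample_config = """
all:
  prepare_vm:
    - build_time
    - build_source
aws:
  create_image:
    - image_id
"""
new_config = yaml.safe_load(sample_config)
print(new_config['all']['prepare_vm'])  # ['build_time', 'build_source']
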
    def title_case_keys(self):
        """Transform keys to TitleCase. Note if words in a key aren't properly
           TitleCased or broken up, this won't fix that (e.g. version_jobid is
           transformed to VersionJobid rather than VersionJobId)."""
        LOGGER.debug('Transform keys to TitleCase')

        # Use a copy to avoid changing the data structure that is being iterated over
        metadata = copy.deepcopy(self.metadata)

        for key in metadata:
            # Replace non-alphanumeric with spaces to prepare to capitalize first char of each word
            new_key = ''.join(c if c.isalnum() else ' ' for c in key)

            # Capitalize first char of each word
            new_key = ''.join(word.title() for word in new_key.split())

            # Replace existing key with TitleCase key
            LOGGER.trace('Transform key %s to %s', key, new_key)
            self.metadata[new_key] = self.metadata.pop(key)

        LOGGER.trace('metadata:%s',
                     json.dumps(self.metadata, indent=4, sort_keys=True))
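
A standalone sketch of the key transformation, reproducing the caveat from the docstring:

key = 'version_jobid'
new_key = ''.join(c if c.isalnum() else ' ' for c in key)    # 'version jobid'
new_key = ''.join(word.title() for word in new_key.split())  # join TitleCased words
print(new_key)  # 'VersionJobid' -- not 'VersionJobId', as the docstring warns
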
def read_injected_files(top_call_dir, overall_dest_dir):
    """
    Copy files that need to be injected to a temporary location,
    which will be accessible during post-install.
    Two mandatory arguments:
        a path from where build-image was called
        a path to initrd directory that will be available during post_install
    """

    # location used by post-install, should be created only if there are files to inject
    injected_files = 'etc/injected_files'  # location used by post-install
    overall_dest_dir = overall_dest_dir + '/' + injected_files
    LOGGER.info('temporary location for injected files: %s', overall_dest_dir)

    # include user-specified files
    files_to_inject = get_list_from_config_yaml('UPDATE_IMAGE_FILES')

    # add build_info.json
    prep_build_info_for_injection(files_to_inject)

    # each injected file directory to be stored in a separate directory "file<number>"
    count = 0
    LOGGER.trace("files_to_inject: %s", files_to_inject)
    for file in files_to_inject:
        LOGGER.trace("file: %s", file)
        src = extract_single_worded_key(file, 'source')
        if src[0] != '/' and src[0] != '~':
            # make it an absolute path
            src = top_call_dir + '/' + src
        src = abspath(realpath(expanduser(src)))
        dest = extract_single_worded_key(file, 'destination')
        LOGGER.info('inject %s to temporary location %s', src, dest)

        file_holder = overall_dest_dir + '/file' + str(count) + '/'
        # copy source to "src"
        # source file name does not need to be preserved;
        # it will be copied to destination path on BIG-IP
        source_holder = file_holder + 'src'
        if isfile(src):
            Path(file_holder).mkdir(parents=True, exist_ok=True)
            copy2(src, source_holder)
        elif isdir(src):
            copytree(src, source_holder)
        else:
            raise RuntimeError(
                '\'{}\' is neither a file nor a directory, cannot inject it!'.
                format(src))

        # store destination
        if dest[0] != '/':
            raise RuntimeError(
                'injected file destination \'{}\' must be an absolute path!'.
                format(dest))
        with open(file_holder + 'dest', 'w') as dest_holder:
            print("{}".format(dest), file=dest_holder)

        count += 1
        # end of for loop

    LOGGER.debug('leaving %s', basename(__file__))
    return 0
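
Each entry in UPDATE_IMAGE_FILES is read as a mapping with 'source' and 'destination' keys (the third variant below also accepts an optional 'mode'); a hypothetical value:

files_to_inject = [
    {'source': 'files/motd', 'destination': '/etc/motd'},
    {'source': '~/keys/', 'destination': '/root/keys'},
]
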
def read_injected_files(overall_dest_dir):
    """
    Copy files that need to be injected to a temporary location,
    which will be accessible during post-install.
    One mandatory argument: a path to initrd directory that will be available during post_install
    """
    artifacts_dir = get_config_value("ARTIFACTS_DIR")

    # location used by post-install, should be created only if there are files to inject
    injected_files = 'etc/injected_files'  # location used by post-install
    overall_dest_dir = overall_dest_dir + '/' + injected_files
    LOGGER.info('temporary location for injected files: %s', overall_dest_dir)

    # include user-specified files
    files_to_inject = get_list_from_config_yaml('UPDATE_IMAGE_FILES')

    # include information about installed software on the build machine
    build_info_file_name = "build_info.json"
    build_info_source = artifacts_dir + "/" + build_info_file_name
    build_info_destination = "/" + build_info_file_name
    files_to_inject.append({
        'source': build_info_source,
        'destination': build_info_destination
    })
    build_info = BuildInfo()
    build_info.to_file(build_info_source)

    # each injected file directory to be stored in a separate directory "file<number>"
    count = 0
    LOGGER.trace("files_to_inject: %s", files_to_inject)
    for file in files_to_inject:
        LOGGER.trace("file: %s", file)
        src = extract_single_worded_key(file, 'source')
        dest = extract_single_worded_key(file, 'destination')
        LOGGER.info('inject %s to temporary location %s', src, dest)

        file_holder = overall_dest_dir + '/file' + str(count) + '/'
        # copy source to "src"
        # source file name does not need to be preserved;
        # it will be copied to destination path on BIG-IP
        source_holder = file_holder + 'src'
        if isfile(src):
            Path(file_holder).mkdir(parents=True, exist_ok=True)
            copy2(src, source_holder)
        elif isdir(src):
            copytree(src, source_holder)
        else:
            raise RuntimeError(
                '\'{}\' is neither a file nor a directory, cannot inject it!'.
                format(src))

        # store destination
        if dest[0] != '/':
            raise RuntimeError(
                'injected file destination \'{}\' must be an absolute path!'.
                format(dest))
        with open(file_holder + 'dest', 'w') as dest_holder:
            print("{}".format(dest), file=dest_holder)

        count += 1
        # end of for loop

    LOGGER.debug('leaving %s', basename(__file__))
    return 0
def read_injected_files(top_call_dir, overall_dest_dir):
    """
    Copy files that need to be injected to a temporary location,
    which will be accessible during post-install.
    Two mandatory arguments:
        a path from where build-image was called
        a path to initrd directory that will be available during post_install
    """

    # location used by post-install, should be created only if there are files to inject
    injected_files = 'etc/injected_files'  # location used by post-install
    overall_dest_dir = overall_dest_dir + '/' + injected_files
    LOGGER.info('Temporary location for injected files: \'%s\'',
                overall_dest_dir)

    # include user-specified files
    files_to_inject = get_list_from_config_yaml('UPDATE_IMAGE_FILES')

    # add build_info.json
    prep_build_info_for_injection(files_to_inject)

    # each injected file directory to be stored in a separate directory "file<number>"
    count = 0
    LOGGER.trace("files_to_inject: %s", files_to_inject)
    for file in files_to_inject:
        LOGGER.debug('Injecting file: \'%s\'.', file)
        src = extract_single_worded_key(file, 'source')
        dest = extract_single_worded_key(file, 'destination')
        if 'mode' in file:
            mode = extract_single_worded_key(file, 'mode')
        else:
            mode = None
        LOGGER.info('Copy \'%s\' to a temporary location for \'%s\'.', src,
                    dest)

        url = src  # treat 'src' as a file path and 'url' as a url
        if src[0] != '/' and src[0] != '~':
            # make it an absolute path
            src = top_call_dir + '/' + src
        src = abspath(realpath(expanduser(src)))

        file_holder = overall_dest_dir + '/file' + str(count) + '/'
        # copy source to "src"
        # source file name does not need to be preserved;
        # it will be copied to destination path on BIG-IP
        source_holder = file_holder + 'src'
        Path(file_holder).mkdir(parents=True, exist_ok=True)
        if isfile(src):
            LOGGER.info('Treating \'%s\' as a file for file injection', src)
            copy2(src, source_holder)
        elif isdir(src):
            LOGGER.info('Treating \'%s\' as a directory for file injection',
                        src)
            copytree(src, source_holder)
        else:
            LOGGER.info('Treating \'%s\' as a URL for the file injection', url)
            download_file(url, source_holder)

        # store destination
        if dest[0] != '/':
            raise RuntimeError(
                'injected file destination \'{}\' must be an absolute path!'.
                format(dest))
        with open(file_holder + 'dest', 'w') as dest_holder:
            print("{}".format(dest), file=dest_holder)

        # Store mode. Should be a string consisting of one to four octal digits.
        if mode:
            LOGGER.debug('Creating mode holder for mode \'%s\'.', mode)
            mode_pattern = re.compile('^[0-7][0-7]?[0-7]?[0-7]?$')
            if not mode_pattern.match(mode):
                raise RuntimeError('Invalid mode \'' + mode +
                                   '\', must be a string ' +
                                   'consisting of one to four octal digits.')
            with open(file_holder + 'mode', 'w') as mode_holder:
                print("{}".format(mode), file=mode_holder)

        count += 1
        # end of for loop

    LOGGER.debug('leaving %s', basename(__file__))
    return 0
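
A quick check of the mode validation above: modes must be strings of one to four octal digits.

import re

mode_pattern = re.compile('^[0-7][0-7]?[0-7]?[0-7]?$')
for mode in ['644', '0755', '8', '77777']:
    print(mode, bool(mode_pattern.match(mode)))
# 644 True, 0755 True, 8 False (not octal), 77777 False (too long)
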