Example #1
File: yum.py Project: SUSE/kiwi
    def add_repo(
        self, name, uri, repo_type='rpm-md', prio=None, dist=None, components=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to use the file:// scheme
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)
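For reference, the write above produces a plain INI file; this minimal standalone sketch (repository name, URI, and paths are made up for illustration) shows the same pattern end to end:

from configparser import ConfigParser

repo_config = ConfigParser()
repo_config.add_section('myrepo')
repo_config.set('myrepo', 'name', 'myrepo')
repo_config.set('myrepo', 'baseurl', 'http://example.com/repo')
repo_config.set('myrepo', 'priority', '42')

with open('/tmp/myrepo.repo', 'w') as repo:
    repo_config.write(repo)

# /tmp/myrepo.repo now contains:
#   [myrepo]
#   name = myrepo
#   baseurl = http://example.com/repo
#   priority = 42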
Example #2
def _load_ec_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Override swift.conf [storage-policy:0] section to use a 2+1 EC policy.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    """
    _debug('Setting configuration for default EC policy')

    conf = ConfigParser()
    conf.read(swift_conf_file)
    # remove existing policy sections that came with swift.conf-sample
    for section in list(conf.sections()):
        if section.startswith('storage-policy'):
            conf.remove_section(section)
    # add new policy 0 section for an EC policy
    conf.add_section('storage-policy:0')
    ec_policy_spec = {
        'name': 'ec-test',
        'policy_type': 'erasure_coding',
        'ec_type': 'liberasurecode_rs_vand',
        'ec_num_data_fragments': 2,
        'ec_num_parity_fragments': 1,
        'ec_object_segment_size': 1048576,
        'default': True
    }

    for k, v in ec_policy_spec.items():
        conf.set('storage-policy:0', k, str(v))

    with open(swift_conf_file, 'w') as fp:
        conf.write(fp)
    return proxy_conf_file, swift_conf_file
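Note the str(v) cast in the loop: Python 3's ConfigParser.set() only accepts string values and raises TypeError otherwise, which is why the integers and the boolean in ec_policy_spec are converted before being stored. A minimal illustration:

from configparser import ConfigParser

conf = ConfigParser()
conf.add_section('storage-policy:0')
# conf.set('storage-policy:0', 'ec_num_data_fragments', 2)     # TypeError on Python 3
conf.set('storage-policy:0', 'ec_num_data_fragments', str(2))  # values must be strings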
Example #3
    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to use the file:// scheme
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        repo_config.set(
            name, 'enabled', '1'
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        if repo_gpgcheck is not None:
            repo_config.set(
                name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
            )
        if pkg_gpgcheck is not None:
            repo_config.set(
                name, 'gpgcheck', '1' if pkg_gpgcheck else '0'
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(RepositoryYumSpaceRemover(repo))
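RepositoryYumSpaceRemover is a kiwi-specific helper; since ConfigParser.write() only ever calls .write() on the object it is given, a wrapper of this kind can be a thin proxy. A hedged sketch of the assumed behavior (the real class may differ):

class RepositoryYumSpaceRemover:
    # assumed behavior: normalize the "key = value" lines ConfigParser
    # emits to "key=value" before they reach the underlying file
    def __init__(self, fp):
        self.fp = fp

    def write(self, data):
        # ConfigParser.write() passes one line per call; only the first
        # ' = ' on a line is the option delimiter
        self.fp.write(data.replace(' = ', '=', 1))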
Example #4
    def test_get_user_list(self):
        config = ConfigParser()
        config.add_section('slapformat')
        config.set('slapformat', 'partition_amount', '3')
        config.set('slapformat', 'user_base_name', 'slapuser')
        config.set('slapformat', 'partition_base_name', 'slappart')
        config.add_section('slapos')
        config.set('slapos', 'instance_root', self.instance_root)
 
        user_dict = entity.get_user_list(config)
        username_set = {'slapuser0', 'slapuser1', 'slapuser2'}
        self.assertEqual(username_set, set(user_dict))

        for name in username_set:
            self.assertEqual(user_dict[name].name, name)
            self.assertEqual(user_dict[name].snapshot_list, [])
            expected_path = "%s/slappart%s" % (self.instance_root, name.strip("slapuser"))
            self.assertEqual(user_dict[name].path, expected_path)
Example #5
class Namespaces(object):
    r"""Helper for namespaces.

    The config file would look like:
    ```
    [carbon-relay]
    pattern = carbon\.relay\.*

    [carbon-cache]
    pattern = carbon\.agents\.*

    [carbon-aggregator]
    pattern = carbon\.aggregator\.*

    [prometheus]
    pattern = prometheus\.*
    ```
    """

    def __init__(self, filename=None):
        """Initializer."""
        self.config = ConfigParser({}, collections.OrderedDict)
        self.patterns = collections.OrderedDict()

        if not filename:
            self.patterns[re.compile(".*")] = "total"
            self.config.add_section("total")
            return

        self.config.read(filename)
        for section in self.config.sections():
            pattern = re.compile(self.config.get(section, "pattern"))
            self.patterns[pattern] = section

    def lookup(self, metric_name):
        """Return the namespace corresponding to the metric."""
        for pattern, section in self.patterns.items():
            if pattern.match(metric_name):
                return section, self.config.items(section)
        return "none", None
Example #6
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()

        self.conffile = os.path.join(self.tmpdir, 'zeyple.conf')
        self.homedir = os.path.join(self.tmpdir, 'gpg')
        self.logfile = os.path.join(self.tmpdir, 'zeyple.log')

        config = ConfigParser()

        config.add_section('zeyple')
        config.set('zeyple', 'log_file', self.logfile)
        config.set('zeyple', 'add_header', 'true')

        config.add_section('gpg')
        config.set('gpg', 'home', self.homedir)

        config.add_section('relay')
        config.set('relay', 'host', 'example.net')
        config.set('relay', 'port', '2525')

        with open(self.conffile, 'w') as fp:
            config.write(fp)

        os.mkdir(self.homedir, 0o700)
        subprocess.check_call(
            ['gpg', '--homedir', self.homedir, '--import', KEYS_FNAME],
            stderr=open('/dev/null'),
        )

        self.zeyple = zeyple.Zeyple(self.conffile)
        self.zeyple._send_message = Mock()  # don't try to send emails
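When the application reads this fixture back, ConfigParser's typed accessors undo the string conversion; a short sketch (path is illustrative):

from configparser import ConfigParser

config = ConfigParser()
config.read('/tmp/zeyple.conf')
add_header = config.getboolean('zeyple', 'add_header')  # True
port = config.getint('relay', 'port')                   # 2525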
Example #7
    def _install_desktop_file(self, prefix, activity_path):
        cp = ConfigParser()
        section = 'Desktop Entry'
        cp.add_section(section)
        cp.optionxform = str  # Allow CamelCase entries

        # Get it from the activity.info for the non-translated version
        info = ConfigParser()
        info.read(os.path.join(activity_path, 'activity', 'activity.info'))
        cp.set(section, 'Name', info.get('Activity', 'name'))
        if info.has_option('Activity', 'summary'):
            cp.set(section, 'Comment', info.get('Activity', 'summary'))

        for path in sorted(glob(os.path.join(activity_path, 'locale',
                                             '*', 'activity.linfo'))):
            locale = path.split(os.path.sep)[-2]
            info = ConfigParser()
            info.read(path)
            if info.has_option('Activity', 'name'):
                cp.set(section, 'Name[{}]'.format(locale),
                       info.get('Activity', 'name'))
            if info.has_option('Activity', 'summary'):
                cp.set(section, 'Comment[{}]'.format(locale),
                       info.get('Activity', 'summary'))

        cp.set(section, 'Terminal', 'false')
        cp.set(section, 'Type', 'Application')
        cp.set(section, 'Categories', 'Education;')
        cp.set(section, 'Icon', os.path.join(
            activity_path, 'activity', self.config.bundle.get_icon_filename()))
        cp.set(section, 'Exec', self.config.bundle.get_command())
        cp.set(section, 'Path', activity_path)  # Path == CWD for running

        name = '{}.activity.desktop'.format(self.config.bundle_id)
        path = os.path.join(prefix, 'share', 'applications', name)
        if not os.path.isdir(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        with open(path, 'w') as f:
            cp.write(f)
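The cp.optionxform = str assignment is what keeps the .desktop keys intact: by default ConfigParser lower-cases option names on the way in, which would turn the required CamelCase keys into invalid ones. A minimal illustration:

from configparser import ConfigParser

cp = ConfigParser()
cp.optionxform = str  # keep option names exactly as written
cp.add_section('Desktop Entry')
cp.set('Desktop Entry', 'Name', 'My Activity')
# without the optionxform override, the key would be stored and
# written back as 'name' instead of 'Name'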
Example #8
File: auth.py Project: lyandut/st2
    def run(self, args, **kwargs):

        if not args.password:
            args.password = getpass.getpass()
        instance = self.resource(ttl=args.ttl) if args.ttl else self.resource()

        cli = BaseCLIApp()

        # Determine path to config file
        try:
            config_file = cli._get_config_file_path(args)
        except ValueError:
            # config file not found in args or in env, defaulting
            config_file = config_parser.ST2_CONFIG_PATH

        # Retrieve token
        manager = self.manager.create(instance, auth=(args.username, args.password), **kwargs)
        cli._cache_auth_token(token_obj=manager)

        # Update existing configuration with new credentials
        config = ConfigParser()
        config.read(config_file)

        # Modify config (and optionally populate with password)
        if not config.has_section('credentials'):
            config.add_section('credentials')

        config.set('credentials', 'username', args.username)
        if args.write_password:
            config.set('credentials', 'password', args.password)
        else:
            # Remove any existing password from config
            config.remove_option('credentials', 'password')

        with open(config_file, 'w') as cfg_file_out:
            config.write(cfg_file_out)

        return manager
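ConfigParser.read() silently skips files that do not exist, so the flow above works both for a fresh config and for updating an existing one. The same update-or-create idiom in isolation (file name and values are illustrative):

from configparser import ConfigParser

config = ConfigParser()
config.read('st2.ini')  # no error if the file is absent
if not config.has_section('credentials'):
    config.add_section('credentials')
config.set('credentials', 'username', 'alice')
config.remove_option('credentials', 'password')  # returns False if absent, no error
with open('st2.ini', 'w') as cfg_file_out:
    config.write(cfg_file_out)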
Example #9
    def __spawn_instance(self):
        """
        Create and configure a new KRA instance using pkispawn.
        Creates a configuration file with IPA-specific
        parameters and passes it to the base class to call pkispawn
        """

        # Create an empty and secured file
        (cfg_fd, cfg_file) = tempfile.mkstemp()
        os.close(cfg_fd)
        pent = pwd.getpwnam(self.service_user)
        os.chown(cfg_file, pent.pw_uid, pent.pw_gid)

        # Create KRA configuration
        config = ConfigParser()
        config.optionxform = str
        config.add_section("KRA")

        # Security Domain Authentication
        config.set("KRA", "pki_security_domain_https_port", "443")
        config.set("KRA", "pki_security_domain_password", self.admin_password)
        config.set("KRA", "pki_security_domain_user", self.admin_user)

        # issuing ca
        config.set("KRA", "pki_issuing_ca_uri", "https://%s" %
                   ipautil.format_netloc(self.fqdn, 443))

        # Server
        config.set("KRA", "pki_enable_proxy", "True")
        config.set("KRA", "pki_restart_configured_instance", "False")
        config.set("KRA", "pki_backup_keys", "True")
        config.set("KRA", "pki_backup_password", self.admin_password)

        # Client security database
        config.set("KRA", "pki_client_database_dir", self.agent_db)
        config.set("KRA", "pki_client_database_password", self.admin_password)
        config.set("KRA", "pki_client_database_purge", "False")
        config.set("KRA", "pki_client_pkcs12_password", self.admin_password)

        # Administrator
        config.set("KRA", "pki_admin_name", self.admin_user)
        config.set("KRA", "pki_admin_uid", self.admin_user)
        config.set("KRA", "pki_admin_email", "root@localhost")
        config.set("KRA", "pki_admin_password", self.admin_password)
        config.set("KRA", "pki_admin_nickname", "ipa-ca-agent")
        config.set("KRA", "pki_admin_subject_dn",
                   str(DN(('cn', 'ipa-ca-agent'), self.subject_base)))
        config.set("KRA", "pki_import_admin_cert", "True")
        config.set("KRA", "pki_admin_cert_file", paths.ADMIN_CERT_PATH)
        config.set("KRA", "pki_client_admin_cert_p12", paths.DOGTAG_ADMIN_P12)

        # Directory server
        config.set("KRA", "pki_ds_ldap_port", "389")
        config.set("KRA", "pki_ds_password", self.dm_password)
        config.set("KRA", "pki_ds_base_dn", self.basedn)
        config.set("KRA", "pki_ds_database", "ipaca")
        config.set("KRA", "pki_ds_create_new_db", "False")

        self._use_ldaps_during_spawn(config)

        # Certificate subject DNs
        config.set("KRA", "pki_subsystem_subject_dn",
                   str(DN(('cn', 'CA Subsystem'), self.subject_base)))
        config.set("KRA", "pki_ssl_server_subject_dn",
                   str(DN(('cn', self.fqdn), self.subject_base)))
        config.set("KRA", "pki_audit_signing_subject_dn",
                   str(DN(('cn', 'KRA Audit'), self.subject_base)))
        config.set(
            "KRA", "pki_transport_subject_dn",
            str(DN(('cn', 'KRA Transport Certificate'), self.subject_base)))
        config.set(
            "KRA", "pki_storage_subject_dn",
            str(DN(('cn', 'KRA Storage Certificate'), self.subject_base)))

        # Certificate nicknames
        # Note that both the server certs and subsystem certs reuse
        # the ca certs.
        config.set("KRA", "pki_subsystem_nickname",
                   "subsystemCert cert-pki-ca")
        config.set("KRA", "pki_ssl_server_nickname",
                   "Server-Cert cert-pki-ca")
        config.set("KRA", "pki_audit_signing_nickname",
                   "auditSigningCert cert-pki-kra")
        config.set("KRA", "pki_transport_nickname",
                   "transportCert cert-pki-kra")
        config.set("KRA", "pki_storage_nickname",
                   "storageCert cert-pki-kra")

        # Shared db settings
        # Needed because CA and KRA share the same database
        # We will use the dbuser created for the CA
        config.set("KRA", "pki_share_db", "True")
        config.set(
            "KRA", "pki_share_dbuser_dn",
            str(DN(('uid', 'pkidbuser'), ('ou', 'people'), ('o', 'ipaca'))))

        _p12_tmpfile_handle, p12_tmpfile_name = tempfile.mkstemp(dir=paths.TMP)

        if self.clone:
            krafile = self.pkcs12_info[0]
            shutil.copy(krafile, p12_tmpfile_name)
            pent = pwd.getpwnam(self.service_user)
            os.chown(p12_tmpfile_name, pent.pw_uid, pent.pw_gid)

            # Security domain registration
            config.set("KRA", "pki_security_domain_hostname", self.master_host)
            config.set("KRA", "pki_security_domain_https_port", "443")
            config.set("KRA", "pki_security_domain_user", self.admin_user)
            config.set("KRA", "pki_security_domain_password",
                       self.admin_password)

            # Clone
            config.set("KRA", "pki_clone", "True")
            config.set("KRA", "pki_clone_pkcs12_path", p12_tmpfile_name)
            config.set("KRA", "pki_clone_pkcs12_password", self.dm_password)
            config.set("KRA", "pki_clone_setup_replication", "False")
            config.set(
                "KRA", "pki_clone_uri",
                "https://%s" % ipautil.format_netloc(self.master_host, 443))
        else:
            # the admin cert file is needed for the first instance of KRA
            cert = DogtagInstance.get_admin_cert(self)
            with open(paths.ADMIN_CERT_PATH, "w") as admin_path:
                admin_path.write(cert)

        # Generate configuration file
        with open(cfg_file, "wb") as f:
            config.write(f)

        try:
            DogtagInstance.spawn_instance(
                self, cfg_file,
                nolog_list=(self.dm_password, self.admin_password)
            )
        finally:
            os.remove(p12_tmpfile_name)
            os.remove(cfg_file)

        shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)

        export_kra_agent_pem()

        self.log.debug("completed creating KRA instance")
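One portability note on the final write: open(cfg_file, "wb") works with Python 2's ConfigParser, but Python 3's configparser expects a text-mode stream. The Python 3 equivalent of that step would be:

from configparser import ConfigParser

config = ConfigParser()
config.add_section("KRA")
config.set("KRA", "pki_enable_proxy", "True")
with open("/tmp/kra.cfg", "w") as f:  # text mode; a binary stream raises TypeError
    config.write(f)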
Example #10
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, v)
    conf.set(sp_zero_section, 'default', True)

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            utils.mkdirs(os.path.join(_testdir, 'sda1'))
            utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
            obj_socket = eventlet.listen(('localhost', 0))
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 2 replicas, 4 partitions, 2 devices
        _info('No source object ring file, creating 2rep/4part/2dev ring')
        obj_sockets = [eventlet.listen(('localhost', 0)) for _ in (0, 1)]
        ring_data = ring.RingData(
            [[0, 1, 0, 1], [1, 0, 1, 0]],
            [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
              'port': obj_sockets[0].getsockname()[1]},
             {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
              'port': obj_sockets[1].getsockname()[1]}],
            30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
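Unlike Example #2, this code hands the boolean True straight to conf.set(). That is accepted on Python 2 but raises TypeError on Python 3, where the value has to be spelled as a string:

from configparser import ConfigParser

conf = ConfigParser()
conf.add_section('storage-policy:0')
conf.set('storage-policy:0', 'default', 'True')  # str, not bool, on Python 3
assert conf.getboolean('storage-policy:0', 'default') is True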
Example #11
class RepositoryDnf(RepositoryBase):
    """
    **Implements repository handling for dnf package manager**

    :param str shared_dnf_dir: shared directory between image root
        and build system root
    :param str runtime_dnf_config_file: dnf runtime config file name
    :param dict command_env: customized os.environ for dnf
    :param str runtime_dnf_config: instance of :class:`ConfigParser`
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom dnf arguments and create runtime configuration
        and environment

        :param list custom_args: dnf arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in dnf call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpg_check = '1'
        else:
            self.gpg_check = '0'

        self.locale = list(item for item in self.custom_args
                           if '_install_langs' in item)
        if self.locale:
            self.custom_args.remove(self.locale[0])

        self.repo_names = []

        # dnf support is based on creating repo files which contain
        # path names to the repo and its cache. In order to allow
        # persistent use of the files inside and outside of a chroot
        # call, an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/dnf'

        self.shared_dnf_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_dnf_config_file = NamedTemporaryFile(dir=self.root_dir)

        self.dnf_args = ['-c', self.runtime_dnf_config_file.name, '-y'
                         ] + self.custom_args

        self.command_env = self._create_dnf_runtime_environment()

        # config file parameters for dnf tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def setup_package_database_configuration(self):
        """
        Setup rpm macros for bootstrapping and image building

        1. Create the rpm image macro which persists during the build
        2. Create the rpm bootstrap macro to make sure that, during
           bootstrapping, the rpm database location matches the host
           rpm database setup. This macro only persists during the
           bootstrap phase
        """
        rpmdb = RpmDataBase(self.root_dir,
                            Defaults.get_custom_rpm_image_macro_name())
        if self.locale:
            rpmdb.set_macro_from_string(self.locale[0])
        rpmdb.write_config()

        RpmDataBase(self.root_dir).set_database_to_host_path()

    def use_default_location(self):
        """
        Setup dnf repository operations to store all data
        in the default places
        """
        self.shared_dnf_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum.repos.d'
        self.shared_dnf_dir['cache-dir'] = \
            self.root_dir + '/var/cache/dnf'
        self.shared_dnf_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/dnf/plugins'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        dnf runtime configuration and environment

        :return: dict with dnf_args (list) and command_env (dict)

        :rtype: dict
        """
        return {'dnf_args': self.dnf_args, 'command_env': self.command_env}

    def add_repo(self,
                 name,
                 uri,
                 repo_type='rpm-md',
                 prio=None,
                 dist=None,
                 components=None,
                 user=None,
                 secret=None,
                 credentials_file=None,
                 repo_gpgcheck=None,
                 pkg_gpgcheck=None):
        """
        Add dnf repository

        :param str name: repository base file name
        :param str uri: repository URI
        :param repo_type: repository type name
        :param int prio: dnf repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # dnf requires local paths to use the file:// scheme
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(name, 'name', name)
        repo_config.set(name, 'baseurl', uri)
        if prio:
            repo_config.set(name, 'priority', format(prio))
        if repo_gpgcheck is not None:
            repo_config.set(name, 'repo_gpgcheck',
                            '1' if repo_gpgcheck else '0')
        if pkg_gpgcheck is not None:
            repo_config.set(name, 'gpgcheck', '1' if pkg_gpgcheck else '0')
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        rpmdb = RpmDataBase(self.root_dir)
        for key in signing_keys:
            rpmdb.import_signing_key_to_image(key)

    def delete_repo(self, name):
        """
        Delete dnf repository

        :param str name: repository base file name
        """
        Path.wipe(self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo')

    def delete_all_repos(self):
        """
        Delete all dnf repositories
        """
        Path.wipe(self.shared_dnf_dir['reposd-dir'])
        Path.create(self.shared_dnf_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete dnf repository cache

        The cache data for each repository is stored in a directory
        and in additional files, all starting with the repository name.
        The method globs for the repository name followed by any
        characters and deletes all matching files and directories to
        clean up the cache information

        :param str name: repository name
        """
        dnf_cache_glob_pattern = ''.join(
            [self.shared_dnf_dir['cache-dir'], os.sep, name, '*'])
        for dnf_cache_file in glob.iglob(dnf_cache_glob_pattern):
            Path.wipe(dnf_cache_file)

    def cleanup_unused_repos(self):
        """
        Delete unused dnf repositories

        Repository configurations which are not used for this build
        must be removed, otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_dnf_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_dnf_runtime_environment(self):
        for dnf_dir in list(self.shared_dnf_dir.values()):
            Path.create(dnf_dir)
        return dict(os.environ, LANG='C')

    def _create_runtime_config_parser(self):
        self.runtime_dnf_config = ConfigParser()
        self.runtime_dnf_config.add_section('main')

        self.runtime_dnf_config.set('main', 'cachedir',
                                    self.shared_dnf_dir['cache-dir'])
        self.runtime_dnf_config.set('main', 'reposdir',
                                    self.shared_dnf_dir['reposd-dir'])
        self.runtime_dnf_config.set('main', 'pluginconfpath',
                                    self.shared_dnf_dir['pluginconf-dir'])
        self.runtime_dnf_config.set('main', 'keepcache', '1')
        self.runtime_dnf_config.set('main', 'debuglevel', '2')
        self.runtime_dnf_config.set('main', 'best', '1')
        self.runtime_dnf_config.set('main', 'obsoletes', '1')
        self.runtime_dnf_config.set('main', 'plugins', '1')
        self.runtime_dnf_config.set('main', 'gpgcheck', self.gpg_check)
        if self.exclude_docs:
            self.runtime_dnf_config.set('main', 'tsflags', 'nodocs')

    def _create_runtime_plugin_config_parser(self):
        self.runtime_dnf_plugin_config = ConfigParser()
        self.runtime_dnf_plugin_config.add_section('main')

        self.runtime_dnf_plugin_config.set('main', 'enabled', '1')

    def _write_runtime_config(self):
        with open(self.runtime_dnf_config_file.name, 'w') as config:
            self.runtime_dnf_config.write(config)
        dnf_plugin_config_file = \
            self.shared_dnf_dir['pluginconf-dir'] + '/priorities.conf'
        with open(dnf_plugin_config_file, 'w') as pluginconfig:
            self.runtime_dnf_plugin_config.write(pluginconfig)
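For reference, _create_runtime_plugin_config_parser() together with _write_runtime_config() leaves behind a priorities.conf for dnf's priorities plugin; a standalone sketch producing the same two-line file (path is illustrative):

from configparser import ConfigParser

plugin_config = ConfigParser()
plugin_config.add_section('main')
plugin_config.set('main', 'enabled', '1')
with open('/tmp/priorities.conf', 'w') as fp:
    plugin_config.write(fp)
# file content:
#   [main]
#   enabled = 1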
Example #12
class MultiPortConfig(object):

    HW_LB = "HW"

    @staticmethod
    def float_x_plus_one_tenth_of_y(x, y):
        return float(x) + float(y) / 10.0

    @staticmethod
    def make_str(base, iterator):
        return ' '.join((base.format(x) for x in iterator))

    @classmethod
    def make_range_str(cls, base, start, stop=0, offset=0):
        if offset and not stop:
            stop = start + offset
        return cls.make_str(base, range(start, stop))

    @staticmethod
    def parser_get(parser, section, key, default=None):
        if parser.has_option(section, key):
            return parser.get(section, key)
        return default

    @staticmethod
    def make_ip_addr(ip, mask):
        """
        :param ip: IP address
        :type ip: str
        :param mask: prefix length (e.g. 24) or dotted netmask
            (e.g. 255.255.255.0)
        :type mask: str
        :return: interface
        :rtype: IPv4Interface
        """

        try:
            return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
        except (TypeError, ValueError):
            # None so we can skip later
            return None

    @classmethod
    def validate_ip_and_prefixlen(cls, ip_addr, prefixlen):
        ip_addr = cls.make_ip_addr(ip_addr, prefixlen)
        return ip_addr.ip.exploded, ip_addr.network.prefixlen

    def __init__(self,
                 topology_file,
                 config_tpl,
                 tmp_file,
                 vnfd_helper,
                 vnf_type='CGNAT',
                 lb_count=2,
                 worker_threads=3,
                 worker_config='1C/1T',
                 lb_config='SW',
                 socket=0):

        super(MultiPortConfig, self).__init__()
        self.topology_file = topology_file
        self.worker_config = worker_config.split('/')[1].lower()
        self.worker_threads = self.get_worker_threads(worker_threads)
        self.vnf_type = vnf_type
        self.pipe_line = 0
        self.vnfd_helper = vnfd_helper
        self.write_parser = ConfigParser()
        self.read_parser = ConfigParser()
        self.read_parser.read(config_tpl)
        self.master_core = self.read_parser.get("PIPELINE0", "core")
        self.master_tpl = self.get_config_tpl_data('MASTER')
        self.arpicmp_tpl = self.get_config_tpl_data('ARPICMP')
        self.txrx_tpl = self.get_config_tpl_data('TXRX')
        self.loadb_tpl = self.get_config_tpl_data('LOADB')
        self.vnf_tpl = self.get_config_tpl_data(vnf_type)
        self.swq = 0
        self.lb_count = int(lb_count)
        self.lb_config = lb_config
        self.tmp_file = os.path.join("/tmp", tmp_file)
        self.pktq_out_os = []
        self.socket = socket
        self.start_core = 0
        self.pipeline_counter = ""
        self.txrx_pipeline = ""
        self._port_pairs = None
        self.all_ports = []
        self.port_pair_list = []
        self.lb_to_port_pair_mapping = {}
        self.init_eal()

        self.lb_index = None
        self.mul = 0
        self.port_pairs = []
        self.ports_len = 0
        self.prv_que_handler = None
        self.vnfd = None
        self.rules = None
        self.pktq_out = []

    @staticmethod
    def gen_core(core):
        # return "s{}c{}".format(self.socket, core)
        # don't use sockets for VNFs, because we don't want to have to
        # adjust VM CPU topology.  It is virtual anyway
        return str(core)

    def make_port_pairs_iter(self, operand, iterable):
        return (operand(self.vnfd_helper.port_num(x), y) for y in iterable
                for x in chain.from_iterable(self.port_pairs))

    def make_range_port_pairs_iter(self, operand, start, end):
        return self.make_port_pairs_iter(operand, range(start, end))

    def init_eal(self):
        lines = ['[EAL]\n']
        vpci = (v['virtual-interface']["vpci"]
                for v in self.vnfd_helper.interfaces)
        lines.extend('w = {0}\n'.format(item) for item in vpci)
        lines.append('\n')
        with open(self.tmp_file, 'w') as fh:
            fh.writelines(lines)

    def update_timer(self):
        timer_tpl = self.get_config_tpl_data('TIMER')
        timer_tpl['core'] = self.gen_core(0)
        self.update_write_parser(timer_tpl)

    def get_config_tpl_data(self, type_value):
        for section in self.read_parser.sections():
            if self.read_parser.has_option(section, 'type'):
                if type_value == self.read_parser.get(section, 'type'):
                    tpl = OrderedDict(self.read_parser.items(section))
                    return tpl

    def get_txrx_tpl_data(self, value):
        for section in self.read_parser.sections():
            if self.read_parser.has_option(section, 'pipeline_txrx_type'):
                if value == self.read_parser.get(section,
                                                 'pipeline_txrx_type'):
                    tpl = OrderedDict(self.read_parser.items(section))
                    return tpl

    def init_write_parser_template(self, type_value='ARPICMP'):
        for section in self.read_parser.sections():
            if type_value == self.parser_get(self.read_parser, section, 'type',
                                             object()):
                self.pipeline_counter = self.read_parser.getint(
                    section, 'core')
                self.txrx_pipeline = self.read_parser.getint(section, 'core')
                return
            self.write_parser.add_section(section)
            for name, value in self.read_parser.items(section):
                self.write_parser.set(section, name, value)

    def update_write_parser(self, data):
        section = "PIPELINE{0}".format(self.pipeline_counter)
        self.write_parser.add_section(section)
        for name, value in data.items():
            self.write_parser.set(section, name, value)

    def get_worker_threads(self, worker_threads):
        if self.worker_config == '1t':
            return worker_threads
        else:
            return worker_threads - worker_threads % 2

    def generate_next_core_id(self):
        if self.worker_config == '1t':
            self.start_core += 1
            return

        try:
            self.start_core = '{}h'.format(int(self.start_core))
        except ValueError:
            self.start_core = int(self.start_core[:-1]) + 1

    def get_lb_count(self):
        self.lb_count = int(min(len(self.port_pair_list), self.lb_count))

    def generate_lb_to_port_pair_mapping(self):
        self.lb_to_port_pair_mapping = defaultdict(int)
        port_pair_count = len(self.port_pair_list)
        lb_pair_count = int(port_pair_count / self.lb_count)
        extra = port_pair_count % self.lb_count
        extra_iter = repeat(lb_pair_count + 1, extra)
        norm_iter = repeat(lb_pair_count, port_pair_count - extra)
        new_values = {
            i: v
            for i, v in enumerate(chain(extra_iter, norm_iter), 1)
        }
        self.lb_to_port_pair_mapping.update(new_values)

    def set_priv_to_pub_mapping(self):
        port_nums = [
            tuple(self.vnfd_helper.port_nums(x)) for x in self.port_pair_list
        ]
        return "".join(str(y).replace(" ", "") for y in port_nums)

    def set_priv_que_handler(self):
        # iterated twice, can't be generator
        priv_to_pub_map = [
            tuple(self.vnfd_helper.port_nums(x)) for x in self.port_pairs
        ]
        # must be list to use .index()
        port_list = list(chain.from_iterable(priv_to_pub_map))
        uplink_ports = (x[0] for x in priv_to_pub_map)
        self.prv_que_handler = '({})'.format("".join(
            ("{},".format(port_list.index(x)) for x in uplink_ports)))

    def generate_arp_route_tbl(self):
        arp_route_tbl_tmpl = "({port0_dst_ip_hex},{port0_netmask_hex},{port_num}," \
                             "{next_hop_ip_hex})"

        def build_arp_config(port):
            dpdk_port_num = self.vnfd_helper.port_num(port)
            interface = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            # We must use the dst because we are on the VNF and we need to
            # reach the TG.
            dst_port0_ip = ipaddress.ip_interface(
                six.text_type("%s/%s" %
                              (interface["dst_ip"], interface["netmask"])))

            arp_vars = {
                "port0_dst_ip_hex":
                ip_to_hex(dst_port0_ip.network.network_address.exploded),
                "port0_netmask_hex":
                ip_to_hex(dst_port0_ip.network.netmask.exploded),
                # this is the port num that contains port0 subnet and next_hop_ip_hex
                # this is LINKID which should be based on DPDK port number
                "port_num":
                dpdk_port_num,
                # next hop is dst in this case
                # must be within subnet
                "next_hop_ip_hex":
                ip_to_hex(dst_port0_ip.ip.exploded),
            }
            return arp_route_tbl_tmpl.format(**arp_vars)

        return ' '.join(build_arp_config(port) for port in self.all_ports)

    def generate_arpicmp_data(self):
        swq_in_str = self.make_range_str('SWQ{}',
                                         self.swq,
                                         offset=self.lb_count)
        self.swq += self.lb_count
        swq_out_str = self.make_range_str('SWQ{}',
                                          self.swq,
                                          offset=self.lb_count)
        self.swq += self.lb_count
        # ports_mac_list is disabled for some reason

        # mac_iter = (self.vnfd_helper.find_interface(name=port)['virtual-interface']['local_mac']
        #             for port in self.all_ports)
        pktq_in_iter = ('RXQ{}.0'.format(self.vnfd_helper.port_num(x[0]))
                        for x in self.port_pair_list)

        arpicmp_data = {
            'core': self.gen_core(0),
            'pktq_in': swq_in_str,
            'pktq_out': swq_out_str,
            # we need to disable ports_mac_list?
            # it looks like ports_mac_list is no longer required
            # 'ports_mac_list': ' '.join(mac_iter),
            'pktq_in_prv': ' '.join(pktq_in_iter),
            'prv_to_pub_map': self.set_priv_to_pub_mapping(),
            'arp_route_tbl': self.generate_arp_route_tbl(),
            # nd_route_tbl must be set or we get a segfault on random OpenStack IPv6 traffic
            # 'nd_route_tbl': "(0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)"
            # safe default?  route discard prefix to localhost
            'nd_route_tbl': "(0100::,64,0,::1)"
        }
        self.pktq_out_os = swq_out_str.split(' ')
        # HWLB is run-to-completion, so override the pktq_in/pktq_out
        if self.lb_config == self.HW_LB:
            self.swq = 0
            swq_in_str = \
                self.make_range_str('SWQ{}', self.swq,
                                    offset=(self.lb_count * self.worker_threads))
            arpicmp_data['pktq_in'] = swq_in_str
            # Workaround: port_pairs will not be populated during the arp pipeline
            self.port_pairs = self.port_pair_list
            port_iter = \
                self.make_port_pairs_iter(self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_out = self.make_str('TXQ{}', port_iter)
            arpicmp_data['pktq_out'] = pktq_out

        return arpicmp_data

    def generate_final_txrx_data(self, core=0):
        swq_start = self.swq - self.ports_len * self.worker_threads

        txq_start = 0
        txq_end = self.worker_threads

        pktq_out_iter = self.make_range_port_pairs_iter(
            self.float_x_plus_one_tenth_of_y, txq_start, txq_end)

        swq_str = self.make_range_str('SWQ{}', swq_start, self.swq)
        txq_str = self.make_str('TXQ{}', pktq_out_iter)
        rxtx_data = {
            'pktq_in': swq_str,
            'pktq_out': txq_str,
            'pipeline_txrx_type': 'TXTX',
            'core': self.gen_core(core),
        }
        pktq_in = rxtx_data['pktq_in']
        pktq_in = '{0} {1}'.format(pktq_in,
                                   self.pktq_out_os[self.lb_index - 1])
        rxtx_data['pktq_in'] = pktq_in
        self.pipeline_counter += 1
        return rxtx_data

    def generate_initial_txrx_data(self):
        pktq_iter = self.make_range_port_pairs_iter(
            self.float_x_plus_one_tenth_of_y, 0, self.worker_threads)

        rxq_str = self.make_str('RXQ{}', pktq_iter)
        swq_str = self.make_range_str('SWQ{}', self.swq, offset=self.ports_len)
        txrx_data = {
            'pktq_in': rxq_str,
            'pktq_out': swq_str + ' SWQ{0}'.format(self.lb_index - 1),
            'pipeline_txrx_type': 'RXRX',
            'core': self.gen_core(self.start_core),
        }
        self.pipeline_counter += 1
        return self.start_core, txrx_data

    def generate_lb_data(self):
        pktq_in = self.make_range_str('SWQ{}', self.swq, offset=self.ports_len)
        self.swq += self.ports_len

        offset = self.ports_len * self.worker_threads
        pktq_out = self.make_range_str('SWQ{}', self.swq, offset=offset)
        self.pktq_out = pktq_out.split()

        self.swq += (self.ports_len * self.worker_threads)
        lb_data = {
            'prv_que_handler': self.prv_que_handler,
            'pktq_in': pktq_in,
            'pktq_out': pktq_out,
            'n_vnf_threads': str(self.worker_threads),
            'core': self.gen_core(self.start_core),
        }
        self.pipeline_counter += 1
        return lb_data

    def generate_vnf_data(self):
        if self.lb_config == self.HW_LB:
            port_iter = self.make_port_pairs_iter(
                self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_in = self.make_str('RXQ{}', port_iter)

            self.mul += 1
            port_iter = self.make_port_pairs_iter(
                self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_out = self.make_str('TXQ{}', port_iter)

            pipe_line_data = {
                'pktq_in': pktq_in,
                'pktq_out': pktq_out + ' SWQ{0}'.format(self.swq),
                'prv_que_handler': self.prv_que_handler,
                'core': self.gen_core(self.start_core),
            }
            self.swq += 1
        else:
            pipe_line_data = {
                'pktq_in':
                ' '.join(
                    (self.pktq_out.pop(0) for _ in range(self.ports_len))),
                'pktq_out':
                self.make_range_str('SWQ{}', self.swq, offset=self.ports_len),
                'prv_que_handler':
                self.prv_que_handler,
                'core':
                self.gen_core(self.start_core),
            }
            self.swq += self.ports_len

        if self.vnf_type in ('ACL', 'VFW'):
            pipe_line_data.pop('prv_que_handler')

        if self.vnf_tpl.get('vnf_set'):
            public_ip_port_range_list = self.vnf_tpl[
                'public_ip_port_range'].split(':')
            ip_in_hex = '{:x}'.format(
                int(public_ip_port_range_list[0], 16) + self.lb_index - 1)
            public_ip_port_range_list[0] = ip_in_hex
            self.vnf_tpl['public_ip_port_range'] = ':'.join(
                public_ip_port_range_list)

        self.pipeline_counter += 1
        return pipe_line_data

    def generate_config_data(self):
        self.init_write_parser_template()

        # use master core for master, don't use self.start_core
        self.write_parser.set('PIPELINE0', 'core',
                              self.gen_core(self.master_core))
        arpicmp_data = self.generate_arpicmp_data()
        self.arpicmp_tpl.update(arpicmp_data)
        self.update_write_parser(self.arpicmp_tpl)

        if self.vnf_type == 'CGNAPT':
            self.pipeline_counter += 1
            self.update_timer()

        if self.lb_config == 'HW':
            self.start_core = 1

        for lb in self.lb_to_port_pair_mapping:
            self.lb_index = lb
            self.mul = 0
            port_pair_count = self.lb_to_port_pair_mapping[lb]
            if not self.port_pair_list:
                continue

            self.port_pairs = self.port_pair_list[:port_pair_count]
            self.port_pair_list = self.port_pair_list[port_pair_count:]
            self.ports_len = port_pair_count * 2
            self.set_priv_que_handler()
            if self.lb_config == 'SW':
                core, txrx_data = self.generate_initial_txrx_data()
                self.txrx_tpl.update(txrx_data)
                self.update_write_parser(self.txrx_tpl)
                self.start_core += 1
                lb_data = self.generate_lb_data()
                self.loadb_tpl.update(lb_data)
                self.update_write_parser(self.loadb_tpl)
                self.start_core += 1

            for i in range(self.worker_threads):
                vnf_data = self.generate_vnf_data()
                if not self.vnf_tpl:
                    self.vnf_tpl = {}
                self.vnf_tpl.update(vnf_data)
                self.update_write_parser(self.vnf_tpl)
                try:
                    self.vnf_tpl.pop('vnf_set')
                except KeyError:
                    pass
                else:
                    self.vnf_tpl.pop('public_ip_port_range')
                self.generate_next_core_id()

            if self.lb_config == 'SW':
                txrx_data = self.generate_final_txrx_data(core)
                self.txrx_tpl.update(txrx_data)
                self.update_write_parser(self.txrx_tpl)
            self.vnf_tpl = self.get_config_tpl_data(self.vnf_type)

    def generate_config(self):
        self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
        self.port_pair_list = self._port_pairs.port_pair_list
        self.all_ports = self._port_pairs.all_ports

        self.get_lb_count()
        self.generate_lb_to_port_pair_mapping()
        self.generate_config_data()
        self.write_parser.write(sys.stdout)
        with open(self.tmp_file, 'a') as tfh:
            self.write_parser.write(tfh)

    def generate_link_config(self):
        def build_args(port):
            # lookup interface by name
            virtual_interface = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            local_ip = virtual_interface["local_ip"]
            netmask = virtual_interface["netmask"]
            port_num = self.vnfd_helper.port_num(port)
            port_ip, prefix_len = self.validate_ip_and_prefixlen(
                local_ip, netmask)
            return LINK_CONFIG_TEMPLATE.format(port_num, port_ip, prefix_len)

        return ''.join(build_args(port) for port in self.all_ports)

    def get_route_data(self, src_key, data_key, port):
        route_list = self.vnfd['vdu'][0].get(src_key, [])
        try:
            return next((route[data_key]
                         for route in route_list if route['if'] == port), None)
        except (TypeError, StopIteration, KeyError):
            return None

    def get_ports_gateway(self, port):
        return self.get_route_data('routing_table', 'gateway', port)

    def get_ports_gateway6(self, port):
        return self.get_route_data('nd_route_tbl', 'gateway', port)

    def get_netmask_gateway(self, port):
        return self.get_route_data('routing_table', 'netmask', port)

    def get_netmask_gateway6(self, port):
        return self.get_route_data('nd_route_tbl', 'netmask', port)

    def generate_arp_config(self):
        arp_config = []
        for port in self.all_ports:
            # ignore gateway, always use TG IP
            # gateway = self.get_ports_gateway(port)
            vintf = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            dst_mac = vintf["dst_mac"]
            dst_ip = vintf["dst_ip"]
            # arp_config.append(
            #     (self.vnfd_helper.port_num(port), gateway, dst_mac, self.txrx_pipeline))
            # so dst_mac is the TG dest mac, so we need TG dest IP.
            # should be dpdk_port_num
            arp_config.append((self.vnfd_helper.port_num(port), dst_ip,
                               dst_mac, self.txrx_pipeline))

        return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values)
                          for values in arp_config))

    def generate_arp_config6(self):
        arp_config6 = []
        for port in self.all_ports:
            # ignore gateway, always use TG IP
            # gateway6 = self.get_ports_gateway6(port)
            vintf = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            dst_mac6 = vintf["dst_mac"]
            dst_ip6 = vintf["dst_ip"]
            # arp_config6.append(
            #     (self.vnfd_helper.port_num(port), gateway6, dst_mac6, self.txrx_pipeline))
            arp_config6.append((self.vnfd_helper.port_num(port), dst_ip6,
                                dst_mac6, self.txrx_pipeline))

        return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values)
                          for values in arp_config6))

    def generate_action_config(self):
        port_list = (self.vnfd_helper.port_num(p) for p in self.all_ports)
        if self.vnf_type == "VFW":
            template = FW_ACTION_TEMPLATE
        else:
            template = ACTION_TEMPLATE

        return ''.join((template.format(port) for port in port_list))

    def get_ip_from_port(self, port):
        # we can't use gateway because in OpenStack gateways interfere with floating ip routing
        # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
        vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
        ip = vintf["local_ip"]
        netmask = vintf["netmask"]
        return self.make_ip_addr(ip, netmask)

    def get_network_and_prefixlen_from_ip_of_port(self, port):
        ip_addr = self.get_ip_from_port(port)
        # handle cases with no gateway
        if ip_addr:
            return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
        else:
            return None, None

    def generate_rule_config(self):
        cmd = 'acl' if self.vnf_type == "ACL" else "vfw"
        rules_config = self.rules if self.rules else ''
        new_rules = []
        new_ipv6_rules = []
        pattern = 'p {0} add {1} {2} {3} {4} {5} 0 65535 0 65535 0 0 {6}'
        for src_intf, dst_intf in self.port_pair_list:
            src_port = self.vnfd_helper.port_num(src_intf)
            dst_port = self.vnfd_helper.port_num(dst_intf)

            src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(
                src_intf)
            dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(
                dst_intf)
            # ignore entries with empty values
            if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
                new_rules.append(
                    (cmd, self.txrx_pipeline, src_net, src_prefix_len, dst_net,
                     dst_prefix_len, dst_port))
                new_rules.append(
                    (cmd, self.txrx_pipeline, dst_net, dst_prefix_len, src_net,
                     src_prefix_len, src_port))

            # src_net = self.get_ports_gateway6(port_pair[0])
            # src_prefix_len = self.get_netmask_gateway6(port_pair[0])
            # dst_net = self.get_ports_gateway6(port_pair[1])
            # dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
            # # ignore entries with empty values
            # if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
            #     new_ipv6_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
            #                            dst_net, dst_prefix_len, dst_port))
            #     new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
            #                            src_net, src_prefix_len, src_port))

        acl_apply = "\np %s applyruleset" % cmd
        new_rules_config = '\n'.join(
            pattern.format(*values)
            for values in chain(new_rules, new_ipv6_rules))
        return ''.join([rules_config, new_rules_config, acl_apply])

    def generate_script_data(self):
        self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
        self.port_pair_list = self._port_pairs.port_pair_list
        self.get_lb_count()
        script_data = {
            'link_config': self.generate_link_config(),
            'arp_config': self.generate_arp_config(),
            # disable IPv6 for now
            # 'arp_config6': self.generate_arp_config6(),
            'arp_config6': "",
            'actions': '',
            'rules': '',
        }

        if self.vnf_type in ('ACL', 'VFW'):
            script_data.update({
                'actions': self.generate_action_config(),
                'rules': self.generate_rule_config(),
            })

        return script_data

    def generate_script(self, vnfd, rules=None):
        self.vnfd = vnfd
        self.rules = rules
        script_data = self.generate_script_data()
        script = SCRIPT_TPL.format(**script_data)
        if self.lb_config == self.HW_LB:
            script += 'set fwd rxonly'
            hwlb_tpl = """
set_sym_hash_ena_per_port {0} enable
set_hash_global_config {0} simple_xor ipv4-udp enable
set_sym_hash_ena_per_port {1} enable
set_hash_global_config {1} simple_xor ipv4-udp enable
set_hash_input_set {0} ipv4-udp src-ipv4 udp-src-port add
set_hash_input_set {1} ipv4-udp dst-ipv4 udp-dst-port add
set_hash_input_set {0} ipv6-udp src-ipv6 udp-src-port add
set_hash_input_set {1} ipv6-udp dst-ipv6 udp-dst-port add
"""
            for port_pair in self.port_pair_list:
                script += hwlb_tpl.format(
                    *(self.vnfd_helper.port_nums(port_pair)))
        return script
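Among the helpers in this class, parser_get() reimplements a has_option/get fallback; on Python 3 the same thing is available directly through ConfigParser's fallback parameter:

from configparser import ConfigParser

parser = ConfigParser()
parser.add_section('PIPELINE0')
core = parser.get('PIPELINE0', 'core', fallback='0')  # '0' when the key is missing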
Example #13
def test_topic_prefix(topic_prefix, expected):
    conf = ConfigParser()
    conf.add_section('broker')
    conf.set('broker', 'topic_prefix', topic_prefix)
    handler = protonmsg.TimeoutHandler('amqp://broker1.example.com:5672', [], conf)
    assert handler.topic_prefix == expected
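The (topic_prefix, expected) signature suggests this test is driven by pytest parametrization; a hedged reconstruction of what the decorator might look like (the actual value table lives in the real test module):

import pytest

@pytest.mark.parametrize('topic_prefix,expected', [
    ('koji', 'koji'),  # illustrative pairs only
    ('', ''),
])
def test_topic_prefix(topic_prefix, expected):
    ...  # body as above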
Ejemplo n.º 14
0
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = "storage-policy:"

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"' % policy_specified)
        _info("Using specified policy %s" % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info("Defaulting to policy %s" % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + "0"
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, str(v))
    conf.set(sp_zero_section, "default", "True")

    with open(swift_conf, "w") as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = "object.ring.gz"
    if policy_to_test.idx:
        ring_file_src = "object-%s.ring.gz" % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src, use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException("Failed to find ring file %s" % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info("Using source ring file %s" % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = "sd%c1" % chr(len(obj_sockets) + ord("a"))
            utils.mkdirs(os.path.join(_testdir, "sda1"))
            utils.mkdirs(os.path.join(_testdir, "sda1", "tmp"))
            obj_socket = eventlet.listen(("localhost", 0))
            obj_sockets.append(obj_socket)
            dev["port"] = obj_socket.getsockname()[1]
            dev["ip"] = "127.0.0.1"
            dev["device"] = device
            dev["replication_port"] = dev["port"]
            dev["replication_ip"] = dev["ip"]
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 2 replicas, 4 partitions, 2 devices
        _info("No source object ring file, creating 2rep/4part/2dev ring")
        obj_sockets = [eventlet.listen(("localhost", 0)) for _ in (0, 1)]
        ring_data = ring.RingData(
            [[0, 1, 0, 1], [1, 0, 1, 0]],
            [
                {"id": 0, "zone": 0, "device": "sda1", "ip": "127.0.0.1", "port": obj_sockets[0].getsockname()[1]},
                {"id": 1, "zone": 1, "device": "sdb1", "ip": "127.0.0.1", "port": obj_sockets[1].getsockname()[1]},
            ],
            30,
        )
        with closing(GzipFile(ring_file_test, "wb")) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug("Ring file dev: %s" % dev)

    return obj_sockets
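The replica2part2dev_id table handed to ring.RingData above maps (replica, partition) to a device id; a standalone sketch of that lookup for the 2-replica/4-partition ring built here:

# replica2part2dev_id[replica][partition] -> device id, per the ring above
replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
devs = ['sda1', 'sdb1']

def devices_for_partition(part):
    # each replica of a partition lands on a distinct device
    return [devs[row[part]] for row in replica2part2dev_id]

assert devices_for_partition(0) == ['sda1', 'sdb1']
assert devices_for_partition(1) == ['sdb1', 'sda1']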
Example No. 15
            file_path = os.path.join(LOG_PATH, file)
            # Make sure we use gzip to decompress any compressed log files.
            if file.endswith('.gz'):
                # open in text mode so the regex below sees str, not bytes
                handle = gzip.open(file_path, 'rt')
            else:
                handle = open(file_path, 'r')

            # Run each access log line through a regex to extract the IPv4 addresses of remote hosts.
            for line in handle:
                match = re.match(NGINX_ACCESS_PATTERN, line)
                if match:
                    total_hits += 1
                    unique_hits.add(match.group('ipaddress'))
            handle.close()

        stats['unique_hits'] = len(unique_hits)
        stats['total_hits'] = total_hits

    # Build the ConfigParser data.
    config = ConfigParser()
    config.add_section(args.config_section)
    for key, value in stats.items():
        config.set(args.config_section, key, str(value))

    # Output the data in ConfigParser format to stdout and to a file.
    config.write(sys.stdout)
    if args.out:
        print('Writing to file passed via parameter: {filename}'.format(filename=args.out), file=sys.stderr)
        with open(args.out, 'w') as output_file:
            config.write(output_file)
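LOG_PATH and NGINX_ACCESS_PATTERN are defined elsewhere in that script; a plausible minimal pattern exposing the named 'ipaddress' group the loop relies on (an assumption, not the original constant) looks like this:

import re

# Hypothetical stand-in for NGINX_ACCESS_PATTERN: capture the leading
# remote-host field of a combined-format access log line.
NGINX_ACCESS_PATTERN = r'(?P<ipaddress>\d{1,3}(?:\.\d{1,3}){3}) - '

line = '203.0.113.7 - - [10/Oct/2023:13:55:36 +0000] "GET / HTTP/1.1" 200 612'
match = re.match(NGINX_ACCESS_PATTERN, line)
assert match and match.group('ipaddress') == '203.0.113.7'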
Example No. 16
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, str(v))
    conf.set(sp_zero_section, 'default', 'True')

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            utils.mkdirs(os.path.join(_testdir, 'sda1'))
            utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
            obj_socket = listen_zero()
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 3 replicas, 4 partitions, 3 devices
        # which will work for a replication policy or a 2+1 EC policy
        _info('No source object ring file, creating 3rep/4part/3dev ring')
        obj_sockets = [listen_zero() for _ in (0, 1, 2)]
        replica2part2dev_id = [[0, 1, 2, 0],
                               [1, 2, 0, 1],
                               [2, 0, 1, 2]]
        devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': obj_sockets[0].getsockname()[1]},
                {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': obj_sockets[1].getsockname()[1]},
                {'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
                 'port': obj_sockets[2].getsockname()[1]}]
        ring_data = ring.RingData(replica2part2dev_id, devs, 30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
Example No. 17
        os.makedirs(progdir)
    except OSError:
        pass

cfg = RawConfigParser()

# LOAD CONFIG
CONFIG_FN = os.path.join(DIR.user_config_dir, 'config.ini')
try:
    cfg.readfp(open(CONFIG_FN))
except (IOError, OSError):
    open(CONFIG_FN, 'a+').close()
    os.chmod(CONFIG_FN, 0o600)  # u=rw
    with open(CONFIG_FN, 'w') as cfgfile:  # text mode: ConfigParser.write emits str
        tmpcfg = ConfigParser()
        tmpcfg.add_section('defaults')
        tmpcfg.set('defaults', 'service', 'trello')
        tmpcfg.set('defaults', 'board', '')
        tmpcfg.add_section('trello')
        tmpcfg.set('trello', 'apikey', '')
        tmpcfg.set('trello', 'token', '')
        tmpcfg.write(cfgfile)
    print('Trello credentials must be specified here:')
    print(CONFIG_FN)
    sys.exit(1)

# CREATE CACHE
CACHE_FN = os.path.join(DIR.user_config_dir, 'cache.ini')
cache = RawConfigParser()
if not os.path.exists(CACHE_FN):
    open(CACHE_FN, 'a+').close()
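Once the user has filled in the template written above, reading the credentials back follows the same ConfigParser conventions; a minimal sketch (the path is hard-coded here for illustration and stands in for CONFIG_FN):

from configparser import RawConfigParser

cfg = RawConfigParser()
cfg.read('config.ini')  # stands in for CONFIG_FN above
apikey = cfg.get('trello', 'apikey')
token = cfg.get('trello', 'token')
if not (apikey and token):
    raise SystemExit('Trello credentials are still empty')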
Example No. 18
class RepositoryZypper(RepositoryBase):
    """
    Implements repo handling for zypper package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom zypper arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_zypper_dir`
            shared directory between image root and build system root

        * :attr:`runtime_zypper_config_file`
            zypper runtime config file name

        * :attr:`runtime_zypp_config_file`
            libzypp runtime config file name

        * :attr:`zypper_args`
            zypper caller args plus additional custom args

        * :attr:`command_env`
            customized os.environ for zypper

        * :attr:`runtime_zypper_config`
            Instance of ConfigParser

        :param list custom_args: zypper arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        self.gpgcheck = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments used for zypp config only
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpgcheck = True

        self.repo_names = []

        # By default zypper points all actions into the root
        # directory of the image system. This information is passed
        # as arguments to zypper and adapted if the call runs as a
        # chrooted operation. Therefore the use of the shared location
        # via RootBind::mount_shared_directory is optional, but
        # recommended in order to make use of the repo cache
        manager_base = self.root_dir + self.shared_location

        self.shared_zypper_dir = {
            'pkg-cache-dir': os.sep.join(
                [manager_base, 'packages']
            ),
            'reposd-dir': os.sep.join(
                [manager_base, 'zypper/repos']
            ),
            'credentials-dir': os.sep.join(
                [manager_base, 'zypper/credentials']
            ),
            'solv-cache-dir': os.sep.join(
                [manager_base, 'zypper/solv']
            ),
            'raw-cache-dir': os.sep.join(
                [manager_base, 'zypper/raw']
            ),
            'cache-dir': os.sep.join(
                [manager_base, 'zypper']
            )
        }

        self.runtime_zypper_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )
        self.runtime_zypp_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.zypper_args = [
            '--non-interactive',
            '--pkg-cache-dir', self.shared_zypper_dir['pkg-cache-dir'],
            '--reposd-dir', self.shared_zypper_dir['reposd-dir'],
            '--solv-cache-dir', self.shared_zypper_dir['solv-cache-dir'],
            '--cache-dir', self.shared_zypper_dir['cache-dir'],
            '--raw-cache-dir', self.shared_zypper_dir['raw-cache-dir'],
            '--config', self.runtime_zypper_config_file.name
        ] + self.custom_args

        self.command_env = self._create_zypper_runtime_environment()

        # config file parameters for zypper tool
        self.runtime_zypper_config = ConfigParser()
        self.runtime_zypper_config.add_section('main')

        # config file parameters for libzypp library
        self.runtime_zypp_config = ConfigParser()
        self.runtime_zypp_config.add_section('main')
        self.runtime_zypp_config.set(
            'main', 'credentials.global.dir',
            self.shared_zypper_dir['credentials-dir']
        )
        if self.exclude_docs:
            self.runtime_zypp_config.set(
                'main', 'rpm.install.excludedocs', 'yes'
            )

        if self.gpgcheck:
            self.runtime_zypp_config.set(
                'main', 'gpgcheck', '1'
            )
        else:
            self.runtime_zypp_config.set(
                'main', 'gpgcheck', '0'
            )

        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup zypper repository operations to store all data
        in the default places
        """
        self.shared_zypper_dir['reposd-dir'] = \
            self.root_dir + '/etc/zypp/repos.d'
        self.shared_zypper_dir['credentials-dir'] = \
            self.root_dir + '/etc/zypp/credentials.d'
        self.zypper_args = [
            '--non-interactive',
        ] + self.custom_args
        self.command_env = dict(os.environ, LANG='C')

    def runtime_config(self):
        """
        zypper runtime configuration and environment
        """
        return {
            'zypper_args': self.zypper_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add zypper repository

        :param string name: repository name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        :param user: credentials username
        :param secret: credentials password
        :param credentials_file: zypper credentials file
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        if credentials_file:
            repo_secret = os.sep.join(
                [self.shared_zypper_dir['credentials-dir'], credentials_file]
            )
            if os.path.exists(repo_secret):
                Path.wipe(repo_secret)

            if user and secret:
                uri = ''.join([uri, '?credentials=', credentials_file])
                with open(repo_secret, 'w') as credentials:
                    credentials.write('username={0}{1}'.format(
                        user, os.linesep)
                    )
                    credentials.write('password={0}{1}'.format(
                        secret, os.linesep)
                    )

        repo_file = ''.join(
            [self.shared_zypper_dir['reposd-dir'], '/', name, '.repo']
        )
        self.repo_names.append(''.join([name, '.repo']))

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir,
                'addrepo',
                '--refresh',
                '--type', self._translate_repo_type(repo_type),
                '--keep-packages',
                '-C',
                uri,
                name
            ],
            self.command_env
        )
        if prio or repo_gpgcheck is not None or pkg_gpgcheck is not None:
            repo_config = ConfigParser()
            repo_config.read(repo_file)
            if repo_gpgcheck is not None:
                repo_config.set(
                    name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
                )
            if pkg_gpgcheck is not None:
                repo_config.set(
                    name, 'pkg_gpgcheck', '1' if pkg_gpgcheck else '0'
                )
            if prio:
                repo_config.set(
                    name, 'priority', format(prio)
                )
            with open(repo_file, 'w') as repo:
                repo_config.write(repo)
        self._restore_package_cache()

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        for key in signing_keys:
            Command.run(['rpm', '--root', self.root_dir, '--import', key])

    def delete_repo(self, name):
        """
        Delete zypper repository

        :param string name: repository name
        """
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir, 'removerepo', name
            ],
            self.command_env
        )

    def delete_all_repos(self):
        """
        Delete all zypper repositories
        """
        Path.wipe(self.shared_zypper_dir['reposd-dir'])
        Path.create(self.shared_zypper_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete zypper repository cache

        The cache data for each repository is stored in a list of
        directories of the same name as the repository name. The method
        deletes these directories to clean up the cache information

        :param string name: repository name
        """
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['pkg-cache-dir'], name])
        )
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['solv-cache-dir'], name])
        )
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['raw-cache-dir'], name])
        )

    def cleanup_unused_repos(self):
        """
        Delete unused zypper repositories

        zypper creates a system solvable which is unwanted for the
        purpose of building images. In addition zypper fails with
        an error message 'Failed to cache rpm database' if such a
        system solvable exists and a new root system is created

        All other repository configurations which are not used for
        this build must be removed too, otherwise they are taken into
        account for the package installations
        """
        solv_dir = self.shared_zypper_dir['solv-cache-dir']
        Path.wipe(solv_dir + '/@System')

        repos_dir = self.shared_zypper_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_zypper_runtime_environment(self):
        for zypper_dir in list(self.shared_zypper_dir.values()):
            Path.create(zypper_dir)
        return dict(
            os.environ,
            LANG='C',
            ZYPP_CONF=self.runtime_zypp_config_file.name
        )

    def _write_runtime_config(self):
        with open(self.runtime_zypper_config_file.name, 'w') as config:
            self.runtime_zypper_config.write(config)
        with open(self.runtime_zypp_config_file.name, 'w') as config:
            self.runtime_zypp_config.write(config)

    def _translate_repo_type(self, repo_type):
        """
            Translate kiwi supported common repo type names from the schema
            into the name the zyper package manager understands
        """
        zypper_type_for = {
            'rpm-md': 'YUM',
            'rpm-dir': 'Plaindir',
            'yast2': 'YaST'
        }
        try:
            return zypper_type_for[repo_type]
        except Exception:
            raise KiwiRepoTypeUnknown(
                'Unsupported zypper repo type: %s' % repo_type
            )

    def _backup_package_cache(self):
        """
        Preserve the package cache, which would otherwise be removed by
        zypper if no repo file is found. This situation is normal for an
        image build process, which sets up and removes repos for building
        at runtime
        """
        self._move_package_cache(backup=True)

    def _restore_package_cache(self):
        """
        restore preserved package cache at the location passed to zypper
        """
        self._move_package_cache(restore=True)

    def _move_package_cache(self, backup=False, restore=False):
        package_cache = self.shared_location + '/packages'
        package_cache_moved = package_cache + '.moved'
        if backup and os.path.exists(package_cache):
            Command.run(
                ['mv', '-f', package_cache, package_cache_moved]
            )
        elif restore and os.path.exists(package_cache_moved):
            Command.run(
                ['mv', '-f', package_cache_moved, package_cache]
            )

    def __del__(self):
        self._restore_package_cache()
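The runtime files written by _write_runtime_config above are plain INI; a standalone reproduction of the libzypp config built in post_init, assuming both exclude_docs and check_signatures were requested (the directory path is illustrative):

from configparser import ConfigParser
from io import StringIO

zypp = ConfigParser()
zypp.add_section('main')
zypp.set('main', 'credentials.global.dir', '/image-root/shared/zypper/credentials')
zypp.set('main', 'rpm.install.excludedocs', 'yes')
zypp.set('main', 'gpgcheck', '1')

buf = StringIO()
zypp.write(buf)
print(buf.getvalue())
# [main]
# credentials.global.dir = /image-root/shared/zypper/credentials
# rpm.install.excludedocs = yes
# gpgcheck = 1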
Example No. 19
class MultiPortConfig(object):

    HW_LB = "HW"

    @staticmethod
    def float_x_plus_one_tenth_of_y(x, y):
        return float(x) + float(y) / 10.0

    @staticmethod
    def make_str(base, iterator):
        return ' '.join((base.format(x) for x in iterator))

    @classmethod
    def make_range_str(cls, base, start, stop=0, offset=0):
        if offset and not stop:
            stop = start + offset
        return cls.make_str(base, range(start, stop))

    @staticmethod
    def parser_get(parser, section, key, default=None):
        if parser.has_option(section, key):
            return parser.get(section, key)
        return default

    @staticmethod
    def make_ip_addr(ip, mask_len):
        try:
            return ipaddress.ip_interface(
                six.text_type('/'.join([ip, mask_len])))
        except ValueError:
            # None so we can skip later
            return None

    @classmethod
    def validate_ip_and_prefixlen(cls, ip_addr, prefixlen):
        ip_addr = cls.make_ip_addr(ip_addr, prefixlen)
        return ip_addr.ip.exploded, ip_addr.network.prefixlen

    def __init__(self,
                 topology_file,
                 config_tpl,
                 tmp_file,
                 interfaces=None,
                 vnf_type='CGNAT',
                 lb_count=2,
                 worker_threads=3,
                 worker_config='1C/1T',
                 lb_config='SW',
                 socket=0):

        super(MultiPortConfig, self).__init__()
        self.topology_file = topology_file
        self.worker_config = worker_config.split('/')[1].lower()
        self.worker_threads = self.get_worker_threads(worker_threads)
        self.vnf_type = vnf_type
        self.pipe_line = 0
        self.interfaces = interfaces if interfaces else {}
        self.networks = {}
        self.write_parser = ConfigParser()
        self.read_parser = ConfigParser()
        self.read_parser.read(config_tpl)
        self.master_core = self.read_parser.get("PIPELINE0", "core")
        self.master_tpl = self.get_config_tpl_data('MASTER')
        self.arpicmp_tpl = self.get_config_tpl_data('ARPICMP')
        self.txrx_tpl = self.get_config_tpl_data('TXRX')
        self.loadb_tpl = self.get_config_tpl_data('LOADB')
        self.vnf_tpl = self.get_config_tpl_data(vnf_type)
        self.swq = 0
        self.lb_count = int(lb_count)
        self.lb_config = lb_config
        self.tmp_file = os.path.join("/tmp", tmp_file)
        self.pktq_out_os = []
        self.socket = socket
        self.start_core = ""
        self.pipeline_counter = ""
        self.txrx_pipeline = ""
        self.port_pair_list = []
        self.lb_to_port_pair_mapping = {}
        self.init_eal()

        self.lb_index = None
        self.mul = 0
        self.port_pairs = []
        self.port_pair_list = []
        self.ports_len = 0
        self.prv_que_handler = None
        self.vnfd = None
        self.rules = None
        self.pktq_out = ''

    @staticmethod
    def gen_core(core):
        # return "s{}c{}".format(self.socket, core)
        # don't use sockets for VNFs, because we don't want to have to
        # adjust VM CPU topology.  It is virtual anyway
        return str(core)

    def make_port_pairs_iter(self, operand, iterable):
        return (operand(x[-1], y) for y in iterable
                for x in chain(*self.port_pairs))

    def make_range_port_pairs_iter(self, operand, start, end):
        return self.make_port_pairs_iter(operand, range(start, end))

    def init_eal(self):
        vpci = [v['virtual-interface']["vpci"] for v in self.interfaces]
        with open(self.tmp_file, 'w') as fh:
            fh.write('[EAL]\n')
            for item in vpci:
                fh.write('w = {0}\n'.format(item))
            fh.write('\n')

    def update_timer(self):
        timer_tpl = self.get_config_tpl_data('TIMER')
        timer_tpl['core'] = self.gen_core(self.start_core)
        self.update_write_parser(timer_tpl)
        self.start_core += 1

    def get_config_tpl_data(self, type_value):
        for section in self.read_parser.sections():
            if self.read_parser.has_option(section, 'type'):
                if type_value == self.read_parser.get(section, 'type'):
                    tpl = OrderedDict(self.read_parser.items(section))
                    return tpl

    def get_txrx_tpl_data(self, value):
        for section in self.read_parser.sections():
            if self.read_parser.has_option(section, 'pipeline_txrx_type'):
                if value == self.read_parser.get(section,
                                                 'pipeline_txrx_type'):
                    tpl = OrderedDict(self.read_parser.items(section))
                    return tpl

    def init_write_parser_template(self, type_value='ARPICMP'):
        for section in self.read_parser.sections():
            if type_value == self.parser_get(self.read_parser, section, 'type',
                                             object()):
                self.start_core = self.read_parser.getint(section, 'core')
                self.pipeline_counter = self.read_parser.getint(
                    section, 'core')
                self.txrx_pipeline = self.read_parser.getint(section, 'core')
                return
            self.write_parser.add_section(section)
            for name, value in self.read_parser.items(section):
                self.write_parser.set(section, name, value)

    def update_write_parser(self, data):
        section = "PIPELINE{0}".format(self.pipeline_counter)
        self.write_parser.add_section(section)
        for name, value in data.items():
            self.write_parser.set(section, name, value)

    def get_worker_threads(self, worker_threads):
        if self.worker_config == '1t':
            return worker_threads
        else:
            return worker_threads - worker_threads % 2

    def generate_next_core_id(self):
        if self.worker_config == '1t':
            self.start_core += 1
            return

        try:
            self.start_core = 'h{}'.format(int(self.start_core))
        except ValueError:
            self.start_core = int(self.start_core[:-1]) + 1

    @staticmethod
    def get_port_pairs(interfaces):
        port_pair_list = []
        networks = defaultdict(list)
        for private_intf in interfaces:
            vintf = private_intf['virtual-interface']
            networks[vintf['vld_id']].append(vintf)

        for name, net in networks.items():
            # partition returns a tuple
            parts = list(name.partition('private'))
            if parts[0]:
                # 'private' was not in or not leftmost in the string
                continue
            parts[1] = 'public'
            public_id = ''.join(parts)
            for private_intf in net:
                try:
                    public_peer_intfs = networks[public_id]
                except KeyError:
                    LOG.warning(
                        "private network without peer %s, %s not found", name,
                        public_id)
                    continue

                for public_intf in public_peer_intfs:
                    port_pair = private_intf["ifname"], public_intf["ifname"]
                    port_pair_list.append(port_pair)

        return port_pair_list, networks

    def get_lb_count(self):
        self.lb_count = int(min(len(self.port_pair_list), self.lb_count))

    def generate_lb_to_port_pair_mapping(self):
        self.lb_to_port_pair_mapping = defaultdict(int)
        port_pair_count = len(self.port_pair_list)
        lb_pair_count = int(port_pair_count / self.lb_count)
        for i in range(self.lb_count):
            self.lb_to_port_pair_mapping[i + 1] = lb_pair_count
        for i in range(port_pair_count % self.lb_count):
            self.lb_to_port_pair_mapping[i + 1] += 1

    def set_priv_to_pub_mapping(self):
        return "".join(
            str(y) for y in [(int(x[0][-1]), int(x[1][-1]))
                             for x in self.port_pair_list])

    def set_priv_que_handler(self):
        # iterated twice, can't be generator
        priv_to_pub_map = [(int(x[0][-1]), int(x[1][-1]))
                           for x in self.port_pairs]
        # must be list to use .index()
        port_list = list(chain.from_iterable(priv_to_pub_map))
        priv_ports = (x[0] for x in priv_to_pub_map)
        self.prv_que_handler = '({})'.format(",".join(
            (str(port_list.index(x)) for x in priv_ports)))

    def generate_arp_route_tbl(self):
        arp_config = []
        arp_route_tbl_tmpl = "({port0_dst_ip_hex},{port0_netmask_hex},{port_num}," \
                             "{next_hop_ip_hex})"
        for port_pair in self.port_pair_list:
            for port in port_pair:
                port_num = int(port[-1])
                interface = self.interfaces[port_num]
                # port0_ip = ipaddress.ip_interface(six.text_type(
                #     "%s/%s" % (interface["virtual-interface"]["local_ip"],
                #                interface["virtual-interface"]["netmask"])))
                dst_port0_ip = \
                    ipaddress.ip_interface(six.text_type(
                        "%s/%s" % (interface["virtual-interface"]["dst_ip"],
                                   interface["virtual-interface"]["netmask"])))
                arp_vars = {
                    "port0_dst_ip_hex":
                    ip_to_hex(dst_port0_ip.ip.exploded),
                    "port0_netmask_hex":
                    ip_to_hex(dst_port0_ip.network.netmask.exploded),
                    "port_num":
                    port_num,
                    # next hop is dst in this case
                    "next_hop_ip_hex":
                    ip_to_hex(dst_port0_ip.ip.exploded),
                }
                arp_config.append(arp_route_tbl_tmpl.format(**arp_vars))

        return ' '.join(arp_config)

    def generate_arpicmp_data(self):
        swq_in_str = self.make_range_str('SWQ{}',
                                         self.swq,
                                         offset=self.lb_count)
        self.swq += self.lb_count
        swq_out_str = self.make_range_str('SWQ{}',
                                          self.swq,
                                          offset=self.lb_count)
        self.swq += self.lb_count
        mac_iter = (self.interfaces[int(
            x[-1])]['virtual-interface']['local_mac']
                    for port_pair in self.port_pair_list for x in port_pair)
        pktq_in_iter = ('RXQ{}'.format(float(x[0][-1]))
                        for x in self.port_pair_list)

        arpicmp_data = {
            'core': self.gen_core(self.start_core),
            'pktq_in': swq_in_str,
            'pktq_out': swq_out_str,
            'ports_mac_list': ' '.join(mac_iter),
            'pktq_in_prv': ' '.join(pktq_in_iter),
            'prv_to_pub_map': self.set_priv_to_pub_mapping(),
            'arp_route_tbl': self.generate_arp_route_tbl(),
            # can't use an empty string, default to ()
            'nd_route_tbl': "()",
        }
        self.pktq_out_os = swq_out_str.split(' ')
        # why?
        if self.lb_config == self.HW_LB:
            arpicmp_data['pktq_in'] = swq_in_str
            self.swq = 0
        return arpicmp_data

    def generate_final_txrx_data(self):
        swq_start = self.swq - self.ports_len * self.worker_threads

        txq_start = 0
        txq_end = self.worker_threads

        pktq_out_iter = self.make_range_port_pairs_iter(
            self.float_x_plus_one_tenth_of_y, txq_start, txq_end)

        swq_str = self.make_range_str('SWQ{}', swq_start, self.swq)
        txq_str = self.make_str('TXQ{}', pktq_out_iter)
        rxtx_data = {
            'pktq_in': swq_str,
            'pktq_out': txq_str,
            'pipeline_txrx_type': 'TXTX',
            'core': self.gen_core(self.start_core),
        }
        pktq_in = rxtx_data['pktq_in']
        pktq_in = '{0} {1}'.format(pktq_in,
                                   self.pktq_out_os[self.lb_index - 1])
        rxtx_data['pktq_in'] = pktq_in
        self.pipeline_counter += 1
        return rxtx_data

    def generate_initial_txrx_data(self):
        pktq_iter = self.make_range_port_pairs_iter(
            self.float_x_plus_one_tenth_of_y, 0, self.worker_threads)

        rxq_str = self.make_str('RXQ{}', pktq_iter)
        swq_str = self.make_range_str('SWQ{}', self.swq, offset=self.ports_len)
        txrx_data = {
            'pktq_in': rxq_str,
            'pktq_out': swq_str + ' SWQ{0}'.format(self.lb_index - 1),
            'pipeline_txrx_type': 'RXRX',
            'core': self.gen_core(self.start_core),
        }
        self.pipeline_counter += 1
        return txrx_data

    def generate_lb_data(self):
        pktq_in = self.make_range_str('SWQ{}', self.swq, offset=self.ports_len)
        self.swq += self.ports_len

        offset = self.ports_len * self.worker_threads
        pktq_out = self.make_range_str('SWQ{}', self.swq, offset=offset)
        self.pktq_out = pktq_out.split()

        self.swq += (self.ports_len * self.worker_threads)
        lb_data = {
            'prv_que_handler': self.prv_que_handler,
            'pktq_in': pktq_in,
            'pktq_out': pktq_out,
            'n_vnf_threads': str(self.worker_threads),
            'core': self.gen_core(self.start_core),
        }
        self.pipeline_counter += 1
        return lb_data

    def generate_vnf_data(self):
        if self.lb_config == self.HW_LB:
            port_iter = self.make_port_pairs_iter(
                self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_in = self.make_str('RXQ{}', port_iter)

            self.mul += 1
            port_iter = self.make_port_pairs_iter(
                self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_out = self.make_str('TXQ{}', port_iter)

            pipe_line_data = {
                'pktq_in': pktq_in,
                'pktq_out': pktq_out + ' SWQ{0}'.format(self.swq),
                'prv_que_handler': self.prv_que_handler,
                'core': self.gen_core(self.start_core),
            }
            self.swq += 1
        else:
            pipe_line_data = {
                'pktq_in':
                ' '.join(
                    (self.pktq_out.pop(0) for _ in range(self.ports_len))),
                'pktq_out':
                self.make_range_str('SWQ{}', self.swq, offset=self.ports_len),
                'prv_que_handler':
                self.prv_que_handler,
                'core':
                self.gen_core(self.start_core),
            }
            self.swq += self.ports_len

        if self.vnf_type in ('ACL', 'VFW'):
            pipe_line_data.pop('prv_que_handler')

        if self.vnf_tpl.get('vnf_set'):
            public_ip_port_range_list = self.vnf_tpl[
                'public_ip_port_range'].split(':')
            ip_in_hex = '{:x}'.format(
                int(public_ip_port_range_list[0], 16) + self.lb_index - 1)
            public_ip_port_range_list[0] = ip_in_hex
            self.vnf_tpl['public_ip_port_range'] = ':'.join(
                public_ip_port_range_list)

        self.pipeline_counter += 1
        return pipe_line_data

    def generate_config_data(self):
        self.init_write_parser_template()

        # use master core for master, don't use self.start_core
        self.write_parser.set('PIPELINE0', 'core',
                              self.gen_core(self.master_core))
        arpicmp_data = self.generate_arpicmp_data()
        self.arpicmp_tpl.update(arpicmp_data)
        self.update_write_parser(self.arpicmp_tpl)

        self.start_core += 1
        if self.vnf_type == 'CGNAPT':
            self.pipeline_counter += 1
            self.update_timer()

        for lb in self.lb_to_port_pair_mapping:
            self.lb_index = lb
            self.mul = 0
            port_pair_count = self.lb_to_port_pair_mapping[lb]
            if not self.port_pair_list:
                continue

            self.port_pairs = self.port_pair_list[:port_pair_count]
            self.port_pair_list = self.port_pair_list[port_pair_count:]
            self.ports_len = port_pair_count * 2
            self.set_priv_que_handler()
            if self.lb_config == 'SW':
                txrx_data = self.generate_initial_txrx_data()
                self.txrx_tpl.update(txrx_data)
                self.update_write_parser(self.txrx_tpl)
                self.start_core += 1
                lb_data = self.generate_lb_data()
                self.loadb_tpl.update(lb_data)
                self.update_write_parser(self.loadb_tpl)
                self.start_core += 1

            for i in range(self.worker_threads):
                vnf_data = self.generate_vnf_data()
                if not self.vnf_tpl:
                    self.vnf_tpl = {}
                self.vnf_tpl.update(vnf_data)
                self.update_write_parser(self.vnf_tpl)
                try:
                    self.vnf_tpl.pop('vnf_set')
                except KeyError:
                    pass
                else:
                    self.vnf_tpl.pop('public_ip_port_range')
                self.generate_next_core_id()

            if self.lb_config == 'SW':
                txrx_data = self.generate_final_txrx_data()
                self.txrx_tpl.update(txrx_data)
                self.update_write_parser(self.txrx_tpl)
                self.start_core += 1
            self.vnf_tpl = self.get_config_tpl_data(self.vnf_type)

    def generate_config(self):
        self.port_pair_list, self.networks = self.get_port_pairs(
            self.interfaces)
        self.get_lb_count()
        self.generate_lb_to_port_pair_mapping()
        self.generate_config_data()
        self.write_parser.write(sys.stdout)
        with open(self.tmp_file, 'a') as tfh:
            self.write_parser.write(tfh)

    def generate_link_config(self):

        link_configs = []
        for port_pair in self.port_pair_list:
            for port in port_pair:
                port = port[-1]
                virtual_interface = self.interfaces[int(
                    port)]["virtual-interface"]
                local_ip = virtual_interface["local_ip"]
                netmask = virtual_interface["netmask"]
                port_ip, prefix_len = self.validate_ip_and_prefixlen(
                    local_ip, netmask)
                link_configs.append(
                    LINK_CONFIG_TEMPLATE.format(port, port_ip, prefix_len))

        return ''.join(link_configs)

    def get_route_data(self, src_key, data_key, port):
        route_list = self.vnfd['vdu'][0].get(src_key, [])
        return next((route[data_key]
                     for route in route_list if route['if'] == port), None)

    def get_ports_gateway(self, port):
        return self.get_route_data('routing_table', 'gateway', port)

    def get_ports_gateway6(self, port):
        return self.get_route_data('nd_route_tbl', 'gateway', port)

    def get_netmask_gateway(self, port):
        return self.get_route_data('routing_table', 'netmask', port)

    def get_netmask_gateway6(self, port):
        return self.get_route_data('nd_route_tbl', 'netmask', port)

    def generate_arp_config(self):
        arp_config = []
        for port_pair in self.port_pair_list:
            for port in port_pair:
                gateway = self.get_ports_gateway(port)
                # omit entries with no gateway
                if not gateway:
                    continue
                dst_mac = self.interfaces[int(
                    port[-1])]["virtual-interface"]["dst_mac"]
                arp_config.append(
                    (port[-1], gateway, dst_mac, self.txrx_pipeline))

        return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values)
                          for values in arp_config))

    def generate_arp_config6(self):
        arp_config6 = []
        for port_pair in self.port_pair_list:
            for port in port_pair:
                gateway6 = self.get_ports_gateway6(port)
                # omit entries with no gateway
                if not gateway6:
                    continue
                dst_mac6 = self.interfaces[int(
                    port[-1])]["virtual-interface"]["dst_mac"]
                arp_config6.append(
                    (port[-1], gateway6, dst_mac6, self.txrx_pipeline))

        return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values)
                          for values in arp_config6))

    def generate_action_config(self):
        port_list = []
        for port_pair in self.port_pair_list:
            for port in port_pair:
                port_list.append(port[-1])

        if self.vnf_type == "VFW":
            template = FW_ACTION_TEMPLATE
        else:
            template = ACTION_TEMPLATE

        return ''.join((template.format(port) for port in port_list))

    def get_ip_from_port(self, port):
        return self.make_ip_addr(self.get_ports_gateway(port),
                                 self.get_netmask_gateway(port))

    def get_ip_and_prefixlen_from_ip_of_port(self, port):
        ip_addr = self.get_ip_from_port(port)
        # handle cases with no gateway
        if ip_addr:
            return ip_addr.ip.exploded, ip_addr.network.prefixlen
        else:
            return None, None

    def generate_rule_config(self):
        cmd = 'acl' if self.vnf_type == "ACL" else "vfw"
        rules_config = self.rules if self.rules else ''
        new_rules = []
        new_ipv6_rules = []
        pattern = 'p {0} add {1} {2} {3} {4} {5} 0 65535 0 65535 0 0 {6}'
        for port_pair in self.port_pair_list:
            src_port = int(port_pair[0][-1])
            dst_port = int(port_pair[1][-1])

            src_ip, src_prefix_len = self.get_ip_and_prefixlen_from_ip_of_port(
                port_pair[0])
            dst_ip, dst_prefix_len = self.get_ip_and_prefixlen_from_ip_of_port(
                port_pair[1])
            # ignore entries with empty values
            if all((src_ip, src_prefix_len, dst_ip, dst_prefix_len)):
                new_rules.append(
                    (cmd, self.txrx_pipeline, src_ip, src_prefix_len, dst_ip,
                     dst_prefix_len, dst_port))
                new_rules.append(
                    (cmd, self.txrx_pipeline, dst_ip, dst_prefix_len, src_ip,
                     src_prefix_len, src_port))

            src_ip = self.get_ports_gateway6(port_pair[0])
            src_prefix_len = self.get_netmask_gateway6(port_pair[0])
            dst_ip = self.get_ports_gateway6(port_pair[1])
            dst_prefix_len = self.get_netmask_gateway6(port_pair[1])
            # ignore entries with empty values
            if all((src_ip, src_prefix_len, dst_ip, dst_prefix_len)):
                new_ipv6_rules.append(
                    (cmd, self.txrx_pipeline, src_ip, src_prefix_len, dst_ip,
                     dst_prefix_len, dst_port))
                new_ipv6_rules.append(
                    (cmd, self.txrx_pipeline, dst_ip, dst_prefix_len, src_ip,
                     src_prefix_len, src_port))

        acl_apply = "\np %s applyruleset" % cmd
        new_rules_config = '\n'.join(
            pattern.format(*values)
            for values in chain(new_rules, new_ipv6_rules))
        return ''.join([rules_config, new_rules_config, acl_apply])

    def generate_script_data(self):
        self.port_pair_list, self.networks = self.get_port_pairs(
            self.interfaces)
        self.get_lb_count()
        script_data = {
            'link_config': self.generate_link_config(),
            'arp_config': self.generate_arp_config(),
            'arp_config6': self.generate_arp_config6(),
            'actions': '',
            'rules': '',
        }

        if self.vnf_type in ('ACL', 'VFW'):
            script_data.update({
                'actions': self.generate_action_config(),
                'rules': self.generate_rule_config(),
            })

        return script_data

    def generate_script(self, vnfd, rules=None):
        self.vnfd = vnfd
        self.rules = rules
        script_data = self.generate_script_data()
        script = SCRIPT_TPL.format(**script_data)
        if self.lb_config == self.HW_LB:
            script += 'set fwd rxonly'
            hwlb_tpl = """
set_sym_hash_ena_per_port {0} enable
set_hash_global_config {0} simple_xor ipv4-udp enable
set_sym_hash_ena_per_port {1} enable
set_hash_global_config {1} simple_xor ipv4-udp enable
set_hash_input_set {0} ipv4-udp src-ipv4 udp-src-port add
set_hash_input_set {1} ipv4-udp dst-ipv4 udp-dst-port add
set_hash_input_set {0} ipv6-udp src-ipv6 udp-src-port add
set_hash_input_set {1} ipv6-udp dst-ipv6 udp-dst-port add
"""
            for port_pair in self.port_pair_list:
                script += hwlb_tpl.format(port_pair[0][-1], port_pair[1][-1])
        return script
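The IP helpers at the top of the class delegate to the stdlib ipaddress module; a standalone check of what make_ip_addr produces (sample addresses are assumptions, not taken from any topology file):

import ipaddress

def make_ip_addr(ip, mask_len):
    # mirrors MultiPortConfig.make_ip_addr; six.text_type dropped since
    # plain str is equivalent on Python 3
    try:
        return ipaddress.ip_interface('/'.join([ip, mask_len]))
    except ValueError:
        return None  # skipped later, as in the class

addr = make_ip_addr('152.16.100.10', '255.255.255.0')
assert addr.ip.exploded == '152.16.100.10'
assert addr.network.prefixlen == 24
assert make_ip_addr('not-an-ip', '24') is None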
Example No. 20
    def __spawn_instance(self):
        """
        Create and configure a new KRA instance using pkispawn.
        Creates a configuration file with IPA-specific
        parameters and passes it to the base class to call pkispawn
        """

        # Create an empty and secured file
        (cfg_fd, cfg_file) = tempfile.mkstemp()
        os.close(cfg_fd)
        pent = pwd.getpwnam(self.service_user)
        os.chown(cfg_file, pent.pw_uid, pent.pw_gid)
        self.tmp_agent_db = tempfile.mkdtemp(prefix="tmp-",
                                             dir=paths.VAR_LIB_IPA)
        tmp_agent_pwd = ipautil.ipa_generate_password()

        # Create KRA configuration
        config = ConfigParser()
        config.optionxform = str
        config.add_section("KRA")

        # Security Domain Authentication
        config.set("KRA", "pki_security_domain_https_port", "443")
        config.set("KRA", "pki_security_domain_password", self.admin_password)
        config.set("KRA", "pki_security_domain_user", self.admin_user)

        # issuing ca
        config.set("KRA", "pki_issuing_ca_uri",
                   "https://%s" % ipautil.format_netloc(self.fqdn, 443))

        # Server
        config.set("KRA", "pki_enable_proxy", "True")
        config.set("KRA", "pki_restart_configured_instance", "False")
        config.set("KRA", "pki_backup_keys", "True")
        config.set("KRA", "pki_backup_password", self.admin_password)

        # Client security database
        config.set("KRA", "pki_client_database_dir", self.tmp_agent_db)
        config.set("KRA", "pki_client_database_password", tmp_agent_pwd)
        config.set("KRA", "pki_client_database_purge", "True")
        config.set("KRA", "pki_client_pkcs12_password", self.admin_password)

        # Administrator
        config.set("KRA", "pki_admin_name", self.admin_user)
        config.set("KRA", "pki_admin_uid", self.admin_user)
        config.set("KRA", "pki_admin_email", "root@localhost")
        config.set("KRA", "pki_admin_password", self.admin_password)
        config.set("KRA", "pki_admin_nickname", "ipa-ca-agent")
        config.set("KRA", "pki_admin_subject_dn",
                   str(DN(('cn', 'ipa-ca-agent'), self.subject_base)))
        config.set("KRA", "pki_import_admin_cert", "True")
        config.set("KRA", "pki_admin_cert_file", paths.ADMIN_CERT_PATH)
        config.set("KRA", "pki_client_admin_cert_p12", paths.DOGTAG_ADMIN_P12)

        # Directory server
        config.set("KRA", "pki_ds_ldap_port", "389")
        config.set("KRA", "pki_ds_password", self.dm_password)
        config.set("KRA", "pki_ds_base_dn", six.text_type(self.basedn))
        config.set("KRA", "pki_ds_database", "ipaca")
        config.set("KRA", "pki_ds_create_new_db", "False")

        self._use_ldaps_during_spawn(config)

        # Certificate subject DNs
        config.set("KRA", "pki_subsystem_subject_dn",
                   str(DN(('cn', 'CA Subsystem'), self.subject_base)))
        config.set("KRA", "pki_ssl_server_subject_dn",
                   str(DN(('cn', self.fqdn), self.subject_base)))
        config.set("KRA", "pki_audit_signing_subject_dn",
                   str(DN(('cn', 'KRA Audit'), self.subject_base)))
        config.set(
            "KRA", "pki_transport_subject_dn",
            str(DN(('cn', 'KRA Transport Certificate'), self.subject_base)))
        config.set(
            "KRA", "pki_storage_subject_dn",
            str(DN(('cn', 'KRA Storage Certificate'), self.subject_base)))

        # Certificate nicknames
        # Note that both the server certs and subsystem certs reuse
        # the ca certs.
        config.set("KRA", "pki_subsystem_nickname",
                   "subsystemCert cert-pki-ca")
        config.set("KRA", "pki_ssl_server_nickname", "Server-Cert cert-pki-ca")
        config.set("KRA", "pki_audit_signing_nickname",
                   "auditSigningCert cert-pki-kra")
        config.set("KRA", "pki_transport_nickname",
                   "transportCert cert-pki-kra")
        config.set("KRA", "pki_storage_nickname", "storageCert cert-pki-kra")

        # Shared db settings
        # Needed because CA and KRA share the same database
        # We will use the dbuser created for the CA
        config.set("KRA", "pki_share_db", "True")
        config.set(
            "KRA", "pki_share_dbuser_dn",
            str(DN(('uid', 'pkidbuser'), ('ou', 'people'), ('o', 'ipaca'))))

        if not (os.path.isdir(paths.PKI_TOMCAT_ALIAS_DIR)
                and os.path.isfile(paths.PKI_TOMCAT_PASSWORD_CONF)):
            # generate pin which we know can be used for FIPS NSS database
            pki_pin = ipautil.ipa_generate_password()
            config.set("KRA", "pki_pin", pki_pin)
        else:
            pki_pin = None

        _p12_tmpfile_handle, p12_tmpfile_name = tempfile.mkstemp(dir=paths.TMP)

        if self.clone:
            krafile = self.pkcs12_info[0]
            shutil.copy(krafile, p12_tmpfile_name)
            pent = pwd.getpwnam(self.service_user)
            os.chown(p12_tmpfile_name, pent.pw_uid, pent.pw_gid)

            # Security domain registration
            config.set("KRA", "pki_security_domain_hostname", self.master_host)
            config.set("KRA", "pki_security_domain_https_port", "443")
            config.set("KRA", "pki_security_domain_user", self.admin_user)
            config.set("KRA", "pki_security_domain_password",
                       self.admin_password)

            # Clone
            config.set("KRA", "pki_clone", "True")
            config.set("KRA", "pki_clone_pkcs12_path", p12_tmpfile_name)
            config.set("KRA", "pki_clone_pkcs12_password", self.dm_password)
            config.set("KRA", "pki_clone_setup_replication", "False")
            config.set(
                "KRA", "pki_clone_uri",
                "https://%s" % ipautil.format_netloc(self.master_host, 443))
        else:
            # the admin cert file is needed for the first instance of KRA
            cert = DogtagInstance.get_admin_cert(self)
            # First make sure that the directory exists
            parentdir = os.path.dirname(paths.ADMIN_CERT_PATH)
            if not os.path.exists(parentdir):
                os.makedirs(parentdir)
            with open(paths.ADMIN_CERT_PATH, "w") as admin_path:
                admin_path.write(cert)

        # Generate configuration file
        with open(cfg_file, "w") as f:
            config.write(f)

        try:
            DogtagInstance.spawn_instance(self,
                                          cfg_file,
                                          nolog_list=(self.dm_password,
                                                      self.admin_password,
                                                      pki_pin, tmp_agent_pwd))
        finally:
            os.remove(p12_tmpfile_name)
            os.remove(cfg_file)

        shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)
        self.log.debug("completed creating KRA instance")
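One detail worth noting above: ConfigParser lower-cases option names by default, and setting config.optionxform = str disables that, so keys reach pkispawn exactly as written. A quick standalone check (the key name is illustrative, not a real pkispawn option):

from configparser import ConfigParser

cfg = ConfigParser()
cfg.add_section('KRA')
cfg.set('KRA', 'Pki_Example', '1')
assert cfg.options('KRA') == ['pki_example']   # default optionxform lower-cases

cfg2 = ConfigParser()
cfg2.optionxform = str
cfg2.add_section('KRA')
cfg2.set('KRA', 'Pki_Example', '1')
assert cfg2.options('KRA') == ['Pki_Example']  # case preserved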
Example No. 21
File: zypper.py Project: D4N/kiwi
class RepositoryZypper(RepositoryBase):
    """
    **Implements repo handling for zypper package manager**

    :param str shared_zypper_dir: shared directory between image root
        and build system root
    :param str runtime_zypper_config_file: zypper runtime config file name
    :param str runtime_zypp_config_file: libzypp runtime config file name
    :param list zypper_args: zypper caller args plus additional custom args
    :param dict command_env: customized os.environ for zypper
    :param object runtime_zypper_config: instance of :class:`ConfigParser`
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom zypper arguments and create runtime configuration
        and environment

        :param list custom_args: zypper arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        self.gpgcheck = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments used for zypp config only
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpgcheck = True

        self.locale = list(item for item in self.custom_args
                           if '_install_langs' in item)
        if self.locale:
            self.custom_args.remove(self.locale[0])

        self.repo_names = []

        # By default zypper points all actions into the root
        # directory of the image system. This information is passed
        # as arguments to zypper and adapted if the call runs as a
        # chrooted operation. Therefore the use of the shared location
        # via RootBind::mount_shared_directory is optional, but
        # recommended in order to make use of the repo cache
        manager_base = self.root_dir + self.shared_location

        self.shared_zypper_dir = {
            'pkg-cache-dir': os.sep.join([manager_base, 'packages']),
            'reposd-dir': os.sep.join([manager_base, 'zypper/repos']),
            'credentials-dir':
            os.sep.join([manager_base, 'zypper/credentials']),
            'solv-cache-dir': os.sep.join([manager_base, 'zypper/solv']),
            'raw-cache-dir': os.sep.join([manager_base, 'zypper/raw']),
            'cache-dir': os.sep.join([manager_base, 'zypper'])
        }

        self.runtime_zypper_config_file = NamedTemporaryFile(dir=self.root_dir)
        self.runtime_zypp_config_file = NamedTemporaryFile(dir=self.root_dir)

        self.zypper_args = [
            '--non-interactive', '--pkg-cache-dir',
            self.shared_zypper_dir['pkg-cache-dir'], '--reposd-dir',
            self.shared_zypper_dir['reposd-dir'], '--solv-cache-dir',
            self.shared_zypper_dir['solv-cache-dir'], '--cache-dir',
            self.shared_zypper_dir['cache-dir'], '--raw-cache-dir',
            self.shared_zypper_dir['raw-cache-dir'], '--config',
            self.runtime_zypper_config_file.name
        ] + self.custom_args

        self.command_env = self._create_zypper_runtime_environment()

        # config file parameters for zypper tool
        self.runtime_zypper_config = ConfigParser()
        self.runtime_zypper_config.add_section('main')

        # config file parameters for libzypp library
        self.runtime_zypp_config = ConfigParser()
        self.runtime_zypp_config.add_section('main')
        self.runtime_zypp_config.set('main', 'credentials.global.dir',
                                     self.shared_zypper_dir['credentials-dir'])
        if self.exclude_docs:
            self.runtime_zypp_config.set('main', 'rpm.install.excludedocs',
                                         'yes')

        if self.gpgcheck:
            self.runtime_zypp_config.set('main', 'gpgcheck', '1')
        else:
            self.runtime_zypp_config.set('main', 'gpgcheck', '0')

        self._write_runtime_config()

    def setup_package_database_configuration(self):
        """
        Setup rpm macros for bootstrapping and image building

        1. Create the rpm image macro which persists during the build
        2. Create the rpm bootstrap macro to make sure for bootstrapping
           the rpm database location matches the host rpm database setup.
           This macro only persists during the bootstrap phase
        3. Create zypper compat link
        """
        rpmdb = RpmDataBase(self.root_dir,
                            Defaults.get_custom_rpm_image_macro_name())
        if self.locale:
            rpmdb.set_macro_from_string(self.locale[0])
        rpmdb.write_config()

        RpmDataBase(self.root_dir).set_database_to_host_path()
        # Zypper compat code:
        #
        # Manually add the compat link /var/lib/rpm that points to the
        # rpmdb location as it is configured in the host rpm setup. The
        # host rpm setup is taken into account because import_trusted_keys
        # is called during the bootstrap phase, where rpm (and with it
        # zypper) is called from the host
        #
        # Usually the package manager is expected to read the
        # signing keys from the rpm database setup provisioned by rpm
        # itself (macro level), but zypper doesn't take the rpm macro
        # setup into account and relies on a hard coded path which we
        # can only provide as a symlink.
        #
        # That symlink is usually created by the rpm package when it gets
        # installed. However, at the early phase when we import the
        # signing keys, no rpm is installed yet and no such symlink
        # exists. Thus we have to create it here and hope to get rid of
        # it in the future.
        #
        # For further details on the motivation in zypper please
        # refer to bsc#1112357
        rpmdb.init_database()
        Path.create(os.sep.join([self.root_dir, 'var', 'lib']))
        Command.run(
            [
                'ln', '-s', ''.join(
                    ['../..', rpmdb.rpmdb_host.expand_query('%_dbpath')]
                ),
                os.sep.join([self.root_dir, 'var', 'lib', 'rpm'])
            ],
            raise_on_error=False
        )

    def use_default_location(self):
        """
        Setup zypper repository operations to store all data
        in the default places
        """
        self.shared_zypper_dir['reposd-dir'] = \
            self.root_dir + '/etc/zypp/repos.d'
        self.shared_zypper_dir['credentials-dir'] = \
            self.root_dir + '/etc/zypp/credentials.d'
        self.zypper_args = [
            '--non-interactive',
        ] + self.custom_args
        self.command_env = dict(os.environ, LANG='C')

    def runtime_config(self):
        """
        zypper runtime configuration and environment
        """
        return {
            'zypper_args': self.zypper_args,
            'command_env': self.command_env
        }

    def add_repo(self,
                 name,
                 uri,
                 repo_type='rpm-md',
                 prio=None,
                 dist=None,
                 components=None,
                 user=None,
                 secret=None,
                 credentials_file=None,
                 repo_gpgcheck=None,
                 pkg_gpgcheck=None):
        """
        Add zypper repository

        :param str name: repository name
        :param str uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        :param user: credentials username
        :param secret: credentials password
        :param credentials_file: zypper credentials file
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        if credentials_file:
            repo_secret = os.sep.join(
                [self.shared_zypper_dir['credentials-dir'], credentials_file])
            if os.path.exists(repo_secret):
                Path.wipe(repo_secret)

            if user and secret:
                uri = ''.join([uri, '?credentials=', credentials_file])
                with open(repo_secret, 'w') as credentials:
                    credentials.write('username={0}{1}'.format(
                        user, os.linesep))
                    credentials.write('password={0}{1}'.format(
                        secret, os.linesep))

        repo_file = ''.join(
            [self.shared_zypper_dir['reposd-dir'], '/', name, '.repo'])
        self.repo_names.append(''.join([name, '.repo']))

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        zypper_addrepo_command = ['zypper'] + self.zypper_args + [
            '--root', self.root_dir, 'addrepo', '--refresh',
            '--keep-packages' if Uri(uri).is_remote() else '--no-keep-packages',
            '--no-check', uri, name
        ]
        try:
            Command.run(zypper_addrepo_command, self.command_env)
        except Exception:
            # for whatever reason zypper sometimes fails with
            # a 'failed to cache rpm database' error. I could not
            # find any reason why, and a simple retry of the exact
            # same command in the exact same environment works.
            # Thus the simple workaround to this problem is to
            # retry once before really failing
            Command.run(zypper_addrepo_command, self.command_env)

        if prio or repo_gpgcheck is not None or pkg_gpgcheck is not None:
            repo_config = ConfigParser()
            repo_config.read(repo_file)
            if repo_gpgcheck is not None:
                repo_config.set(name, 'repo_gpgcheck',
                                '1' if repo_gpgcheck else '0')
            if pkg_gpgcheck is not None:
                repo_config.set(name, 'pkg_gpgcheck',
                                '1' if pkg_gpgcheck else '0')
            if prio:
                repo_config.set(name, 'priority', format(prio))
            with open(repo_file, 'w') as repo:
                repo_config.write(repo)
        self._restore_package_cache()

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        rpmdb = RpmDataBase(self.root_dir)
        for key in signing_keys:
            rpmdb.import_signing_key_to_image(key)

    def delete_repo(self, name):
        """
        Delete zypper repository

        :param str name: repository name
        """
        Command.run(['zypper'] + self.zypper_args +
                    ['--root', self.root_dir, 'removerepo', name],
                    self.command_env)

    def delete_all_repos(self):
        """
        Delete all zypper repositories
        """
        Path.wipe(self.shared_zypper_dir['reposd-dir'])
        Path.create(self.shared_zypper_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete zypper repository cache

        The cache data for each repository is stored in a set of
        directories named after the repository. The method
        deletes these directories to clean up the cache information

        :param str name: repository name
        """
        Path.wipe(os.sep.join([self.shared_zypper_dir['pkg-cache-dir'], name]))
        Path.wipe(os.sep.join([self.shared_zypper_dir['solv-cache-dir'],
                               name]))
        Path.wipe(os.sep.join([self.shared_zypper_dir['raw-cache-dir'], name]))

    def cleanup_unused_repos(self):
        """
        Delete unused zypper repositories

        zypper creates a system solvable which is unwanted for the
        purpose of building images. In addition zypper fails with
        an error message 'Failed to cache rpm database' if such a
        system solvable exists and a new root system is created

        All other repository configurations which are not used for
        this build must be removed too, otherwise they are taken into
        account for the package installations
        """
        solv_dir = self.shared_zypper_dir['solv-cache-dir']
        Path.wipe(solv_dir + '/@System')

        repos_dir = self.shared_zypper_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_zypper_runtime_environment(self):
        for zypper_dir in list(self.shared_zypper_dir.values()):
            Path.create(zypper_dir)
        return dict(os.environ,
                    LANG='C',
                    ZYPP_CONF=self.runtime_zypp_config_file.name)

    def _write_runtime_config(self):
        with open(self.runtime_zypper_config_file.name, 'w') as config:
            self.runtime_zypper_config.write(config)
        with open(self.runtime_zypp_config_file.name, 'w') as config:
            self.runtime_zypp_config.write(config)

    def _backup_package_cache(self):
        """
        Preserve the package cache, which would otherwise be removed by
        zypper if no repo file is found. This situation is normal for
        an image build process, which sets up and removes repos for
        building at runtime
        """
        self._move_package_cache(backup=True)

    def _restore_package_cache(self):
        """
        restore preserved package cache at the location passed to zypper
        """
        self._move_package_cache(restore=True)

    def _move_package_cache(self, backup=False, restore=False):
        package_cache = self.shared_location + '/packages'
        package_cache_moved = package_cache + '.moved'
        if backup and os.path.exists(package_cache):
            Command.run(['mv', '-f', package_cache, package_cache_moved])
        elif restore and os.path.exists(package_cache_moved):
            Command.run(['mv', '-f', package_cache_moved, package_cache])

    def __del__(self):
        self._restore_package_cache()
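
A minimal, runnable sketch of the post-processing step in add_repo above: seed a .repo file the way 'zypper addrepo' would, re-read it with ConfigParser, toggle the gpgcheck flags, set a priority, and write it back. The path, section name and values are illustrative assumptions.

from configparser import ConfigParser

repo_file = '/tmp/example.repo'  # illustrative path and values
name = 'example'

# seed a repo file the way 'zypper addrepo' would create it
seed = ConfigParser()
seed.add_section(name)
seed.set(name, 'baseurl', 'https://download.example.com/repo')
with open(repo_file, 'w') as repo:
    seed.write(repo)

# post-process it like add_repo: flip the gpgcheck flags, set a priority
repo_config = ConfigParser()
repo_config.read(repo_file)
repo_config.set(name, 'repo_gpgcheck', '1')
repo_config.set(name, 'pkg_gpgcheck', '0')
repo_config.set(name, 'priority', format(42))
with open(repo_file, 'w') as repo:
    repo_config.write(repo)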
Ejemplo n.º 22
0
    def __init__(self, parser=None, defaults={}, writeargstofile=False):
        if parser is None:
            parser = argparse.ArgumentParser(
                description="Default psycodict parser")

            parser.add_argument(
                "-c",
                "--config-file",
                dest="config_file",
                metavar="FILE",
                help="configuration file [default: %(default)s]",
                default=defaults.get("config_file", "config.ini"),
            )
            parser.add_argument(
                "-s",
                "--secrets-file",
                dest="secrets_file",
                metavar="SECRETS",
                help="secrets file [default: %(default)s]",
                default=defaults.get("secrets_file", "secrets.ini"),
            )

            logginggroup = parser.add_argument_group("Logging options:")
            logginggroup.add_argument(
                "--slowcutoff",
                dest="logging_slowcutoff",
                metavar="SLOWCUTOFF",
                help="threshold to log slow queries [default: %(default)s]",
                default=defaults.get("logging_slowcutoff", 0.1),
                type=float,
            )

            logginggroup.add_argument(
                "--slowlogfile",
                help="logfile for slow queries [default: %(default)s]",
                dest="logging_slowlogfile",
                metavar="FILE",
                default=defaults.get("logging_slowlogfile",
                                     "slow_queries.log"),
            )

            # PostgreSQL options
            postgresqlgroup = parser.add_argument_group("PostgreSQL options")
            postgresqlgroup.add_argument(
                "--postgresql-host",
                dest="postgresql_host",
                metavar="HOST",
                help="PostgreSQL server host or socket directory "
                     "[default: %(default)s]",
                default=defaults.get("postgresql_host", "localhost"),
            )
            postgresqlgroup.add_argument(
                "--postgresql-port",
                dest="postgresql_port",
                metavar="PORT",
                type=int,
                help="PostgreSQL server port [default: %(default)d]",
                default=defaults.get("postgresql_port", 5432),
            )

            postgresqlgroup.add_argument(
                "--postgresql-user",
                dest="postgresql_user",
                metavar="USER",
                help="PostgreSQL username [default: %(default)s]",
                default=defaults.get("postgresql_user", "postgres"),
            )

            postgresqlgroup.add_argument(
                "--postgresql-pass",
                dest="postgresql_password",
                metavar="PASS",
                help="PostgreSQL password [default: %(default)s]",
                default=defaults.get("postgres_password", ""),
            )

            postgresqlgroup.add_argument(
                "--postgresql-dbname",
                dest="postgresql_dbname",
                metavar="DBNAME",
                help="PostgreSQL database name [default: %(default)s]",
                default="lmfdb",
            )

        # 1: parsing command-line arguments
        if writeargstofile:
            args = parser.parse_args()
        else:
            # only read config file
            args = parser.parse_args([])
        args_dict = vars(args)
        default_arguments_dict = vars(parser.parse_args([]))
        if writeargstofile:
            default_arguments_dict = dict(args_dict)

        del default_arguments_dict["config_file"]
        del default_arguments_dict["secrets_file"]

        self.default_args = {}
        for key, val in default_arguments_dict.items():
            sec, opt = key.split("_", 1)
            if sec not in self.default_args:
                self.default_args[sec] = {}
            self.default_args[sec][opt] = str(val)

        # reading the config file, creating it if necessary
        # 2/1: does config file exist?
        if not os.path.exists(args.config_file):
            if not writeargstofile:
                print(
                    "Config file: %s not found, creating it with the default values"
                    % args.config_file)
            else:
                print(
                    "Config file: %s not found, creating it with the passed values"
                    % args.config_file)
            _cfgp = ConfigParser()

            # create sections
            for sec, options in self.default_args.items():
                _cfgp.add_section(sec)
                for opt, val in options.items():
                    _cfgp.set(sec, opt, str(val))

            with open(args.config_file, "w") as configfile:
                _cfgp.write(configfile)

        # 2/2: reading the config file
        _cfgp = ConfigParser()
        _cfgp.read(args.config_file)
        # 2/3: reading the secrets file, which can override the config
        if os.path.exists(args.secrets_file):
            _cfgp.read(args.secrets_file)

        # 3: override specific settings
        def all(sep="_"):
            ret = {}
            for s in _cfgp.sections():
                for k, v in _cfgp.items(s):
                    ret["%s%s%s" % (s, sep, k)] = v
            return ret

        all_set = all()

        for key, val in default_arguments_dict.items():
            # if a nondefault value was passed through command line arguments set it
            # or if a default value was not set in the config file
            if args_dict[key] != val or key not in all_set:
                if "_" in key:
                    sec, opt = key.split("_")
                else:
                    sec = "misc"
                    opt = key
                _cfgp.set(sec, opt, str(args_dict[key]))

        # We can derive the types from the parser
        type_dict = {}
        for action in parser._actions:
            if isinstance(action, (argparse._StoreTrueAction,
                                   argparse._StoreFalseAction)):
                type_dict[action.dest] = strbool
            else:
                type_dict[action.dest] = action.type

        def get(section, key):
            val = _cfgp.get(section, key)
            full = section + "_" + key
            type_func = type_dict.get(full)
            if type_func is not None:
                val = type_func(val)
            return val

        self.options = defaultdict(dict)
        for sec, options in self.default_args.items():
            for opt in options:
                self.options[sec][opt] = get(sec, opt)

        self.extra_options = {}  # not stored in the config file
        for key, val in args_dict.items():
            if key not in default_arguments_dict:
                self.extra_options[key] = val
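
A compact sketch of the configuration layering performed above: built-in defaults are written to config.ini on first run, and an optional secrets.ini overrides matching options afterwards. The file names and the defaults dict are assumptions for illustration.

import os
from configparser import ConfigParser

config_file = 'config.ini'    # assumed names, matching the defaults above
secrets_file = 'secrets.ini'

default_args = {'postgresql': {'host': 'localhost', 'port': '5432'}}

# 1: create the config file from the defaults if it does not exist yet
if not os.path.exists(config_file):
    seed = ConfigParser()
    for sec, options in default_args.items():
        seed.add_section(sec)
        for opt, val in options.items():
            seed.set(sec, opt, str(val))
    with open(config_file, 'w') as configfile:
        seed.write(configfile)

# 2: read the config file, then let the secrets file override it
cfgp = ConfigParser()
cfgp.read(config_file)
if os.path.exists(secrets_file):
    cfgp.read(secrets_file)

print(cfgp.get('postgresql', 'host'))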
Ejemplo n.º 23
0
class RepositoryZypper(RepositoryBase):
    """
    Implements repo handling for zypper package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom zypper arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_zypper_dir`
            shared directory between image root and build system root

        * :attr:`runtime_zypper_config_file`
            zypper runtime config file name

        * :attr:`runtime_zypp_config_file`
            libzypp runtime config file name

        * :attr:`zypper_args`
            zypper caller args plus additional custom args

        * :attr:`command_env`
            customized os.environ for zypper

        * :attr:`runtime_zypper_config`
            Instance of ConfigParser

        :param list custom_args: zypper arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        self.gpgcheck = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments used for zypp config only
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpgcheck = True

        self.repo_names = []

        # zypper by default points all actions into the root
        # directory of the image system. This information is passed
        # as arguments to zypper and adapted if the call runs as a
        # chrooted operation. Therefore the use of the shared location
        # via RootBind::mount_shared_directory is optional, but
        # recommended to make use of the repo cache
        manager_base = self.root_dir + self.shared_location

        self.shared_zypper_dir = {
            'pkg-cache-dir': os.sep.join(
                [manager_base, 'packages']
            ),
            'reposd-dir': os.sep.join(
                [manager_base, 'zypper/repos']
            ),
            'credentials-dir': os.sep.join(
                [manager_base, 'zypper/credentials']
            ),
            'solv-cache-dir': os.sep.join(
                [manager_base, 'zypper/solv']
            ),
            'raw-cache-dir': os.sep.join(
                [manager_base, 'zypper/raw']
            ),
            'cache-dir': os.sep.join(
                [manager_base, 'zypper']
            )
        }

        self.runtime_zypper_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )
        self.runtime_zypp_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.zypper_args = [
            '--non-interactive',
            '--pkg-cache-dir', self.shared_zypper_dir['pkg-cache-dir'],
            '--reposd-dir', self.shared_zypper_dir['reposd-dir'],
            '--solv-cache-dir', self.shared_zypper_dir['solv-cache-dir'],
            '--cache-dir', self.shared_zypper_dir['cache-dir'],
            '--raw-cache-dir', self.shared_zypper_dir['raw-cache-dir'],
            '--config', self.runtime_zypper_config_file.name
        ] + self.custom_args

        self.command_env = self._create_zypper_runtime_environment()

        # config file parameters for zypper tool
        self.runtime_zypper_config = ConfigParser()
        self.runtime_zypper_config.add_section('main')

        # config file parameters for libzypp library
        self.runtime_zypp_config = ConfigParser()
        self.runtime_zypp_config.add_section('main')
        self.runtime_zypp_config.set(
            'main', 'credentials.global.dir',
            self.shared_zypper_dir['credentials-dir']
        )
        if self.exclude_docs:
            self.runtime_zypp_config.set(
                'main', 'rpm.install.excludedocs', 'yes'
            )

        if self.gpgcheck:
            self.runtime_zypp_config.set(
                'main', 'gpgcheck', '1'
            )
        else:
            self.runtime_zypp_config.set(
                'main', 'gpgcheck', '0'
            )

        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup zypper repository operations to store all data
        in the default places
        """
        self.shared_zypper_dir['reposd-dir'] = \
            self.root_dir + '/etc/zypp/repos.d'
        self.shared_zypper_dir['credentials-dir'] = \
            self.root_dir + '/etc/zypp/credentials.d'
        self.zypper_args = [
            '--non-interactive',
        ] + self.custom_args
        self.command_env = dict(os.environ, LANG='C')

    def runtime_config(self):
        """
        zypper runtime configuration and environment
        """
        return {
            'zypper_args': self.zypper_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add zypper repository

        :param string name: repository name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        :param user: credentials username
        :param secret: credentials password
        :param credentials_file: zypper credentials file
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        if credentials_file:
            repo_secret = os.sep.join(
                [self.shared_zypper_dir['credentials-dir'], credentials_file]
            )
            if os.path.exists(repo_secret):
                Path.wipe(repo_secret)

            if user and secret:
                uri = ''.join([uri, '?credentials=', credentials_file])
                with open(repo_secret, 'w') as credentials:
                    credentials.write('username={0}{1}'.format(user, os.linesep))
                    credentials.write('password={0}{1}'.format(secret, os.linesep))

        repo_file = ''.join(
            [self.shared_zypper_dir['reposd-dir'], '/', name, '.repo']
        )
        self.repo_names.append(''.join([name, '.repo']))

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir,
                'addrepo',
                '--refresh',
                '--type', self._translate_repo_type(repo_type),
                '--keep-packages',
                '-C',
                uri,
                name
            ],
            self.command_env
        )
        if prio or repo_gpgcheck is not None or pkg_gpgcheck is not None:
            repo_config = ConfigParser()
            repo_config.read(repo_file)
            if repo_gpgcheck is not None:
                repo_config.set(
                    name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
                )
            if pkg_gpgcheck is not None:
                repo_config.set(
                    name, 'pkg_gpgcheck', '1' if pkg_gpgcheck else '0'
                )
            if prio:
                repo_config.set(
                    name, 'priority', format(prio)
                )
            with open(repo_file, 'w') as repo:
                repo_config.write(repo)
        self._restore_package_cache()

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        for key in signing_keys:
            Command.run(['rpm', '--root', self.root_dir, '--import', key])

    def delete_repo(self, name):
        """
        Delete zypper repository

        :param string name: repository name
        """
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir, 'removerepo', name
            ],
            self.command_env
        )

    def delete_all_repos(self):
        """
        Delete all zypper repositories
        """
        Path.wipe(self.shared_zypper_dir['reposd-dir'])
        Path.create(self.shared_zypper_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete zypper repository cache

        The cache data for each repository is stored in a set of
        directories named after the repository. The method
        deletes these directories to clean up the cache information

        :param string name: repository name
        """
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['pkg-cache-dir'], name])
        )
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['solv-cache-dir'], name])
        )
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['raw-cache-dir'], name])
        )

    def cleanup_unused_repos(self):
        """
        Delete unused zypper repositories

        zypper creates a system solvable which is unwanted for the
        purpose of building images. In addition zypper fails with
        an error message 'Failed to cache rpm database' if such a
        system solvable exists and a new root system is created

        All other repository configurations which are not used for
        this build must be removed too, otherwise they are taken into
        account for the package installations
        """
        solv_dir = self.shared_zypper_dir['solv-cache-dir']
        Path.wipe(solv_dir + '/@System')

        repos_dir = self.shared_zypper_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_zypper_runtime_environment(self):
        for zypper_dir in list(self.shared_zypper_dir.values()):
            Path.create(zypper_dir)
        return dict(
            os.environ,
            LANG='C',
            ZYPP_CONF=self.runtime_zypp_config_file.name
        )

    def _write_runtime_config(self):
        with open(self.runtime_zypper_config_file.name, 'w') as config:
            self.runtime_zypper_config.write(config)
        with open(self.runtime_zypp_config_file.name, 'w') as config:
            self.runtime_zypp_config.write(config)

    def _translate_repo_type(self, repo_type):
        """
            Translate kiwi supported common repo type names from the schema
            into the name the zyper package manager understands
        """
        zypper_type_for = {
            'rpm-md': 'YUM',
            'rpm-dir': 'Plaindir',
            'yast2': 'YaST'
        }
        try:
            return zypper_type_for[repo_type]
        except Exception:
            raise KiwiRepoTypeUnknown(
                'Unsupported zypper repo type: %s' % repo_type
            )

    def _backup_package_cache(self):
        """
        Preserve the package cache, which would otherwise be removed by
        zypper if no repo file is found. This situation is normal for
        an image build process, which sets up and removes repos for
        building at runtime
        """
        self._move_package_cache(backup=True)

    def _restore_package_cache(self):
        """
        restore preserved package cache at the location passed to zypper
        """
        self._move_package_cache(restore=True)

    def _move_package_cache(self, backup=False, restore=False):
        package_cache = self.shared_location + '/packages'
        package_cache_moved = package_cache + '.moved'
        if backup and os.path.exists(package_cache):
            Command.run(
                ['mv', '-f', package_cache, package_cache_moved]
            )
        elif restore and os.path.exists(package_cache_moved):
            Command.run(
                ['mv', '-f', package_cache_moved, package_cache]
            )

    def __del__(self):
        self._restore_package_cache()
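
A short sketch of the credentials convention used by add_repo above: the username/password pair goes into a separate credentials file, and the repository URI is rewritten to reference it through a ?credentials= query parameter. The directory and all values are illustrative assumptions.

import os

credentials_dir = '/tmp/zypper-credentials'  # illustrative values throughout
credentials_file = 'example.cred'
user, secret = 'builder', 'not-a-real-password'
uri = 'https://updates.example.com/repo'

os.makedirs(credentials_dir, exist_ok=True)
repo_secret = os.sep.join([credentials_dir, credentials_file])

# zypper locates the file through the ?credentials= query parameter
uri = ''.join([uri, '?credentials=', credentials_file])
with open(repo_secret, 'w') as credentials:
    credentials.write('username={0}{1}'.format(user, os.linesep))
    credentials.write('password={0}{1}'.format(secret, os.linesep))

print(uri)  # https://updates.example.com/repo?credentials=example.cred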
Ejemplo n.º 24
0
    def __init__(self, writeargstofile=False):
        default_config_file = abs_path_lmfdb("config.ini")

        # 1: parsing command-line arguments
        parser = argparse.ArgumentParser(
            description="LMFDB - The L-functions and modular forms database")
        parser.add_argument(
            "-c",
            "--config-file",
            dest="config_file",
            metavar="FILE",
            help="configuration file [default: %(default)s]",
            default=default_config_file,
        )

        parser.add_argument(
            "-d",
            "--debug",
            action="store_true",
            dest="core_debug",
            help="enable debug mode",
        )

        parser.add_argument(
            "--color",
            dest="core_color",
            metavar="COLOR",
            help="color template (see lmfdb/utils/color.py)",
            default=19,
            type=int,
        )

        parser.add_argument(
            "-p",
            "--port",
            dest="web_port",
            metavar="PORT",
            help="the LMFDB server will be running on PORT "
                 "[default: %(default)d]",
            type=int,
            default=37777,
        )
        parser.add_argument(
            "-b",
            "--bind_ip",
            dest="web_bindip",
            metavar="HOST",
            help="the LMFDB server will be listening to HOST "
                 "[default: %(default)s]",
            default="127.0.0.1",
        )

        logginggroup = parser.add_argument_group("Logging options:")
        logginggroup.add_argument(
            "--logfile",
            help="logfile for flask [default: %(default)s]",
            dest="logging_logfile",
            metavar="FILE",
            default="flasklog",
        )

        logginggroup.add_argument("--logfocus",
                                  help="name of a logger to focus on",
                                  default=argparse.SUPPRESS)

        logginggroup.add_argument(
            "--slowcutoff",
            dest="logging_slowcutoff",
            metavar="SLOWCUTOFF",
            help="threshold to log slow queries [default: %(default)s]",
            default=0.1,
            type=float,
        )

        logginggroup.add_argument(
            "--slowlogfile",
            help="logfile for slow queries [default: %(default)s]",
            dest="logging_slowlogfile",
            metavar="FILE",
            default="slow_queries.log",
        )

        # PostgreSQL options
        postgresqlgroup = parser.add_argument_group("PostgreSQL options")
        postgresqlgroup.add_argument(
            "--postgresql-host",
            dest="postgresql_host",
            metavar="HOST",
            help="PostgreSQL server host or socket directory "
                 "[default: %(default)s]",
            default="devmirror.lmfdb.xyz",
        )
        postgresqlgroup.add_argument(
            "--postgresql-port",
            dest="postgresql_port",
            metavar="PORT",
            type=int,
            help="PostgreSQL server port [default: %(default)d]",
            default=5432,
        )

        postgresqlgroup.add_argument(
            "--postgresql-user",
            dest="postgresql_user",
            metavar="USER",
            help="PostgreSQL username [default: %(default)s]",
            default="lmfdb",
        )

        postgresqlgroup.add_argument(
            "--postgresql-pass",
            dest="postgresql_password",
            metavar="PASS",
            help="PostgreSQL password [default: %(default)s]",
            default="lmfdb",
        )

        postgresqlgroup.add_argument(
            "--postgresql-dbname",
            dest="postgresql_dbname",
            metavar="DBNAME",
            help="PostgreSQL database name [default: %(default)s]",
            default="lmfdb",
        )

        # undocumented options
        parser.add_argument(
            "--enable-profiler",
            dest="profiler",
            help=argparse.SUPPRESS,
            action="store_true",
            default=argparse.SUPPRESS,
        )

        # undocumented flask options
        parser.add_argument(
            "--enable-reloader",
            dest="use_reloader",
            help=argparse.SUPPRESS,
            action="store_true",
            default=argparse.SUPPRESS,
        )

        parser.add_argument(
            "--disable-reloader",
            dest="use_reloader",
            help=argparse.SUPPRESS,
            action="store_false",
            default=argparse.SUPPRESS,
        )

        parser.add_argument(
            "--enable-debugger",
            dest="use_debugger",
            help=argparse.SUPPRESS,
            action="store_true",
            default=argparse.SUPPRESS,
        )

        parser.add_argument(
            "--disable-debugger",
            dest="use_debugger",
            help=argparse.SUPPRESS,
            action="store_false",
            default=argparse.SUPPRESS,
        )
        if os.path.split(sys.argv[0])[-1] == "start-lmfdb.py" or writeargstofile:
            args = parser.parse_args()
        else:
            # only read config file
            args = parser.parse_args([])
        args_dict = vars(args)
        default_arguments_dict = vars(parser.parse_args([]))
        if writeargstofile:
            default_arguments_dict = dict(args_dict)

        del default_arguments_dict["config_file"]

        self.default_args = {}
        for key, val in default_arguments_dict.items():
            sec, opt = key.split("_", 1)
            if sec not in self.default_args:
                self.default_args[sec] = {}
            self.default_args[sec][opt] = str(val)

        # reading the config file, creating it if necessary
        # 2/1: does config file exist?
        if not os.path.exists(args.config_file):
            if not writeargstofile:
                print(
                    "Config file: %s not found, creating it with the default values"
                    % args.config_file)
            else:
                print(
                    "Config file: %s not found, creating it with the passed values"
                    % args.config_file)
            _cfgp = ConfigParser()

            # create sections
            _cfgp.add_section("core")
            _cfgp.add_section("web")
            _cfgp.add_section("postgresql")
            _cfgp.add_section("logging")

            for sec, options in self.default_args.items():
                for opt, val in options.items():
                    _cfgp.set(sec, opt, str(val))

            with open(args.config_file, "w") as configfile:
                _cfgp.write(configfile)

        # 2/2: reading the config file
        _cfgp = ConfigParser()
        _cfgp.read(args.config_file)

        # 3: override specific settings
        def all(sep="_"):
            ret = {}
            for s in _cfgp.sections():
                for k, v in _cfgp.items(s):
                    ret["%s%s%s" % (s, sep, k)] = v
            return ret

        all_set = all()

        for key, val in default_arguments_dict.items():
            # if a nondefault value was passed through command line arguments set it
            # or if a default value was not set in the config file
            if args_dict[key] != val or key not in all_set:
                sec, opt = key.split("_", 1)
                _cfgp.set(sec, opt, str(args_dict[key]))

        # some generic functions
        def get(section, key):
            return _cfgp.get(section, key)

        def getint(section, key):
            return _cfgp.getint(section, key)

        def getboolean(section, key):
            return _cfgp.getboolean(section, key)

        self.flask_options = {
            "port": getint("web", "port"),
            "host": get("web", "bindip"),
            "debug": getboolean("core", "debug"),
        }
        for opt in ["use_debugger", "use_reloader", "profiler"]:
            if opt in args_dict:
                self.flask_options[opt] = args_dict[opt]

        self.color = getint("core", "color")

        self.postgresql_options = {
            "port": getint("postgresql", "port"),
            "host": get("postgresql", "host"),
            "dbname": get("postgresql", "dbname"),
        }

        # optional items
        for elt in ["user", "password"]:
            if _cfgp.has_option("postgresql", elt):
                self.postgresql_options[elt] = get("postgresql", elt)

        self.logging_options = {
            "logfile": get("logging", "logfile"),
            "slowcutoff": float(get("logging", "slowcutoff")),
            "slowlogfile": get("logging", "slowlogfile"),
        }
        if "logfocus" in args_dict:
            self.logging_options["logfocus"] = args_dict["logfocus"]
        if _cfgp.has_option("logging", "editor"):
            self.logging_options["editor"] = get("logging", "editor")
Ejemplo n.º 25
0
class Configuration(object):
    defaults = {}

    def __init__(self, filename=None):
        self._config = ConfigParser()
        self._set_defaults()
        self._state_drivers = {}
        if filename is not None:
            self.load(filename)

    def _set_defaults(self):
        """Set defaults for config
        """
        self._config.add_section('main')
        for key, value in six.iteritems(self.defaults):
            if isinstance(value, dict):
                self._config.add_section(key)
                for subkey, subvalue in six.iteritems(value):
                    self._config.set(key, subkey, subvalue)
            else:
                self._config.set('main', key, value)

    def load(self, filename):
        """Load the configuration by filename
        """
        self._config.read(filename)

    def save(self, filename):
        """Save the configuration to a file
        """
        with open(filename, 'w') as handle:
            self._config.write(handle)

    @staticmethod
    def sanitize(items):
        options = {}
        for key, value in items:
            if key.endswith('[int]'):
                options[key[:-5]] = int(value)
            elif key.endswith('[bool]'):
                value = value.lower()
                if value in BOOL_MAP[True]:
                    value = True
                elif value in BOOL_MAP[False]:
                    value = False
                else:
                    raise ValueError('Expected boolean for {}'.format(key))
                options[key[:-6]] = value
            else:
                options[key] = value
        return options

    def __getitem__(self, name):
        if self._config.has_section(name):
            return self.sanitize(self._config.items(name))
        elif name == 'main':
            raise ValueError('Missing main section of configuration')
        return self['main'][name]

    def state_driver(self, name='ai'):
        """Get an instance of the state driver
        """
        from database import state

        if name not in self._state_drivers:
            extras = self[name]
            driver = extras.pop('state-driver')
            if driver == 'redis':
                self._state_drivers[name] = state.RedisDriver(self, extras)
            elif driver == 'dict':
                self._state_drivers[name] = state.MemoryDriver(self, extras)
            else:
                raise ValueError('Unknown state driver')
        return self._state_drivers[name]
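
The sanitize helper above encodes the target type in the option name itself: a [int] or [bool] suffix tells the loader how to convert the raw string. A simplified sketch of that convention; the BOOL_MAP definition is an assumption, and the ValueError handling of the original is omitted for brevity.

BOOL_MAP = {True: ('true', 'yes', '1', 'on'),
            False: ('false', 'no', '0', 'off')}  # assumed definition

items = [('workers[int]', '4'), ('debug[bool]', 'yes'), ('name', 'ai')]
options = {}
for key, value in items:
    if key.endswith('[int]'):
        options[key[:-5]] = int(value)        # strip the '[int]' suffix
    elif key.endswith('[bool]'):
        options[key[:-6]] = value.lower() in BOOL_MAP[True]
    else:
        options[key] = value
print(options)  # {'workers': 4, 'debug': True, 'name': 'ai'}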
Ejemplo n.º 26
0
class RepositoryDnf(RepositoryBase):
    """
    **Implements repository handling for dnf package manager**

    :param str shared_dnf_dir: shared directory between image root
        and build system root
    :param str runtime_dnf_config_file: dnf runtime config file name
    :param dict command_env: customized os.environ for dnf
    :param str runtime_dnf_config: instance of :class:`ConfigParser`
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom dnf arguments and create runtime configuration
        and environment

        :param list custom_args: dnf arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in dnf call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpg_check = '1'
        else:
            self.gpg_check = '0'

        self.repo_names = []

        # dnf support is based on creating repo files which contain
        # path names to the repo and its cache. In order to allow a
        # persistent use of the files in and outside of a chroot call
        # an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/dnf'

        self.shared_dnf_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_dnf_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.dnf_args = [
            '-c', self.runtime_dnf_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_dnf_runtime_environment()

        # config file parameters for dnf tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup dnf repository operations to store all data
        in the default places
        """
        self.shared_dnf_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum.repos.d'
        self.shared_dnf_dir['cache-dir'] = \
            self.root_dir + '/var/cache/dnf'
        self.shared_dnf_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/dnf/plugins'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        dnf runtime configuration and environment

        :return: dnf_args:list, command_env:dict

        :rtype: dict
        """
        return {
            'dnf_args': self.dnf_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add dnf repository

        :param str name: repository base file name
        :param str uri: repository URI
        :param repo_type: repository type name
        :param int prio: dnf repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # dnf requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        if repo_gpgcheck is not None:
            repo_config.set(
                name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
            )
        if pkg_gpgcheck is not None:
            repo_config.set(
                name, 'gpgcheck', '1' if pkg_gpgcheck else '0'
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        for key in signing_keys:
            Command.run(['rpm', '--root', self.root_dir, '--import', key])

    def delete_repo(self, name):
        """
        Delete dnf repository

        :param str name: repository base file name
        """
        Path.wipe(
            self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        )

    def delete_all_repos(self):
        """
        Delete all dnf repositories
        """
        Path.wipe(self.shared_dnf_dir['reposd-dir'])
        Path.create(self.shared_dnf_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete dnf repository cache

        The cache data for each repository is stored in a directory
        and in additional files, all starting with the repository name.
        The method deletes, via glob matching, all files and directories
        whose names start with the repository name in order to clean up
        the cache information

        :param str name: repository name
        """
        dnf_cache_glob_pattern = ''.join(
            [self.shared_dnf_dir['cache-dir'], os.sep, name, '*']
        )
        for dnf_cache_file in glob.iglob(dnf_cache_glob_pattern):
            Path.wipe(dnf_cache_file)

    def cleanup_unused_repos(self):
        """
        Delete unused dnf repositories

        Repository configurations which are not used for this build
        must be removed otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_dnf_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_dnf_runtime_environment(self):
        for dnf_dir in list(self.shared_dnf_dir.values()):
            Path.create(dnf_dir)
        return dict(
            os.environ, LANG='C'
        )

    def _create_runtime_config_parser(self):
        self.runtime_dnf_config = ConfigParser()
        self.runtime_dnf_config.add_section('main')

        self.runtime_dnf_config.set(
            'main', 'cachedir', self.shared_dnf_dir['cache-dir']
        )
        self.runtime_dnf_config.set(
            'main', 'reposdir', self.shared_dnf_dir['reposd-dir']
        )
        self.runtime_dnf_config.set(
            'main', 'pluginconfpath', self.shared_dnf_dir['pluginconf-dir']
        )
        self.runtime_dnf_config.set(
            'main', 'keepcache', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'debuglevel', '2'
        )
        self.runtime_dnf_config.set(
            'main', 'pkgpolicy', 'newest'
        )
        self.runtime_dnf_config.set(
            'main', 'tolerant', '0'
        )
        self.runtime_dnf_config.set(
            'main', 'exactarch', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'obsoletes', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'plugins', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'gpgcheck', self.gpg_check
        )
        if self.exclude_docs:
            self.runtime_dnf_config.set(
                'main', 'tsflags', 'nodocs'
            )

    def _create_runtime_plugin_config_parser(self):
        self.runtime_dnf_plugin_config = ConfigParser()
        self.runtime_dnf_plugin_config.add_section('main')

        self.runtime_dnf_plugin_config.set(
            'main', 'enabled', '1'
        )

    def _write_runtime_config(self):
        with open(self.runtime_dnf_config_file.name, 'w') as config:
            self.runtime_dnf_config.write(config)
        dnf_plugin_config_file = \
            self.shared_dnf_dir['pluginconf-dir'] + '/priorities.conf'
        with open(dnf_plugin_config_file, 'w') as pluginconfig:
            self.runtime_dnf_plugin_config.write(pluginconfig)
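
delete_repo_cache above exploits the fact that dnf stores every cache artifact of a repository under a name starting with the repository name, so a single glob pattern catches them all. A sketch of that pattern with shutil/os standing in for kiwi's Path.wipe; the paths are illustrative.

import glob
import os
import shutil

cache_dir = '/tmp/dnf-cache'  # illustrative location
name = 'example-repo'

# every cache artifact of the repo starts with the repository name
dnf_cache_glob_pattern = ''.join([cache_dir, os.sep, name, '*'])
for dnf_cache_file in glob.iglob(dnf_cache_glob_pattern):
    if os.path.isdir(dnf_cache_file):
        shutil.rmtree(dnf_cache_file)  # stand-in for kiwi's Path.wipe
    else:
        os.remove(dnf_cache_file)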
Ejemplo n.º 27
0
def makeconfigfile(fname,beamlist,radarname,simparams_orig):
    """This will make the config file based off of the desired input parmeters.
    Inputs
        fname - Name of the file as a string.
        beamlist - A list of beams numbers used by the AMISRS
        radarname - A string that is the name of the radar being simulated.
        simparams_orig - A set of simulation parameters in a dictionary."""
    fname = Path(fname).expanduser()

    curpath = Path(__file__).resolve().parent
    d_file = curpath/'default.ini'
    fext = fname.suffix

    # reduce the amount of data that needs to be saved and avoid problems with writing
    keys2save = ['IPP', 'TimeLim', 'RangeLims', 'Pulselength', 't_s', 'Pulsetype',
                 'Tint', 'Fitinter', 'NNs', 'dtype', 'ambupsamp', 'species',
                 'numpoints', 'startfile', 'FitType','beamrate', 'outangles']

    if 'beamrate' not in simparams_orig:
        simparams_orig['beamrate'] = 1
    if 'outangles' not in simparams_orig:
        simparams_orig['outangles'] = beamlist
    simparams = {i:simparams_orig[i] for i in keys2save}
    if fext == '.pickle':
        with fname.open('wb') as pickle_file:
            pickle.dump(
                [{'beamlist': beamlist, 'radarname': radarname}, simparams],
                pickle_file
            )
    elif fext == '.yml':
        with fname.open('w') as f:
            yaml.dump(
                [{'beamlist': beamlist, 'radarname': radarname}, simparams], f
            )

    elif fext == '.ini':
        defaultparser = ConfigParser()
        defaultparser.read(str(d_file))
        cfgfile = open(str(fname), 'w')
        config = ConfigParser(allow_no_value=True)

        config.add_section('section 1')
        beamstring = ""
        for beam in beamlist:
            beamstring += str(beam)
            beamstring += " "
        config.set('section 1', '; beamlist must be list of ints')
        config.set('section 1', 'beamlist', beamstring)
        config.set('section 1', '; radarname can be pfisr, risr, or sondastrom')
        config.set('section 1', 'radarname', radarname)

        config.add_section('simparams')
        config.add_section('simparamsnames')
        defitems = [i[0] for i in defaultparser.items('simparamsnotes')]
        for param in simparams:
            if param == 'Beamlist':
                continue
            if param.lower() in defitems:
                paramnote = defaultparser.get('simparamsnotes', param.lower())
            else:
                paramnote = 'Not in default parameters'
            config.set('simparams', '; ' + param + ' ' + paramnote)
            # for the output list of angles
            if param.lower() == 'outangles':
                outstr = ''
                beamlistlist = simparams[param]
                if beamlistlist == '':
                    beamlistlist = beamlist
                for ilist in beamlistlist:
                    if isinstance(ilist, (list, sp.ndarray)):
                        for inum in ilist:
                            outstr = outstr + str(inum) + ' '
                    else:
                        outstr = outstr + str(ilist)
                    outstr = outstr + ', '
                outstr = outstr[:-2]
                config.set('simparams', param, outstr)
            elif isinstance(simparams[param], list):
                data = ""
                for a in simparams[param]:
                    data += str(a)
                    data += " "
                config.set('simparams', param, str(data))
            else:
                # TODO: config.set() is a legacy API; prefer the mapping
                # protocol instead, see https://docs.python.org/3/library/configparser.html#mapping-protocol-access
                config.set('simparams', param, str(simparams[param]))
            config.set('simparamsnames', param, param)
        config.write(cfgfile)
        cfgfile.close()
    else:
        raise ValueError(
            'fname needs to have an extension of .pickle, .yml or .ini')
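
The .ini branch above leans on ConfigParser(allow_no_value=True) to emit comment lines: calling config.set with a key that starts with '; ' and no value writes a bare comment into the generated file. A minimal demonstration:

import sys
from configparser import ConfigParser

config = ConfigParser(allow_no_value=True)
config.add_section('section 1')
config.set('section 1', '; beamlist must be list of ints')  # comment-only key
config.set('section 1', 'beamlist', '64094 64091')
config.write(sys.stdout)
# [section 1]
# ; beamlist must be list of ints
# beamlist = 64094 64091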
Ejemplo n.º 28
0
Archivo: yum.py Proyecto: ucytech/kiwi
class RepositoryYum(RepositoryBase):
    """
    Implements repository handling for yum package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom yum arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_yum_dir`
            shared directory between image root and build system root

        * :attr:`runtime_yum_config_file`
            yum runtime config file name

        * :attr:`command_env`
            customized os.environ for yum

        * :attr:`runtime_yum_config`
            Instance of ConfigParser

        :param list custom_args: yum arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in yum call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            log.warning('rpm-excludedocs not supported for yum: ignoring')

        self.repo_names = []

        # yum support is based on creating repo files which contain
        # path names to the repo and its cache. In order to allow a
        # persistent use of the files in and outside of a chroot call
        # an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/yum'

        self.shared_yum_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_yum_config_file = NamedTemporaryFile(dir=self.root_dir)

        self.yum_args = [
            '-c', self.runtime_yum_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_yum_runtime_environment()

        # config file parameters for yum tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup yum repository operations to store all data
        in the default places
        """
        self.shared_yum_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum/repos.d'
        self.shared_yum_dir['cache-dir'] = \
            self.root_dir + '/var/cache/yum'
        self.shared_yum_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/yum/pluginconf.d'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        yum runtime configuration and environment
        """
        return {'yum_args': self.yum_args, 'command_env': self.command_env}

    def add_repo(self,
                 name,
                 uri,
                 repo_type='rpm-md',
                 prio=None,
                 dist=None,
                 components=None):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to use the file:// scheme
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(name, 'name', name)
        repo_config.set(name, 'baseurl', uri)
        if prio:
            repo_config.set(name, 'priority', format(prio))
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def delete_repo(self, name):
        """
        Delete yum repository

        :param string name: repository base file name
        """
        Path.wipe(self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo')

    def delete_all_repos(self):
        """
        Delete all yum repositories
        """
        Path.wipe(self.shared_yum_dir['reposd-dir'])
        Path.create(self.shared_yum_dir['reposd-dir'])

    def cleanup_unused_repos(self):
        """
        Delete unused yum repositories

        Repository configurations which are not used for this build
        must be removed, otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_yum_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_yum_runtime_environment(self):
        for yum_dir in list(self.shared_yum_dir.values()):
            Path.create(yum_dir)
        return dict(os.environ, LANG='C')

    def _create_runtime_config_parser(self):
        self.runtime_yum_config = ConfigParser()
        self.runtime_yum_config.add_section('main')

        self.runtime_yum_config.set('main', 'cachedir',
                                    self.shared_yum_dir['cache-dir'])
        self.runtime_yum_config.set('main', 'reposdir',
                                    self.shared_yum_dir['reposd-dir'])
        self.runtime_yum_config.set('main', 'pluginconfpath',
                                    self.shared_yum_dir['pluginconf-dir'])
        self.runtime_yum_config.set('main', 'keepcache', '1')
        self.runtime_yum_config.set('main', 'debuglevel', '2')
        self.runtime_yum_config.set('main', 'pkgpolicy', 'newest')
        self.runtime_yum_config.set('main', 'tolerant', '0')
        self.runtime_yum_config.set('main', 'exactarch', '1')
        self.runtime_yum_config.set('main', 'obsoletes', '1')
        self.runtime_yum_config.set('main', 'plugins', '1')
        self.runtime_yum_config.set('main', 'metadata_expire', '1800')
        self.runtime_yum_config.set('main', 'group_command', 'compat')

    def _create_runtime_plugin_config_parser(self):
        self.runtime_yum_plugin_config = ConfigParser()
        self.runtime_yum_plugin_config.add_section('main')

        self.runtime_yum_plugin_config.set('main', 'enabled', '1')

    def _write_runtime_config(self):
        with open(self.runtime_yum_config_file.name, 'w') as config:
            self.runtime_yum_config.write(config)
        yum_plugin_config_file = \
            self.shared_yum_dir['pluginconf-dir'] + '/priorities.conf'
        with open(yum_plugin_config_file, 'w') as pluginconfig:
            self.runtime_yum_plugin_config.write(pluginconfig)
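Stripped of the kiwi class machinery, the repo file creation in add_repo above comes down to a handful of ConfigParser calls. A standalone sketch, where the repository name, URI, and target path are illustrative:

import os
from configparser import ConfigParser

def write_yum_repo(repo_file, name, uri, prio=None):
    # yum requires local paths to use the file:// scheme
    if os.path.exists(uri):
        uri = 'file://' + uri
    repo_config = ConfigParser()
    repo_config.add_section(name)
    repo_config.set(name, 'name', name)
    repo_config.set(name, 'baseurl', uri)
    if prio:
        repo_config.set(name, 'priority', format(prio))
    with open(repo_file, 'w') as repo:
        repo_config.write(repo)

write_yum_repo('/tmp/example.repo', 'example', 'http://example.com/repo', prio=42)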
Ejemplo n.º 29
0
class RepositoryZypper(RepositoryBase):
    """
    Implements repo handling for zypper package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom zypper arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_zypper_dir`
            shared directory between image root and build system root

        * :attr:`runtime_zypper_config_file`
            zypper runtime config file name

        * :attr:`runtime_zypp_config_file`
            libzypp runtime config file name

        * :attr:`zypper_args`
            zypper caller args plus additional custom args

        * :attr:`command_env`
            customized os.environ for zypper

        * :attr:`runtime_zypper_config`
            Instance of ConfigParser

        :param list custom_args: zypper arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        self.repo_names = []

        # zypper support by default points all actions into the root
        # directory of the image system. This information is passed
        # as arguments to zypper and adapted if the call runs as a
        # chrooted operation. Therefore the use of the shared location
        # via RootBind::mount_shared_directory is optional, but
        # recommended in order to make use of the repo cache
        manager_base = self.root_dir + self.shared_location

        self.shared_zypper_dir = {
            'pkg-cache-dir': manager_base + '/packages',
            'reposd-dir': manager_base + '/zypper/repos',
            'solv-cache-dir': manager_base + '/zypper/solv',
            'raw-cache-dir': manager_base + '/zypper/raw',
            'cache-dir': manager_base + '/zypper'
        }

        self.runtime_zypper_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )
        self.runtime_zypp_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.zypper_args = [
            '--non-interactive', '--no-gpg-checks',
            '--pkg-cache-dir', self.shared_zypper_dir['pkg-cache-dir'],
            '--reposd-dir', self.shared_zypper_dir['reposd-dir'],
            '--solv-cache-dir', self.shared_zypper_dir['solv-cache-dir'],
            '--cache-dir', self.shared_zypper_dir['cache-dir'],
            '--raw-cache-dir', self.shared_zypper_dir['raw-cache-dir'],
            '--config', self.runtime_zypper_config_file.name
        ] + self.custom_args

        self.command_env = self._create_zypper_runtime_environment()

        # config file parameters for zypper tool
        self.runtime_zypper_config = ConfigParser()
        self.runtime_zypper_config.add_section('main')

        # config file parameters for libzypp library
        self.runtime_zypp_config = ConfigParser()
        self.runtime_zypp_config.add_section('main')
        self.runtime_zypp_config.set(
            'main', 'cachedir', self.shared_zypper_dir['cache-dir']
        )
        self.runtime_zypp_config.set(
            'main', 'metadatadir', self.shared_zypper_dir['raw-cache-dir']
        )
        self.runtime_zypp_config.set(
            'main', 'solvfilesdir', self.shared_zypper_dir['solv-cache-dir']
        )
        self.runtime_zypp_config.set(
            'main', 'packagesdir', self.shared_zypper_dir['pkg-cache-dir']
        )

        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup zypper repository operations to store all data
        in the default places
        """
        self.shared_zypper_dir['reposd-dir'] = \
            self.root_dir + '/etc/zypp/repos.d'
        self.zypper_args = [
            '--non-interactive', '--no-gpg-checks'
        ] + self.custom_args
        self.command_env = dict(os.environ, LANG='C')

    def runtime_config(self):
        """
        zypper runtime configuration and environment
        """
        return {
            'zypper_args': self.zypper_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None
    ):
        """
        Add zypper repository

        :param string name: repository name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_zypper_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir,
                'addrepo',
                '--refresh',
                '--type', self._translate_repo_type(repo_type),
                '--keep-packages',
                '-C',
                uri,
                name
            ],
            self.command_env
        )
        if prio:
            Command.run(
                ['zypper'] + self.zypper_args + [
                    '--root', self.root_dir,
                    'modifyrepo', '--priority', format(prio), name
                ],
                self.command_env
            )
        self._restore_package_cache()

    def delete_repo(self, name):
        """
        Delete zypper repository

        :param string name: repository name
        """
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir, 'removerepo', name
            ],
            self.command_env
        )

    def delete_all_repos(self):
        """
        Delete all zypper repositories
        """
        Path.wipe(self.shared_zypper_dir['reposd-dir'])
        Path.create(self.shared_zypper_dir['reposd-dir'])

    def cleanup_unused_repos(self):
        """
        Delete unused zypper repositories

        zypper creates a system solvable which is unwanted for the
        purpose of building images. In addition zypper fails with
        an error message 'Failed to cache rpm database' if such a
        system solvable exists and a new root system is created

        All other repository configurations which are not used for
        this build must be removed too, otherwise they are taken into
        account for the package installations
        """
        solv_dir = self.shared_zypper_dir['solv-cache-dir']
        Path.wipe(solv_dir + '/@System')

        repos_dir = self.shared_zypper_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_zypper_runtime_environment(self):
        for zypper_dir in list(self.shared_zypper_dir.values()):
            Path.create(zypper_dir)
        return dict(
            os.environ,
            LANG='C',
            ZYPP_CONF=self.runtime_zypp_config_file.name
        )

    def _write_runtime_config(self):
        with open(self.runtime_zypper_config_file.name, 'w') as config:
            self.runtime_zypper_config.write(config)
        with open(self.runtime_zypp_config_file.name, 'w') as config:
            self.runtime_zypp_config.write(config)

    def _translate_repo_type(self, repo_type):
        """
        Translate kiwi supported common repo type names from the schema
        into the name the zypper package manager understands
        """
        zypper_type_for = {
            'rpm-md': 'YUM',
            'rpm-dir': 'Plaindir',
            'yast2': 'YaST'
        }
        try:
            return zypper_type_for[repo_type]
        except Exception:
            raise KiwiRepoTypeUnknown(
                'Unsupported zypper repo type: %s' % repo_type
            )

    def _backup_package_cache(self):
        """
        preserve package cache which otherwise will be removed by
        zypper if no repo file is found. But this situation is
        normal for an image build process which sets up and removes
        repos for building at runtime
        """
        self._move_package_cache(backup=True)

    def _restore_package_cache(self):
        """
        restore preserved package cache at the location passed to zypper
        """
        self._move_package_cache(restore=True)

    def _move_package_cache(self, backup=False, restore=False):
        package_cache = self.shared_location + '/packages'
        package_cache_moved = package_cache + '.moved'
        if backup and os.path.exists(package_cache):
            Command.run(
                ['mv', '-f', package_cache, package_cache_moved]
            )
        elif restore and os.path.exists(package_cache_moved):
            Command.run(
                ['mv', '-f', package_cache_moved, package_cache]
            )

    def __del__(self):
        self._restore_package_cache()
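The _translate_repo_type mapping above is easy to exercise on its own. A minimal sketch with a plain dict and KeyError handling, where ValueError stands in for kiwi's KiwiRepoTypeUnknown exception (that class is not part of this excerpt):

ZYPPER_TYPE_FOR = {
    'rpm-md': 'YUM',
    'rpm-dir': 'Plaindir',
    'yast2': 'YaST'
}

def translate_repo_type(repo_type):
    try:
        return ZYPPER_TYPE_FOR[repo_type]
    except KeyError:
        # kiwi raises KiwiRepoTypeUnknown here; ValueError is a stand-in
        raise ValueError('Unsupported zypper repo type: %s' % repo_type)

assert translate_repo_type('rpm-md') == 'YUM'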
Ejemplo n.º 30
0
Archivo: yum.py Proyecto: ChrisBr/kiwi
class RepositoryYum(RepositoryBase):
    """
    Implements repository handling for yum package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom yum arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_yum_dir`
            shared directory between image root and build system root

        * :attr:`runtime_yum_config_file`
            yum runtime config file name

        * :attr:`command_env`
            customized os.environ for yum

        * :attr:`runtime_yum_config`
            Instance of ConfigParser

        :param list custom_args: yum arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        self.repo_names = []

        # yum support is based on creating repo files which contain
        # path names to the repo and its cache. In order to allow
        # persistent use of the files inside and outside of a chroot call,
        # an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/yum'

        self.shared_yum_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache'
        }

        self.runtime_yum_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.yum_args = [
            '-c', self.runtime_yum_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_yum_runtime_environment()

        # config file parameters for yum tool
        self._create_runtime_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup yum repository operations to store all data
        in the default places
        """
        self.shared_yum_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum/repos.d'
        self.shared_yum_dir['cache-dir'] = \
            self.root_dir + '/var/cache/yum'
        self._create_runtime_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        yum runtime configuration and environment
        """
        return {
            'yum_args': self.yum_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md', prio=None, dist=None, components=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to use the file:// scheme
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def delete_repo(self, name):
        """
        Delete yum repository

        :param string name: repository base file name
        """
        Path.wipe(
            self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        )

    def delete_all_repos(self):
        """
        Delete all yum repositories
        """
        Path.wipe(self.shared_yum_dir['reposd-dir'])
        Path.create(self.shared_yum_dir['reposd-dir'])

    def cleanup_unused_repos(self):
        """
        Delete unused yum repositories

        Repository configurations which are not used for this build
        must be removed, otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_yum_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_yum_runtime_environment(self):
        for yum_dir in list(self.shared_yum_dir.values()):
            Path.create(yum_dir)
        return dict(
            os.environ, LANG='C'
        )

    def _create_runtime_config_parser(self):
        self.runtime_yum_config = ConfigParser()
        self.runtime_yum_config.add_section('main')

        self.runtime_yum_config.set(
            'main', 'cachedir', self.shared_yum_dir['cache-dir']
        )
        self.runtime_yum_config.set(
            'main', 'reposdir', self.shared_yum_dir['reposd-dir']
        )
        self.runtime_yum_config.set(
            'main', 'keepcache', '1'
        )
        self.runtime_yum_config.set(
            'main', 'debuglevel', '2'
        )
        self.runtime_yum_config.set(
            'main', 'pkgpolicy', 'newest'
        )
        self.runtime_yum_config.set(
            'main', 'tolerant', '0'
        )
        self.runtime_yum_config.set(
            'main', 'exactarch', '1'
        )
        self.runtime_yum_config.set(
            'main', 'obsoletes', '1'
        )
        self.runtime_yum_config.set(
            'main', 'plugins', '1'
        )
        self.runtime_yum_config.set(
            'main', 'metadata_expire', '1800'
        )
        self.runtime_yum_config.set(
            'main', 'group_command', 'compat'
        )

    def _write_runtime_config(self):
        with open(self.runtime_yum_config_file.name, 'w') as config:
            self.runtime_yum_config.write(config)
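The output of _write_runtime_config can be verified by reading the file back with a fresh ConfigParser. A minimal round-trip sketch, where the file name and option values are illustrative:

from configparser import ConfigParser

config = ConfigParser()
config.add_section('main')
config.set('main', 'cachedir', '/var/cache/yum')
config.set('main', 'keepcache', '1')

with open('yum-runtime.conf', 'w') as fp:
    config.write(fp)

check = ConfigParser()
check.read('yum-runtime.conf')
assert check.get('main', 'cachedir') == '/var/cache/yum'
assert check.getboolean('main', 'keepcache') is True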
Ejemplo n.º 31
0
class RepositoryYum(RepositoryBase):
    """
    Implements repository handling for yum package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom yum arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_yum_dir`
            shared directory between image root and build system root

        * :attr:`runtime_yum_config_file`
            yum runtime config file name

        * :attr:`command_env`
            customized os.environ for yum

        * :attr:`runtime_yum_config`
            Instance of ConfigParser

        :param list custom_args: yum arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in yum call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            log.warning('rpm-excludedocs not supported for yum: ignoring')

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpg_check = '1'
        else:
            self.gpg_check = '0'

        self.repo_names = []

        # yum support is based on creating repo files which contain
        # path names to the repo and its cache. In order to allow
        # persistent use of the files inside and outside of a chroot call,
        # an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/yum'

        self.shared_yum_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_yum_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.yum_args = [
            '-c', self.runtime_yum_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_yum_runtime_environment()

        # config file parameters for yum tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup yum repository operations to store all data
        in the default places
        """
        self.shared_yum_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum.repos.d'
        self.shared_yum_dir['cache-dir'] = \
            self.root_dir + '/var/cache/yum'
        self.shared_yum_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/yum/pluginconf.d'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        yum runtime configuration and environment
        """
        return {
            'yum_args': self.yum_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to use the file:// scheme
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        repo_config.set(
            name, 'enabled', '1'
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        if repo_gpgcheck is not None:
            repo_config.set(
                name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
            )
        if pkg_gpgcheck is not None:
            repo_config.set(
                name, 'gpgcheck', '1' if pkg_gpgcheck else '0'
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(RepositoryYumSpaceRemover(repo))

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        for key in signing_keys:
            Command.run(['rpm', '--root', self.root_dir, '--import', key])

    def delete_repo(self, name):
        """
        Delete yum repository

        :param string name: repository base file name
        """
        Path.wipe(
            self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        )

    def delete_all_repos(self):
        """
        Delete all yum repositories
        """
        Path.wipe(self.shared_yum_dir['reposd-dir'])
        Path.create(self.shared_yum_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete yum repository cache

        The cache data for each repository is stored in a directory
        of the same name as the repository name. The method deletes
        this directory to cleanup the cache information

        :param string name: repository name
        """
        Path.wipe(
            os.sep.join([self.shared_yum_dir['cache-dir'], name])
        )

    def cleanup_unused_repos(self):
        """
        Delete unused yum repositories

        Repository configurations which are not used for this build
        must be removed, otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_yum_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_yum_runtime_environment(self):
        for yum_dir in list(self.shared_yum_dir.values()):
            Path.create(yum_dir)
        return dict(
            os.environ, LANG='C'
        )

    def _create_runtime_config_parser(self):
        self.runtime_yum_config = ConfigParser()
        self.runtime_yum_config.add_section('main')

        self.runtime_yum_config.set(
            'main', 'cachedir', self.shared_yum_dir['cache-dir']
        )
        self.runtime_yum_config.set(
            'main', 'reposdir', self.shared_yum_dir['reposd-dir']
        )
        self.runtime_yum_config.set(
            'main', 'pluginconfpath', self.shared_yum_dir['pluginconf-dir']
        )
        self.runtime_yum_config.set(
            'main', 'keepcache', '1'
        )
        self.runtime_yum_config.set(
            'main', 'debuglevel', '2'
        )
        self.runtime_yum_config.set(
            'main', 'pkgpolicy', 'newest'
        )
        self.runtime_yum_config.set(
            'main', 'tolerant', '0'
        )
        self.runtime_yum_config.set(
            'main', 'exactarch', '1'
        )
        self.runtime_yum_config.set(
            'main', 'obsoletes', '1'
        )
        self.runtime_yum_config.set(
            'main', 'plugins', '1'
        )
        self.runtime_yum_config.set(
            'main', 'gpgcheck', self.gpg_check
        )
        self.runtime_yum_config.set(
            'main', 'metadata_expire', '1800'
        )
        self.runtime_yum_config.set(
            'main', 'group_command', 'compat'
        )

    def _create_runtime_plugin_config_parser(self):
        self.runtime_yum_plugin_config = ConfigParser()
        self.runtime_yum_plugin_config.add_section('main')

        self.runtime_yum_plugin_config.set(
            'main', 'enabled', '1'
        )

    def _write_runtime_config(self):
        with open(self.runtime_yum_config_file.name, 'w') as config:
            self.runtime_yum_config.write(
                RepositoryYumSpaceRemover(config)
            )
        yum_plugin_config_file = \
            self.shared_yum_dir['pluginconf-dir'] + '/priorities.conf'
        with open(yum_plugin_config_file, 'w') as pluginconfig:
            self.runtime_yum_plugin_config.write(
                RepositoryYumSpaceRemover(pluginconfig)
            )
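RepositoryYumSpaceRemover is referenced above but not defined in this excerpt. Judging from its use as a file-like wrapper handed to ConfigParser.write(), a plausible minimal sketch is a write proxy that strips the spaces around the '=' delimiter; the actual kiwi implementation may differ:

class RepositoryYumSpaceRemover:
    # Hypothetical sketch: rewrites 'key = value' lines to 'key=value'
    # before delegating to the wrapped file object
    def __init__(self, wrapped_file):
        self.wrapped_file = wrapped_file

    def write(self, data):
        self.wrapped_file.write(data.replace(' = ', '=', 1))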
    def load(self):
        config = ConfigParser()
        config.read(self.filename)
        if self.section not in config.sections():
            config.add_section(self.section)
        return config
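A minimal sketch of how such a load helper is typically used, assuming a small wrapper class with filename and section attributes mirroring the fragment above (the class and option names are made up):

from configparser import ConfigParser

class SectionStore:
    def __init__(self, filename, section):
        self.filename = filename
        self.section = section

    def load(self):
        config = ConfigParser()
        config.read(self.filename)
        if self.section not in config.sections():
            config.add_section(self.section)
        return config

store = SectionStore('settings.ini', 'general')
config = store.load()
config.set('general', 'verbose', '1')
with open(store.filename, 'w') as fp:
    config.write(fp)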