Example #1
def log_in(client):
    """Authorizes ImgurClient to use user account"""
    config = ConfigParser()
    config.read('auth.ini')
    access_token = config.get('credentials', 'access_token')
    refresh_token = config.get('credentials', 'refresh_token')
    if len(access_token) > 0 and len(refresh_token) > 0:
        client.set_user_auth(access_token, refresh_token)
        return client

    authorization_url = client.get_auth_url('pin')
    webbrowser.open(authorization_url)
    pin = input('Please input your pin\n>\t')

    credentials = client.authorize(pin)  # grant_type default is 'pin'

    access_token = credentials['access_token']
    refresh_token = credentials['refresh_token']

    config.set('credentials', 'access_token', access_token)
    config.set('credentials', 'refresh_token', refresh_token)

    save_config(config)
    client.set_user_auth(access_token, refresh_token)
    return client
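Example #1 persists its tokens through a save_config helper that this listing does not show; a minimal sketch of such a helper, assuming the credentials live in the same auth.ini read at the top of log_in:

def save_config(config):
    """Hypothetical helper: write the updated parser state back to auth.ini."""
    with open('auth.ini', 'w') as fp:
        config.write(fp)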
Example #2
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()

        self.conffile = os.path.join(self.tmpdir, 'zeyple.conf')
        self.homedir = os.path.join(self.tmpdir, 'gpg')
        self.logfile = os.path.join(self.tmpdir, 'zeyple.log')

        config = ConfigParser()

        config.add_section('zeyple')
        config.set('zeyple', 'log_file', self.logfile)
        config.set('zeyple', 'add_header', 'true')

        config.add_section('gpg')
        config.set('gpg', 'home', self.homedir)

        config.add_section('relay')
        config.set('relay', 'host', 'example.net')
        config.set('relay', 'port', '2525')

        with open(self.conffile, 'w') as fp:
            config.write(fp)

        os.mkdir(self.homedir, 0o700)
        subprocess.check_call(
            ['gpg', '--homedir', self.homedir, '--import', KEYS_FNAME],
            stderr=open('/dev/null', 'w'),  # open for writing so gpg's stderr is discarded
        )

        self.zeyple = zeyple.Zeyple(self.conffile)
        self.zeyple._send_message = Mock()  # don't try to send emails
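A matching tearDown is not shown in this listing; assuming the standard library's shutil is imported, a sketch that keeps the tests isolated by discarding the temporary tree:

    def tearDown(self):
        # Remove the tmpdir (config file, gpg home and log) created in setUp.
        shutil.rmtree(self.tmpdir)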
Example #3
def _load_ec_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Override swift.conf [storage-policy:0] section to use a 2+1 EC policy.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    """
    _debug('Setting configuration for default EC policy')

    conf = ConfigParser()
    conf.read(swift_conf_file)
    # remove existing policy sections that came with swift.conf-sample
    for section in list(conf.sections()):
        if section.startswith('storage-policy'):
            conf.remove_section(section)
    # add new policy 0 section for an EC policy
    conf.add_section('storage-policy:0')
    ec_policy_spec = {
        'name': 'ec-test',
        'policy_type': 'erasure_coding',
        'ec_type': 'liberasurecode_rs_vand',
        'ec_num_data_fragments': 2,
        'ec_num_parity_fragments': 1,
        'ec_object_segment_size': 1048576,
        'default': True
    }

    for k, v in ec_policy_spec.items():
        conf.set('storage-policy:0', k, str(v))

    with open(swift_conf_file, 'w') as fp:
        conf.write(fp)
    return proxy_conf_file, swift_conf_file
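Note the str(v) coercion above: ConfigParser stores options as plain text, so non-string values such as 2 or True must be converted when calling set() and re-typed when read back. A small hypothetical read-back sketch:

conf = ConfigParser()
conf.read(swift_conf_file)
# Typed getters convert the stored strings back to Python values.
num_data = conf.getint('storage-policy:0', 'ec_num_data_fragments')  # -> 2
is_default = conf.getboolean('storage-policy:0', 'default')          # -> True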
Example #4
    def _save(self, nv):
        p = get_spectrometer_config_path()
        config = ConfigParser()
        config.read(p)

        config.set('CDDParameters', 'OperatingVoltage', str(nv))  # ConfigParser requires string values
        with open(p, 'w') as wfile:
            config.write(wfile)
        self.info('saving new operating voltage {:0.1f} to {}'.format(nv, p))
Example #5
    def as_ini(self):
        """Serialize the context's properties into an INI-formatted string."""
        context = self.context
        parser = ConfigParser()
        stream = cStringIO()
        for k, v in context.propertyItems():
            parser.set('DEFAULT', k, str(v))
        parser.write(stream)
        return stream.getvalue()
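Since the parser is written to an in-memory cStringIO buffer, as_ini() returns the INI text as a plain string. A hypothetical caller could round-trip it into a fresh parser:

ini_text = obj.as_ini()  # 'obj' is any instance offering as_ini()
parser = ConfigParser()
parser.readfp(cStringIO(ini_text))  # readfp is the Python 2 spelling; read_string() on Python 3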
Example #6
def _modify_wpr_file(template, outfile, version):
    config = ConfigParser()
    config.read(template)
    if sys.platform == 'darwin':
        config.set('user attributes', 'proj.pyexec',
                   text_type(dict({None: ('custom', sys.executable)})))
        config.set('user attributes', 'proj.pypath',
                   text_type(dict({None: ('custom', os.pathsep.join(sys.path))})))

    with open(outfile, 'w') as fp:
        fp.write('#!wing\n#!version=%s\n' % version)
        config.write(fp)
Example #7
    def dump(self):
        p = get_spectrometer_config_path()

        cfp = ConfigParser()
        cfp.read(p)
        for gn, pn, v in self.itervalues():
            cfp.set(gn, pn, str(v))  # coerce to str; ConfigParser requires string values

        with open(p, 'w') as wfile:
            cfp.write(wfile)

        return p
Example #8
def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load domain_remap and staticweb into proxy server pipeline.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for domain_remap')

    # add a domain_remap storage_domain to the test configuration
    storage_domain = 'example.net'
    global config
    config['storage_domain'] = storage_domain

    # The global conf dict cannot be used to modify the pipeline.
    # The pipeline loader requires the pipeline to be set in the local_conf.
    # If pipeline is set in the global conf dict (which in turn populates the
    # DEFAULTS options) then it prevents pipeline being loaded into the local
    # conf during wsgi load_app.
    # Therefore we must modify the [pipeline:main] section.
    conf = ConfigParser()
    conf.read(proxy_conf_file)
    try:
        section = 'pipeline:main'
        old_pipeline = conf.get(section, 'pipeline')
        pipeline = old_pipeline.replace(
            " tempauth ",
            " domain_remap tempauth staticweb ")
        if pipeline == old_pipeline:
            raise InProcessException(
                "Failed to insert domain_remap and staticweb into pipeline: %s"
                % old_pipeline)
        conf.set(section, 'pipeline', pipeline)
        # set storage_domain in domain_remap middleware to match test config
        section = 'filter:domain_remap'
        conf.set(section, 'storage_domain', storage_domain)
    except NoSectionError as err:
        msg = 'Error problem with proxy conf file %s: %s' % \
              (proxy_conf_file, err)
        raise InProcessException(msg)

    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file, swift_conf_file
Example #9
    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add dnf repository

        :param str name: repository base file name
        :param str uri: repository URI
        :param repo_type: repository type name
        :param int prio: dnf repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # dnf requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        if repo_gpgcheck is not None:
            repo_config.set(
                name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
            )
        if pkg_gpgcheck is not None:
            repo_config.set(
                name, 'gpgcheck', '1' if pkg_gpgcheck else '0'
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)
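As an illustration, add_repo('foo', '/srv/repo', prio=42, repo_gpgcheck=True, pkg_gpgcheck=True) would produce a foo.repo roughly like this, assuming /srv/repo exists locally so the file:// prefix is applied (option order may vary; ConfigParser emits key = value pairs):

[foo]
name = foo
baseurl = file:///srv/repo
priority = 42
repo_gpgcheck = 1
gpgcheck = 1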
Example #10
def save_config(config):
    """Save configuration.

    :param config: Data to be written to the configuration file.
    :type config:  dict

    """
    config_parser = ConfigParser()
    config_parser.add_section("sublime")

    if len(config) == 0:
        click.echo(
            'Error: no options provided. Try "sublime setup -h" for help.')
        click.get_current_context().exit(-1)

    # If any value was not specified, load the existing saved values
    # to ensure we don't overwrite them with null here
    saved_config = load_config()
    if 'api_key' not in config or not config['api_key']:
        config['api_key'] = saved_config['api_key']
    if 'save_dir' not in config or not config['save_dir']:
        config['save_dir'] = saved_config['save_dir']
    if 'permission' not in config or not config['permission']:
        config['permission'] = saved_config['permission']

    if config["save_dir"] and not os.path.isdir(config["save_dir"]):
        click.echo("Error: save directory is not a valid directory")
        click.get_current_context().exit(-1)

    config_parser.set("sublime", "api_key", config["api_key"])
    config_parser.set("sublime", "save_dir", config["save_dir"])
    config_parser.set("sublime", "permission", config["permission"])

    config_parser_existing = ConfigParser()
    if os.path.isfile(CONFIG_FILE):
        # LOGGER.debug("Reading configuration file: %s...", CONFIG_FILE, path=CONFIG_FILE)
        with open(CONFIG_FILE) as config_file:
            config_parser_existing.readfp(config_file)

        # if an emailrep key exists, ensure we don't overwrite it
        try:
            emailrep_key = config_parser_existing.get("emailrep", "key")
            if emailrep_key:
                config_parser.add_section("emailrep")
                config_parser.set("emailrep", "key", emailrep_key)
        except Exception:  # no existing emailrep section/option
            pass

    config_dir = os.path.dirname(CONFIG_FILE)
    if not os.path.isdir(config_dir):
        os.makedirs(config_dir)

    with open(CONFIG_FILE, "w") as config_file:
        config_parser.write(config_file)
Example #11
    def run(self, args, **kwargs):

        if not args.password:
            args.password = getpass.getpass()
        instance = self.resource(ttl=args.ttl) if args.ttl else self.resource()

        cli = BaseCLIApp()

        # Determine path to config file
        try:
            config_file = cli._get_config_file_path(args)
        except ValueError:
            # config file not found in args or in env, defaulting
            config_file = config_parser.ST2_CONFIG_PATH

        # Retrieve token
        manager = self.manager.create(instance,
                                      auth=(args.username, args.password),
                                      **kwargs)
        cli._cache_auth_token(token_obj=manager)

        # Update existing configuration with new credentials
        config = ConfigParser()
        config.read(config_file)

        # Modify config (and optionally populate with password)
        if not config.has_section("credentials"):
            config.add_section("credentials")

        config.set("credentials", "username", args.username)
        if args.write_password:
            config.set("credentials", "password", args.password)
        else:
            # Remove any existing password from config
            config.remove_option("credentials", "password")

        config_existed = os.path.exists(config_file)
        with open(config_file, "w") as cfg_file_out:
            config.write(cfg_file_out)
        # If we created the config file, correct the permissions
        if not config_existed:
            os.chmod(config_file, 0o660)

        return manager
Example #12
def save_config(config):
    """Save configuration.
    :param config: Data to be written to the configuration file.
    :type config:  dict
    """
    config_parser = ConfigParser()
    config_parser.add_section("greynoise")
    config_parser.set("greynoise", "api_key", config["api_key"])
    config_parser.set("greynoise", "api_server", config["api_server"])
    config_parser.set("greynoise", "timeout", str(config["timeout"]))
    config_parser.set("greynoise", "proxy", config["proxy"])
    config_parser.set("greynoise", "offering", config["offering"])

    config_dir = os.path.dirname(CONFIG_FILE)
    if not os.path.isdir(config_dir):
        os.makedirs(config_dir)

    with open(CONFIG_FILE, "w") as config_file:
        config_parser.write(config_file)
Example #13
def _in_process_setup_swift_conf(swift_conf_src, testdir):
    # override swift.conf contents for in-process functional test runs
    conf = ConfigParser()
    conf.read(swift_conf_src)
    try:
        section = "swift-hash"
        conf.set(section, "swift_hash_path_suffix", "inprocfunctests")
        conf.set(section, "swift_hash_path_prefix", "inprocfunctests")
        section = "swift-constraints"
        max_file_size = (8 * 1024 * 1024) + 2  # 8 MB + 2
        conf.set(section, "max_file_size", max_file_size)
    except NoSectionError:
        msg = "Conf file %s is missing section %s" % (swift_conf_src, section)
        raise InProcessException(msg)

    test_conf_file = os.path.join(testdir, "swift.conf")
    with open(test_conf_file, "w") as fp:
        conf.write(fp)

    return test_conf_file
Example #14
def get_configured_plugins():
    """Retrieves a list of all plugins that the user has configured for availability within Vigilance.
    This list will prefer configurations from three sources in this order:
    1. The VIGILANCE_PLUGINS environment variable.
    2. A .vigilance file within the current working directory.
    3. A setup.cfg file within the current working directory.
    @returns A list of plugin specifier strings.
    @see load_suites
    """
    plugins = []
    parser = ConfigParser()
    parser.add_section('vigilance')
    _read_config_file('setup.cfg', parser)
    _read_config_file('.vigilance', parser)
    vEnv = os.getenv('VIGILANCE_PLUGINS', None)
    if vEnv:
        parser.set('vigilance', 'plugins', vEnv)
    if parser.has_option('vigilance', 'plugins'):
        plugins = parser.get('vigilance', 'plugins').split(',')
    return plugins
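_read_config_file is not part of this listing; judging from the call sites, a minimal sketch of what it presumably does:

def _read_config_file(path, parser):
    # Hypothetical helper: merge an INI file into the parser;
    # ConfigParser.read() silently ignores missing files.
    parser.read(path)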
Example #15
def _in_process_setup_swift_conf(swift_conf_src, testdir):
    # override swift.conf contents for in-process functional test runs
    conf = ConfigParser()
    conf.read(swift_conf_src)
    try:
        section = 'swift-hash'
        conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
        conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
        section = 'swift-constraints'
        max_file_size = (8 * 1024 * 1024) + 2  # 8 MB + 2
        conf.set(section, 'max_file_size', str(max_file_size))
    except NoSectionError:
        msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
        raise InProcessException(msg)

    test_conf_file = os.path.join(testdir, 'swift.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file
Example #16
    def _update_fstab(newroot):
        newfstab = Fstab("%s/etc/fstab" % newroot)

        if not newfstab.exists():
            log.info("The new layer contains no fstab, skipping.")
            return

        log.debug("Checking new fstab: %s" % newfstab)
        log.info("Updating fstab of new layer")
        rootentry = newfstab.by_target("/")
        rootentry.source = new_lv.path
        newfstab.update(rootentry)

        # Ensure that discard is used
        # This can also be done in anaconda once it is fixed
        targets = list(constants.volume_paths().keys()) + ["/"]
        for tgt in targets:
            try:
                e = newfstab.by_target(tgt)
                if "discard" not in e.options:
                    e.options += ["discard"]
                    newfstab.update(e)
            except KeyError:
                # Created with imgbased.volume?
                log.debug("{} not found in /etc/fstab. "
                          "not created by Anaconda".format(tgt))
                from six.moves.configparser import ConfigParser
                c = ConfigParser()
                c.optionxform = str

                sub = re.sub(r'^/', '', tgt)
                sub = re.sub(r'/', '-', sub)
                fname = "{}/etc/systemd/system/{}.mount".format(newroot, sub)
                c.read(fname)

                if 'discard' not in c.get('Mount', 'Options'):
                    c.set('Mount', 'Options',
                          ','.join([c.get('Mount', 'Options'), 'discard']))

                with open(fname, 'w') as mountfile:
                    c.write(mountfile)
Example #17
def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load domain_remap and staticweb into proxy server pipeline.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for domain_remap')

    # The global conf dict cannot be used to modify the pipeline.
    # The pipeline loader requires the pipeline to be set in the local_conf.
    # If pipeline is set in the global conf dict (which in turn populates the
    # DEFAULTS options) then it prevents pipeline being loaded into the local
    # conf during wsgi load_app.
    # Therefore we must modify the [pipeline:main] section.

    conf = ConfigParser()
    conf.read(proxy_conf_file)
    try:
        section = 'pipeline:main'
        old_pipeline = conf.get(section, 'pipeline')
        pipeline = old_pipeline.replace("tempauth",
                                        "domain_remap tempauth staticweb")
        if pipeline == old_pipeline:
            raise InProcessException(
                "Failed to insert domain_remap and staticweb into pipeline: %s"
                % old_pipeline)
        conf.set(section, 'pipeline', pipeline)
    except NoSectionError as err:
        msg = 'Error problem with proxy conf file %s: %s' % \
              (proxy_conf_file, err)
        raise InProcessException(msg)

    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file, swift_conf_file
Example #18
File: dnf.py Project: ucytech/kiwi
    def add_repo(
        self, name, uri, repo_type='rpm-md', prio=None, dist=None, components=None
    ):
        """
        Add dnf repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: dnf repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # dnf requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)
Example #19
File: yum.py Project: SUSE/kiwi
    def add_repo(
        self, name, uri, repo_type='rpm-md', prio=None, dist=None, components=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)
Example #20
def process_mistral_config(config_path):
    """
    Remove sensitive data (credentials) from the Mistral config.

    :param config_path: Full absolute path to the mistral config inside /tmp.
    :type config_path: ``str``
    """
    assert config_path.startswith('/tmp')

    if not os.path.isfile(config_path):
        return

    config = ConfigParser()
    config.read(config_path)

    for section, options in MISTRAL_CONF_OPTIONS_TO_REMOVE.items():
        for option in options:
            if config.has_option(section, option):
                config.set(section, option, REMOVED_VALUE_NAME)

    with open(config_path, 'w') as fp:
        config.write(fp)
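REMOVED_VALUE_NAME and MISTRAL_CONF_OPTIONS_TO_REMOVE are module-level constants outside this listing; their shape follows from the loop above (the section and option names here are hypothetical):

REMOVED_VALUE_NAME = '**removed**'
MISTRAL_CONF_OPTIONS_TO_REMOVE = {
    # section name -> options whose values get scrubbed
    'database': ['connection'],
    'keystone_authtoken': ['admin_password'],
}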
Example #21
def _load_encryption(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load encryption configuration and override proxy-server.conf contents.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for encryption')

    # The global conf dict cannot be used to modify the pipeline.
    # The pipeline loader requires the pipeline to be set in the local_conf.
    # If pipeline is set in the global conf dict (which in turn populates the
    # DEFAULTS options) then it prevents pipeline being loaded into the local
    # conf during wsgi load_app.
    # Therefore we must modify the [pipeline:main] section.

    conf = ConfigParser()
    conf.read(proxy_conf_file)
    try:
        section = 'pipeline:main'
        pipeline = conf.get(section, 'pipeline')
        pipeline = pipeline.replace(
            "proxy-logging proxy-server",
            "keymaster encryption proxy-logging proxy-server")
        conf.set(section, 'pipeline', pipeline)
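        # str.encode('base64') below is Python 2 only; a Python 3 port would
        # use base64.b64encode(os.urandom(32)) instead.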
        root_secret = os.urandom(32).encode("base64")
        conf.set('filter:keymaster', 'encryption_root_secret', root_secret)
    except NoSectionError as err:
        msg = 'Error problem with proxy conf file %s: %s' % \
              (proxy_conf_file, err)
        raise InProcessException(msg)

    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file, swift_conf_file
Example #22
def load_config():
    """Load configuration.

    :returns:
        Current configuration based on configuration file and environment variables.
    :rtype: dict

    """
    config_parser = ConfigParser(
        {key: str(value)
         for key, value in DEFAULT_CONFIG.items()})
    config_parser.add_section("sublime")

    if os.path.isfile(CONFIG_FILE):
        # LOGGER.debug("Parsing configuration file: %s..." % CONFIG_FILE)
        with open(CONFIG_FILE) as config_file:
            config_parser.readfp(config_file)
    else:
        # LOGGER.debug("Configuration file not found: %s" % CONFIG_FILE)
        pass

    if "SUBLIME_API_KEY" in os.environ:
        api_key = os.environ["SUBLIME_API_KEY"]
        # LOGGER.debug("API key found in environment variable: %s", api_key, api_key=api_key)
        # Environment variable takes precedence over configuration file content
        config_parser.set("sublime", "api_key", api_key)

    if "SUBLIME_SAVE_DIR" in os.environ:
        save_dir = os.environ["SUBLIME_SAVE_DIR"]
        # LOGGER.debug("Save dir found in environment variable: %s", save_dir, save_dir=save_dir)
        # Environment variable takes precedence over configuration file content
        config_parser.set("sublime", "save_dir", save_dir)

    return {
        "api_key": config_parser.get("sublime", "api_key"),
        "save_dir": config_parser.get("sublime", "save_dir"),
        "permission": config_parser.get("sublime", "permission"),
    }
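DEFAULT_CONFIG is defined outside this listing; given the keys returned at the end, a plausible (hypothetical) shape:

DEFAULT_CONFIG = {
    # Inferred from the options load_config() reads back; values are
    # stringified before being handed to ConfigParser as defaults.
    'api_key': '',
    'save_dir': '',
    'permission': '',
}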
Example #23
File: auth.py Project: lyandut/st2
    def run(self, args, **kwargs):

        if not args.password:
            args.password = getpass.getpass()
        instance = self.resource(ttl=args.ttl) if args.ttl else self.resource()

        cli = BaseCLIApp()

        # Determine path to config file
        try:
            config_file = cli._get_config_file_path(args)
        except ValueError:
            # config file not found in args or in env, defaulting
            config_file = config_parser.ST2_CONFIG_PATH

        # Retrieve token
        manager = self.manager.create(instance, auth=(args.username, args.password), **kwargs)
        cli._cache_auth_token(token_obj=manager)

        # Update existing configuration with new credentials
        config = ConfigParser()
        config.read(config_file)

        # Modify config (and optionally populate with password)
        if not config.has_section('credentials'):
            config.add_section('credentials')

        config.set('credentials', 'username', args.username)
        if args.write_password:
            config.set('credentials', 'password', args.password)
        else:
            # Remove any existing password from config
            config.remove_option('credentials', 'password')

        with open(config_file, 'w') as cfg_file_out:
            config.write(cfg_file_out)

        return manager
Example #24
    def test_get_user_list(self):
        config = ConfigParser()
        config.add_section('slapformat')
        config.set('slapformat', 'partition_amount', '3')
        config.set('slapformat', 'user_base_name', 'slapuser')
        config.set('slapformat', 'partition_base_name', 'slappart')
        config.add_section('slapos')
        config.set('slapos', 'instance_root', self.instance_root)

        user_dict = entity.get_user_list(config)
        username_set = {'slapuser0', 'slapuser1', 'slapuser2'}
        self.assertEqual(username_set, set(user_dict))

        for name in username_set:
            self.assertEqual(user_dict[name].name, name)
            self.assertEqual(user_dict[name].snapshot_list, [])
            expected_path = "%s/slappart%s" % (self.instance_root, name.strip("slapuser"))
            self.assertEqual(user_dict[name].path, expected_path)
Example #25
class RepositoryDnf(RepositoryBase):
    """
    **Implements repository handling for dnf package manager**

    :param str shared_dnf_dir: shared directory between image root
        and build system root
    :param str runtime_dnf_config_file: dnf runtime config file name
    :param dict command_env: customized os.environ for dnf
    :param str runtime_dnf_config: instance of :class:`ConfigParser`
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom dnf arguments and create runtime configuration
        and environment

        :param list custom_args: dnf arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in dnf call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpg_check = '1'
        else:
            self.gpg_check = '0'

        self.locale = list(item for item in self.custom_args
                           if '_install_langs' in item)
        if self.locale:
            self.custom_args.remove(self.locale[0])

        self.repo_names = []

        # dnf support is based on creating repo files which contains
        # path names to the repo and its cache. In order to allow a
        # persistent use of the files in and outside of a chroot call
        # an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/dnf'

        self.shared_dnf_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_dnf_config_file = NamedTemporaryFile(dir=self.root_dir)

        self.dnf_args = ['-c', self.runtime_dnf_config_file.name, '-y'
                         ] + self.custom_args

        self.command_env = self._create_dnf_runtime_environment()

        # config file parameters for dnf tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def setup_package_database_configuration(self):
        """
        Setup rpm macros for bootstrapping and image building

        1. Create the rpm image macro which persists during the build
        2. Create the rpm bootstrap macro to make sure for bootstrapping
           the rpm database location matches the host rpm database setup.
           This macro only persists during the bootstrap phase. If the
           image was already bootstrapped a compat link is created instead.
        """
        rpmdb = RpmDataBase(self.root_dir,
                            Defaults.get_custom_rpm_image_macro_name())
        if self.locale:
            rpmdb.set_macro_from_string(self.locale[0])
        rpmdb.write_config()

        rpmdb = RpmDataBase(self.root_dir)
        if rpmdb.has_rpm():
            rpmdb.link_database_to_host_path()
        else:
            rpmdb.set_database_to_host_path()

    def use_default_location(self):
        """
        Setup dnf repository operations to store all data
        in the default places
        """
        self.shared_dnf_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum.repos.d'
        self.shared_dnf_dir['cache-dir'] = \
            self.root_dir + '/var/cache/dnf'
        self.shared_dnf_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/dnf/plugins'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        dnf runtime configuration and environment

        :return: dnf_args:list, command_env:dict

        :rtype: dict
        """
        return {'dnf_args': self.dnf_args, 'command_env': self.command_env}

    def add_repo(self,
                 name,
                 uri,
                 repo_type='rpm-md',
                 prio=None,
                 dist=None,
                 components=None,
                 user=None,
                 secret=None,
                 credentials_file=None,
                 repo_gpgcheck=None,
                 pkg_gpgcheck=None):
        """
        Add dnf repository

        :param str name: repository base file name
        :param str uri: repository URI
        :param repo_type: repository type name
        :param int prio: dnf repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # dnf requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(name, 'name', name)
        repo_config.set(name, 'baseurl', uri)
        if prio:
            repo_config.set(name, 'priority', format(prio))
        if repo_gpgcheck is not None:
            repo_config.set(name, 'repo_gpgcheck',
                            '1' if repo_gpgcheck else '0')
        if pkg_gpgcheck is not None:
            repo_config.set(name, 'gpgcheck', '1' if pkg_gpgcheck else '0')
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        rpmdb = RpmDataBase(self.root_dir)
        for key in signing_keys:
            rpmdb.import_signing_key_to_image(key)

    def delete_repo(self, name):
        """
        Delete dnf repository

        :param str name: repository base file name
        """
        Path.wipe(self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo')

    def delete_all_repos(self):
        """
        Delete all dnf repositories
        """
        Path.wipe(self.shared_dnf_dir['reposd-dir'])
        Path.create(self.shared_dnf_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete dnf repository cache

        The cache data for each repository is stored in a directory
        and additional files all starting with the repository name.
        The method glob deletes all files and directories matching
        the repository name followed by any characters to cleanup
        the cache information

        :param str name: repository name
        """
        dnf_cache_glob_pattern = ''.join(
            [self.shared_dnf_dir['cache-dir'], os.sep, name, '*'])
        for dnf_cache_file in glob.iglob(dnf_cache_glob_pattern):
            Path.wipe(dnf_cache_file)

    def cleanup_unused_repos(self):
        """
        Delete unused dnf repositories

        Repository configurations which are not used for this build
        must be removed otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_dnf_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_dnf_runtime_environment(self):
        for dnf_dir in list(self.shared_dnf_dir.values()):
            Path.create(dnf_dir)
        return dict(os.environ, LANG='C')

    def _create_runtime_config_parser(self):
        self.runtime_dnf_config = ConfigParser()
        self.runtime_dnf_config.add_section('main')

        self.runtime_dnf_config.set('main', 'cachedir',
                                    self.shared_dnf_dir['cache-dir'])
        self.runtime_dnf_config.set('main', 'reposdir',
                                    self.shared_dnf_dir['reposd-dir'])
        self.runtime_dnf_config.set('main', 'pluginconfpath',
                                    self.shared_dnf_dir['pluginconf-dir'])
        self.runtime_dnf_config.set('main', 'keepcache', '1')
        self.runtime_dnf_config.set('main', 'debuglevel', '2')
        self.runtime_dnf_config.set('main', 'best', '1')
        self.runtime_dnf_config.set('main', 'obsoletes', '1')
        self.runtime_dnf_config.set('main', 'plugins', '1')
        self.runtime_dnf_config.set('main', 'gpgcheck', self.gpg_check)
        if self.exclude_docs:
            self.runtime_dnf_config.set('main', 'tsflags', 'nodocs')

    def _create_runtime_plugin_config_parser(self):
        self.runtime_dnf_plugin_config = ConfigParser()
        self.runtime_dnf_plugin_config.add_section('main')

        self.runtime_dnf_plugin_config.set('main', 'enabled', '1')

    def _write_runtime_config(self):
        with open(self.runtime_dnf_config_file.name, 'w') as config:
            self.runtime_dnf_config.write(config)
        dnf_plugin_config_file = \
            self.shared_dnf_dir['pluginconf-dir'] + '/priorities.conf'
        with open(dnf_plugin_config_file, 'w') as pluginconfig:
            self.runtime_dnf_plugin_config.write(pluginconfig)
Example #26
    def _post_execute(self):
        """
            calculate all peak centers

            calculate relative shifts to a reference detector. not necessarily the same
            as the reference detector used for setting the magnet
        """
        graph = self.graph
        plot = graph.plots[0]
        # time.sleep(0.05)

        # wait for graph to fully update
        # time.sleep(0.1)

        # def get_peak_center(i, di):
        def get_peak_center(di):
            try:
                lp = plot.plots[di][0]
            except KeyError:
                lp = plot.plots['*{}'.format(di)][0]

            xs = lp.index.get_data()
            ys = lp.value.get_data()

            cx = None
            if len(xs) and len(ys):
                try:
                    result = calculate_peak_center(xs, ys)
                    cx = result[0][1]
                except PeakCenterError:
                    self.warning('no peak center for {}'.format(di))

            return cx

        spec = self.spectrometer

        centers = {d: get_peak_center(d) for d in self.active_detectors}
        print(centers)
        ref = self.reference_detector
        post = centers[ref]
        if post is None:
            return

        results = []
        for di in self.active_detectors:
            di = spec.get_detector(di)
            cen = centers[di.name]
            if cen is None:
                continue

            dac_dev = post - cen
            if self.spectrometer.simulation:
                dac_dev = -random()

            if abs(dac_dev) < 0.001:
                self.info('no offset detected between {} and {}'.format(ref, di.name))
                continue

            defl = di.map_dac_to_deflection(dac_dev)
            self.info('{} dac dev. {:0.5f}. converted to deflection voltage {:0.1f}.'.format(di.name, dac_dev, defl))

            curdefl = di.deflection
            newdefl = int(curdefl + defl)
            newdefl = max(0, min(newdefl, self.spectrometer.max_deflection))

            if newdefl >= 0:
                results.append(DeflectionResult(di.name, curdefl, newdefl))

        if not results:
            self.information_dialog('no deflection changes needed')
        else:
            rv = ResultsView(results=results)
            info = rv.edit_traits()
            if info.result:
                config = ConfigParser()
                # p = os.path.join(paths.spectrometer_dir, 'config.cfg')
                p = get_spectrometer_config_path()
                config.read(p)
                for v in rv.clean_results:
                    config.set('Deflections', v.name, str(v.new_deflection))  # ConfigParser requires str
                    det = next((d for d in self.active_detectors if d.lower() == v.name.lower()))
                    det = spec.get_detector(det)
                    det.deflection = v.new_deflection

                with open(p, 'w') as wfile:
                    config.write(wfile)

                self.spectrometer.clear_cached_config()
Example #27
class SetupConfig(object):
    """Wrapper around the setup.cfg file if available.

    One reason is to cleanup setup.cfg from these settings::

        [egg_info]
        tag_build = dev
        tag_svn_revision = true

    Another is for optional zest.releaser-specific settings::

        [zest.releaser]
        python-file-with-version = reinout/maurits.py


    """

    config_filename = SETUP_CONFIG_FILE

    def __init__(self):
        """Grab the configuration (overridable for test purposes)"""
        # If there is a setup.cfg in the package, parse it
        if not os.path.exists(self.config_filename):
            self.config = None
            return
        self.config = ConfigParser()
        with codecs.open(self.config_filename, 'r', 'utf8') as fp:
            self.config.readfp(fp)

    def has_bad_commands(self):
        if self.config is None:
            return False
        if not self.config.has_section('egg_info'):
            # bail out early as the main section is not there
            return False
        bad = False
        # Check 1.
        if self.config.has_option('egg_info', 'tag_build'):
            # Might still be empty.
            value = self.config.get('egg_info', 'tag_build')
            if value:
                logger.warn("%s has [egg_info] tag_build set to %r",
                            self.config_filename, value)
                bad = True
        # Check 2.
        if self.config.has_option('egg_info', 'tag_svn_revision'):
            if self.config.getboolean('egg_info', 'tag_svn_revision'):
                value = self.config.get('egg_info', 'tag_svn_revision')
                logger.warn("%s has [egg_info] tag_svn_revision set to %r",
                            self.config_filename, value)
                bad = True
        return bad

    def fix_config(self):
        if not self.has_bad_commands():
            logger.warn("Cannot fix already fine %s.", self.config_filename)
            return
        if self.config.has_option('egg_info', 'tag_build'):
            self.config.set('egg_info', 'tag_build', '')
        if self.config.has_option('egg_info', 'tag_svn_revision'):
            self.config.set('egg_info', 'tag_svn_revision', 'false')
        with open(self.config_filename, 'w') as new_setup:
            self.config.write(new_setup)
        logger.info("New setup.cfg contents:")
        print(''.join(open(self.config_filename).readlines()))

    def python_file_with_version(self):
        """Return Python filename with ``__version__`` marker, if configured.

        Enable this by adding a ``python-file-with-version`` option::

            [zest.releaser]
            python-file-with-version = reinout/maurits.py

        Return None when nothing has been configured.

        """
        default = None
        if self.config is None:
            return default
        try:
            result = self.config.get('zest.releaser',
                                     'python-file-with-version')
        except (NoSectionError, NoOptionError, ValueError):
            return default
        return result
Example #28
File: yum.py Project: ChrisBr/kiwi
class RepositoryYum(RepositoryBase):
    """
    Implements repository handling for yum package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom yum arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_yum_dir`
            shared directory between image root and build system root

        * :attr:`runtime_yum_config_file`
            yum runtime config file name

        * :attr:`command_env`
            customized os.environ for yum

        * :attr:`runtime_yum_config`
            Instance of ConfigParser

        :param list custom_args: yum arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        self.repo_names = []

        # yum support is based on creating repo files which contains
        # path names to the repo and its cache. In order to allow a
        # persistent use of the files in and outside of a chroot call
        # an active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/yum'

        self.shared_yum_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache'
        }

        self.runtime_yum_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.yum_args = [
            '-c', self.runtime_yum_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_yum_runtime_environment()

        # config file parameters for yum tool
        self._create_runtime_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup yum repository operations to store all data
        in the default places
        """
        self.shared_yum_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum/repos.d'
        self.shared_yum_dir['cache-dir'] = \
            self.root_dir + '/var/cache/yum'
        self._create_runtime_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        yum runtime configuration and environment
        """
        return {
            'yum_args': self.yum_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md', prio=None, dist=None, components=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def delete_repo(self, name):
        """
        Delete yum repository

        :param string name: repository base file name
        """
        Path.wipe(
            self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        )

    def delete_all_repos(self):
        """
        Delete all yum repositories
        """
        Path.wipe(self.shared_yum_dir['reposd-dir'])
        Path.create(self.shared_yum_dir['reposd-dir'])

    def cleanup_unused_repos(self):
        """
        Delete unused yum repositories

        Repository configurations which are not used for this build
        must be removed otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_yum_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_yum_runtime_environment(self):
        for yum_dir in list(self.shared_yum_dir.values()):
            Path.create(yum_dir)
        return dict(
            os.environ, LANG='C'
        )

    def _create_runtime_config_parser(self):
        self.runtime_yum_config = ConfigParser()
        self.runtime_yum_config.add_section('main')

        self.runtime_yum_config.set(
            'main', 'cachedir', self.shared_yum_dir['cache-dir']
        )
        self.runtime_yum_config.set(
            'main', 'reposdir', self.shared_yum_dir['reposd-dir']
        )
        self.runtime_yum_config.set(
            'main', 'keepcache', '1'
        )
        self.runtime_yum_config.set(
            'main', 'debuglevel', '2'
        )
        self.runtime_yum_config.set(
            'main', 'pkgpolicy', 'newest'
        )
        self.runtime_yum_config.set(
            'main', 'tolerant', '0'
        )
        self.runtime_yum_config.set(
            'main', 'exactarch', '1'
        )
        self.runtime_yum_config.set(
            'main', 'obsoletes', '1'
        )
        self.runtime_yum_config.set(
            'main', 'plugins', '1'
        )
        self.runtime_yum_config.set(
            'main', 'metadata_expire', '1800'
        )
        self.runtime_yum_config.set(
            'main', 'group_command', 'compat'
        )

    def _write_runtime_config(self):
        with open(self.runtime_yum_config_file.name, 'w') as config:
            self.runtime_yum_config.write(config)
Example #29
def update(version, expected_md5):
    """
    Execute the actual update: extract the archive and execute the bash update script.

    :param version: the new version (after the update).
    :param expected_md5: the md5 sum provided by the server.
    """
    version_mapping = {}
    has_master_hardware = Platform.has_master_hardware()

    try:
        config = ConfigParser()
        config.read(constants.get_config_file())
        from_version = config.get('OpenMotics', 'version')
        logger.info('==================================')
        logger.info('Starting update {} -> {}'.format(from_version, version))

        update_file = constants.get_update_file()
        update_dir = os.path.dirname(update_file)
        # Change to update directory.
        os.chdir(update_dir)

        if os.path.exists(update_file):
            logger.info(' -> Extracting update.tgz')
            extract_legacy_update(update_file, expected_md5)
        else:
            logger.info(' -> Fetching metadata')
            meta = fetch_metadata(config, version, expected_md5)
            logger.info(' -> Downloading firmware for update {}'.format(meta['version']))
            for data in meta['firmwares']:
                download_firmware(data['type'], data['url'], data['sha256'])
                version_mapping[data['type']] = data['version']
    except Exception:
        logger.exception('failed to preprepare update')
        raise SystemExit(EXIT_CODES['failed_preprepare_update'])

    errors = []
    services_running = True
    try:
        date = datetime.now().strftime('%Y%m%d%H%M%S')

        # TODO: should update and re-execute itself before proceeding?

        logger.info(' -> Checking services')
        check_services()

        logger.info(' -> Stopping services')
        stop_services()
        services_running = False

        if has_master_hardware:
            gateway_os = FIRMWARE_FILES['gateway_os']
            if os.path.exists(gateway_os):
                os_version = version_mapping.get('gateway_os')
                logger.info(' -> Updating Gateway OS to {0}'.format(os_version if os_version else 'unknown version'))
                error = update_gateway_os(gateway_os, os_version)
                if error:
                    errors.append(error)

        gateway_service = FIRMWARE_FILES['gateway_service']
        if os.path.exists(gateway_service):
            service_version = version_mapping.get('gateway_service')
            logger.info(' -> Updating Gateway service to {0}'.format(service_version if service_version else 'unknown version'))
            error = update_gateway_backend(gateway_service, date, service_version)
            if error:
                errors.append(error)

        if has_master_hardware:
            master_type = get_master_type()
            master_firmware = FIRMWARE_FILES[master_type]
            if os.path.exists(master_firmware):
                master_version = version_mapping.get(master_type)
                logger.info(' -> Updating Master firmware to {0}'.format(master_version if master_version else 'unknown version'))
                error = update_master_firmware(master_type, master_firmware, master_version)
                if error:
                    errors.append(error)

            for module, filename, arguments in [('energy', FIRMWARE_FILES['energy'], []),
                                                ('power', FIRMWARE_FILES['power'], ['--8'])]:
                if os.path.exists(filename):
                    energy_version = version_mapping.get(module)
                    logger.info(' -> Updating {0} firmware to {1}'.format(module, energy_version if energy_version else 'unknown version'))
                    error = update_energy_firmware(module, filename, energy_version, arguments)
                    if error:
                        errors.append(error)

            for module in MODULE_TYPES:
                module_firmware = FIRMWARE_FILES[module]
                module_version = version_mapping.get(module)
                if os.path.exists(module_firmware):
                    logger.info(' -> Updating {0} firmware to {1}'.format(module, module_version if module_version else 'unknown version'))
                    error = update_module_firmware(module, module_firmware, module_version)
                    if error:
                        errors.append(error)

            logger.info('Checking master communication')
            check_master_communication()

        gateway_frontend = FIRMWARE_FILES['gateway_frontend']
        if os.path.exists(gateway_frontend):
            frontend_version = version_mapping.get('gateway_frontend')
            logger.info(' -> Updating Gateway frontend to {0}'.format(frontend_version if frontend_version else 'unknown version'))
            error = update_gateway_frontend(gateway_frontend, date, frontend_version)
            if error:
                errors.append(error)

        if os.path.exists(gateway_frontend) or os.path.exists(gateway_service):
            clean_update_backups()

        logger.info(' -> Starting services')
        start_services()
        services_running = True

        logger.info(' -> Waiting for health check')
        check_gateway_health()

    except Exception as exc:
        logger.exception('Unexpected exception updating')
        errors.append(exc)
        # TODO: rollback
    finally:
        if not services_running:
            logger.info(' -> Starting services')
            start_services()

        logger.info(' -> Running cleanup')
        cmd('rm -v -rf {}/*'.format(update_dir), shell=True)

        if errors:
            logger.error('Exceptions:')
            for error in errors:
                logger.error('- {0}'.format(error))
            raise errors[0]

        config.set('OpenMotics', 'version', version)
        temp_file = constants.get_config_file() + '.update'
        with open(temp_file, 'w') as configfile:
            config.write(configfile)
        shutil.move(temp_file, constants.get_config_file())
        cmd(['sync'])

        if os.path.exists('/tmp/post_update_reboot'):
            logger.info('Scheduling reboot in 5 minutes')
            subprocess.Popen('sleep 300 && reboot', close_fds=True, shell=True)

        logger.info('DONE')
        logger.info('exit 0')
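
The finally block above persists the new version with a write-to-temp-then-rename step, so a crash mid-write cannot leave a truncated config file behind. A minimal standalone sketch of that pattern (function and file names are illustrative, not from the updater):

import shutil
from configparser import ConfigParser

def save_config_atomically(config, path):
    """Write config to a sibling temp file, then rename it into place."""
    temp_path = path + '.update'
    with open(temp_path, 'w') as configfile:
        config.write(configfile)
    # rename is atomic on POSIX when source and target share a filesystem
    shutil.move(temp_path, path)

config = ConfigParser()
config.add_section('OpenMotics')
config.set('OpenMotics', 'version', '1.2.3')  # illustrative values
save_config_atomically(config, '/tmp/openmotics.conf')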
Ejemplo n.º 33
0
class MultiPortConfig(object):

    HW_LB = "HW"

    @staticmethod
    def float_x_plus_one_tenth_of_y(x, y):
        return float(x) + float(y) / 10.0

    @staticmethod
    def make_str(base, iterator):
        return ' '.join((base.format(x) for x in iterator))

    @classmethod
    def make_range_str(cls, base, start, stop=0, offset=0):
        if offset and not stop:
            stop = start + offset
        return cls.make_str(base, range(start, stop))

    @staticmethod
    def parser_get(parser, section, key, default=None):
        if parser.has_option(section, key):
            return parser.get(section, key)
        return default

    @staticmethod
    def make_ip_addr(ip, mask):
        """
        :param ip: ip address
        :type ip: str
        :param mask: netmask or prefix length, e.g. '255.255.255.0' or '24'
        :type mask: str
        :return: interface
        :rtype: IPv4Interface
        """

        try:
            return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
        except (TypeError, ValueError):
            # None so we can skip later
            return None
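    # Illustrative usage (not part of the original class): ip_interface
    # accepts either a dotted netmask or a prefix length, so
    #   MultiPortConfig.make_ip_addr('192.168.1.1', '255.255.255.0')
    #   MultiPortConfig.make_ip_addr('192.168.1.1', '24')
    # both yield IPv4Interface('192.168.1.1/24'); bad input returns None.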

    @classmethod
    def validate_ip_and_prefixlen(cls, ip_addr, prefixlen):
        ip_addr = cls.make_ip_addr(ip_addr, prefixlen)
        return ip_addr.ip.exploded, ip_addr.network.prefixlen

    def __init__(self,
                 topology_file,
                 config_tpl,
                 tmp_file,
                 vnfd_helper,
                 vnf_type='CGNAT',
                 lb_count=2,
                 worker_threads=3,
                 worker_config='1C/1T',
                 lb_config='SW',
                 socket=0):

        super(MultiPortConfig, self).__init__()
        self.topology_file = topology_file
        self.worker_config = worker_config.split('/')[1].lower()
        self.worker_threads = self.get_worker_threads(worker_threads)
        self.vnf_type = vnf_type
        self.pipe_line = 0
        self.vnfd_helper = vnfd_helper
        self.write_parser = ConfigParser()
        self.read_parser = ConfigParser()
        self.read_parser.read(config_tpl)
        self.master_core = self.read_parser.get("PIPELINE0", "core")
        self.master_tpl = self.get_config_tpl_data('MASTER')
        self.arpicmp_tpl = self.get_config_tpl_data('ARPICMP')
        self.txrx_tpl = self.get_config_tpl_data('TXRX')
        self.loadb_tpl = self.get_config_tpl_data('LOADB')
        self.vnf_tpl = self.get_config_tpl_data(vnf_type)
        self.swq = 0
        self.lb_count = int(lb_count)
        self.lb_config = lb_config
        self.tmp_file = os.path.join("/tmp", tmp_file)
        self.pktq_out_os = []
        self.socket = socket
        self.start_core = ""
        self.pipeline_counter = ""
        self.txrx_pipeline = ""
        self._port_pairs = None
        self.all_ports = []
        self.port_pair_list = []
        self.lb_to_port_pair_mapping = {}
        self.init_eal()

        self.lb_index = None
        self.mul = 0
        self.port_pairs = []
        self.ports_len = 0
        self.prv_que_handler = None
        self.vnfd = None
        self.rules = None
        self.pktq_out = []

    @staticmethod
    def gen_core(core):
        # return "s{}c{}".format(self.socket, core)
        # don't use sockets for VNFs, because we don't want to have to
        # adjust VM CPU topology.  It is virtual anyway
        return str(core)

    def make_port_pairs_iter(self, operand, iterable):
        return (operand(self.vnfd_helper.port_num(x), y) for y in iterable
                for x in chain.from_iterable(self.port_pairs))

    def make_range_port_pairs_iter(self, operand, start, end):
        return self.make_port_pairs_iter(operand, range(start, end))

    def init_eal(self):
        lines = ['[EAL]\n']
        vpci = (v['virtual-interface']["vpci"]
                for v in self.vnfd_helper.interfaces)
        lines.extend('w = {0}\n'.format(item) for item in vpci)
        lines.append('\n')
        with open(self.tmp_file, 'w') as fh:
            fh.writelines(lines)
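    # init_eal() above emits a DPDK EAL device-whitelist section; with two
    # interfaces the generated tmp_file would start roughly like this
    # (PCI addresses illustrative):
    #   [EAL]
    #   w = 0000:00:04.0
    #   w = 0000:00:05.0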

    def update_timer(self):
        timer_tpl = self.get_config_tpl_data('TIMER')
        timer_tpl['core'] = self.gen_core(self.start_core)
        self.update_write_parser(timer_tpl)
        self.start_core += 1

    def get_config_tpl_data(self, type_value):
        for section in self.read_parser.sections():
            if self.read_parser.has_option(section, 'type'):
                if type_value == self.read_parser.get(section, 'type'):
                    tpl = OrderedDict(self.read_parser.items(section))
                    return tpl

    def get_txrx_tpl_data(self, value):
        for section in self.read_parser.sections():
            if self.read_parser.has_option(section, 'pipeline_txrx_type'):
                if value == self.read_parser.get(section,
                                                 'pipeline_txrx_type'):
                    tpl = OrderedDict(self.read_parser.items(section))
                    return tpl

    def init_write_parser_template(self, type_value='ARPICMP'):
        for section in self.read_parser.sections():
            if type_value == self.parser_get(self.read_parser, section, 'type',
                                             object()):
                self.start_core = self.read_parser.getint(section, 'core')
                self.pipeline_counter = self.read_parser.getint(
                    section, 'core')
                self.txrx_pipeline = self.read_parser.getint(section, 'core')
                return
            self.write_parser.add_section(section)
            for name, value in self.read_parser.items(section):
                self.write_parser.set(section, name, value)

    def update_write_parser(self, data):
        section = "PIPELINE{0}".format(self.pipeline_counter)
        self.write_parser.add_section(section)
        for name, value in data.items():
            self.write_parser.set(section, name, value)

    def get_worker_threads(self, worker_threads):
        if self.worker_config == '1t':
            return worker_threads
        else:
            return worker_threads - worker_threads % 2

    def generate_next_core_id(self):
        if self.worker_config == '1t':
            self.start_core += 1
            return

        try:
            self.start_core = '{}h'.format(int(self.start_core))
        except ValueError:
            self.start_core = int(self.start_core[:-1]) + 1

    def get_lb_count(self):
        self.lb_count = int(min(len(self.port_pair_list), self.lb_count))

    def generate_lb_to_port_pair_mapping(self):
        self.lb_to_port_pair_mapping = defaultdict(int)
        port_pair_count = len(self.port_pair_list)
        lb_pair_count = int(port_pair_count / self.lb_count)
        extra = port_pair_count % self.lb_count
        extra_iter = repeat(lb_pair_count + 1, extra)
        norm_iter = repeat(lb_pair_count, port_pair_count - extra)
        new_values = {
            i: v
            for i, v in enumerate(chain(extra_iter, norm_iter), 1)
        }
        self.lb_to_port_pair_mapping.update(new_values)
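    # Worked example (illustrative): 3 port pairs and lb_count=2 give
    # lb_pair_count=1 and extra=1, so chain() yields (2, 1, 1) and the
    # mapping becomes {1: 2, 2: 1, 3: 1}; generate_config_data() later
    # slices port pairs per LB until the list is exhausted.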

    def set_priv_to_pub_mapping(self):
        port_nums = [
            tuple(self.vnfd_helper.port_nums(x)) for x in self.port_pair_list
        ]
        return "".join(str(y).replace(" ", "") for y in port_nums)

    def set_priv_que_handler(self):
        # iterated twice, can't be generator
        priv_to_pub_map = [
            tuple(self.vnfd_helper.port_nums(x)) for x in self.port_pairs
        ]
        # must be list to use .index()
        port_list = list(chain.from_iterable(priv_to_pub_map))
        uplink_ports = (x[0] for x in priv_to_pub_map)
        self.prv_que_handler = '({})'.format("".join(
            ("{},".format(port_list.index(x)) for x in uplink_ports)))

    def generate_arp_route_tbl(self):
        arp_route_tbl_tmpl = "({port0_dst_ip_hex},{port0_netmask_hex},{port_num}," \
                             "{next_hop_ip_hex})"

        def build_arp_config(port):
            dpdk_port_num = self.vnfd_helper.port_num(port)
            interface = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            # We must use the dst because we are on the VNF and we need to
            # reach the TG.
            dst_port0_ip = ipaddress.ip_interface(
                six.text_type("%s/%s" %
                              (interface["dst_ip"], interface["netmask"])))

            arp_vars = {
                "port0_dst_ip_hex":
                ip_to_hex(dst_port0_ip.network.network_address.exploded),
                "port0_netmask_hex":
                ip_to_hex(dst_port0_ip.network.netmask.exploded),
                # this is the port num that contains port0 subnet and next_hop_ip_hex
                # this is LINKID which should be based on DPDK port number
                "port_num":
                dpdk_port_num,
                # next hop is dst in this case
                # must be within subnet
                "next_hop_ip_hex":
                ip_to_hex(dst_port0_ip.ip.exploded),
            }
            return arp_route_tbl_tmpl.format(**arp_vars)

        return ' '.join(build_arp_config(port) for port in self.all_ports)
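    # One generated entry would look roughly like the following, assuming
    # ip_to_hex() renders dotted quads as plain hex (values illustrative):
    # dst 152.16.100.20/24 on DPDK port 0 ->
    #   (98106400,ffffff00,0,98106414)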

    def generate_arpicmp_data(self):
        swq_in_str = self.make_range_str('SWQ{}',
                                         self.swq,
                                         offset=self.lb_count)
        self.swq += self.lb_count
        swq_out_str = self.make_range_str('SWQ{}',
                                          self.swq,
                                          offset=self.lb_count)
        self.swq += self.lb_count
        # ports_mac_list is disabled for some reason

        # mac_iter = (self.vnfd_helper.find_interface(name=port)['virtual-interface']['local_mac']
        #             for port in self.all_ports)
        pktq_in_iter = ('RXQ{}.0'.format(self.vnfd_helper.port_num(x[0]))
                        for x in self.port_pair_list)

        arpicmp_data = {
            'core': self.gen_core(self.start_core),
            'pktq_in': swq_in_str,
            'pktq_out': swq_out_str,
            # we need to disable ports_mac_list?
            # it looks like ports_mac_list is no longer required
            # 'ports_mac_list': ' '.join(mac_iter),
            'pktq_in_prv': ' '.join(pktq_in_iter),
            'prv_to_pub_map': self.set_priv_to_pub_mapping(),
            'arp_route_tbl': self.generate_arp_route_tbl(),
            # nd_route_tbl must be set or we get a segfault on random OpenStack IPv6 traffic
            # 'nd_route_tbl': "(0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)"
            # safe default?  route discard prefix to localhost
            'nd_route_tbl': "(0100::,64,0,::1)"
        }
        self.pktq_out_os = swq_out_str.split(' ')
        # HWLB is run-to-completion, so override the pktq_in/pktq_out
        if self.lb_config == self.HW_LB:
            self.swq = 0
            swq_in_str = \
                self.make_range_str('SWQ{}', self.swq,
                                    offset=(self.lb_count * self.worker_threads))
            arpicmp_data['pktq_in'] = swq_in_str
            # Workaround: port_pairs will not be populated during the arp pipeline
            self.port_pairs = self.port_pair_list
            port_iter = \
                self.make_port_pairs_iter(self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_out = self.make_str('TXQ{}', port_iter)
            arpicmp_data['pktq_out'] = pktq_out

        return arpicmp_data

    def generate_final_txrx_data(self):
        swq_start = self.swq - self.ports_len * self.worker_threads

        txq_start = 0
        txq_end = self.worker_threads

        pktq_out_iter = self.make_range_port_pairs_iter(
            self.float_x_plus_one_tenth_of_y, txq_start, txq_end)

        swq_str = self.make_range_str('SWQ{}', swq_start, self.swq)
        txq_str = self.make_str('TXQ{}', pktq_out_iter)
        rxtx_data = {
            'pktq_in': swq_str,
            'pktq_out': txq_str,
            'pipeline_txrx_type': 'TXTX',
            'core': self.gen_core(self.start_core),
        }
        pktq_in = rxtx_data['pktq_in']
        pktq_in = '{0} {1}'.format(pktq_in,
                                   self.pktq_out_os[self.lb_index - 1])
        rxtx_data['pktq_in'] = pktq_in
        self.pipeline_counter += 1
        return rxtx_data

    def generate_initial_txrx_data(self):
        pktq_iter = self.make_range_port_pairs_iter(
            self.float_x_plus_one_tenth_of_y, 0, self.worker_threads)

        rxq_str = self.make_str('RXQ{}', pktq_iter)
        swq_str = self.make_range_str('SWQ{}', self.swq, offset=self.ports_len)
        txrx_data = {
            'pktq_in': rxq_str,
            'pktq_out': swq_str + ' SWQ{0}'.format(self.lb_index - 1),
            'pipeline_txrx_type': 'RXRX',
            'core': self.gen_core(self.start_core),
        }
        self.pipeline_counter += 1
        return txrx_data

    def generate_lb_data(self):
        pktq_in = self.make_range_str('SWQ{}', self.swq, offset=self.ports_len)
        self.swq += self.ports_len

        offset = self.ports_len * self.worker_threads
        pktq_out = self.make_range_str('SWQ{}', self.swq, offset=offset)
        self.pktq_out = pktq_out.split()

        self.swq += (self.ports_len * self.worker_threads)
        lb_data = {
            'prv_que_handler': self.prv_que_handler,
            'pktq_in': pktq_in,
            'pktq_out': pktq_out,
            'n_vnf_threads': str(self.worker_threads),
            'core': self.gen_core(self.start_core),
        }
        self.pipeline_counter += 1
        return lb_data

    def generate_vnf_data(self):
        if self.lb_config == self.HW_LB:
            port_iter = self.make_port_pairs_iter(
                self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_in = self.make_str('RXQ{}', port_iter)

            self.mul += 1
            port_iter = self.make_port_pairs_iter(
                self.float_x_plus_one_tenth_of_y, [self.mul])
            pktq_out = self.make_str('TXQ{}', port_iter)

            pipe_line_data = {
                'pktq_in': pktq_in,
                'pktq_out': pktq_out + ' SWQ{0}'.format(self.swq),
                'prv_que_handler': self.prv_que_handler,
                'core': self.gen_core(self.start_core),
            }
            self.swq += 1
        else:
            pipe_line_data = {
                'pktq_in':
                ' '.join(
                    (self.pktq_out.pop(0) for _ in range(self.ports_len))),
                'pktq_out':
                self.make_range_str('SWQ{}', self.swq, offset=self.ports_len),
                'prv_que_handler':
                self.prv_que_handler,
                'core':
                self.gen_core(self.start_core),
            }
            self.swq += self.ports_len

        if self.vnf_type in ('ACL', 'VFW'):
            pipe_line_data.pop('prv_que_handler')

        if self.vnf_tpl.get('vnf_set'):
            public_ip_port_range_list = self.vnf_tpl[
                'public_ip_port_range'].split(':')
            ip_in_hex = '{:x}'.format(
                int(public_ip_port_range_list[0], 16) + self.lb_index - 1)
            public_ip_port_range_list[0] = ip_in_hex
            self.vnf_tpl['public_ip_port_range'] = ':'.join(
                public_ip_port_range_list)

        self.pipeline_counter += 1
        return pipe_line_data

    def generate_config_data(self):
        self.init_write_parser_template()

        # use master core for master, don't use self.start_core
        self.write_parser.set('PIPELINE0', 'core',
                              self.gen_core(self.master_core))
        arpicmp_data = self.generate_arpicmp_data()
        self.arpicmp_tpl.update(arpicmp_data)
        self.update_write_parser(self.arpicmp_tpl)

        self.start_core += 1
        if self.vnf_type == 'CGNAPT':
            self.pipeline_counter += 1
            self.update_timer()

        for lb in self.lb_to_port_pair_mapping:
            self.lb_index = lb
            self.mul = 0
            port_pair_count = self.lb_to_port_pair_mapping[lb]
            if not self.port_pair_list:
                continue

            self.port_pairs = self.port_pair_list[:port_pair_count]
            self.port_pair_list = self.port_pair_list[port_pair_count:]
            self.ports_len = port_pair_count * 2
            self.set_priv_que_handler()
            if self.lb_config == 'SW':
                txrx_data = self.generate_initial_txrx_data()
                self.txrx_tpl.update(txrx_data)
                self.update_write_parser(self.txrx_tpl)
                self.start_core += 1
                lb_data = self.generate_lb_data()
                self.loadb_tpl.update(lb_data)
                self.update_write_parser(self.loadb_tpl)
                self.start_core += 1

            for i in range(self.worker_threads):
                vnf_data = self.generate_vnf_data()
                if not self.vnf_tpl:
                    self.vnf_tpl = {}
                self.vnf_tpl.update(vnf_data)
                self.update_write_parser(self.vnf_tpl)
                try:
                    self.vnf_tpl.pop('vnf_set')
                except KeyError:
                    pass
                else:
                    self.vnf_tpl.pop('public_ip_port_range')
                self.generate_next_core_id()

            if self.lb_config == 'SW':
                txrx_data = self.generate_final_txrx_data()
                self.txrx_tpl.update(txrx_data)
                self.update_write_parser(self.txrx_tpl)
                self.start_core += 1
            self.vnf_tpl = self.get_config_tpl_data(self.vnf_type)

    def generate_config(self):
        self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
        self.port_pair_list = self._port_pairs.port_pair_list
        self.all_ports = self._port_pairs.all_ports

        self.get_lb_count()
        self.generate_lb_to_port_pair_mapping()
        self.generate_config_data()
        self.write_parser.write(sys.stdout)
        with open(self.tmp_file, 'a') as tfh:
            self.write_parser.write(tfh)

    def generate_link_config(self):
        def build_args(port):
            # lookup interface by name
            virtual_interface = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            local_ip = virtual_interface["local_ip"]
            netmask = virtual_interface["netmask"]
            port_num = self.vnfd_helper.port_num(port)
            port_ip, prefix_len = self.validate_ip_and_prefixlen(
                local_ip, netmask)
            return LINK_CONFIG_TEMPLATE.format(port_num, port_ip, prefix_len)

        return ''.join(build_args(port) for port in self.all_ports)

    def get_route_data(self, src_key, data_key, port):
        route_list = self.vnfd['vdu'][0].get(src_key, [])
        try:
            return next((route[data_key]
                         for route in route_list if route['if'] == port), None)
        except (TypeError, StopIteration, KeyError):
            return None

    def get_ports_gateway(self, port):
        return self.get_route_data('routing_table', 'gateway', port)

    def get_ports_gateway6(self, port):
        return self.get_route_data('nd_route_tbl', 'gateway', port)

    def get_netmask_gateway(self, port):
        return self.get_route_data('routing_table', 'netmask', port)

    def get_netmask_gateway6(self, port):
        return self.get_route_data('nd_route_tbl', 'netmask', port)

    def generate_arp_config(self):
        arp_config = []
        for port in self.all_ports:
            # ignore gateway, always use TG IP
            # gateway = self.get_ports_gateway(port)
            vintf = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            dst_mac = vintf["dst_mac"]
            dst_ip = vintf["dst_ip"]
            # arp_config.append(
            #     (self.vnfd_helper.port_num(port), gateway, dst_mac, self.txrx_pipeline))
            # so dst_mac is the TG dest mac, so we need TG dest IP.
            # should be dpdk_port_num
            arp_config.append((self.vnfd_helper.port_num(port), dst_ip,
                               dst_mac, self.txrx_pipeline))

        return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values)
                          for values in arp_config))

    def generate_arp_config6(self):
        arp_config6 = []
        for port in self.all_ports:
            # ignore gateway, always use TG IP
            # gateway6 = self.get_ports_gateway6(port)
            vintf = self.vnfd_helper.find_interface(
                name=port)["virtual-interface"]
            dst_mac6 = vintf["dst_mac"]
            dst_ip6 = vintf["dst_ip"]
            # arp_config6.append(
            #     (self.vnfd_helper.port_num(port), gateway6, dst_mac6, self.txrx_pipeline))
            arp_config6.append((self.vnfd_helper.port_num(port), dst_ip6,
                                dst_mac6, self.txrx_pipeline))

        return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values)
                          for values in arp_config6))

    def generate_action_config(self):
        port_list = (self.vnfd_helper.port_num(p) for p in self.all_ports)
        if self.vnf_type == "VFW":
            template = FW_ACTION_TEMPLATE
        else:
            template = ACTION_TEMPLATE

        return ''.join((template.format(port) for port in port_list))

    def get_ip_from_port(self, port):
        # we can't use gateway because in OpenStack gateways interfere with floating ip routing
        # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
        vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
        ip = vintf["local_ip"]
        netmask = vintf["netmask"]
        return self.make_ip_addr(ip, netmask)

    def get_network_and_prefixlen_from_ip_of_port(self, port):
        ip_addr = self.get_ip_from_port(port)
        # handle cases with no gateway
        if ip_addr:
            return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
        else:
            return None, None

    def generate_rule_config(self):
        cmd = 'acl' if self.vnf_type == "ACL" else "vfw"
        rules_config = self.rules if self.rules else ''
        new_rules = []
        new_ipv6_rules = []
        pattern = 'p {0} add {1} {2} {3} {4} {5} 0 65535 0 65535 0 0 {6}'
        for src_intf, dst_intf in self.port_pair_list:
            src_port = self.vnfd_helper.port_num(src_intf)
            dst_port = self.vnfd_helper.port_num(dst_intf)

            src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(
                src_intf)
            dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(
                dst_intf)
            # ignore entries with empty values
            if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
                new_rules.append(
                    (cmd, self.txrx_pipeline, src_net, src_prefix_len, dst_net,
                     dst_prefix_len, dst_port))
                new_rules.append(
                    (cmd, self.txrx_pipeline, dst_net, dst_prefix_len, src_net,
                     src_prefix_len, src_port))

            # src_net = self.get_ports_gateway6(port_pair[0])
            # src_prefix_len = self.get_netmask_gateway6(port_pair[0])
            # dst_net = self.get_ports_gateway6(port_pair[1])
            # dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
            # # ignore entries with empty values
            # if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
            #     new_ipv6_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
            #                            dst_net, dst_prefix_len, dst_port))
            #     new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
            #                            src_net, src_prefix_len, src_port))

        acl_apply = "\np %s applyruleset" % cmd
        new_rules_config = '\n'.join(
            pattern.format(*values)
            for values in chain(new_rules, new_ipv6_rules))
        return ''.join([rules_config, new_rules_config, acl_apply])

    def generate_script_data(self):
        self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
        self.port_pair_list = self._port_pairs.port_pair_list
        self.get_lb_count()
        script_data = {
            'link_config': self.generate_link_config(),
            'arp_config': self.generate_arp_config(),
            # disable IPv6 for now
            # 'arp_config6': self.generate_arp_config6(),
            'arp_config6': "",
            'actions': '',
            'rules': '',
        }

        if self.vnf_type in ('ACL', 'VFW'):
            script_data.update({
                'actions': self.generate_action_config(),
                'rules': self.generate_rule_config(),
            })

        return script_data

    def generate_script(self, vnfd, rules=None):
        self.vnfd = vnfd
        self.rules = rules
        script_data = self.generate_script_data()
        script = SCRIPT_TPL.format(**script_data)
        if self.lb_config == self.HW_LB:
            script += 'set fwd rxonly'
            hwlb_tpl = """
set_sym_hash_ena_per_port {0} enable
set_hash_global_config {0} simple_xor ipv4-udp enable
set_sym_hash_ena_per_port {1} enable
set_hash_global_config {1} simple_xor ipv4-udp enable
set_hash_input_set {0} ipv4-udp src-ipv4 udp-src-port add
set_hash_input_set {1} ipv4-udp dst-ipv4 udp-dst-port add
set_hash_input_set {0} ipv6-udp src-ipv6 udp-src-port add
set_hash_input_set {1} ipv6-udp dst-ipv6 udp-dst-port add
"""
            for port_pair in self.port_pair_list:
                script += hwlb_tpl.format(
                    *(self.vnfd_helper.port_nums(port_pair)))
        return script
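
Driving this class end-to-end takes a parsed VNFD plus an INI template; a rough usage sketch, where VnfdHelper and the file names are assumptions rather than part of the snippet:

helper = VnfdHelper(vnfd)  # assumed wrapper providing port_num()/interfaces
mpc = MultiPortConfig(topology_file='topology.yaml',
                      config_tpl='acl_config_template.ini',
                      tmp_file='acl_config.ini',
                      vnfd_helper=helper,
                      vnf_type='ACL',
                      lb_config='SW')
mpc.generate_config()               # writes [EAL] plus PIPELINE sections
script = mpc.generate_script(vnfd)  # link/arp/action/rule bootstrap commands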
Ejemplo n.º 34
0
def set(self, section, option, value, *args, **kwargs):
    ConfigParser.set(self, section, option, str(value), *args, **kwargs)
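
This one-line override exists because Python 3's configparser only accepts string values; setting an int or bool raises TypeError. A self-contained version of the same idea (class name is illustrative):

from configparser import ConfigParser

class StrCoercingConfigParser(ConfigParser):
    """Coerce values through str() so callers can pass ints, floats, etc."""
    def set(self, section, option, value=None, *args, **kwargs):
        ConfigParser.set(self, section, option, str(value), *args, **kwargs)

parser = StrCoercingConfigParser()
parser.add_section('limits')
parser.set('limits', 'timeout', 30)  # plain ConfigParser would raise TypeError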
Ejemplo n.º 35
0
class RepositoryZypper(RepositoryBase):
    """
    Implements repo handling for zypper package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom zypper arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_zypper_dir`
            shared directory between image root and build system root

        * :attr:`runtime_zypper_config_file`
            zypper runtime config file name

        * :attr:`runtime_zypp_config_file`
            libzypp runtime config file name

        * :attr:`zypper_args`
            zypper caller args plus additional custom args

        * :attr:`command_env`
            customized os.environ for zypper

        * :attr:`runtime_zypper_config`
            Instance of ConfigParser

        :param list custom_args: zypper arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        self.repo_names = []

        # By default zypper points all actions at the root directory
        # of the image system. This information is passed as arguments
        # to zypper and adapted if the call runs as a chrooted
        # operation. Therefore the use of the shared location via
        # RootBind::mount_shared_directory is optional, but recommended
        # in order to make use of the repo cache
        manager_base = self.root_dir + self.shared_location

        self.shared_zypper_dir = {
            'pkg-cache-dir': manager_base + '/packages',
            'reposd-dir': manager_base + '/zypper/repos',
            'solv-cache-dir': manager_base + '/zypper/solv',
            'raw-cache-dir': manager_base + '/zypper/raw',
            'cache-dir': manager_base + '/zypper'
        }

        self.runtime_zypper_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )
        self.runtime_zypp_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.zypper_args = [
            '--non-interactive', '--no-gpg-checks',
            '--pkg-cache-dir', self.shared_zypper_dir['pkg-cache-dir'],
            '--reposd-dir', self.shared_zypper_dir['reposd-dir'],
            '--solv-cache-dir', self.shared_zypper_dir['solv-cache-dir'],
            '--cache-dir', self.shared_zypper_dir['cache-dir'],
            '--raw-cache-dir', self.shared_zypper_dir['raw-cache-dir'],
            '--config', self.runtime_zypper_config_file.name
        ] + self.custom_args

        self.command_env = self._create_zypper_runtime_environment()

        # config file parameters for zypper tool
        self.runtime_zypper_config = ConfigParser()
        self.runtime_zypper_config.add_section('main')

        # config file parameters for libzypp library
        self.runtime_zypp_config = ConfigParser()
        self.runtime_zypp_config.add_section('main')
        self.runtime_zypp_config.set(
            'main', 'cachedir', self.shared_zypper_dir['cache-dir']
        )
        self.runtime_zypp_config.set(
            'main', 'metadatadir', self.shared_zypper_dir['raw-cache-dir']
        )
        self.runtime_zypp_config.set(
            'main', 'solvfilesdir', self.shared_zypper_dir['solv-cache-dir']
        )
        self.runtime_zypp_config.set(
            'main', 'packagesdir', self.shared_zypper_dir['pkg-cache-dir']
        )

        self._write_runtime_config()
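
        # The libzypp config written above would look roughly like this
        # (paths depend on root_dir and shared_location):
        #   [main]
        #   cachedir = <manager_base>/zypper
        #   metadatadir = <manager_base>/zypper/raw
        #   solvfilesdir = <manager_base>/zypper/solv
        #   packagesdir = <manager_base>/packages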

    def use_default_location(self):
        """
        Setup zypper repository operations to store all data
        in the default places
        """
        self.shared_zypper_dir['reposd-dir'] = \
            self.root_dir + '/etc/zypp/repos.d'
        self.zypper_args = [
            '--non-interactive', '--no-gpg-checks'
        ] + self.custom_args
        self.command_env = dict(os.environ, LANG='C')

    def runtime_config(self):
        """
        zypper runtime configuration and environment
        """
        return {
            'zypper_args': self.zypper_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None
    ):
        """
        Add zypper repository

        :param string name: repository name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_zypper_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir,
                'addrepo',
                '--refresh',
                '--type', self._translate_repo_type(repo_type),
                '--keep-packages',
                '-C',
                uri,
                name
            ],
            self.command_env
        )
        if prio:
            Command.run(
                ['zypper'] + self.zypper_args + [
                    '--root', self.root_dir,
                    'modifyrepo', '--priority', format(prio), name
                ],
                self.command_env
            )
        self._restore_package_cache()

    def delete_repo(self, name):
        """
        Delete zypper repository

        :param string name: repository name
        """
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir, 'removerepo', name
            ],
            self.command_env
        )

    def delete_all_repos(self):
        """
        Delete all zypper repositories
        """
        Path.wipe(self.shared_zypper_dir['reposd-dir'])
        Path.create(self.shared_zypper_dir['reposd-dir'])

    def cleanup_unused_repos(self):
        """
        Delete unused zypper repositories

        zypper creates a system solvable which is unwanted for the
        purpose of building images. In addition zypper fails with
        an error message 'Failed to cache rpm database' if such a
        system solvable exists and a new root system is created

        All other repository configurations which are not used for
        this build must be removed too, otherwise they are taken into
        account for the package installations
        """
        solv_dir = self.shared_zypper_dir['solv-cache-dir']
        Path.wipe(solv_dir + '/@System')

        repos_dir = self.shared_zypper_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_zypper_runtime_environment(self):
        for zypper_dir in list(self.shared_zypper_dir.values()):
            Path.create(zypper_dir)
        return dict(
            os.environ,
            LANG='C',
            ZYPP_CONF=self.runtime_zypp_config_file.name
        )

    def _write_runtime_config(self):
        with open(self.runtime_zypper_config_file.name, 'w') as config:
            self.runtime_zypper_config.write(config)
        with open(self.runtime_zypp_config_file.name, 'w') as config:
            self.runtime_zypp_config.write(config)

    def _translate_repo_type(self, repo_type):
        """
            Translate kiwi supported common repo type names from the schema
            into the name the zypper package manager understands
        """
        zypper_type_for = {
            'rpm-md': 'YUM',
            'rpm-dir': 'Plaindir',
            'yast2': 'YaST'
        }
        try:
            return zypper_type_for[repo_type]
        except Exception:
            raise KiwiRepoTypeUnknown(
                'Unsupported zypper repo type: %s' % repo_type
            )

    def _backup_package_cache(self):
        """
        Preserve the package cache, which zypper would otherwise remove
        when no repo file is found. That situation is normal for an
        image build process, which sets up and removes repos for
        building at runtime
        """
        self._move_package_cache(backup=True)

    def _restore_package_cache(self):
        """
        restore preserved package cache at the location passed to zypper
        """
        self._move_package_cache(restore=True)

    def _move_package_cache(self, backup=False, restore=False):
        package_cache = self.shared_location + '/packages'
        package_cache_moved = package_cache + '.moved'
        if backup and os.path.exists(package_cache):
            Command.run(
                ['mv', '-f', package_cache, package_cache_moved]
            )
        elif restore and os.path.exists(package_cache_moved):
            Command.run(
                ['mv', '-f', package_cache_moved, package_cache]
            )

    def __del__(self):
        self._restore_package_cache()
Ejemplo n.º 36
0
class RepositoryZypper(RepositoryBase):
    """
    Implements repo handling for zypper package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom zypper arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_zypper_dir`
            shared directory between image root and build system root

        * :attr:`runtime_zypper_config_file`
            zypper runtime config file name

        * :attr:`runtime_zypp_config_file`
            libzypp runtime config file name

        * :attr:`zypper_args`
            zypper caller args plus additional custom args

        * :attr:`command_env`
            customized os.environ for zypper

        * :attr:`runtime_zypper_config`
            Instance of ConfigParser

        :param list custom_args: zypper arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        self.gpgcheck = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments used for zypp config only
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpgcheck = True

        self.repo_names = []

        # By default zypper points all actions at the root directory
        # of the image system. This information is passed as arguments
        # to zypper and adapted if the call runs as a chrooted
        # operation. Therefore the use of the shared location via
        # RootBind::mount_shared_directory is optional, but recommended
        # in order to make use of the repo cache
        manager_base = self.root_dir + self.shared_location

        self.shared_zypper_dir = {
            'pkg-cache-dir': os.sep.join(
                [manager_base, 'packages']
            ),
            'reposd-dir': os.sep.join(
                [manager_base, 'zypper/repos']
            ),
            'credentials-dir': os.sep.join(
                [manager_base, 'zypper/credentials']
            ),
            'solv-cache-dir': os.sep.join(
                [manager_base, 'zypper/solv']
            ),
            'raw-cache-dir': os.sep.join(
                [manager_base, 'zypper/raw']
            ),
            'cache-dir': os.sep.join(
                [manager_base, 'zypper']
            )
        }

        self.runtime_zypper_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )
        self.runtime_zypp_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.zypper_args = [
            '--non-interactive',
            '--pkg-cache-dir', self.shared_zypper_dir['pkg-cache-dir'],
            '--reposd-dir', self.shared_zypper_dir['reposd-dir'],
            '--solv-cache-dir', self.shared_zypper_dir['solv-cache-dir'],
            '--cache-dir', self.shared_zypper_dir['cache-dir'],
            '--raw-cache-dir', self.shared_zypper_dir['raw-cache-dir'],
            '--config', self.runtime_zypper_config_file.name
        ] + self.custom_args

        self.command_env = self._create_zypper_runtime_environment()

        # config file parameters for zypper tool
        self.runtime_zypper_config = ConfigParser()
        self.runtime_zypper_config.add_section('main')

        # config file parameters for libzypp library
        self.runtime_zypp_config = ConfigParser()
        self.runtime_zypp_config.add_section('main')
        self.runtime_zypp_config.set(
            'main', 'credentials.global.dir',
            self.shared_zypper_dir['credentials-dir']
        )
        if self.exclude_docs:
            self.runtime_zypp_config.set(
                'main', 'rpm.install.excludedocs', 'yes'
            )

        if self.gpgcheck:
            self.runtime_zypp_config.set(
                'main', 'gpgcheck', '1'
            )
        else:
            self.runtime_zypp_config.set(
                'main', 'gpgcheck', '0'
            )

        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup zypper repository operations to store all data
        in the default places
        """
        self.shared_zypper_dir['reposd-dir'] = \
            self.root_dir + '/etc/zypp/repos.d'
        self.shared_zypper_dir['credentials-dir'] = \
            self.root_dir + '/etc/zypp/credentials.d'
        self.zypper_args = [
            '--non-interactive',
        ] + self.custom_args
        self.command_env = dict(os.environ, LANG='C')

    def runtime_config(self):
        """
        zypper runtime configuration and environment
        """
        return {
            'zypper_args': self.zypper_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add zypper repository

        :param string name: repository name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        :param user: credentials username
        :param secret: credentials password
        :param credentials_file: zypper credentials file
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        if credentials_file:
            repo_secret = os.sep.join(
                [self.shared_zypper_dir['credentials-dir'], credentials_file]
            )
            if os.path.exists(repo_secret):
                Path.wipe(repo_secret)

            if user and secret:
                uri = ''.join([uri, '?credentials=', credentials_file])
                with open(repo_secret, 'w') as credentials:
                    credentials.write('username={0}{1}'.format(
                        user, os.linesep)
                    )
                    credentials.write('password={0}{1}'.format(
                        secret, os.linesep)
                    )
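                # The referenced credentials file then contains two lines,
                # e.g. (values illustrative):
                #   username=<user>
                #   password=<secret>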

        repo_file = ''.join(
            [self.shared_zypper_dir['reposd-dir'], '/', name, '.repo']
        )
        self.repo_names.append(''.join([name, '.repo']))

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir,
                'addrepo',
                '--refresh',
                '--type', self._translate_repo_type(repo_type),
                '--keep-packages',
                '-C',
                uri,
                name
            ],
            self.command_env
        )
        if prio or repo_gpgcheck is not None or pkg_gpgcheck is not None:
            repo_config = ConfigParser()
            repo_config.read(repo_file)
            if repo_gpgcheck is not None:
                repo_config.set(
                    name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
                )
            if pkg_gpgcheck is not None:
                repo_config.set(
                    name, 'pkg_gpgcheck', '1' if pkg_gpgcheck else '0'
                )
            if prio:
                repo_config.set(
                    name, 'priority', format(prio)
                )
            with open(repo_file, 'w') as repo:
                repo_config.write(repo)
        self._restore_package_cache()

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        for key in signing_keys:
            Command.run(['rpm', '--root', self.root_dir, '--import', key])

    def delete_repo(self, name):
        """
        Delete zypper repository

        :param string name: repository name
        """
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir, 'removerepo', name
            ],
            self.command_env
        )

    def delete_all_repos(self):
        """
        Delete all zypper repositories
        """
        Path.wipe(self.shared_zypper_dir['reposd-dir'])
        Path.create(self.shared_zypper_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete zypper repository cache

        The cache data for each repository is stored in a list of
        directories of the same name as the repository name. The method
        deletes these directories to cleanup the cache information

        :param string name: repository name
        """
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['pkg-cache-dir'], name])
        )
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['solv-cache-dir'], name])
        )
        Path.wipe(
            os.sep.join([self.shared_zypper_dir['raw-cache-dir'], name])
        )

    def cleanup_unused_repos(self):
        """
        Delete unused zypper repositories

        zypper creates a system solvable which is unwanted for the
        purpose of building images. In addition zypper fails with
        an error message 'Failed to cache rpm database' if such a
        system solvable exists and a new root system is created

        All other repository configurations which are not used for
        this build must be removed too, otherwise they are taken into
        account for the package installations
        """
        solv_dir = self.shared_zypper_dir['solv-cache-dir']
        Path.wipe(solv_dir + '/@System')

        repos_dir = self.shared_zypper_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_zypper_runtime_environment(self):
        for zypper_dir in list(self.shared_zypper_dir.values()):
            Path.create(zypper_dir)
        return dict(
            os.environ,
            LANG='C',
            ZYPP_CONF=self.runtime_zypp_config_file.name
        )

    def _write_runtime_config(self):
        with open(self.runtime_zypper_config_file.name, 'w') as config:
            self.runtime_zypper_config.write(config)
        with open(self.runtime_zypp_config_file.name, 'w') as config:
            self.runtime_zypp_config.write(config)

    def _translate_repo_type(self, repo_type):
        """
            Translate kiwi supported common repo type names from the schema
            into the name the zypper package manager understands
        """
        zypper_type_for = {
            'rpm-md': 'YUM',
            'rpm-dir': 'Plaindir',
            'yast2': 'YaST'
        }
        try:
            return zypper_type_for[repo_type]
        except Exception:
            raise KiwiRepoTypeUnknown(
                'Unsupported zypper repo type: %s' % repo_type
            )

    def _backup_package_cache(self):
        """
        Preserve the package cache, which zypper would otherwise remove
        when no repo file is found. That situation is normal for an
        image build process, which sets up and removes repos for
        building at runtime
        """
        self._move_package_cache(backup=True)

    def _restore_package_cache(self):
        """
        restore preserved package cache at the location passed to zypper
        """
        self._move_package_cache(restore=True)

    def _move_package_cache(self, backup=False, restore=False):
        package_cache = self.shared_location + '/packages'
        package_cache_moved = package_cache + '.moved'
        if backup and os.path.exists(package_cache):
            Command.run(
                ['mv', '-f', package_cache, package_cache_moved]
            )
        elif restore and os.path.exists(package_cache_moved):
            Command.run(
                ['mv', '-f', package_cache_moved, package_cache]
            )

    def __del__(self):
        self._restore_package_cache()
Ejemplo n.º 37
0
    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add zypper repository

        :param string name: repository name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: zypper repository priority
        :param dist: unused
        :param components: unused
        :param user: credentials username
        :param secret: credentials password
        :param credentials_file: zypper credentials file
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        if credentials_file:
            repo_secret = os.sep.join(
                [self.shared_zypper_dir['credentials-dir'], credentials_file]
            )
            if os.path.exists(repo_secret):
                Path.wipe(repo_secret)

            if user and secret:
                uri = ''.join([uri, '?credentials=', credentials_file])
                with open(repo_secret, 'w') as credentials:
                    credentials.write('username={0}{1}'.format(
                        user, os.linesep)
                    )
                    credentials.write('password={0}{1}'.format(
                        secret, os.linesep)
                    )

        repo_file = ''.join(
            [self.shared_zypper_dir['reposd-dir'], '/', name, '.repo']
        )
        self.repo_names.append(''.join([name, '.repo']))

        if os.path.exists(repo_file):
            Path.wipe(repo_file)

        self._backup_package_cache()
        Command.run(
            ['zypper'] + self.zypper_args + [
                '--root', self.root_dir,
                'addrepo',
                '--refresh',
                '--type', self._translate_repo_type(repo_type),
                '--keep-packages',
                '-C',
                uri,
                name
            ],
            self.command_env
        )
        if prio or repo_gpgcheck is not None or pkg_gpgcheck is not None:
            repo_config = ConfigParser()
            repo_config.read(repo_file)
            if repo_gpgcheck is not None:
                repo_config.set(
                    name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
                )
            if pkg_gpgcheck is not None:
                repo_config.set(
                    name, 'pkg_gpgcheck', '1' if pkg_gpgcheck else '0'
                )
            if prio:
                repo_config.set(
                    name, 'priority', format(prio)
                )
            with open(repo_file, 'w') as repo:
                repo_config.write(repo)
        self._restore_package_cache()
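
The post-addrepo step above works because zypper's .repo files are plain INI, so ConfigParser can set options the addrepo CLI does not expose. A standalone sketch of that read-modify-write (path and section name are illustrative; the section inside a .repo file matches the repository alias):

from configparser import ConfigParser

repo_file = '/etc/zypp/repos.d/example.repo'  # illustrative path
repo_config = ConfigParser()
repo_config.read(repo_file)
repo_config.set('example', 'priority', '90')
repo_config.set('example', 'pkg_gpgcheck', '1')
with open(repo_file, 'w') as repo:
    repo_config.write(repo)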
Ejemplo n.º 38
0
class SetupConfig(object):
    """Wrapper around the setup.cfg file if available.

    One reason is to clean these settings out of setup.cfg::

        [egg_info]
        tag_build = dev
        tag_svn_revision = true

    Another is for optional zest.releaser-specific settings::

        [zest.releaser]
        no-input = yes


    """

    config_filename = SETUP_CONFIG_FILE

    def __init__(self):
        """Grab the configuration (overridable for test purposes)"""
        # If there is a setup.cfg in the package, parse it
        if not os.path.exists(os.path.join(utils.PACKAGE_ROOT, self.config_filename)):
            self.config = None
            return
        self.config = ConfigParser()
        with codecs.open(self.config_filename, 'r', 'utf8') as fp:
            self.config.readfp(fp)

    def has_bad_commands(self):
        if self.config is None:
            return False
        if not self.config.has_section('egg_info'):
            # bail out early as the main section is not there
            return False
        bad = False
        # Check 1.
        if self.config.has_option('egg_info', 'tag_build'):
            # Might still be empty.
            value = self.config.get('egg_info', 'tag_build')
            if value:
                logger.warn("%s has [egg_info] tag_build set to %r",
                            self.config_filename, value)
                bad = True
        # Check 2.
        if self.config.has_option('egg_info', 'tag_svn_revision'):
            if self.config.getboolean('egg_info', 'tag_svn_revision'):
                value = self.config.get('egg_info', 'tag_svn_revision')
                logger.warn("%s has [egg_info] tag_svn_revision set to %r",
                            self.config_filename, value)
                bad = True
        return bad

    def fix_config(self):
        if not self.has_bad_commands():
            logger.warn("Cannot fix already fine %s.", self.config_filename)
            return
        if self.config.has_option('egg_info', 'tag_build'):
            self.config.set('egg_info', 'tag_build', '')
        if self.config.has_option('egg_info', 'tag_svn_revision'):
            self.config.set('egg_info', 'tag_svn_revision', 'false')
        with open(self.config_filename, 'w') as new_setup:
            self.config.write(new_setup)
        logger.info("New setup.cfg contents:")
        with open(self.config_filename) as new_setup:
            print(new_setup.read())

    def no_input(self):
        """Return whether the user wants to run in no-input mode.

        Enable this mode by adding a ``no-input`` option::

            [zest.releaser]
            no-input = yes

        The default when this option has not been set is False.

        Standard config rules apply, so you can use upper or lower or
        mixed case and specify 0, false, no or off for boolean False,
        and 1, on, true or yes for boolean True.
        """
        default = False
        if self.config is None:
            return default
        try:
            result = self.config.getboolean('zest.releaser', 'no-input')
        except (NoSectionError, NoOptionError, ValueError):
            return default
        return result

    def python_file_with_version(self):
        """Return Python filename with ``__version__`` marker, if configured.

        Enable this by adding a ``python-file-with-version`` option::

            [zest.releaser]
            python-file-with-version = reinout/maurits.py

        Return None when nothing has been configured.

        """
        default = None
        if self.config is None:
            return default
        try:
            result = self.config.get(
                'zest.releaser',
                'python-file-with-version')
        except (NoSectionError, NoOptionError, ValueError):
            return default
        return result
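
For reference, a setup.cfg that would trip has_bad_commands() while also enabling no-input mode could look like this (contents illustrative):

[egg_info]
tag_build = dev
tag_svn_revision = true

[zest.releaser]
no-input = yes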
Ejemplo n.º 39
0
    def export(self, export_context, subdir, root=False):
        """ See IFilesystemExporter.
        """
        content_type = 'text/comma-separated-values'

        # Enumerate exportable children
        # contentItems() yields (object_id, object) pairs; adapt the object
        exportable = self.context.contentItems()
        exportable = [x + (IFilesystemExporter(x[1], None), ) for x in exportable]
        # skip children for which no filesystem exporter adapter exists
        exportable = [x for x in exportable if x[2] is not None]

        objects_stream = StringIO()
        objects_csv_writer = writer(objects_stream)
        wf_stream = StringIO()
        wf_csv_writer = writer(wf_stream)

        if not root:
            subdir = '%s/%s' % (subdir, self.context.getId())

        try:
            wft = self.context.portal_workflow
        except AttributeError:
            # No workflow tool to export definitions from
            for object_id, object, ignored in exportable:
                objects_csv_writer.writerow(
                    (object_id, object.getPortalTypeName()))
        else:
            for object_id, object, ignored in exportable:
                objects_csv_writer.writerow(
                    (object_id, object.getPortalTypeName()))

                workflows = wft.getWorkflowsFor(object)
                for workflow in workflows:
                    workflow_id = workflow.getId()
                    state_variable = workflow.state_var
                    state_record = wft.getStatusOf(workflow_id, object)
                    if state_record is None:
                        continue
                    state = state_record.get(state_variable)
                    wf_csv_writer.writerow((object_id, workflow_id, state))

            export_context.writeDataFile('.workflow_states',
                                         text=wf_stream.getvalue(),
                                         content_type=content_type,
                                         subdir=subdir)

        export_context.writeDataFile('.objects',
                                     text=objects_stream.getvalue(),
                                     content_type=content_type,
                                     subdir=subdir)

        parser = ConfigParser()

        title = self.context.Title()
        description = self.context.Description()
        # encode if needed; ConfigParser does not support unicode !
        title_str = encode_if_needed(title, self._encoding)
        description_str = encode_if_needed(description, self._encoding)
        parser.set('DEFAULT', 'Title', title_str)
        parser.set('DEFAULT', 'Description', description_str)

        stream = StringIO()
        parser.write(stream)

        try:
            FolderishDAVAwareFileAdapter(self.context).export(
                export_context, subdir, root)
        except (AttributeError, MethodNotAllowed):
            export_context.writeDataFile('.properties',
                                         text=stream.getvalue(),
                                         content_type='text/plain',
                                         subdir=subdir)

        for id, object in self.context.objectItems():

            adapter = IFilesystemExporter(object, None)

            if adapter is not None:
                adapter.export(export_context, subdir)
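
The exporter above writes the title and description into the DEFAULT section of an in-memory parser. A small self-contained sketch of that trick (values are made up): the DEFAULT section always exists, so no add_section() call is needed, and the default optionxform lower-cases the keys on output:

from configparser import ConfigParser
from io import StringIO

parser = ConfigParser()
parser.set('DEFAULT', 'Title', 'My Folder')
parser.set('DEFAULT', 'Description', 'A folderish object')

stream = StringIO()
parser.write(stream)
print(stream.getvalue())
# [DEFAULT]
# title = My Folder
# description = A folderish object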
Example No. 40
def makeconfigfile(fname,beamlist,radarname,simparams_orig):
    """This will make the config file based off of the desired input parmeters.
    Inputs
        fname - Name of the file as a string.
        beamlist - A list of beams numbers used by the AMISRS
        radarname - A string that is the name of the radar being simulated.
        simparams_orig - A set of simulation parameters in a dictionary."""
    fname = Path(fname).expanduser()

    curpath = Path(__file__).resolve().parent
    d_file = curpath/'default.ini'
    fext = fname.suffix

    # reduce the amount of data that needs to be saved and avoid problems with writing
    keys2save = ['IPP', 'TimeLim', 'RangeLims', 'Pulselength', 't_s', 'Pulsetype',
                 'Tint', 'Fitinter', 'NNs', 'dtype', 'ambupsamp', 'species',
                 'numpoints', 'startfile', 'FitType','beamrate', 'outangles']

    if 'beamrate' not in simparams_orig:
        simparams_orig['beamrate'] = 1
    if 'outangles' not in simparams_orig:
        simparams_orig['outangles'] = beamlist
    simparams = {i:simparams_orig[i] for i in keys2save}
    if fext =='.pickle':
        pickleFile = fname.open('wb')
        pickle.dump([{'beamlist':beamlist,'radarname':radarname},simparams],pickleFile)
        pickleFile.close()
    elif fext=='.yml':
        with fname.open('w') as f:
            yaml.dump([{'beamlist':beamlist,'radarname':radarname},simparams], f)

    elif fext =='.ini':
        defaultparser = ConfigParser()
        defaultparser.read(str(d_file))
#        config = configparser()
#        config.read(fname)
        cfgfile = open(str(fname),'w')
        config = ConfigParser(allow_no_value = True)

        config.add_section('section 1')
        beamstring = ""
        for beam in beamlist:
            beamstring += str(beam)
            beamstring += " "
        config.set('section 1','; beamlist must be list of ints')
        config.set('section 1','beamlist',beamstring)
        config.set('section 1','; radarname can be pfisr, risr, or sondastrom')
        config.set('section 1','radarname',radarname)

        config.add_section('simparams')
        config.add_section('simparamsnames')
        defitems = [i[0] for i in defaultparser.items('simparamsnotes')]
        for param in simparams:
            if param=='Beamlist':
                continue
            if param.lower() in defitems:
                paramnote = defaultparser.get('simparamsnotes',param.lower())
            else:
                paramnote = 'Not in default parameters'
            config.set('simparams','; '+param +' '+paramnote)
            # for the output list of angles
            if param.lower()=='outangles':
                outstr = ''
                beamlistlist = simparams[param]
                if beamlistlist=='':
                    beamlistlist=beamlist
                for ilist in beamlistlist:
                    if isinstance(ilist,list) or isinstance(ilist,sp.ndarray):
                        for inum in ilist:
                            outstr=outstr+str(inum)+' '

                    else:
                        outstr=outstr+str(ilist)
                    outstr=outstr+', '
                outstr=outstr[:-2]
                config.set('simparams',param,outstr)

            elif isinstance(simparams[param],list):
                data = ""
                for a in simparams[param]:
                    data += str(a)
                    data += " "
                config.set('simparams',param,str(data))
            else:  #TODO config.set() is obsolete, undefined behavior!  use mapping protocol instead https://docs.python.org/3/library/configparser.html#mapping-protocol-access
                config.set('simparams',param,str(simparams[param]))
            config.set('simparamsnames',param,param)
        config.write(cfgfile)
        cfgfile.close()
    else:
        raise ValueError('fname needs to have an extension of .pickle, .yml, or .ini')
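
The allow_no_value=True flag is what lets the function above smuggle comment lines into the output: a two-argument set() stores the option with a None value, which is then written out as a bare line. A condensed sketch with dummy beam numbers:

import sys
from configparser import ConfigParser

config = ConfigParser(allow_no_value=True)
config.add_section('section 1')
# stored with value None, so it is emitted as a bare line
config.set('section 1', '; beamlist must be list of ints')
config.set('section 1', 'beamlist', '64094 64091')
config.write(sys.stdout)
# [section 1]
# ; beamlist must be list of ints
# beamlist = 64094 64091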
Example No. 41
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        # ConfigParser.set() requires string values on Python 3
        conf.set(sp_zero_section, k, str(v))
    conf.set(sp_zero_section, 'default', 'True')

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            utils.mkdirs(os.path.join(_testdir, 'sda1'))
            utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
            obj_socket = listen_zero()
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 3 replicas, 4 partitions, 3 devices
        # which will work for a replication policy or a 2+1 EC policy
        _info('No source object ring file, creating 3rep/4part/3dev ring')
        obj_sockets = [listen_zero() for _ in (0, 1, 2)]
        replica2part2dev_id = [[0, 1, 2, 0],
                               [1, 2, 0, 1],
                               [2, 0, 1, 2]]
        devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': obj_sockets[0].getsockname()[1]},
                {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': obj_sockets[1].getsockname()[1]},
                {'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
                 'port': obj_sockets[2].getsockname()[1]}]
        ring_data = ring.RingData(replica2part2dev_id, devs, 30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
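
The promote-to-policy-0 dance above (save the options, drop every storage-policy section, re-add the chosen one at index 0) can be seen in isolation in the following sketch; the policy names and options are invented:

from configparser import ConfigParser

conf = ConfigParser()
conf.read_string("""
[storage-policy:0]
name = gold

[storage-policy:2]
name = ec-test
policy_type = erasure_coding
""")

# remember the policy we want to keep, then clear all policy sections
saved = dict(conf.items('storage-policy:2'))
for section in list(conf.sections()):
    if section.startswith('storage-policy'):
        conf.remove_section(section)

# re-create it as policy 0 and mark it default
conf.add_section('storage-policy:0')
for k, v in saved.items():
    conf.set('storage-policy:0', k, v)
conf.set('storage-policy:0', 'default', 'True')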
Example No. 42
class SHOUTcasterFavorites:
    configfile = "/etc/NETcaster.conf"

    def __init__(self):
        self.configparser = ConfigParser()
        self.configparser.read(self.configfile)

    def getStreams(self):
        streams = []
        sections = self.configparser.sections()
        print(sections)
        for section in sections:
            stream = self.getStreamByName(section)
            streams.append(stream)
        return streams

    def isStream(self, streamname):
        return self.configparser.has_section(streamname)

    def getStreamByName(self, streamname):
        print("[" + myname + "] load " + streamname + " from config")
        if self.isStream(streamname):
            stream = Stream(streamname,
                            self.configparser.get(streamname, "description"),
                            self.configparser.get(streamname, "url"),
                            type=self.configparser.get(streamname, "type"))
            stream.setFavorite(True)
            return stream
        else:
            return False

    def addStream(self, stream):
        print("[" + myname + "] adding " + stream.getName() + " to config")
        try:
            self.configparser.add_section(stream.getName())
        except DuplicateSectionError as e:
            print("[" + myname + "] error while adding stream to config:", e)
            return False, e
        else:
            # XXX: I hope this still works properly if we make an optimistic
            # return here, since otherwise the interface would need to be
            # changed to work with a callback
            stream.getURL(boundFunction(self.addStreamCb, stream))
            return True, "Stream added"

    def addStreamCb(self, stream, url=None):
        self.configparser.set(stream.getName(), "description",
                              stream.getDescription())
        self.configparser.set(stream.getName(), "url", url)
        self.configparser.set(stream.getName(), "type", stream.getType())
        self.writeConfig()

    def changeStream(self, streamold, streamnew):
        if not self.configparser.has_section(streamold.getName()):
            return False, "stream not found in config"
        elif self.configparser.has_section(streamnew.getName()):
            return False, "stream with that name exists already"
        else:
            self.configparser.remove_section(streamold.getName())
            return self.addStream(streamnew)

    def deleteStreamWithName(self, streamname):
        self.configparser.remove_section(streamname)
        self.writeConfig()

    def writeConfig(self):
        print("[" + myname + "] writing config to " + self.configfile)

        with open(self.configfile, "w") as fp:
            self.configparser.write(fp)
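
add_section() raises DuplicateSectionError when the section already exists, which is exactly what addStream() leans on to detect an existing stream. In isolation (the stream name is invented):

from configparser import ConfigParser, DuplicateSectionError

parser = ConfigParser()
parser.add_section('MyStream')
try:
    parser.add_section('MyStream')
except DuplicateSectionError as e:
    print('stream exists already:', e)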
Example No. 43
    def __spawn_instance(self):
        """
        Create and configure a new KRA instance using pkispawn.
        Creates a configuration file with IPA-specific
        parameters and passes it to the base class to call pkispawn
        """

        # Create an empty and secured file
        (cfg_fd, cfg_file) = tempfile.mkstemp()
        os.close(cfg_fd)
        pent = pwd.getpwnam(self.service_user)
        os.chown(cfg_file, pent.pw_uid, pent.pw_gid)

        # Create KRA configuration
        config = ConfigParser()
        config.optionxform = str
        config.add_section("KRA")

        # Security Domain Authentication
        config.set("KRA", "pki_security_domain_https_port", "443")
        config.set("KRA", "pki_security_domain_password", self.admin_password)
        config.set("KRA", "pki_security_domain_user", self.admin_user)

        # issuing ca
        config.set("KRA", "pki_issuing_ca_uri", "https://%s" %
                   ipautil.format_netloc(self.fqdn, 443))

        # Server
        config.set("KRA", "pki_enable_proxy", "True")
        config.set("KRA", "pki_restart_configured_instance", "False")
        config.set("KRA", "pki_backup_keys", "True")
        config.set("KRA", "pki_backup_password", self.admin_password)

        # Client security database
        config.set("KRA", "pki_client_database_dir", self.agent_db)
        config.set("KRA", "pki_client_database_password", self.admin_password)
        config.set("KRA", "pki_client_database_purge", "False")
        config.set("KRA", "pki_client_pkcs12_password", self.admin_password)

        # Administrator
        config.set("KRA", "pki_admin_name", self.admin_user)
        config.set("KRA", "pki_admin_uid", self.admin_user)
        config.set("KRA", "pki_admin_email", "root@localhost")
        config.set("KRA", "pki_admin_password", self.admin_password)
        config.set("KRA", "pki_admin_nickname", "ipa-ca-agent")
        config.set("KRA", "pki_admin_subject_dn",
                   str(DN(('cn', 'ipa-ca-agent'), self.subject_base)))
        config.set("KRA", "pki_import_admin_cert", "True")
        config.set("KRA", "pki_admin_cert_file", paths.ADMIN_CERT_PATH)
        config.set("KRA", "pki_client_admin_cert_p12", paths.DOGTAG_ADMIN_P12)

        # Directory server
        config.set("KRA", "pki_ds_ldap_port", "389")
        config.set("KRA", "pki_ds_password", self.dm_password)
        config.set("KRA", "pki_ds_base_dn", self.basedn)
        config.set("KRA", "pki_ds_database", "ipaca")
        config.set("KRA", "pki_ds_create_new_db", "False")

        self._use_ldaps_during_spawn(config)

        # Certificate subject DNs
        config.set("KRA", "pki_subsystem_subject_dn",
                   str(DN(('cn', 'CA Subsystem'), self.subject_base)))
        config.set("KRA", "pki_ssl_server_subject_dn",
                   str(DN(('cn', self.fqdn), self.subject_base)))
        config.set("KRA", "pki_audit_signing_subject_dn",
                   str(DN(('cn', 'KRA Audit'), self.subject_base)))
        config.set(
            "KRA", "pki_transport_subject_dn",
            str(DN(('cn', 'KRA Transport Certificate'), self.subject_base)))
        config.set(
            "KRA", "pki_storage_subject_dn",
            str(DN(('cn', 'KRA Storage Certificate'), self.subject_base)))

        # Certificate nicknames
        # Note that both the server certs and subsystem certs reuse
        # the ca certs.
        config.set("KRA", "pki_subsystem_nickname",
                   "subsystemCert cert-pki-ca")
        config.set("KRA", "pki_ssl_server_nickname",
                   "Server-Cert cert-pki-ca")
        config.set("KRA", "pki_audit_signing_nickname",
                   "auditSigningCert cert-pki-kra")
        config.set("KRA", "pki_transport_nickname",
                   "transportCert cert-pki-kra")
        config.set("KRA", "pki_storage_nickname",
                   "storageCert cert-pki-kra")

        # Shared db settings
        # Needed because CA and KRA share the same database
        # We will use the dbuser created for the CA
        config.set("KRA", "pki_share_db", "True")
        config.set(
            "KRA", "pki_share_dbuser_dn",
            str(DN(('uid', 'pkidbuser'), ('ou', 'people'), ('o', 'ipaca'))))

        _p12_tmpfile_handle, p12_tmpfile_name = tempfile.mkstemp(dir=paths.TMP)

        if self.clone:
            krafile = self.pkcs12_info[0]
            shutil.copy(krafile, p12_tmpfile_name)
            pent = pwd.getpwnam(self.service_user)
            os.chown(p12_tmpfile_name, pent.pw_uid, pent.pw_gid)

            # Security domain registration
            config.set("KRA", "pki_security_domain_hostname", self.master_host)
            config.set("KRA", "pki_security_domain_https_port", "443")
            config.set("KRA", "pki_security_domain_user", self.admin_user)
            config.set("KRA", "pki_security_domain_password",
                       self.admin_password)

            # Clone
            config.set("KRA", "pki_clone", "True")
            config.set("KRA", "pki_clone_pkcs12_path", p12_tmpfile_name)
            config.set("KRA", "pki_clone_pkcs12_password", self.dm_password)
            config.set("KRA", "pki_clone_setup_replication", "False")
            config.set(
                "KRA", "pki_clone_uri",
                "https://%s" % ipautil.format_netloc(self.master_host, 443))
        else:
            # the admin cert file is needed for the first instance of KRA
            cert = DogtagInstance.get_admin_cert(self)
            with open(paths.ADMIN_CERT_PATH, "w") as admin_path:
                admin_path.write(cert)

        # Generate configuration file
        with open(cfg_file, "wb") as f:
            config.write(f)

        try:
            DogtagInstance.spawn_instance(
                self, cfg_file,
                nolog_list=(self.dm_password, self.admin_password)
            )
        finally:
            os.remove(p12_tmpfile_name)
            os.remove(cfg_file)

        shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)

        export_kra_agent_pem()

        self.log.debug("completed creating KRA instance")
Example No. 44
class RepositoryYum(RepositoryBase):
    """
    Implements repository handling for yum package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom yum arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_yum_dir`
            shared directory between image root and build system root

        * :attr:`runtime_yum_config_file`
            yum runtime config file name

        * :attr:`command_env`
            customized os.environ for yum

        * :attr:`runtime_yum_config`
            Instance of ConfigParser

        :param list custom_args: yum arguments
        """
        self.custom_args = custom_args
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in yum call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            log.warning('rpm-excludedocs not supported for yum: ignoring')

        if 'check_signatures' in self.custom_args:
            self.custom_args.remove('check_signatures')
            self.gpg_check = '1'
        else:
            self.gpg_check = '0'

        self.repo_names = []

        # yum support is based on creating repo files which contain
        # path names to the repo and its cache. To allow persistent
        # use of the files inside and outside of a chroot call, an
        # active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/yum'

        self.shared_yum_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_yum_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.yum_args = [
            '-c', self.runtime_yum_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_yum_runtime_environment()

        # config file parameters for yum tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup yum repository operations to store all data
        in the default places
        """
        self.shared_yum_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum.repos.d'
        self.shared_yum_dir['cache-dir'] = \
            self.root_dir + '/var/cache/yum'
        self.shared_yum_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/yum/pluginconf.d'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        yum runtime configuration and environment
        """
        return {
            'yum_args': self.yum_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        repo_config.set(
            name, 'enabled', '1'
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        if repo_gpgcheck is not None:
            repo_config.set(
                name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
            )
        if pkg_gpgcheck is not None:
            repo_config.set(
                name, 'gpgcheck', '1' if pkg_gpgcheck else '0'
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(RepositoryYumSpaceRemover(repo))

    def import_trusted_keys(self, signing_keys):
        """
        Imports trusted keys into the image

        :param list signing_keys: list of the key files to import
        """
        for key in signing_keys:
            Command.run(['rpm', '--root', self.root_dir, '--import', key])

    def delete_repo(self, name):
        """
        Delete yum repository

        :param string name: repository base file name
        """
        Path.wipe(
            self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        )

    def delete_all_repos(self):
        """
        Delete all yum repositories
        """
        Path.wipe(self.shared_yum_dir['reposd-dir'])
        Path.create(self.shared_yum_dir['reposd-dir'])

    def delete_repo_cache(self, name):
        """
        Delete yum repository cache

        The cache data for each repository is stored in a directory
        of the same name as the repository name. The method deletes
        this directory to cleanup the cache information

        :param string name: repository name
        """
        Path.wipe(
            os.sep.join([self.shared_yum_dir['cache-dir'], name])
        )

    def cleanup_unused_repos(self):
        """
        Delete unused yum repositories

        Repository configurations which are not used for this build
        must be removed otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_yum_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_yum_runtime_environment(self):
        for yum_dir in list(self.shared_yum_dir.values()):
            Path.create(yum_dir)
        return dict(
            os.environ, LANG='C'
        )

    def _create_runtime_config_parser(self):
        self.runtime_yum_config = ConfigParser()
        self.runtime_yum_config.add_section('main')

        self.runtime_yum_config.set(
            'main', 'cachedir', self.shared_yum_dir['cache-dir']
        )
        self.runtime_yum_config.set(
            'main', 'reposdir', self.shared_yum_dir['reposd-dir']
        )
        self.runtime_yum_config.set(
            'main', 'pluginconfpath', self.shared_yum_dir['pluginconf-dir']
        )
        self.runtime_yum_config.set(
            'main', 'keepcache', '1'
        )
        self.runtime_yum_config.set(
            'main', 'debuglevel', '2'
        )
        self.runtime_yum_config.set(
            'main', 'pkgpolicy', 'newest'
        )
        self.runtime_yum_config.set(
            'main', 'tolerant', '0'
        )
        self.runtime_yum_config.set(
            'main', 'exactarch', '1'
        )
        self.runtime_yum_config.set(
            'main', 'obsoletes', '1'
        )
        self.runtime_yum_config.set(
            'main', 'plugins', '1'
        )
        self.runtime_yum_config.set(
            'main', 'gpgcheck', self.gpg_check
        )
        self.runtime_yum_config.set(
            'main', 'metadata_expire', '1800'
        )
        self.runtime_yum_config.set(
            'main', 'group_command', 'compat'
        )

    def _create_runtime_plugin_config_parser(self):
        self.runtime_yum_plugin_config = ConfigParser()
        self.runtime_yum_plugin_config.add_section('main')

        self.runtime_yum_plugin_config.set(
            'main', 'enabled', '1'
        )

    def _write_runtime_config(self):
        with open(self.runtime_yum_config_file.name, 'w') as config:
            self.runtime_yum_config.write(
                RepositoryYumSpaceRemover(config)
            )
        yum_plugin_config_file = \
            self.shared_yum_dir['pluginconf-dir'] + '/priorities.conf'
        with open(yum_plugin_config_file, 'w') as pluginconfig:
            self.runtime_yum_plugin_config.write(
                RepositoryYumSpaceRemover(pluginconfig)
            )
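
RepositoryYumSpaceRemover is not part of this excerpt; judging from how it wraps the file objects handed to write(), it is a file-object proxy that tightens the 'key = value' lines ConfigParser emits into 'key=value'. A guess at a minimal stand-in, not kiwi's actual implementation:

class SpaceRemover:
    """Hypothetical file-object proxy: rewrite 'key = value' as 'key=value'."""
    def __init__(self, wrapped):
        self.wrapped = wrapped

    def write(self, data):
        # ConfigParser writes one line per call, so replacing the first
        # occurrence only touches that line's delimiter
        self.wrapped.write(data.replace(' = ', '=', 1))

On Python 3 the same effect is available directly via config.write(fileobject, space_around_delimiters=False).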
Example No. 45
    def _install_desktop_file(self, prefix, activity_path):
        cp = ConfigParser()
        section = 'Desktop Entry'
        cp.add_section(section)
        cp.optionxform = str  # Allow CamelCase entries

        # Get it from the activity.info for the non-translated version
        info = ConfigParser()
        info.read(os.path.join(activity_path, 'activity', 'activity.info'))
        cp.set(section, 'Name', info.get('Activity', 'name'))
        if info.has_option('Activity', 'summary'):
            cp.set(section, 'Comment', info.get('Activity', 'summary'))

        for path in sorted(
                glob(
                    os.path.join(activity_path, 'locale', '*',
                                 'activity.linfo'))):
            locale = path.split(os.path.sep)[-2]
            info = ConfigParser()
            info.read(path)
            if info.has_option('Activity', 'name'):
                cp.set(section, 'Name[{}]'.format(locale),
                       info.get('Activity', 'name'))
            if info.has_option('Activity', 'summary'):
                cp.set(section, 'Comment[{}]'.format(locale),
                       info.get('Activity', 'summary'))

        cp.set(section, 'Terminal', 'false')
        cp.set(section, 'Type', 'Application')
        cp.set(section, 'Categories', 'Education;')
        cp.set(
            section, 'Icon',
            os.path.join(activity_path, 'activity',
                         self.config.bundle.get_icon_filename()))
        cp.set(section, 'Exec', self.config.bundle.get_command())
        cp.set(section, 'Path', activity_path)  # Path == CWD for running

        name = '{}.activity.desktop'.format(self.config.bundle_id)
        path = os.path.join(prefix, 'share', 'applications', name)
        if not os.path.isdir(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        with open(path, 'w') as f:
            cp.write(f)
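
Because ConfigParser pads the delimiter by default, the generated file will contain 'Name = My Activity' rather than the 'Name=My Activity' usually seen in .desktop files; the desktop entry spec says whitespace around the equals sign should be ignored, so both forms parse. If the padding matters, Python 3 can switch it off at write time, sketched here with invented values:

import sys
from configparser import ConfigParser

cp = ConfigParser()
cp.optionxform = str               # .desktop keys are case-sensitive
cp.add_section('Desktop Entry')
cp.set('Desktop Entry', 'Name', 'My Activity')
cp.set('Desktop Entry', 'Type', 'Application')
cp.write(sys.stdout, space_around_delimiters=False)
# [Desktop Entry]
# Name=My Activity
# Type=Application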
Example No. 46
File: dnf.py Project: ucytech/kiwi
class RepositoryDnf(RepositoryBase):
    """
    Implements repository handling for dnf package manager
    """
    def post_init(self, custom_args=None):
        """
        Post initialization method

        Store custom dnf arguments and create runtime configuration
        and environment

        Attributes

        * :attr:`shared_dnf_dir`
            shared directory between image root and build system root

        * :attr:`runtime_dnf_config_file`
            dnf runtime config file name

        * :attr:`command_env`
            customized os.environ for dnf

        * :attr:`runtime_dnf_config`
            Instance of ConfigParser

        :param list custom_args: dnf arguments
        """
        self.custom_args = custom_args
        self.exclude_docs = False
        if not custom_args:
            self.custom_args = []

        # extract custom arguments not used in dnf call
        if 'exclude_docs' in self.custom_args:
            self.custom_args.remove('exclude_docs')
            self.exclude_docs = True

        self.repo_names = []

        # dnf support is based on creating repo files which contain
        # path names to the repo and its cache. To allow persistent
        # use of the files inside and outside of a chroot call, an
        # active bind mount from RootBind::mount_shared_directory
        # is expected and required
        manager_base = self.shared_location + '/dnf'

        self.shared_dnf_dir = {
            'reposd-dir': manager_base + '/repos',
            'cache-dir': manager_base + '/cache',
            'pluginconf-dir': manager_base + '/pluginconf'
        }

        self.runtime_dnf_config_file = NamedTemporaryFile(
            dir=self.root_dir
        )

        self.dnf_args = [
            '-c', self.runtime_dnf_config_file.name, '-y'
        ] + self.custom_args

        self.command_env = self._create_dnf_runtime_environment()

        # config file parameters for dnf tool
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def use_default_location(self):
        """
        Setup dnf repository operations to store all data
        in the default places
        """
        self.shared_dnf_dir['reposd-dir'] = \
            self.root_dir + '/etc/yum/repos.d'
        self.shared_dnf_dir['cache-dir'] = \
            self.root_dir + '/var/cache/dnf'
        self.shared_dnf_dir['pluginconf-dir'] = \
            self.root_dir + '/etc/dnf/plugins'
        self._create_runtime_config_parser()
        self._create_runtime_plugin_config_parser()
        self._write_runtime_config()

    def runtime_config(self):
        """
        dnf runtime configuration and environment
        """
        return {
            'dnf_args': self.dnf_args,
            'command_env': self.command_env
        }

    def add_repo(
        self, name, uri, repo_type='rpm-md', prio=None, dist=None, components=None
    ):
        """
        Add dnf repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: dnf repository priority
        :param dist: unused
        :param components: unused
        """
        repo_file = self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # dnf requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(repo)

    def delete_repo(self, name):
        """
        Delete dnf repository

        :param string name: repository base file name
        """
        Path.wipe(
            self.shared_dnf_dir['reposd-dir'] + '/' + name + '.repo'
        )

    def delete_all_repos(self):
        """
        Delete all dnf repositories
        """
        Path.wipe(self.shared_dnf_dir['reposd-dir'])
        Path.create(self.shared_dnf_dir['reposd-dir'])

    def cleanup_unused_repos(self):
        """
        Delete unused dnf repositories

        Repository configurations which are not used for this build
        must be removed otherwise they are taken into account for
        the package installations
        """
        repos_dir = self.shared_dnf_dir['reposd-dir']
        repo_files = list(os.walk(repos_dir))[0][2]
        for repo_file in repo_files:
            if repo_file not in self.repo_names:
                Path.wipe(repos_dir + '/' + repo_file)

    def _create_dnf_runtime_environment(self):
        for dnf_dir in list(self.shared_dnf_dir.values()):
            Path.create(dnf_dir)
        return dict(
            os.environ, LANG='C'
        )

    def _create_runtime_config_parser(self):
        self.runtime_dnf_config = ConfigParser()
        self.runtime_dnf_config.add_section('main')

        self.runtime_dnf_config.set(
            'main', 'cachedir', self.shared_dnf_dir['cache-dir']
        )
        self.runtime_dnf_config.set(
            'main', 'reposdir', self.shared_dnf_dir['reposd-dir']
        )
        self.runtime_dnf_config.set(
            'main', 'pluginconfpath', self.shared_dnf_dir['pluginconf-dir']
        )
        self.runtime_dnf_config.set(
            'main', 'keepcache', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'debuglevel', '2'
        )
        self.runtime_dnf_config.set(
            'main', 'pkgpolicy', 'newest'
        )
        self.runtime_dnf_config.set(
            'main', 'tolerant', '0'
        )
        self.runtime_dnf_config.set(
            'main', 'exactarch', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'obsoletes', '1'
        )
        self.runtime_dnf_config.set(
            'main', 'plugins', '1'
        )
        if self.exclude_docs:
            self.runtime_dnf_config.set(
                'main', 'tsflags', 'nodocs'
            )

    def _create_runtime_plugin_config_parser(self):
        self.runtime_dnf_plugin_config = ConfigParser()
        self.runtime_dnf_plugin_config.add_section('main')

        self.runtime_dnf_plugin_config.set(
            'main', 'enabled', '1'
        )

    def _write_runtime_config(self):
        with open(self.runtime_dnf_config_file.name, 'w') as config:
            self.runtime_dnf_config.write(config)
        dnf_plugin_config_file = \
            self.shared_dnf_dir['pluginconf-dir'] + '/priorities.conf'
        with open(dnf_plugin_config_file, 'w') as pluginconfig:
            self.runtime_dnf_plugin_config.write(pluginconfig)
Example No. 47
def makeconfigfile(fname, beamlist, radarname, simparams_orig):
    """This will make the config file based off of the desired input parmeters.
    Inputs
        fname - Name of the file as a string.
        beamlist - A list of beams numbers used by the AMISRS
        radarname - A string that is the name of the radar being simulated.
        simparams_orig - A set of simulation parameters in a dictionary."""
    fname = Path(fname).expanduser()

    curpath = Path(__file__).resolve().parent
    d_file = curpath / 'default.ini'
    fext = fname.suffix

    # reduce the amount of data that needs to be saved and avoid problems with writing
    keys2save = [
        'IPP', 'TimeLim', 'RangeLims', 'Pulselength', 't_s', 'Pulsetype',
        'Tint', 'Fitinter', 'NNs', 'dtype', 'ambupsamp', 'species',
        'numpoints', 'startfile', 'FitType', 'beamrate', 'outangles'
    ]

    if 'beamrate' not in simparams_orig:
        simparams_orig['beamrate'] = 1
    if 'outangles' not in simparams_orig:
        simparams_orig['outangles'] = beamlist
    simparams = {i: simparams_orig[i] for i in keys2save}
    if fext == '.pickle':
        pickleFile = fname.open('wb')
        pickle.dump([{
            'beamlist': beamlist,
            'radarname': radarname
        }, simparams], pickleFile)
        pickleFile.close()
    elif fext == '.yml':
        with fname.open('w') as f:
            yaml.dump([{
                'beamlist': beamlist,
                'radarname': radarname
            }, simparams], f)

    elif fext == '.ini':
        defaultparser = ConfigParser()
        defaultparser.read(str(d_file))
        #        config = configparser()
        #        config.read(fname)
        cfgfile = open(str(fname), 'w')
        config = ConfigParser(allow_no_value=True)

        config.add_section('section 1')
        beamstring = ""
        for beam in beamlist:
            beamstring += str(beam)
            beamstring += " "
        config.set('section 1', '; beamlist must be list of ints')
        config.set('section 1', 'beamlist', beamstring)
        config.set('section 1',
                   '; radarname can be pfisr, risr, or sondastrom')
        config.set('section 1', 'radarname', radarname)

        config.add_section('simparams')
        config.add_section('simparamsnames')
        defitems = [i[0] for i in defaultparser.items('simparamsnotes')]
        for param in simparams:
            if param == 'Beamlist':
                continue
            if param.lower() in defitems:
                paramnote = defaultparser.get('simparamsnotes', param.lower())
            else:
                paramnote = 'Not in default parameters'
            config.set('simparams', '; ' + param + ' ' + paramnote)
            # for the output list of angles
            if param.lower() == 'outangles':
                outstr = ''
                beamlistlist = simparams[param]
                if beamlistlist == '':
                    beamlistlist = beamlist
                for ilist in beamlistlist:
                    if isinstance(ilist, list) or isinstance(
                            ilist, sp.ndarray):
                        for inum in ilist:
                            outstr = outstr + str(inum) + ' '

                    else:
                        outstr = outstr + str(ilist)
                    outstr = outstr + ', '
                outstr = outstr[:-2]
                config.set('simparams', param, outstr)

            elif isinstance(simparams[param], list):
                data = ""
                for a in simparams[param]:
                    data += str(a)
                    data += " "
                config.set('simparams', param, str(data))
            else:  #TODO config.set() is obsolete, undefined behavior!  use mapping protocol instead https://docs.python.org/3/library/configparser.html#mapping-protocol-access
                config.set('simparams', param, str(simparams[param]))
            config.set('simparamsnames', param, param)
        config.write(cfgfile)
        cfgfile.close()
    else:
        raise ValueError(
            'fname needs to have an extension of .pickle, .yml, or .ini')
Example No. 48
inifile = args[0]

# Set up the configuration for the sub-dags

prior_cp = ConfigParser()
prior_cp.optionxform = str
with open(inifile) as f:
    prior_cp.read_file(f)

main_cp = ConfigParser()
main_cp.optionxform = str
with open(inifile) as f:
    main_cp.read_file(f)

rundir = os.path.abspath(opts.run_path)

if opts.daglog_path is not None:
    prior_cp.set('paths', 'daglogdir',
                 os.path.join(os.path.abspath(opts.daglog_path), 'prior'))
    main_cp.set('paths', 'daglogdir',
                os.path.join(os.path.abspath(opts.daglog_path), 'main'))
    daglogdir = os.path.abspath(opts.daglog_path)
else:
    prior_cp.set('paths', 'daglogdir',
                 os.path.join(os.path.abspath(opts.run_path), 'prior'))
    main_cp.set('paths', 'daglogdir',
                os.path.join(os.path.abspath(opts.run_path), 'main'))
    daglogdir = os.path.abspath(opts.run_path)

webdir = main_cp.get('ppanalysis', 'webdir')
priordir = os.path.join(rundir, 'prior')
maindir = os.path.join(rundir, 'main')
priorwebdir = os.path.join(webdir, 'prior')
mainwebdir = os.path.join(webdir, 'injections')
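
One caveat when storing filesystem paths with set(): ConfigParser applies %-interpolation on get(), so a literal % in a value must be doubled, or RawConfigParser used instead. A short illustration with a fake path:

from configparser import ConfigParser, RawConfigParser

cp = ConfigParser()
cp.add_section('paths')
cp.set('paths', 'daglogdir', '/data/100%%full')   # escape % for interpolation
print(cp.get('paths', 'daglogdir'))               # /data/100%full

raw = RawConfigParser()
raw.add_section('paths')
raw.set('paths', 'daglogdir', '/data/100%full')   # RawConfigParser: no interpolation
print(raw.get('paths', 'daglogdir'))              # /data/100%full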
Example No. 49
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, str(v))
    conf.set(sp_zero_section, 'default', 'True')

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            utils.mkdirs(os.path.join(_testdir, 'sda1'))
            utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
            obj_socket = listen_zero()
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 3 replicas, 4 partitions, 3 devices
        # which will work for a replication policy or a 2+1 EC policy
        _info('No source object ring file, creating 3rep/4part/3dev ring')
        obj_sockets = [listen_zero() for _ in (0, 1, 2)]
        replica2part2dev_id = [[0, 1, 2, 0],
                               [1, 2, 0, 1],
                               [2, 0, 1, 2]]
        devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': obj_sockets[0].getsockname()[1]},
                {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': obj_sockets[1].getsockname()[1]},
                {'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
                 'port': obj_sockets[2].getsockname()[1]}]
        ring_data = ring.RingData(replica2part2dev_id, devs, 30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
Example No. 50
            if file.endswith('.gz'):
                # open in text mode so the regex below sees str, not bytes
                handle = gzip.open(file_path, 'rt')
            else:
                handle = open(file_path, 'r')

            # Run each access log line through a regex to extract the IPv4 addresses of remote hosts.
            for line in handle:
                match = re.match(NGINX_ACCESS_PATTERN, line)
                if match:
                    total_hits += 1
                    unique_hits.add(match.group('ipaddress'))
            handle.close()

        stats['unique_hits'] = len(unique_hits)
        stats['total_hits'] = total_hits

    # Build the ConfigParser data.
    config = ConfigParser()
    config.add_section(args.config_section)
    for key, value in stats.items():
        config.set(args.config_section, key, str(value))

    # Output the data in ConfigParser format to stdout and to a file.
    config.write(sys.stdout)
    if args.out:
        print('Writing to file passed via parameter: {filename}'.format(
            filename=args.out),
              file=sys.stderr)
        with open(args.out, 'w') as output_file:
            config.write(output_file)
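
The stats written above round-trip cleanly: everything is stored as strings, and getint() converts on the way back. A sketch using an in-memory buffer and an invented section name:

from configparser import ConfigParser
from io import StringIO

out = ConfigParser()
out.add_section('nginx')
out.set('nginx', 'total_hits', '1234')
out.set('nginx', 'unique_hits', '99')

buf = StringIO()
out.write(buf)

back = ConfigParser()
back.read_string(buf.getvalue())
assert back.getint('nginx', 'total_hits') == 1234
assert back.getint('nginx', 'unique_hits') == 99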
Example No. 51
def read_config(filenames=SEARCH_PATH):
    """Attempt to read local configuration files to determine spalloc client
    settings.

    Parameters
    ----------
    filenames : [str, ...]
        Filenames to attempt to read. Later config files have higher priority.

    Returns
    -------
    dict
        The configuration loaded.
    """
    parser = ConfigParser()

    # Set default config values (NB: No read_dict in Python 2.7)
    parser.add_section("spalloc")
    for key, value in iteritems({"port": "22244",
                                 "keepalive": "60.0",
                                 "reconnect_delay": "5.0",
                                 "timeout": "5.0",
                                 "machine": "None",
                                 "tags": "None",
                                 "min_ratio": "0.333",
                                 "max_dead_boards": "0",
                                 "max_dead_links": "None",
                                 "require_torus": "False"}):
        parser.set("spalloc", key, value)

    # Attempt to read from each possible file location in turn
    for filename in filenames:
        try:
            with open(filename, "r") as f:
                parser.readfp(f, filename)
        except (IOError, OSError):
            # File did not exist, keep trying
            pass

    cfg = {}

    try:
        cfg["hostname"] = parser.get("spalloc", "hostname")
    except NoOptionError:
        cfg["hostname"] = None

    cfg["port"] = parser.getint("spalloc", "port")

    try:
        cfg["owner"] = parser.get("spalloc", "owner")
    except NoOptionError:
        cfg["owner"] = None

    if parser.get("spalloc", "keepalive") == "None":
        cfg["keepalive"] = None
    else:
        cfg["keepalive"] = parser.getfloat("spalloc", "keepalive")

    cfg["reconnect_delay"] = parser.getfloat("spalloc", "reconnect_delay")

    if parser.get("spalloc", "timeout") == "None":
        cfg["timeout"] = None
    else:
        cfg["timeout"] = parser.getfloat("spalloc", "timeout")

    if parser.get("spalloc", "machine") == "None":
        cfg["machine"] = None
    else:
        cfg["machine"] = parser.get("spalloc", "machine")

    if parser.get("spalloc", "tags") == "None":
        cfg["tags"] = None
    else:
        cfg["tags"] = list(map(str.strip,
                               parser.get("spalloc", "tags").split(",")))

    cfg["min_ratio"] = parser.getfloat("spalloc", "min_ratio")

    if parser.get("spalloc", "max_dead_boards") == "None":
        cfg["max_dead_boards"] = None
    else:
        cfg["max_dead_boards"] = parser.getint("spalloc", "max_dead_boards")

    if parser.get("spalloc", "max_dead_links") == "None":
        cfg["max_dead_links"] = None
    else:
        cfg["max_dead_links"] = parser.getint("spalloc", "max_dead_links")

    cfg["require_torus"] = parser.getboolean("spalloc", "require_torus")

    return cfg
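
As the comment notes, read_dict() is unavailable on Python 2.7; on Python 3 the whole defaults block shrinks to a single call. A sketch with a subset of the defaults above:

from configparser import ConfigParser

parser = ConfigParser()
parser.read_dict({'spalloc': {
    'port': '22244',
    'keepalive': '60.0',
    'machine': 'None',
}})
print(parser.getint('spalloc', 'port'))  # 22244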
Example No. 52
opts = parser.parse_args()

# check that at least the ini file has been given
inifile = opts.inifile

# parse .ini file
try:
  cp = ConfigParser()
  cp.optionxform = str
  with open(inifile) as f:
    cp.read_file(f)
except Exception:
  print("Error... problem parsing '%s' configuration file" % inifile, file=sys.stderr)
  sys.exit(1)

if opts.runpath is not None:
  cp.set('analysis', 'run_dir', opts.runpath)

# Check if we're running in automated mode or not
try:
  automated = cp.getboolean('analysis', 'autonomous')
except Exception:
  automated = False

# Check if configuration file says to submit the DAG
submitdag = opts.condor_submit
if not submitdag:
  try:
    submitdag = cp.getboolean('analysis', 'submit_dag')
  except Exception:
    submitdag = False
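
getboolean() is what makes settings like 'autonomous = Yes' or 'submit_dag = off' in the ini file work: it accepts 1/yes/true/on and 0/no/false/off in any letter case. Briefly:

from configparser import ConfigParser

cp = ConfigParser()
cp.read_string("""
[analysis]
autonomous = Yes
submit_dag = off
""")
print(cp.getboolean('analysis', 'autonomous'))  # True
print(cp.getboolean('analysis', 'submit_dag'))  # False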
Example No. 53
    def add_repo(
        self, name, uri, repo_type='rpm-md',
        prio=None, dist=None, components=None,
        user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        """
        Add yum repository

        :param string name: repository base file name
        :param string uri: repository URI
        :param repo_type: repository type name
        :param int prio: yum repository priority
        :param dist: unused
        :param components: unused
        :param user: unused
        :param secret: unused
        :param credentials_file: unused
        :param bool repo_gpgcheck: enable repository signature validation
        :param bool pkg_gpgcheck: enable package signature validation
        """
        repo_file = self.shared_yum_dir['reposd-dir'] + '/' + name + '.repo'
        self.repo_names.append(name + '.repo')
        if os.path.exists(uri):
            # yum requires local paths to take the file: type
            uri = 'file://' + uri
        repo_config = ConfigParser()
        repo_config.add_section(name)
        repo_config.set(
            name, 'name', name
        )
        repo_config.set(
            name, 'baseurl', uri
        )
        repo_config.set(
            name, 'enabled', '1'
        )
        if prio:
            repo_config.set(
                name, 'priority', format(prio)
            )
        if repo_gpgcheck is not None:
            repo_config.set(
                name, 'repo_gpgcheck', '1' if repo_gpgcheck else '0'
            )
        if pkg_gpgcheck is not None:
            repo_config.set(
                name, 'gpgcheck', '1' if pkg_gpgcheck else '0'
            )
        with open(repo_file, 'w') as repo:
            repo_config.write(RepositoryYumSpaceRemover(repo))
Example No. 54
class SetupConfig(object):
    """Wrapper around the setup.cfg file if available.

    One reason is to cleanup setup.cfg from these settings::

        [egg_info]
        tag_build = dev
        tag_svn_revision = true

    Another is for optional zest.releaser-specific settings::

        [zest.releaser]
        python-file-with-version = reinout/maurits.py


    """

    config_filename = SETUP_CONFIG_FILE

    def __init__(self):
        """Grab the configuration (overridable for test purposes)"""
        # If there is a setup.cfg in the package, parse it
        if not os.path.exists(self.config_filename):
            self.config = None
            return
        self.config = ConfigParser()
        with codecs.open(self.config_filename, 'r', 'utf8') as fp:
            self.config.read_file(fp)

    def development_marker(self):
        """Return development marker to be appended in postrelease

        Override the default ``.dev0`` in setup.cfg using
        a ``development-marker`` option::

            [zest.releaser]
            development-marker = .dev1

        Returns default of `.dev0` when nothing has been configured.

        """
        default = ".dev0"
        if self.config is None:
            return default
        try:
            result = self.config.get('zest.releaser',
                                     'development-marker')
        except (NoSectionError, NoOptionError, ValueError):
            result = default
        return result

    def has_bad_commands(self):
        if self.config is None:
            return False
        if not self.config.has_section('egg_info'):
            # bail out early as the main section is not there
            return False
        bad = False
        # Check 1.
        if self.config.has_option('egg_info', 'tag_build'):
            # Might still be empty.
            value = self.config.get('egg_info', 'tag_build')
            if value:
                logger.warning("%s has [egg_info] tag_build set to %r",
                               self.config_filename, value)
                bad = True
        # Check 2.
        if self.config.has_option('egg_info', 'tag_svn_revision'):
            if self.config.getboolean('egg_info', 'tag_svn_revision'):
                value = self.config.get('egg_info', 'tag_svn_revision')
                logger.warning("%s has [egg_info] tag_svn_revision set to %r",
                               self.config_filename, value)
                bad = True
        return bad

    def fix_config(self):
        if not self.has_bad_commands():
            logger.warn("Cannot fix already fine %s.", self.config_filename)
            return
        if self.config.has_option('egg_info', 'tag_build'):
            self.config.set('egg_info', 'tag_build', '')
        if self.config.has_option('egg_info', 'tag_svn_revision'):
            self.config.set('egg_info', 'tag_svn_revision', 'false')
        with open(self.config_filename, 'w') as new_setup:
            self.config.write(new_setup)
        logger.info("New setup.cfg contents:")
        with open(self.config_filename) as new_setup:
            print(new_setup.read())

    def python_file_with_version(self):
        """Return Python filename with ``__version__`` marker, if configured.

        Enable this by adding a ``python-file-with-version`` option::

            [zest.releaser]
            python-file-with-version = reinout/maurits.py

        Return None when nothing has been configured.

        """
        default = None
        if self.config is None:
            return default
        try:
            result = self.config.get(
                'zest.releaser',
                'python-file-with-version')
        except (NoSectionError, NoOptionError, ValueError):
            return default
        return result
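A minimal usage sketch for SetupConfig (a hypothetical session; it assumes the class above is importable and reads whatever setup.cfg sits in the current directory):

    # Hypothetical usage; output depends on the local setup.cfg.
    setup_cfg = SetupConfig()
    print(setup_cfg.development_marker())        # '.dev0' unless overridden
    print(setup_cfg.python_file_with_version())  # None unless configured
    if setup_cfg.has_bad_commands():
        setup_cfg.fix_config()  # blanks tag_build, sets tag_svn_revision to false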
Example No. 55
            for bucket in response["aggregations"]["unique_hits"]["buckets"]:
                ip = bucket["key"]

                # Ignore requests without a proxy_ip
                if ip == "-":
                    continue

                unique_ips.add(ip)
                total_hits += bucket["doc_count"]

            date += timedelta(days=1)

        stats = {
            'unique_hits': len(unique_ips),
            'total_hits': total_hits,
        }

        # Build the ConfigParser data
        config.add_section(name_prefix)
        for key, value in stats.items():
            config.set(name_prefix, key, str(value))

    # Output the data in ConfigParser format to stdout and to a file.
    config.write(sys.stdout)
    if args.out:
        print('Writing to file passed via parameter: {filename}'.format(
            filename=args.out),
              file=sys.stderr)
        with open(args.out, 'w') as output_file:
            config.write(output_file)
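For illustration, with a hypothetical name_prefix of 'stats-2024-01' and made-up counts, the INI written to stdout (and to args.out, when given) would resemble:

    [stats-2024-01]
    unique_hits = 42
    total_hits = 1337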
Example No. 56
    def run(self):
        # Here we write a mini config for the server
        smbConfig = ConfigParser()
        smbConfig.add_section('global')
        smbConfig.set('global', 'server_name', 'server_name')
        smbConfig.set('global', 'server_os', 'UNIX')
        smbConfig.set('global', 'server_domain', 'WORKGROUP')
        smbConfig.set('global', 'log_file', self.__smbserver_log)
        smbConfig.set('global', 'credentials_file', '')

        # Let's add a dummy share
        smbConfig.add_section(self.__smbserver_share)
        smbConfig.set(self.__smbserver_share, 'comment', '')
        smbConfig.set(self.__smbserver_share, 'read only', 'no')
        smbConfig.set(self.__smbserver_share, 'share type', '0')
        smbConfig.set(self.__smbserver_share, 'path', self.__smbserver_dir)

        # IPC always needed
        smbConfig.add_section('IPC$')
        smbConfig.set('IPC$', 'comment', '')
        smbConfig.set('IPC$', 'read only', 'yes')
        smbConfig.set('IPC$', 'share type', '3')
        smbConfig.set('IPC$', 'path', '')

        self.localsmb = smbserver.SMBSERVER(('0.0.0.0', 445), config_parser=smbConfig)

        logger.info('Setting up SMB Server')
        self.localsmb.processConfigFile()
        logger.debug('Ready to listen...')

        try:
            self.localsmb.serve_forever()
        except Exception as _:
            pass
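Viewed as an INI file, the in-memory configuration built in run() is equivalent to the following (the share name, directory, and log file are placeholders for the instance attributes):

    [global]
    server_name = server_name
    server_os = UNIX
    server_domain = WORKGROUP
    log_file = <smbserver_log>
    credentials_file =

    [<smbserver_share>]
    comment =
    read only = no
    share type = 0
    path = <smbserver_dir>

    [IPC$]
    comment =
    read only = yes
    share type = 3
    path =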
Example No. 57
    def __init__(self,
                 parser=None,
                 defaults={},
                 writeargstofile=False,
                 readargs=True):
        if parser is None:
            parser = argparse.ArgumentParser(
                description="Default psycodict parser")

            parser.add_argument(
                "-c",
                "--config-file",
                dest="config_file",
                metavar="FILE",
                help="configuration file [default: %(default)s]",
                default=defaults.get("config_file", "config.ini"),
            )
            parser.add_argument(
                "-s",
                "--secrets-file",
                dest="secrets_file",
                metavar="SECRETS",
                help="secrets file [default: %(default)s]",
                default=defaults.get("secrets_file", "secrets.ini"),
            )

            logginggroup = parser.add_argument_group("Logging options:")
            logginggroup.add_argument(
                "--slowcutoff",
                dest="logging_slowcutoff",
                metavar="SLOWCUTOFF",
                help="threshold to log slow queries [default: %(default)s]",
                default=defaults.get("logging_slowcutoff", 0.1),
                type=float,
            )

            logginggroup.add_argument(
                "--slowlogfile",
                help="logfile for slow queries [default: %(default)s]",
                dest="logging_slowlogfile",
                metavar="FILE",
                default=defaults.get("logging_slowlogfile",
                                     "slow_queries.log"),
            )

            # PostgresSQL options
            postgresqlgroup = parser.add_argument_group("PostgreSQL options")
            postgresqlgroup.add_argument(
                "--postgresql-host",
                dest="postgresql_host",
                metavar="HOST",
                help="PostgreSQL server host or socket directory "
                     "[default: %(default)s]",
                default=defaults.get("postgresql_host", "localhost"),
            )
            postgresqlgroup.add_argument(
                "--postgresql-port",
                dest="postgresql_port",
                metavar="PORT",
                type=int,
                help="PostgreSQL server port [default: %(default)d]",
                default=defaults.get("postgresql_port", 5432),
            )

            postgresqlgroup.add_argument(
                "--postgresql-user",
                dest="postgresql_user",
                metavar="USER",
                help="PostgreSQL username [default: %(default)s]",
                default=defaults.get("postgresql_user", "postgres"),
            )

            postgresqlgroup.add_argument(
                "--postgresql-pass",
                dest="postgresql_password",
                metavar="PASS",
                help="PostgreSQL password [default: %(default)s]",
                default=defaults.get("postgres_password", ""),
            )

            postgresqlgroup.add_argument(
                "--postgresql-dbname",
                dest="postgresql_dbname",
                metavar="DBNAME",
                help="PostgreSQL database name [default: %(default)s]",
                default="lmfdb",
            )

        def sec_opt(key):
            if "_" in key:
                sec, opt = key.split("_", 1)
            else:
                sec = "misc"
                opt = key
            return sec, opt

        # 1: parsing command-line arguments
        if readargs:
            args = parser.parse_args()
        else:
            # only read config file
            args = parser.parse_args([])

        args_dict = vars(args)
        default_arguments_dict = vars(parser.parse_args([]))

        del default_arguments_dict["config_file"]
        del default_arguments_dict["secrets_file"]

        self.default_args = defaultdict(dict)
        for key, val in default_arguments_dict.items():
            sec, opt = sec_opt(key)
            self.default_args[sec][opt] = str(val)

        # reading the config file, creating it if necessary
        # 2/1: does config file exist?
        if not os.path.exists(args.config_file):
            write_args = deepcopy(self.default_args)
            if not writeargstofile:
                print(
                    "Config file: %s not found, creating it with the default values"
                    % args.config_file)
            else:
                print(
                    "Config file: %s not found, creating it with the passed values"
                    % args.config_file)
                # overwrite default arguments passed via command line args
                for key, val in args_dict.items():
                    if key in default_arguments_dict:
                        sec, opt = sec_opt(key)
                        write_args[sec][opt] = str(val)

            _cfgp = ConfigParser()
            # create sections
            for sec, options in write_args.items():
                _cfgp.add_section(sec)
                for opt, val in options.items():
                    _cfgp.set(sec, opt, str(val))

            with open(args.config_file, "w") as configfile:
                _cfgp.write(configfile)

        # 2/2: reading the config file
        _cfgp = ConfigParser()
        _cfgp.read(args.config_file)
        # 2/3: reading the secrets file, which can override the config
        if os.path.exists(args.secrets_file):
            _cfgp.read(args.secrets_file)

        # 3: override specific settings
        def file_to_args(sep="_"):
            ret = {}
            for s in _cfgp.sections():
                for k, v in _cfgp.items(s):
                    ret["%s%s%s" % (s, sep, k)] = v
            return ret

        args_file = file_to_args()

        for key, val in default_arguments_dict.items():
            # Override when a non-default value was passed on the command
            # line, or when the key is missing from the config file.
            if args_dict[key] != val or key not in args_file:
                sec, opt = sec_opt(key)
                if sec not in _cfgp.sections():
                    _cfgp.add_section(sec)
                _cfgp.set(sec, opt, str(args_dict[key]))

        # We can derive the types from the parser
        type_dict = {}
        for action in parser._actions:
            if isinstance(
                    action,
                (argparse._StoreTrueAction, argparse._StoreFalseAction)):
                type_dict[action.dest] = strbool
            else:
                type_dict[action.dest] = action.type

        def get(section, key):
            val = _cfgp.get(section, key)
            full = section + "_" + key
            type_func = type_dict.get(full)
            if type_func is not None:
                val = type_func(val)
            return val

        self.options = defaultdict(dict)
        for sec, options in self.default_args.items():
            for opt in options:
                self.options[sec][opt] = get(sec, opt)

        self.extra_options = {}  # not stored in the config file
        for key, val in args_dict.items():
            if key not in default_arguments_dict:
                self.extra_options[key] = val
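To illustrate the sec_opt convention: each dest is split at its first underscore into a section and an option, so a freshly generated config.ini (using only the defaults above) would look roughly like:

    [logging]
    slowcutoff = 0.1
    slowlogfile = slow_queries.log

    [postgresql]
    host = localhost
    port = 5432
    user = postgres
    password =
    dbname = lmfdb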
Example No. 58
class Configuration(object):
    defaults = {}

    def __init__(self, filename=None):
        self._config = ConfigParser()
        self._set_defaults()
        self._state_drivers = {}
        if filename is not None:
            self.load(filename)

    def _set_defaults(self):
        """Set defaults for config
        """
        self._config.add_section('main')
        for key, value in six.iteritems(self.defaults):
            if isinstance(value, dict):
                self._config.add_section(key)
                for subkey, subvalue in six.iteritems(value):
                    self._config.set(key, subkey, subvalue)
            else:
                self._config.set('main', key, value)

    def load(self, filename):
        """Load the configuration by filename
        """
        self._config.read(filename)

    def save(self, filename):
        """Save the configuration to a file
        """
        with open(filename, 'w') as handle:
            self._config.write(handle)

    @staticmethod
    def sanitize(items):
        options = {}
        for key, value in items:
            if key.endswith('[int]'):
                options[key[:-5]] = int(value)
            elif key.endswith('[bool]'):
                value = value.lower()
                if value in BOOL_MAP[True]:
                    value = True
                elif value in BOOL_MAP[False]:
                    value = False
                else:
                    raise ValueError('Expected boolean for {}'.format(key))
                options[key[:-6]] = value
            else:
                options[key] = value
        return options

    def __getitem__(self, name):
        if self._config.has_section(name):
            return self.sanitize(self._config.items(name))
        elif name == 'main':
            raise ValueError('Missing main section of configuration')
        return self['main'][name]

    def state_driver(self, name='ai'):
        """Get an instance of the state driver
        """
        from database import state

        if name not in self._state_drivers:
            extras = self[name]
            driver = extras.pop('state-driver')
            if driver == 'redis':
                self._state_drivers[name] = state.RedisDriver(self, extras)
            elif driver == 'dict':
                self._state_drivers[name] = state.MemoryDriver(self, extras)
            else:
                raise ValueError('Unknown state driver: {}'.format(driver))
        return self._state_drivers[name]
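A short sketch of the [int]/[bool] suffix convention that sanitize() implements (option names and values are hypothetical, and it assumes BOOL_MAP treats 'yes' as true):

    items = [('workers[int]', '4'), ('debug[bool]', 'yes'), ('state-driver', 'dict')]
    options = Configuration.sanitize(items)
    # -> {'workers': 4, 'debug': True, 'state-driver': 'dict'}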