def parse_args(args_str=None):
    defaults = {
        'old_rabbit_user': '******',
        'old_rabbit_password': '******',
        'old_rabbit_ha_mode': False,
        'old_rabbit_q_name': 'vnc-config.issu-queue',
        'old_rabbit_vhost': None,
        'old_rabbit_port': '5672',
        'old_rabbit_use_ssl': False,
        'old_rabbit_ssl_version': None,
        'old_rabbit_ssl_ca_certs': None,
        'old_rabbit_ssl_keyfile': None,
        'old_rabbit_ssl_certfile': None,
        'new_rabbit_user': '******',
        'new_rabbit_password': '******',
        'new_rabbit_ha_mode': False,
        'new_rabbit_q_name': 'vnc-config.issu-queue',
        'new_rabbit_vhost': '',
        'new_rabbit_port': '5672',
        'new_rabbit_use_ssl': False,
        'new_rabbit_ssl_version': None,
        'new_rabbit_ssl_ca_certs': None,
        'new_rabbit_ssl_keyfile': None,
        'new_rabbit_ssl_certfile': None,
        'odb_prefix': '',
        'ndb_prefix': '',
        'reset_config': None,
        'old_cassandra_user': None,
        'old_cassandra_password': None,
        'old_cassandra_use_ssl': False,
        'old_cassandra_ca_certs': None,
        'new_cassandra_user': None,
        'new_cassandra_password': None,
        'new_cassandra_use_ssl': False,
        'new_cassandra_ca_certs': None,
        'old_cassandra_address_list': '10.84.24.35:9160',
        'old_zookeeper_address_list': '10.84.24.35:2181',
        'old_rabbit_address_list': '10.84.24.35',
        'new_cassandra_address_list': '10.84.24.35:9160',
        'new_zookeeper_address_list': '10.84.24.35:2181',
        'new_rabbit_address_list': '10.84.24.35',
        'new_api_info': '{"10.84.24.52": [("root"), ("c0ntrail123")]}'

    }
    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument(
        "-c", "--conf_file", action='append',
        help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
    if args.conf_file:
        config = configparser.SafeConfigParser()
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))

    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    parser.set_defaults(**defaults)
    parser.add_argument(
        "--old_rabbit_user",
        help="Old RMQ user name")
    parser.add_argument(
        "--old_rabbit_password",
        help="Old RMQ passwd")
    parser.add_argument(
        "--old_rabbit_ha_mode",
        help="Old RMQ HA mode")
    parser.add_argument(
        "--old_rabbit_q_name",
        help="Q name in old RMQ")
    parser.add_argument(
        "--old_rabbit_vhost",
        help="Old RMQ Vhost")
    parser.add_argument(
        "--old_rabbit_port",
        help="Old RMQ port")
    parser.add_argument(
        "--old_rabbit_use_ssl",
        help="Old RMQ use ssl flag")
    parser.add_argument(
        "--old_rabbit_ssl_ca_certs",
        help="Old RMQ SSL CA certs file path")
    parser.add_argument(
        "--old_rabbit_ssl_keyfile",
        help="Old RMQ SSL key file path")
    parser.add_argument(
        "--old_rabbit_ssl_certfile",
        help="Old RMQ SSL certificate file path")
    parser.add_argument(
        "--old_rabbit_ssl_version",
        help="Old RMQ SSL version")
    parser.add_argument(
        "--new_rabbit_user",
        help="New RMQ user name")
    parser.add_argument(
        "--new_rabbit_password",
        help="New RMQ passwd")
    parser.add_argument(
        "--new_rabbit_ha_mode",
        help="New RMQ HA mode")
    parser.add_argument(
        "--new_rabbit_q_name",
        help="Q name in new RMQ")
    parser.add_argument(
        "--new_rabbit_vhost",
        help="New RMQ Vhost")
    parser.add_argument(
        "--new_rabbit_port",
        help="New RMQ port")
    parser.add_argument(
        "--new_rabbit_use_ssl",
        help="New RMQ use ssl flag")
    parser.add_argument(
        "--new_rabbit_ssl_ca_certs",
        help="New RMQ SSL CA certs file path")
    parser.add_argument(
        "--new_rabbit_ssl_keyfile",
        help="New RMQ SSL key file path")
    parser.add_argument(
        "--new_rabbit_ssl_certfile",
        help="New RMQ SSL certificate file path")
    parser.add_argument(
        "--new_rabbit_ssl_version",
        help="New RMQ SSL version")
    parser.add_argument(
        "--old_cassandra_user",
        help="Old Cassandra user name")
    parser.add_argument(
        "--old_cassandra_password",
        help="Old Cassandra passwd")
    parser.add_argument(
        "--new_cassandra_user",
        help="New Cassandra user name")
    parser.add_argument(
        "--new_cassandra_password",
        help="New Cassandra passwd")
    parser.add_argument(
        "--old_cassandra_use_ssl",
        help="Old Cassandra use ssl flag")
    parser.add_argument(
        "--old_cassandra_ca_certs",
        help="Old Cassandra CA certs file path")
    parser.add_argument(
        "--new_cassandra_use_ssl",
        help="New Cassandra use ssl flag")
    parser.add_argument(
        "--new_cassandra_ca_certs",
        help="New Cassandra CA certs file path")
    parser.add_argument(
        "--old_rabbit_address_list",
        help="Old RMQ addresses")
    parser.add_argument(
        "--old_cassandra_address_list",
        help="Old Cassandra addresses",
        nargs='+')
    parser.add_argument(
        "--old_zookeeper_address_list",
        help="Old zookeeper addresses")
    parser.add_argument(
        "--new_rabbit_address_list",
        help="New RMQ addresses")
    parser.add_argument(
        "--new_cassandra_address_list",
        help="New Cassandra addresses",
        nargs='+')
    parser.add_argument(
        "--new_zookeeper_address_list",
        help="New zookeeper addresses")
    parser.add_argument(
        "--old_db_prefix",
        help="Old DB prefix")
    parser.add_argument(
        "--new_db_prefix",
        help="New DB prefix")
    parser.add_argument(
        "--reset_config",
        help="Reset config")
    parser.add_argument(
        "--new_api_info",
        help="New API info",
        nargs="+")
    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    if args.conf_file:
        args_obj.config_sections = config
    if isinstance(args_obj.old_cassandra_address_list, string_types):
        args_obj.old_cassandra_address_list =\
            args_obj.old_cassandra_address_list.split()
    if isinstance(args_obj.new_cassandra_address_list, string_types):
        args_obj.new_cassandra_address_list =\
            args_obj.new_cassandra_address_list.split()
    args_obj.old_rabbit_use_ssl = (str(args_obj.old_rabbit_use_ssl).lower() == 'true')
    args_obj.new_rabbit_use_ssl = (str(args_obj.new_rabbit_use_ssl).lower() == 'true')
    args_obj.old_rabbit_ha_mode = (str(args_obj.old_rabbit_ha_mode).lower() == 'true')
    args_obj.new_rabbit_ha_mode = (str(args_obj.new_rabbit_ha_mode).lower() == 'true')
    args_obj.old_cassandra_use_ssl = (str(args_obj.old_cassandra_use_ssl).lower() == 'true')
    args_obj.new_cassandra_use_ssl = (str(args_obj.new_cassandra_use_ssl).lower() == 'true')

    return args_obj, remaining_argv
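
# Hypothetical invocation sketch for parse_args() above. The config file and
# flag values are assumptions; only the [DEFAULTS] section name and the
# bool coercion at the end come from the function itself. It relies on the
# module-level imports the function already needs (sys, argparse,
# configparser, six).
import tempfile

_cfg = tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False)
_cfg.write('[DEFAULTS]\nold_rabbit_port = 5673\nnew_rabbit_vhost = /contrail\n')
_cfg.close()

_args, _extra = parse_args('-c %s --old_rabbit_ha_mode true' % _cfg.name)
assert _args.old_rabbit_port == '5673'    # taken from the file's [DEFAULTS] section
assert _args.old_rabbit_ha_mode is True   # CLI string coerced to a bool at the end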
Example #2
    def parse_args(self):
        """ 
        Eg. python stats.py --analytics-api-ip 127.0.0.1
                          --analytics-api-port 8181
                          --table NodeStatus.process_mem_cpu_usage
                          --where name=a6s40 cpu_info.module_id=Collector
                          --select "T=60 SUM(cpu_info.cpu_share)"
                          --sort "SUM(cpu_info.cpu_share)"
                          [--start-time now-10m --end-time now] | --last 10m

            python stats.py --table NodeStatus.process_mem_cpu_usage
        """
        defaults = {
            'analytics_api_ip': '127.0.0.1',
            'analytics_api_port': '8181',
            'start_time': 'now-10m',
            'end_time': 'now',
            'select' : [],
            'where' : ['Source=*'],
            'sort': [],
            'admin_user': '******',
            'admin_password': '******',
            'conf_file': '/etc/contrail/contrail-keystone-auth.conf',
        }

        conf_parser = argparse.ArgumentParser(add_help=False)
        conf_parser.add_argument("--admin-user", help="Name of admin user")
        conf_parser.add_argument("--admin-password", help="Password of admin user")
        conf_parser.add_argument("--conf-file", help="Configuration file")
        conf_parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
        conf_parser.add_argument("--analytics-api-port", help="Port of Analytics API Server")
        args, remaining_argv = conf_parser.parse_known_args()

        configfile = defaults['conf_file']
        if args.conf_file:
            configfile = args.conf_file

        config = configparser.SafeConfigParser()
        config.read(configfile)
        if 'KEYSTONE' in config.sections():
            if args.admin_user is None:
                args.admin_user = config.get('KEYSTONE', 'admin_user')
            if args.admin_password is None:
                args.admin_password = config.get('KEYSTONE', 'admin_password')

        if args.admin_user is None:
            args.admin_user = defaults['admin_user']
        if args.admin_password is None:
            args.admin_password = defaults['admin_password']

        if args.analytics_api_ip is None:
            args.analytics_api_ip = defaults['analytics_api_ip']
        if args.analytics_api_port is None:
            args.analytics_api_port = defaults['analytics_api_port']

        stat_table_list = [xx.stat_type + "." + xx.stat_attr for xx in _STAT_TABLES]
        tab_url = "http://" + args.analytics_api_ip + ":" +\
            args.analytics_api_port + "/analytics/tables"
        tables = OpServerUtils.get_url_http(tab_url,
            args.admin_user, args.admin_password)
        if tables != {}:
            if tables.status_code == 200:
                table_list = json.loads(tables.text)
                for table in table_list:
                    if table['type'] == 'STAT':
                        table_name = '.'.join(table['name'].split('.')[1:])
                        # append to stat_table_list only if not existing
                        if table_name not in stat_table_list:
                            stat_table_list.append(table_name)

        parser = argparse.ArgumentParser(
                  # Inherit options from config_parser
                  parents=[conf_parser],
                  # print script description with -h/--help
                  description=__doc__,
                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.set_defaults(**defaults)
        parser.add_argument(
            "--start-time", help="Logs start time (format now-10m, now-1h)")
        parser.add_argument("--end-time", help="Logs end time")
        parser.add_argument(
            "--last", help="Logs from last time period (format 10m, 1d)")
        parser.add_argument(
            "--table", help="StatTable to query", choices=stat_table_list)
        parser.add_argument(
            "--dtable", help="Dynamic StatTable to query")
        parser.add_argument(
            "--select", help="List of Select Terms", nargs='+')
        parser.add_argument(
            "--where", help="List of Where Terms to be ANDed", nargs='+')
        parser.add_argument(
            "--sort", help="List of Sort Terms", nargs='+')
        self._args = parser.parse_args(remaining_argv)

        self._args.admin_user = args.admin_user
        self._args.admin_password = args.admin_password
        self._args.analytics_api_ip = args.analytics_api_ip
        self._args.analytics_api_port = args.analytics_api_port

        if self._args.table is None and self._args.dtable is None:
            return -1

        try:
            self._start_time, self._end_time = \
                OpServerUtils.parse_start_end_time(
                    start_time = self._args.start_time,
                    end_time = self._args.end_time,
                    last = self._args.last)
        except:
            return -1

        return 0
Example #3
    sys.exit(5)

if CFG is None and COMMAND not in ['post_xml', 'get_sysprof', 'validate_xml']:
    print('ERROR: -f <cfg> is a required argument')
    sys.exit(6)

if COMMAND in ['load_records', 'export_records'] and XML_DIRPATH is None:
    print('ERROR: -p </path/to/records> is a required argument')
    sys.exit(7)

if COMMAND == 'gen_sitemap' and OUTPUT_FILE is None:
    print('ERROR: -o </path/to/sitemap.xml> is a required argument')
    sys.exit(8)

if COMMAND not in ['post_xml', 'get_sysprof', 'validate_xml']:
    SCP = configparser.SafeConfigParser()
    with open(CFG) as f:
        SCP.readfp(f)

    DATABASE = SCP.get('repository', 'database')

    EXTRA_COLUMNS = []

    try:
        extra_columns = SCP.get('repository', 'extra_columns').split(",")
    except configparser.NoOptionError:
        extra_columns = None

    if extra_columns is not None:

        from sqlalchemy import Column, Text
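
# A hypothetical snippet of the <cfg> file read above: the [repository]
# section provides "database" and, optionally, a comma-separated
# "extra_columns" list. The section and option names come from the SCP.get()
# calls; the values are placeholders.
SAMPLE_REPOSITORY_CFG = """\
[repository]
database = sqlite:///records.db
extra_columns = foo,bar
"""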
Example #4
def parce_confs(opts):
    # configs
    confs = configparser.SafeConfigParser()
    confs.read(opts.config)
    return confs
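
# Minimal usage sketch for parce_confs(); how `opts` is built (and the
# --config option name) is an assumption, not part of the snippet above.
import argparse

_ap = argparse.ArgumentParser()
_ap.add_argument('--config', default='app.ini')
_opts = _ap.parse_args(['--config', 'app.ini'])

_confs = parce_confs(_opts)   # SafeConfigParser with app.ini loaded, if the file exists
print(_confs.sections())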
Example #5
    def _sync_latest(self):
        base_url = self.url

        checksum_file = base_url + "SHA256SUMS"
        checksum_file = requests.get(checksum_file)
        if checksum_file.status_code != 200:
            LOG.error("Could not get checksums file %s" % checksum_file.url)
            return

        aux = dict([list(reversed(line.split()))
                    for line in checksum_file.text.splitlines()])

        filename = None
        for k, v in aux.items():
            if k.endswith(".qcow2"):
                filename = k
                checksum = v
                break

        if filename is None:
            LOG.error("Could not get image file")
            return

        LOG.info("Downloading %s", filename)

        # Get the revision from the index file
        index = requests.get(base_url + filename + ".index")
        if not index.ok:
            LOG.error("Cannot download image from server, got %s", index.status_code)
            return
        parser = configparser.SafeConfigParser()
        parser.readfp(six.StringIO(index.text))
        section = parser.sections()[0]
        revision = parser.get(section, "revision")

        url = base_url + filename
        architecture = "x86_64"
        file_format = "qcow2"

        prefix = CONF.prefix
        name = "%sDebian %s [%s]" % (prefix, self.version, revision)

        image = self.glance.get_image_by_name(name)
        if image:
            if image.get("imgsync.sha256") != checksum:
                LOG.error("Glance image checksum (%s, %s) and official "
                          "checksum %s mismatch.",
                          image.id, image.get("imgsync.sha256"), checksum)
            else:
                LOG.info("Image already downloaded and synchronized")
            return

        location = None
        try:
            location = self._download_one(url, ("sha256", checksum))
            self.glance.upload(location,
                               name,
                               architecture=architecture,
                               file_format=file_format,
                               container_format="bare",
                               checksum={"sha256": checksum},
                               os_distro="ubuntu",
                               os_version=self.version)
            LOG.info("Synchronized %s", name)
        finally:
            if location is not None:
                os.remove(location.name)
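
# For context: the "<image>.index" file fetched in _sync_latest() is parsed as
# an INI document. A minimal sketch of the shape the code relies on -- a single
# section exposing a "revision" option; the section name and value here are
# made up.
import io
import configparser

_index_text = u"[20240101]\nrevision = 20240101\n"
_index = configparser.ConfigParser()   # SafeConfigParser is its deprecated alias
_index.read_file(io.StringIO(_index_text))
print(_index.get(_index.sections()[0], "revision"))   # -> 20240101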
Example #6
 def __init__(self):
     self.config_parser = configparser.SafeConfigParser()
Example #7
    def read_config(self, config_file):
        """
        Parses the specified configuration file and stores the values. Raises
        an InvalidConfigurationFile exception if the file is not well-formed.
        """
        cfg = ConfigParser.SafeConfigParser()
        try:
            cfg.read(config_file)
        except ConfigParser.MissingSectionHeaderError as e:
            # The file exists, but doesn't have the correct format.
            raise exc.InvalidConfigurationFile(e)

        def safe_get(section, option, default=None):
            try:
                return cfg.get(section, option)
            except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
                return default

        # A common mistake is including credentials in the config file. If any
        # values are found, issue a warning so that the developer can correct
        # this problem.
        creds_found = False
        for section in cfg.sections():
            if section == "settings":
                section_name = "default"
                self._default_set = True
            else:
                section_name = section
            # Check for included credentials
            for key in ("username", "password", "api_key"):
                if creds_found:
                    break
                if safe_get(section, key):
                    creds_found = True
            dct = self._settings[section_name] = {}
            dct["region"] = safe_get(section, "region", default_region)
            ityp = safe_get(section, "identity_type")
            if ityp:
                dct["identity_type"] = _id_type(ityp)
                dct["identity_class"] = _import_identity(ityp)
            # Handle both the old and new names for this setting.
            debug = safe_get(section, "debug")
            if debug is None:
                debug = safe_get(section, "http_debug", "False")
            dct["http_debug"] = debug == "True"
            verify_ssl = safe_get(section, "verify_ssl", "True")
            dct["verify_ssl"] = verify_ssl == "True"
            dct["keyring_username"] = safe_get(section, "keyring_username")
            dct["encoding"] = safe_get(section, "encoding", default_encoding)
            dct["auth_endpoint"] = safe_get(section, "auth_endpoint")
            dct["tenant_name"] = safe_get(section, "tenant_name")
            dct["tenant_id"] = safe_get(section, "tenant_id")
            use_servicenet = safe_get(section, "use_servicenet", "False")
            dct["use_servicenet"] = use_servicenet == "True"
            app_agent = safe_get(section, "custom_user_agent")
            if app_agent:
                # Customize the user-agent string with the app name.
                dct["user_agent"] = "%s %s" % (app_agent, USER_AGENT)
            else:
                dct["user_agent"] = USER_AGENT

            # If this is the first section, make it the default
            if not self._default_set:
                self._settings["default"] = self._settings[section]
                self._default_set = True
        if creds_found:
            warnings.warn("Login credentials were detected in your .pyrax.cfg "
                    "file. These have been ignored, but you should remove "
                    "them and either place them in a credential file, or "
                    "consider using another means of authentication. More "
                    "information on the use of credential files can be found "
                    "in the 'docs/getting_started.md' document.")
Example #8
def get_config_parser():
    import sys

    python_version = sys.version_info.major
    return configparser.ConfigParser(
    ) if python_version == 3 else configparser.SafeConfigParser()
Example #9
    def read_ini_settings(self):
        ''' Read ini file settings '''

        scriptbasename = "ocp-on-vmware"
        defaults = {
            'vmware': {
                'ini_path':
                os.path.join(os.path.dirname(__file__),
                             '%s.ini' % scriptbasename),
                'console_port':
                '8443',
                'container_storage':
                'none',
                'deployment_type':
                'openshift-enterprise',
                'openshift_vers':
                'v3_4',
                'vcenter_host':
                '',
                'vcenter_username':
                '******',
                'vcenter_password':
                '',
                'vcenter_template_name':
                'ocp-server-template-2.0.2',
                'vcenter_folder':
                'ocp',
                'vcenter_cluster':
                '',
                'vcenter_resource_pool':
                '/Resources/OCP3',
                'public_hosted_zone':
                '',
                'app_dns_prefix':
                'apps',
                'vm_dns':
                '',
                'vm_gw':
                '',
                'vm_netmask':
                '',
                'vm_network':
                'VM Network',
                'rhel_subscription_user':
                '',
                'rhel_subscription_pass':
                '',
                'rhel_subscription_server':
                '',
                'rhel_subscription_pool':
                'Red Hat OpenShift Container Platform, Premium*',
                'openshift_sdn':
                'redhat/openshift-ovs-subnet',
                'byo_lb':
                'no',
                'lb_host':
                'haproxy-',
                'byo_nfs':
                'no',
                'nfs_host':
                'nfs-0',
                'nfs_registry_mountpoint':
                '/exports',
                'master_nodes':
                '3',
                'infra_nodes':
                '2',
                'app_nodes':
                '3',
                'vm_ipaddr_start':
                '',
                'ocp_hostname_prefix':
                '',
                'auth_type':
                'ldap',
                'ldap_user':
                '******',
                'ldap_user_password':
                '',
                'node_type':
                self.args.node_type,
                'node_number':
                self.args.node_number,
                'ldap_fqdn':
                ''
            }
        }
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                         defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(
            os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        # apply defaults
        for k, v in defaults['vmware'].iteritems():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        self.console_port = config.get('vmware', 'console_port')
        self.container_storage = config.get('vmware', 'container_storage')
        self.deployment_type = config.get('vmware', 'deployment_type')
        self.openshift_vers = config.get('vmware', 'openshift_vers')
        self.vcenter_host = config.get('vmware', 'vcenter_host')
        self.vcenter_username = config.get('vmware', 'vcenter_username')
        self.vcenter_password = config.get('vmware', 'vcenter_password')
        self.vcenter_template_name = config.get('vmware',
                                                'vcenter_template_name')
        self.vcenter_folder = config.get('vmware', 'vcenter_folder')
        self.vcenter_cluster = config.get('vmware', 'vcenter_cluster')
        self.vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
        self.vcenter_resource_pool = config.get('vmware',
                                                'vcenter_resource_pool')
        self.public_hosted_zone = config.get('vmware', 'public_hosted_zone')
        self.app_dns_prefix = config.get('vmware', 'app_dns_prefix')
        self.vm_dns = config.get('vmware', 'vm_dns')
        self.vm_gw = config.get('vmware', 'vm_gw')
        self.vm_netmask = config.get('vmware', 'vm_netmask')
        self.vm_network = config.get('vmware', 'vm_network')
        self.rhel_subscription_user = config.get('vmware',
                                                 'rhel_subscription_user')
        self.rhel_subscription_pass = config.get('vmware',
                                                 'rhel_subscription_pass')
        self.rhel_subscription_server = config.get('vmware',
                                                   'rhel_subscription_server')
        self.rhel_subscription_pool = config.get('vmware',
                                                 'rhel_subscription_pool')
        self.openshift_sdn = config.get('vmware', 'openshift_sdn')
        self.byo_lb = config.get('vmware', 'byo_lb')
        self.lb_host = config.get('vmware', 'lb_host')
        self.byo_nfs = config.get('vmware', 'byo_nfs')
        self.nfs_host = config.get('vmware', 'nfs_host')
        self.nfs_registry_mountpoint = config.get('vmware',
                                                  'nfs_registry_mountpoint')
        self.master_nodes = config.get('vmware', 'master_nodes')
        self.infra_nodes = config.get('vmware', 'infra_nodes')
        self.app_nodes = config.get('vmware', 'app_nodes')
        self.vm_ipaddr_start = config.get('vmware', 'vm_ipaddr_start')
        self.ocp_hostname_prefix = config.get('vmware', 'ocp_hostname_prefix')
        self.auth_type = config.get('vmware', 'auth_type')
        self.ldap_user = config.get('vmware', 'ldap_user')
        self.ldap_user_password = config.get('vmware', 'ldap_user_password')
        self.ldap_fqdn = config.get('vmware', 'ldap_fqdn')
        self.node_type = config.get('vmware', 'node_type')
        self.node_number = config.get('vmware', 'node_number')
        err_count = 0

        if 'storage' in self.node_type:
            self.node_number = 3

        required_vars = {
            'public_hosted_zone': self.public_hosted_zone,
            'vcenter_host': self.vcenter_host,
            'vcenter_password': self.vcenter_password,
            'vm_ipaddr_start': self.vm_ipaddr_start,
            'ldap_fqdn': self.ldap_fqdn,
            'ldap_user_password': self.ldap_user_password,
            'vm_dns': self.vm_dns,
            'vm_gw': self.vm_gw,
            'vm_netmask': self.vm_netmask,
            'vcenter_datacenter': self.vcenter_datacenter
        }
        for k, v in required_vars.items():
            if v == '':
                err_count += 1
                print "Missing %s " % k
        if err_count > 0:
            print "Please fill out the missing variables in %s " % vmware_ini_path
            exit(1)
        self.wildcard_zone = "%s.%s" % (self.app_dns_prefix,
                                        self.public_hosted_zone)
        self.support_nodes = 0

        print 'Configured inventory values:'
        for each_section in config.sections():
            for (key, val) in config.items(each_section):
                print '\t %s:  %s' % (key, val)
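
# A hypothetical minimal ocp-on-vmware.ini for read_ini_settings(): the
# [vmware] section has to supply non-empty values for every key checked in
# required_vars above (all values below are placeholders).
MINIMAL_VMWARE_INI = """\
[vmware]
vcenter_host = vcenter.example.com
vcenter_password = changeme
vcenter_datacenter = dc1
public_hosted_zone = example.com
vm_ipaddr_start = 10.0.0.10
vm_dns = 10.0.0.2
vm_gw = 10.0.0.1
vm_netmask = 255.255.255.0
ldap_fqdn = ldap.example.com
ldap_user_password = changeme
"""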
Example #10
    def _load_support_matrix(self):
        """Reads the support-matrix.ini file and populates an instance
        of the SupportMatrix class with all the data.

        :returns: SupportMatrix instance
        """

        # SafeConfigParser was deprecated in Python 3.2
        if sys.version_info >= (3, 2):
            cfg = config_parser.ConfigParser()
        else:
            cfg = config_parser.SafeConfigParser()
        env = self.state.document.settings.env
        fname = self.options.get("support-matrix", "support-matrix.ini")
        rel_fpath, fpath = env.relfn2path(fname)
        with open(fpath) as fp:
            cfg.readfp(fp)

        # This ensures that the docs are rebuilt whenever the
        # .ini file changes
        env.note_dependency(rel_fpath)

        matrix = SupportMatrix()

        # The 'targets' section is special - it lists all the
        # hypervisors that this file records data for
        for item in cfg.options("backends"):
            if not item.startswith("backend-impl-"):
                continue

            # The driver string will optionally contain
            # a hypervisor and architecture qualifier
            # so we expect between 1 and 3 components
            # in the name
            key = item[13:]
            title = cfg.get("backends", item)
            name = key.split("-")

            try:
                status = cfg.get("backends.%s" % item, "status")
            except config_parser.NoOptionError:
                if cfg.get("backends.%s" % item, "type") == "xfr":
                    backend = Backend.get_driver(name[0])
                elif cfg.get("backends.%s" % item, "type") == "agent":
                    backend = AgentBackend.get_driver(name[0])
                status = backend.__backend_status__

            if len(name) == 1:
                backend = SupportMatrixBackend(key, title, status, name[0])
            elif len(name) == 2:
                backend = SupportMatrixBackend(key,
                                               title,
                                               status,
                                               name[0],
                                               variations=name[1])
            else:
                raise Exception("'%s' field is malformed in '[%s]' section" %
                                (item, "DEFAULT"))

            backend.in_tree = cfg.getboolean("backends.%s" % item, "in-tree")
            backend.type = cfg.get("backends.%s" % item, "type")
            backend.notes = cfg.get("backends.%s" % item, "notes")
            backend.repository = cfg.get("backends.%s" % item, "repository")
            backend.maintainers = cfg.get("backends.%s" % item, "maintainers")

            matrix.backends[key] = backend

        grades = cfg.get("grades", "valid-grades")

        grades = grades.split(",")

        for grade in grades:
            title = cfg.get("grades.%s" % grade, "title")
            notes = cfg.get("grades.%s" % grade, "notes")
            in_tree = cfg.get("grades.%s" % grade, "in-tree")
            css_class = cfg.get("grades.%s" % grade, "css-class")

            matrix.grade_names[grade] = title
            matrix.grade_classes[grade] = css_class

            grade = SupportMatrixGrade(grade, title, notes, in_tree, css_class)

            matrix.grades.append(grade)

        return matrix
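
# A hypothetical fragment of the support-matrix.ini layout implied by the
# cfg.get()/cfg.getboolean() calls above; the backend key, grade name and all
# values are placeholders.
SAMPLE_SUPPORT_MATRIX_INI = """\
[backends]
backend-impl-bind9 = Bind9

[backends.backend-impl-bind9]
status = supported
type = xfr
in-tree = True
notes =
repository =
maintainers =

[grades]
valid-grades = supported

[grades.supported]
title = Supported
notes =
in-tree = True
css-class = success
"""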
Example #11
def get_config_parser():
    return configparser.ConfigParser() if sys.version_info.major == 3 else configparser.SafeConfigParser()
Example #12
def launch_refarch_env(console_port=8443,
                       deployment_type=None,
                       openshift_vers=None,
                       vcenter_host=None,
                       vcenter_username=None,
                       vcenter_password=None,
                       vcenter_template_name=None,
                       vcenter_folder=None,
                       vcenter_cluster=None,
                       vcenter_datacenter=None,
                       vcenter_resource_pool=None,
                       public_hosted_zone=None,
                       app_dns_prefix=None,
                       vm_dns=None,
                       vm_gw=None,
                       vm_netmask=None,
                       vm_network=None,
                       rhel_subscription_user=None,
                       rhel_subscription_pass=None,
                       rhel_subscription_server=None,
                       rhel_subscription_pool=None,
                       byo_lb=None,
                       lb_host=None,
                       byo_nfs=None,
                       nfs_host=None,
                       nfs_registry_mountpoint=None,
                       no_confirm=False,
                       tag=None,
                       verbose=0,
                       create_inventory=None,
                       master_nodes=None,
                       infra_nodes=None,
                       app_nodes=None,
                       vm_ipaddr_start=None,
                       ocp_hostname_prefix=None,
                       create_ocp_vars=None,
                       auth_type=None,
                       ldap_user=None,
                       ldap_user_password=None,
                       ldap_fqdn=None,
                       openshift_sdn=None,
                       containerized=None,
                       container_storage=None,
                       openshift_hosted_metrics_deploy=None,
                       clean=None):

    # Open config file INI for values first
    scriptbasename = __file__
    scriptbasename = os.path.basename(scriptbasename)
    scriptbasename = scriptbasename.replace('.py', '')
    defaults = {
        'vmware': {
            'ini_path':
            os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
            'console_port':
            '8443',
            'deployment_type':
            'openshift-enterprise',
            'openshift_vers':
            'v3_5',
            'vcenter_host':
            '',
            'vcenter_username':
            '******',
            'vcenter_password':
            '',
            'vcenter_template_name':
            'ocp-server-template-2.0.2',
            'vcenter_folder':
            'ocp',
            'vcenter_cluster':
            '',
            'vcenter_resource_pool':
            '/Resources/OCP3',
            'public_hosted_zone':
            '',
            'app_dns_prefix':
            'apps',
            'vm_dns':
            '',
            'vm_gw':
            '',
            'vm_netmask':
            '',
            'vm_network':
            'VM Network',
            'rhel_subscription_user':
            '',
            'rhel_subscription_pass':
            '',
            'rhel_subscription_server':
            '',
            'rhel_subscription_pool':
            'Red Hat OpenShift Container Platform, Premium*',
            'openshift_sdn':
            'redhat/openshift-ovs-subnet',
            'containerized':
            'containerized',
            'container_storage':
            'none',
            'openshift_hosted_metrics_deploy':
            'false',
            'byo_lb':
            'no',
            'lb_host':
            'haproxy-',
            'byo_nfs':
            'no',
            'nfs_host':
            'nfs-0',
            'nfs_registry_mountpoint':
            '/exports',
            'master_nodes':
            '3',
            'infra_nodes':
            '2',
            'app_nodes':
            '3',
            'vm_ipaddr_start':
            '',
            'ocp_hostname_prefix':
            '',
            'auth_type':
            'ldap',
            'ldap_user':
            '******',
            'ldap_user_password':
            '',
            'ldap_fqdn':
            ''
        }
    }
    if six.PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()

    # where is the config?
    vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                     defaults['vmware']['ini_path'])
    vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
    config.read(vmware_ini_path)

    # apply defaults
    for k, v in defaults['vmware'].iteritems():
        if not config.has_option('vmware', k):
            config.set('vmware', k, str(v))

    console_port = config.get('vmware', 'console_port')
    deployment_type = config.get('vmware', 'deployment_type')
    openshift_vers = config.get('vmware', 'openshift_vers')
    vcenter_host = config.get('vmware', 'vcenter_host')
    vcenter_username = config.get('vmware', 'vcenter_username')
    vcenter_password = config.get('vmware', 'vcenter_password')
    vcenter_template_name = config.get('vmware', 'vcenter_template_name')
    vcenter_folder = config.get('vmware', 'vcenter_folder')
    vcenter_cluster = config.get('vmware', 'vcenter_cluster')
    vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
    vcenter_resource_pool = config.get('vmware', 'vcenter_resource_pool')
    public_hosted_zone = config.get('vmware', 'public_hosted_zone')
    app_dns_prefix = config.get('vmware', 'app_dns_prefix')
    vm_dns = config.get('vmware', 'vm_dns')
    vm_gw = config.get('vmware', 'vm_gw')
    vm_netmask = config.get('vmware', 'vm_netmask')
    vm_network = config.get('vmware', 'vm_network')
    rhel_subscription_user = config.get('vmware', 'rhel_subscription_user')
    rhel_subscription_pass = config.get('vmware', 'rhel_subscription_pass')
    rhel_subscription_server = config.get('vmware', 'rhel_subscription_server')
    rhel_subscription_pool = config.get('vmware', 'rhel_subscription_pool')
    openshift_sdn = config.get('vmware', 'openshift_sdn')
    containerized = config.get('vmware', 'containerized')
    container_storage = config.get('vmware', 'container_storage')
    openshift_hosted_metrics_deploy = config.get(
        'vmware', 'openshift_hosted_metrics_deploy')
    byo_lb = config.get('vmware', 'byo_lb')
    lb_host = config.get('vmware', 'lb_host')
    byo_nfs = config.get('vmware', 'byo_nfs')
    nfs_host = config.get('vmware', 'nfs_host')
    nfs_registry_mountpoint = config.get('vmware', 'nfs_registry_mountpoint')
    master_nodes = config.get('vmware', 'master_nodes')
    infra_nodes = config.get('vmware', 'infra_nodes')
    app_nodes = config.get('vmware', 'app_nodes')
    vm_ipaddr_start = config.get('vmware', 'vm_ipaddr_start')
    ocp_hostname_prefix = config.get('vmware', 'ocp_hostname_prefix')
    auth_type = config.get('vmware', 'auth_type')
    ldap_user = config.get('vmware', 'ldap_user')
    ldap_user_password = config.get('vmware', 'ldap_user_password')
    ldap_fqdn = config.get('vmware', 'ldap_fqdn')

    err_count = 0
    required_vars = {
        'public_hosted_zone': public_hosted_zone,
        'vcenter_host': vcenter_host,
        'vcenter_password': vcenter_password,
        'vm_ipaddr_start': vm_ipaddr_start,
        'ldap_fqdn': ldap_fqdn,
        'ldap_user_password': ldap_user_password,
        'vm_dns': vm_dns,
        'vm_gw': vm_gw,
        'vm_netmask': vm_netmask,
        'vcenter_datacenter': vcenter_datacenter
    }
    for k, v in required_vars.items():
        if v == '':
            err_count += 1
            print "Missing %s " % k
    if err_count > 0:
        print "Please fill out the missing variables in %s " % vmware_ini_path
        exit(1)
    wildcard_zone = "%s.%s" % (app_dns_prefix, public_hosted_zone)

    # fix nfs_host and lb_host vars with ocp_hostname_prefix
    if 'no' in byo_lb:
        if ocp_hostname_prefix is not None:
            lb_host = ocp_hostname_prefix + "haproxy-0"
        else:
            lb_host = "haproxy-0"

    if 'no' in byo_nfs:
        if ocp_hostname_prefix is not None:
            nfs_host = ocp_hostname_prefix + "nfs-0"
        else:
            nfs_host = "nfs-0"

    tags = []
    tags.append('setup')

    # Our initial support node is the wildcard_ip
    support_nodes = 1
    if byo_nfs == "no":
        support_nodes = support_nodes + 1
        tags.append('nfs')
    else:
        if nfs_host == '':
            nfs_host = click.prompt(
                "Please enter the NFS Server fqdn for persistent registry:")
        if nfs_registry_mountpoint == '':
            nfs_registry_mountpoint = click.prompt(
                "Please enter NFS share name for persistent registry:")

    tags.append('prod')

    if byo_lb == "no":
        tags.append('haproxy')
    else:
        if lb_host == '':
            lb_host = click.prompt(
                "Please enter the load balancer hostname for installation:")
            lb_host = lb_host + '.' + public_hosted_zone

    if create_ocp_vars is True:
        click.echo('Configured OCP variables:')
        click.echo('\tauth_type: %s' % auth_type)
        click.echo('\tldap_fqdn: %s' % ldap_fqdn)
        click.echo('\tldap_user: %s' % ldap_user)
        click.echo('\tldap_user_password: %s' % ldap_user_password)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
        click.echo('\tbyo_lb: %s' % byo_lb)
        click.echo('\tlb_host: %s' % lb_host)
        click.echo('\tUsing values from: %s' % vmware_ini_path)
        if not no_confirm:
            click.confirm('Continue using these values?', abort=True)
        if auth_type == 'ldap':
            l_bdn = ""

            for d in ldap_fqdn.split("."):
                l_bdn = l_bdn + "dc=" + d + ","

            l = ldap.initialize("ldap://" + ldap_fqdn)
            try:
                l.protocol_version = ldap.VERSION3
                l.set_option(ldap.OPT_REFERRALS, 0)
                bind = l.simple_bind_s(ldap_user, ldap_user_password)

                base = l_bdn[:-1]
                criteria = "(&(objectClass=user)(sAMAccountName=" + ldap_user + "))"
                attributes = 'displayName', 'distinguishedName'
                result = l.search_s(base, ldap.SCOPE_SUBTREE, criteria,
                                    attributes)

                results = [
                    entry for dn, entry in result if isinstance(entry, dict)
                ]
            finally:
                l.unbind()

            for result in results:

                bindDN = str(result['distinguishedName']).strip("'[]")
                url_base = bindDN.replace(("CN=" + ldap_user + ","), "")
                url = "ldap://" + ldap_fqdn + ":389/" + url_base + "?sAMAccountName"

            install_file = "playbooks/openshift-install.yaml"

            for line in fileinput.input(install_file, inplace=True):
                # Parse our ldap url
                if line.startswith("      url:"):
                    print "      url: " + url
                elif line.startswith("      bindPassword:"):
                    print "      bindPassword: " + ldap_user_password
                elif line.startswith("      bindDN:"):
                    print "      bindDN: " + bindDN
                elif line.startswith("    wildcard_zone:"):
                    print "    wildcard_zone: " + app_dns_prefix + "." + public_hosted_zone
                elif line.startswith("    load_balancer_hostname:"):
                    print "    load_balancer_hostname: " + lb_host + "." + public_hosted_zone
                elif line.startswith("    deployment_type:"):
                    print "    deployment_type: " + deployment_type
                elif line.startswith(
                        "    openshift_hosted_registry_storage_host:"):
                    print "    openshift_hosted_registry_storage_host: " + nfs_host + "." + public_hosted_zone
                elif line.startswith(
                        "    openshift_hosted_registry_storage_nfs_directory:"
                ):
                    print "    openshift_hosted_registry_storage_nfs_directory: " + nfs_host + "." + public_hosted_zone
                elif line.startswith(
                        "    openshift_hosted_metrics_storage_host:"):
                    print "    openshift_hosted_metrics_storage_host: " + nfs_host + "." + public_hosted_zone
                elif line.startswith(
                        "    openshift_hosted_metrics_storage_nfs_directory:"):
                    print "    openshift_hosted_metrics_storage_nfs_directory: " + nfs_registry_mountpoint
                else:
                    print line,

            # Provide values for update and add node playbooks
            update_file = "playbooks/minor-update.yaml"
            for line in fileinput.input(update_file, inplace=True):
                if line.startswith("    wildcard_zone:"):
                    print "    wildcard_zone: " + app_dns_prefix + "." + public_hosted_zone
                elif line.startswith("    load_balancer_hostname:"):
                    print "    load_balancer_hostname: " + lb_host + "." + public_hosted_zone
                elif line.startswith("    deployment_type:"):
                    print "    deployment_type: " + deployment_type
                else:
                    print line,
                #End create_ocp_vars
            exit(0)

        if auth_type == 'none':
            playbooks = [
                "playbooks/openshift-install.yaml",
                "playbooks/minor-update.yaml"
            ]
            for ocp_file in playbooks:
                for line in fileinput.input(ocp_file, inplace=True):
                    if line.startswith(
                            '#openshift_master_identity_providers:'):
                        line = line.replace('#', '    ')
                        print line
                    else:
                        print line,
        exit(0)

    if create_inventory is True:
        click.echo('Configured inventory values:')
        click.echo('\tmaster_nodes: %s' % master_nodes)
        click.echo('\tinfra_nodes: %s' % infra_nodes)
        click.echo('\tapp_nodes: %s' % app_nodes)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
        click.echo('\tocp_hostname_prefix: %s' % ocp_hostname_prefix)
        click.echo('\tbyo_nfs: %s' % byo_nfs)
        if byo_nfs == "no":
            click.echo('\tnfs_host: %s' % nfs_host)
        click.echo('\tbyo_lb: %s' % byo_lb)
        if byo_lb == "no":
            click.echo('\tlb_host: %s' % lb_host)
        click.echo('\tvm_ipaddr_start: %s' % vm_ipaddr_start)
        click.echo('\tUsing values from: %s' % vmware_ini_path)
        click.echo("")
        if not no_confirm:
            click.confirm('Continue using these values?', abort=True)
        # Create the inventory file and exit
        total_nodes = int(master_nodes) + int(app_nodes) + int(
            infra_nodes) + int(support_nodes)

        if vm_ipaddr_start is None:
            vm_ipaddr_start = click.prompt("Starting IP address to use?")

        ip4addr = []
        for i in range(total_nodes):
            p = iptools.ipv4.ip2long(vm_ipaddr_start) + i
            ip4addr.append(iptools.ipv4.long2ip(p))
        wild_ip = ip4addr.pop()

        bind_entry = []
        bind_entry.append("$ORIGIN " + app_dns_prefix + "." +
                          public_hosted_zone + ".")
        bind_entry.append("*\tA\t" + wild_ip)
        bind_entry.append("$ORIGIN " + public_hosted_zone + ".")

        d = {}
        d['host_inventory'] = {}
        d['infrastructure_hosts'] = {}

        support_list = []
        if byo_nfs == "no":
            if ocp_hostname_prefix not in nfs_host:
                nfs_host = ocp_hostname_prefix + "nfs-0"
            d['host_inventory'][nfs_host] = {}
            d['host_inventory'][nfs_host]['guestname'] = nfs_host
            d['host_inventory'][nfs_host]['ip4addr'] = ip4addr[0]
            d['host_inventory'][nfs_host]['tag'] = "infra-nfs"
            d['infrastructure_hosts']["nfs_server"] = {}
            d['infrastructure_hosts']["nfs_server"]['guestname'] = nfs_host
            d['infrastructure_hosts']["nfs_server"]['tag'] = "infra-nfs"
            support_list.append(nfs_host)
            bind_entry.append(nfs_host + "\tA\t" + ip4addr[0])
            del ip4addr[0]

        if byo_lb == "no":
            if ocp_hostname_prefix not in lb_host:
                lb_host = ocp_hostname_prefix + "haproxy-0"
            d['host_inventory'][lb_host] = {}
            d['host_inventory'][lb_host]['guestname'] = lb_host
            d['host_inventory'][lb_host]['ip4addr'] = wild_ip
            d['host_inventory'][lb_host]['tag'] = "loadbalancer"
            d['infrastructure_hosts']["haproxy"] = {}
            d['infrastructure_hosts']["haproxy"]['guestname'] = lb_host
            d['infrastructure_hosts']["haproxy"]['tag'] = "loadbalancer"
            support_list.append(lb_host)
            bind_entry.append(lb_host + "\tA\t" + wild_ip)

        master_list = []
        d['production_hosts'] = {}
        for i in range(0, int(master_nodes)):
            if ocp_hostname_prefix is not None:
                master_name = ocp_hostname_prefix + "master-" + str(i)
            else:
                master_name = "master-" + str(i)
            d['host_inventory'][master_name] = {}
            d['host_inventory'][master_name]['guestname'] = master_name
            d['host_inventory'][master_name]['ip4addr'] = ip4addr[0]
            d['host_inventory'][master_name]['tag'] = "master"
            d['production_hosts'][master_name] = {}
            d['production_hosts'][master_name]['guestname'] = master_name
            d['production_hosts'][master_name]['tag'] = "master"
            master_list.append(master_name)
            bind_entry.append(master_name + "\tA\t" + ip4addr[0])
            del ip4addr[0]
        app_list = []
        for i in range(0, int(app_nodes)):
            if ocp_hostname_prefix is not None:
                app_name = ocp_hostname_prefix + "app-" + str(i)

            else:
                app_name = "app-" + str(i)

            d['host_inventory'][app_name] = {}
            d['host_inventory'][app_name]['guestname'] = app_name
            d['host_inventory'][app_name]['ip4addr'] = ip4addr[0]
            d['host_inventory'][app_name]['tag'] = "app"
            d['production_hosts'][app_name] = {}
            d['production_hosts'][app_name]['guestname'] = app_name
            d['production_hosts'][app_name]['tag'] = "app"
            app_list.append(app_name)
            bind_entry.append(app_name + "\tA\t" + ip4addr[0])
            del ip4addr[0]
        infra_list = []
        for i in range(0, int(infra_nodes)):
            if ocp_hostname_prefix is not None:
                infra_name = ocp_hostname_prefix + "infra-" + str(i)
            else:
                infra_name = "infra-" + str(i)
            d['host_inventory'][infra_name] = {}
            d['host_inventory'][infra_name]['guestname'] = infra_name
            d['host_inventory'][infra_name]['ip4addr'] = ip4addr[0]
            d['host_inventory'][infra_name]['tag'] = "infra"
            d['production_hosts'][infra_name] = {}
            d['production_hosts'][infra_name]['guestname'] = infra_name
            d['production_hosts'][infra_name]['tag'] = "infra"
            infra_list.append(infra_name)
            bind_entry.append(infra_name + "        A       " + ip4addr[0])
            del ip4addr[0]
        print "# Here is what should go into your DNS records"
        print("\n".join(bind_entry))
        print "# Please note, if you have chosen to bring your own loadbalancer and NFS Server you will need to ensure that these records are added to DNS and properly resolve. "

        with open('infrastructure.json', 'w') as outfile:
            json.dump(d, outfile)
        exit(0)
    # End create inventory

    # Display information to the user about their choices
    click.echo('Configured values:')
    for each_section in config.sections():
        for (key, val) in config.items(each_section):
            print '\t %s:  %s' % (key, val)
    click.echo("")

    if not no_confirm:
        click.confirm('Continue using these values?', abort=True)

    if not os.path.isfile('infrastructure.json'):
        print "Please create your inventory file first by running the --create_inventory flag"
        exit(1)

    inventory_file = "inventory/vsphere/vms/vmware_inventory.ini"
    # Add section here to modify inventory file based on input from user check your vmmark scripts for parsing the file and adding the values
    for line in fileinput.input(inventory_file, inplace=True):
        if line.startswith("server="):
            print "server=" + vcenter_host
        elif line.startswith("password="):
            print "password=" + vcenter_password
        elif line.startswith("username="):
            print "username=" + vcenter_username
        else:
            print line,
        command = 'cp -f ~/.ssh/id_rsa ssh_key/ocp3-installer'
        os.system(command)
        command = 'cp -f ~/.ssh/id_rsa ssh_key/ocp-installer'
        os.system(command)
        # make sure the ssh keys have the proper permissions
        command = 'chmod 600 ssh_key/ocp-installer'
        os.system(command)

        # remove any cached facts to prevent stale data during a re-run
        command = 'rm -rf .ansible/cached_facts'
        os.system(command)
        tags = ",".join(tags)
        if clean is True:
            # recreate inventory with added nodes to clean up
            tags = 'clean'
            command = './ocp-on-vmware --create_inventory --no-confirm'
            os.system(command)
        if tag:
            tags = tag

        #if local:
        #command='ansible-playbook'
        #else:
        #   command='docker run -t --rm --volume `pwd`:/opt/ansible:z -v ~/.ssh:/root/.ssh:z -v /tmp:/tmp:z --net=host ansible:2.2-latest'
        command = 'ansible-playbook'
        command = command + ' --extra-vars "@./infrastructure.json" --tags %s -e \'vcenter_host=%s \
    vcenter_username=%s \
    vcenter_password=%s \
    vcenter_template_name=%s \
    vcenter_folder=%s \
    vcenter_cluster=%s \
    vcenter_datacenter=%s \
    vcenter_resource_pool=%s \
    public_hosted_zone=%s \
    app_dns_prefix=%s \
    vm_dns=%s \
    vm_gw=%s \
    vm_netmask=%s \
    vm_network=%s \
    wildcard_zone=%s \
    console_port=%s \
    deployment_type=%s \
    openshift_vers=%s \
    rhel_subscription_user=%s \
    rhel_subscription_pass=%s \
    rhel_subscription_server=%s \
    rhel_subscription_pool="%s" \
    openshift_sdn=%s \
    containerized=%s \
    container_storage=%s \
    openshift_hosted_metrics_deploy=%s \
    lb_host=%s \
    nfs_host=%s \
    nfs_registry_mountpoint=%s \' %s' % (
            tags, vcenter_host, vcenter_username, vcenter_password,
            vcenter_template_name, vcenter_folder, vcenter_cluster,
            vcenter_datacenter, vcenter_resource_pool, public_hosted_zone,
            app_dns_prefix, vm_dns, vm_gw, vm_netmask, vm_network,
            wildcard_zone, console_port, deployment_type, openshift_vers,
            rhel_subscription_user, rhel_subscription_pass,
            rhel_subscription_server, rhel_subscription_pool, openshift_sdn,
            containerized, container_storage, openshift_hosted_metrics_deploy,
            lb_host, nfs_host, nfs_registry_mountpoint, playbook)
        if verbose > 0:
            command += " -" + "".join(['v'] * verbose)
            click.echo('We are running: %s' % command)

        status = os.system(command)
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            return os.WEXITSTATUS(status)
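
# Shape of the infrastructure.json written by the --create_inventory path in
# launch_refarch_env() above; the host names and addresses are placeholders.
SAMPLE_INFRASTRUCTURE_JSON = {
    "host_inventory": {
        "master-0": {"guestname": "master-0", "ip4addr": "10.0.0.11", "tag": "master"},
        "app-0": {"guestname": "app-0", "ip4addr": "10.0.0.12", "tag": "app"},
        "infra-0": {"guestname": "infra-0", "ip4addr": "10.0.0.13", "tag": "infra"},
        "nfs-0": {"guestname": "nfs-0", "ip4addr": "10.0.0.14", "tag": "infra-nfs"},
        "haproxy-0": {"guestname": "haproxy-0", "ip4addr": "10.0.0.15", "tag": "loadbalancer"},
    },
    "infrastructure_hosts": {
        "nfs_server": {"guestname": "nfs-0", "tag": "infra-nfs"},
        "haproxy": {"guestname": "haproxy-0", "tag": "loadbalancer"},
    },
    "production_hosts": {
        "master-0": {"guestname": "master-0", "tag": "master"},
        "app-0": {"guestname": "app-0", "tag": "app"},
        "infra-0": {"guestname": "infra-0", "tag": "infra"},
    },
}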
Example #13
    def _parse_args(self, args_str):
        '''
        Eg. python provision_control.py --host_name a3s30.contrail.juniper.net
                                        --host_ip 10.1.1.1
                                        --router_asn 64512
                                        --enable_4byte_as
                                        --ibgp_auto_mesh|--no_ibgp_auto_mesh
                                        --api_server_ip 127.0.0.1
                                        --api_server_port 8082
                                        --api_server_use_ssl False
                                        --oper <add | del>
                                        --md5 <key value>|None(optional)
                                        --bgp_server_port <port>|None(optional)
                                        --local_autonomous_system <ASN value>|None(optional)
                                        --graceful_restart_time 300
                                        --long_lived_graceful_restart_time 300
                                        --end_of_rib_timeout 300
                                        --set_graceful_restart_parameters False
                                        --graceful_restart_bgp_helper_enable False
                                        --graceful_restart_xmpp_helper_enable False
                                        --graceful_restart_enable False

        '''

        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c",
                                 "--conf_file",
                                 help="Specify config file",
                                 metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        defaults = {
            'router_asn': '64512',
            'enable_4byte_as': None,
            'bgp_server_port': 179,
            'local_autonomous_system': None,
            'ibgp_auto_mesh': None,
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
            'api_server_use_ssl': False,
            'oper': None,
            'admin_user': None,
            'admin_password': None,
            'admin_tenant_name': None,
            'md5': None,
            'graceful_restart_time': 300,
            'long_lived_graceful_restart_time': 300,
            'end_of_rib_timeout': 300,
            'graceful_restart_bgp_helper_enable': False,
            'graceful_restart_xmpp_helper_enable': False,
            'graceful_restart_enable': False,
            'set_graceful_restart_parameters': False,
            'sub_cluster_name': None,
            'peer_list': None,
        }

        if args.conf_file:
            config = configparser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))

        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        parser.set_defaults(**defaults)

        parser.add_argument("--host_name",
                            help="hostname name of control-node")
        parser.add_argument("--host_ip", help="IP address of control-node")
        parser.add_argument("--router_asn",
                            help="AS Number the control-node is in",
                            required=True)
        parser.add_argument("--enable_4byte_as",
                            help="If set, AS Number can be 4 byte wide",
                            dest='enable_4byte_as',
                            action='store_true')
        parser.add_argument("--bgp_server_port",
                            help="BGP server port number (Default: 179)")
        parser.add_argument(
            "--local_autonomous_system",
            help=
            "Local autonomous-system number used to peer contrail-control bgp speakers across different geographic locations"
        )
        parser.add_argument("--address_families",
                            help="Address family list",
                            choices=[
                                "route-target", "inet-vpn", "e-vpn", "erm-vpn",
                                "inet6-vpn"
                            ],
                            nargs="*",
                            default=[])
        parser.add_argument("--md5", help="Md5 config for the node")
        parser.add_argument("--ibgp_auto_mesh",
                            help="Create iBGP mesh automatically",
                            dest='ibgp_auto_mesh',
                            action='store_true')
        parser.add_argument("--no_ibgp_auto_mesh",
                            help="Don't create iBGP mesh automatically",
                            dest='ibgp_auto_mesh',
                            action='store_false')
        parser.add_argument("--api_server_port", help="Port of api server")
        parser.add_argument("--api_server_use_ssl",
                            help="Use SSL to connect with API server")
        parser.add_argument("--oper",
                            help="Provision operation to be done(add or del)")
        parser.add_argument("--admin_user", help="Name of keystone admin user")
        parser.add_argument("--admin_password",
                            help="Password of keystone admin user")
        parser.add_argument("--admin_tenant_name",
                            help="Tenamt name for keystone admin user")
        parser.add_argument("--graceful_restart_time",
                            help="Graceful Restart Time in seconds (0..4095)",
                            type=self.gr_time_type,
                            default=300,
                            required=False)
        parser.add_argument(
            "--long_lived_graceful_restart_time",
            help="Long Lived Graceful Restart Time in seconds (0..16777215)",
            type=self.llgr_time_type,
            default=300,
            required=False)
        parser.add_argument("--end_of_rib_timeout",
                            help="EndOfRib timeout value in seconds (0..4095)",
                            type=self.gr_time_type,
                            default=300,
                            required=False)
        parser.add_argument("--graceful_restart_bgp_helper_enable",
                            action='store_true',
                            help="Enable helper mode for BGP graceful restart")
        parser.add_argument(
            "--graceful_restart_xmpp_helper_enable",
            action='store_true',
            help="Enable helper mode for XMPP graceful restart")
        parser.add_argument("--graceful_restart_enable",
                            action='store_true',
                            help="Enable Graceful Restart")
        parser.add_argument("--set_graceful_restart_parameters",
                            action='store_true',
                            help="Set Graceful Restart Parameters")
        parser.add_argument("--sub_cluster_name",
                            help="sub cluster to associate to",
                            required=False)
        parser.add_argument("--peer_list",
                            help="list of control node names to peer",
                            required=False)
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument("--api_server_ip",
                           help="IP address of api server",
                           nargs='+',
                           type=str)
        group.add_argument("--use_admin_api",
                           default=False,
                           help="Connect to local api-server on admin port",
                           action="store_true")

        self._args = parser.parse_args(remaining_argv)
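The two-stage pattern above (a bare conf-file parser feeding set_defaults() of the real parser) is easier to see in isolation. Below is a condensed, self-contained sketch of the same pattern using the modern ConfigParser name; the option names and the DEFAULTS section mirror the code above, while the values in the usage comment are illustrative.

import argparse
import configparser

def parse(argv):
    # Stage 1: only look for -c/--conf_file, leave everything else alone.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument('-c', '--conf_file', metavar='FILE')
    known, remaining = conf_parser.parse_known_args(argv)

    # Stage 2: hard-coded defaults, overridden by the INI file if given...
    defaults = {'router_asn': '64512', 'api_server_port': '8082'}
    if known.conf_file:
        cfg = configparser.ConfigParser()
        cfg.read([known.conf_file])
        defaults.update(dict(cfg.items('DEFAULTS')))

    # ...and finally overridden by whatever was passed on the command line.
    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.set_defaults(**defaults)
    parser.add_argument('--router_asn')
    parser.add_argument('--api_server_port')
    return parser.parse_args(remaining)

# e.g. parse(['--router_asn', '65001']).router_asn -> '65001'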
Example #14
def parse_args(argv, ipython_mode=False):
    """Parse list of arguments.

    If a config file is provided (via -c), it will read in the
    supplied options and overwrite any global defaults.

    All other directly supplied arguments will overwrite the config
    file settings.

    Arguments:
        * argv : list of strings
            List of arguments, e.g. ['-c', 'my.conf']
        * ipython_mode : bool <default=False>
            Whether to parse IPython specific arguments
            like --local_namespace

    Notes:
    Default settings can be found in zipline.utils.cli.DEFAULTS.

    """
    # Parse any conf_file specification
    # We make this parser with add_help=False so that
    # it doesn't parse -h and print help.
    conf_parser = argparse.ArgumentParser(
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Turn off help, so we print all options in response to -h
        add_help=False)
    conf_parser.add_argument("-c",
                             "--conf_file",
                             help="Specify config file",
                             metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(argv)

    defaults = copy(DEFAULTS)

    if args.conf_file:
        config = configparser.SafeConfigParser()
        config.read([args.conf_file])
        defaults.update(dict(config.items("Defaults")))

    # Parse rest of arguments
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        description="Zipline version %s." % zipline.__version__,
        parents=[conf_parser])

    parser.set_defaults(**defaults)

    parser.add_argument('--algofile', '-f')
    parser.add_argument('--data-frequency', choices=('minute', 'daily'))
    parser.add_argument('--start', '-s')
    parser.add_argument('--end', '-e')
    parser.add_argument('--capital_base')
    parser.add_argument('--source', '-d', choices=('yahoo', ))
    parser.add_argument('--source_time_column', '-t')
    parser.add_argument('--symbols')
    parser.add_argument('--output', '-o')
    parser.add_argument('--metadata_path', '-m')
    parser.add_argument('--metadata_index', '-x')
    parser.add_argument('--print-algo',
                        '-p',
                        dest='print_algo',
                        action='store_true')
    parser.add_argument('--no-print-algo',
                        '-q',
                        dest='print_algo',
                        action='store_false')

    if ipython_mode:
        parser.add_argument('--local_namespace', action='store_true')

    args = parser.parse_args(remaining_argv)

    return (vars(args))
Example #15
    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''
        if six.PY2:
            config = configparser.SafeConfigParser()
        else:
            config = configparser.ConfigParser()
        ec2_default_ini_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
        config.read(ec2_ini_path)

        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')

        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(
                    boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                for regionInfo in ec2.regions():
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")

        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2',
                                                   'vpc_destination_variable')

        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))

        # Include RDS instances?
        self.rds_enabled = True
        if config.has_option('ec2', 'rds'):
            self.rds_enabled = config.getboolean('ec2', 'rds')

        # Return all EC2 and RDS instances (if RDS is enabled)
        if config.has_option('ec2', 'all_instances'):
            self.all_instances = config.getboolean('ec2', 'all_instances')
        else:
            self.all_instances = False
        if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
            self.all_rds_instances = config.getboolean('ec2',
                                                       'all_rds_instances')
        else:
            self.all_rds_instances = False

        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
        self.cache_path_index = cache_dir + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')

        # Configure nested groups instead of flat namespace.
        if config.has_option('ec2', 'nested_groups'):
            self.nested_groups = config.getboolean('ec2', 'nested_groups')
        else:
            self.nested_groups = False

        # Configure which groups should be created.
        group_by_options = [
            'group_by_instance_id',
            'group_by_region',
            'group_by_availability_zone',
            'group_by_ami_id',
            'group_by_instance_type',
            'group_by_key_pair',
            'group_by_vpc_id',
            'group_by_security_group',
            'group_by_tag_keys',
            'group_by_tag_none',
            'group_by_route53_names',
            'group_by_rds_engine',
            'group_by_rds_parameter_group',
        ]
        for option in group_by_options:
            if config.has_option('ec2', option):
                setattr(self, option, config.getboolean('ec2', option))
            else:
                setattr(self, option, True)

        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get('ec2', 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except configparser.NoOptionError as e:
            self.pattern_include = None

        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get('ec2', 'pattern_exclude')
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except configparser.NoOptionError as e:
            self.pattern_exclude = None

        # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
        self.ec2_instance_filters = defaultdict(list)
        if config.has_option('ec2', 'instance_filters'):
            for instance_filter in config.get('ec2', 'instance_filters',
                                              '').split(','):
                instance_filter = instance_filter.strip()
                if not instance_filter or '=' not in instance_filter:
                    continue
                filter_key, filter_value = [
                    x.strip() for x in instance_filter.split('=', 1)
                ]
                if not filter_key:
                    continue
                self.ec2_instance_filters[filter_key].append(filter_value)
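The instance_filters handling at the end of this snippet is easy to misread, so here is a standalone sketch of the resulting {filter_key: [values]} mapping; the filter string below is illustrative, not from the original.

from collections import defaultdict

raw = 'tag:Environment=prod, instance-state-name=running, =ignored, malformed'
ec2_instance_filters = defaultdict(list)
for instance_filter in raw.split(','):
    instance_filter = instance_filter.strip()
    if not instance_filter or '=' not in instance_filter:
        continue  # skips 'malformed'
    filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
    if not filter_key:
        continue  # skips '=ignored'
    ec2_instance_filters[filter_key].append(filter_value)

# dict(ec2_instance_filters) ->
# {'tag:Environment': ['prod'], 'instance-state-name': ['running']}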
Example #16
def read_configuration(path):
    cfg = configparser.SafeConfigParser()
    cfg.read(path)
    cfgdir = os.path.dirname(path)
    result = {}
    for fixed, keywords in options:
        if 'dest' in keywords:
            dest = keywords['dest']
        else:
            for opt in fixed:
                if opt.startswith('--'):
                    dest = opt[2:].replace('-', '_')
                else:
                    assert len(opt) == 2
                    dest = opt[1:]
        if dest in 'tags_help lang_list lang_help version'.split():
            continue
        if not cfg.has_option('behave', dest):
            continue
        action = keywords.get('action', 'store')
        if action == 'store':
            use_raw_value = dest in raw_value_options
            result[dest] = cfg.get('behave', dest, raw=use_raw_value)
        elif action in ('store_true', 'store_false'):
            result[dest] = cfg.getboolean('behave', dest)
        elif action == 'append':
            if dest == 'userdata_defines':
                continue  # -- SKIP-CONFIGFILE: Command-line only option.
            result[dest] = \
                [s.strip() for s in cfg.get('behave', dest).splitlines()]
        else:
            raise ValueError('action "%s" not implemented' % action)

    if 'format' in result:
        # -- OPTIONS: format/outfiles are coupled in configuration file.
        formatters = result['format']
        formatter_size = len(formatters)
        outfiles = result.get('outfiles', [])
        outfiles_size = len(outfiles)
        if outfiles_size < formatter_size:
            for formatter_name in formatters[outfiles_size:]:
                outfile = "%s.output" % formatter_name
                outfiles.append(outfile)
            result['outfiles'] = outfiles
        elif len(outfiles) > formatter_size:
            print("CONFIG-ERROR: Too many outfiles (%d) provided." %
                  outfiles_size)
            result['outfiles'] = outfiles[:formatter_size]

    for paths_name in ('paths', 'outfiles'):
        if paths_name in result:
            # -- Evaluate relative paths relative to configfile location.
            # NOTE: Absolute paths are preserved by os.path.join().
            paths = result[paths_name]
            result[paths_name] = \
                [os.path.normpath(os.path.join(cfgdir, p)) for p in paths]

    result['userdata'] = {}
    if cfg.has_section('behave.userdata'):
        result['userdata'].update(cfg.items('behave.userdata'))
    return result
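The format/outfiles coupling above pads missing outfiles with '<formatter>.output' names. A tiny standalone sketch of that padding rule, with illustrative formatter and file names:

formatters = ['pretty', 'json', 'junit']   # from the 'format' option
outfiles = ['report.txt']                  # from the 'outfiles' option
# Every formatter beyond the supplied outfiles gets a generated file name.
missing = formatters[len(outfiles):]
for formatter_name in missing:
    outfiles.append('%s.output' % formatter_name)
# outfiles -> ['report.txt', 'json.output', 'junit.output']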
Example #17
def read_timing_mode_waveform(mode_ini, iq_dir=None):
    """Read waveforms in timing mode format.

    This function supports the loading of a timing mode. The mode configuration
    file specifies the needed IQ data for each transmit sweep in the mode
    sequence. This is then used to load the RF waveform IQ data.


    Parameters
    ----------

    mode_ini : str
        Path to mode INI file. The '.iq.ini' extension can optionally be left
        off.

    iq_dir : str
        Path to directory with binary IQ sweep files. If None, defaults to
        '`os.path.dirname(mode_ini)`/../iq'.


    Returns
    -------

    dict
        Dictionary with entries::

            waveform : np.complex64 array
                Waveform (sequence) array.

            samplerate : float
                Waveform sample rate in Hz.

            center_freq : float
                Default waveform center frequency in Hz.

    """
    # make sure mode_ini exists as named, or add extension
    mode_ini = os.path.abspath(mode_ini)
    if not os.path.isfile(mode_ini):
        mode_ini = mode_ini + ".iq.ini"

    # parse mode INI file (so we can fail early if it's bad)
    cparser = configparser.SafeConfigParser()
    try:
        cparser.read(mode_ini)
    except configparser.MissingSectionHeaderError:
        raise ValueError("Cannot read timing mode INI file.")
    if not cparser.has_section("mode"):
        raise ValueError("Cannot read timing mode INI file.")

    # get mode name and default iq directory if unspecified
    mode_dir = os.path.dirname(mode_ini)
    base_dir = os.path.dirname(mode_dir)
    if iq_dir is None:
        iq_dir = os.path.join(base_dir, "iq")

    # extract waveform characteristics and default frequency from mode acronym
    acronym = cparser.get("mode", "name")
    name_segs = acronym.split("_")
    # grp = name_segs[0]
    # type_letter = name_segs[1]
    # ipp_ms = int(name_segs[2])
    # pulse_len_ms = int(name_segs[3])
    if name_segs[4].startswith("f"):
        if len(name_segs) > 5 and name_segs[5].startswith("f"):
            # desc = 'chirp'
            f0 = 1e5 * int(name_segs[4][1:])
            f1 = 1e5 * int(name_segs[5][1:])
            center_freq = (f0 + f1) / 2
        else:
            # desc = 'unc'
            center_freq = 1e5 * int(name_segs[4][1:])
    else:
        # baud_length_us = int(name_segs[4])
        # desc = name_segs[5]
        center_freq = 1e5 * int(name_segs[6][1:])

    # read sample rate from INI file
    chip_ns = int(cparser.get("mode", "chip_length"))
    samplerate_frac = Fraction(1000000000, chip_ns)
    samplerate = float(samplerate_frac)

    # now get list of iq data files from the mode sweep string
    sweep_seq = cparser.get("mode", "sweeps").strip("[]").split(",")
    sweep_files = [os.path.join(iq_dir, s + ".iq.dat") for s in sweep_seq]

    # read the waveforms from the binary IQ files, stored as interleaved 16-bit
    # integers
    # keep each as a separate array initially since we may want to introduce
    # an IPP between each
    waveforms_int = [np.fromfile(f, dtype=np.int16) for f in sweep_files]
    # cast to complex64 and scale integer full scale to [-1, 1]
    waveforms = [(w_i[0::2] + 1j * w_i[1::2]) / (2**15 - 1)
                 for w_i in waveforms_int]

    # join waveform sequence into single waveform assuming no extra IPP
    waveform = np.concatenate(waveforms)

    return dict(waveform=waveform,
                center_freq=center_freq,
                samplerate=samplerate)
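The interleaved int16 to complex conversion near the end of this function is worth seeing on synthetic data; the samples below are made up, not a real IQ sweep file.

import numpy as np

raw = np.array([32767, 0, 0, 32767, -32767, 0], dtype=np.int16)  # I, Q, I, Q, ...
i = raw[0::2].astype(np.float32)
q = raw[1::2].astype(np.float32)
iq = (i + 1j * q) / (2**15 - 1)
# iq is approximately [1+0j, 0+1j, -1+0j]: full-scale int16 maps to magnitude 1.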
Example #18
    def read_settings(self):
        ''' Reads the settings from the vmware_inventory.ini file '''

        scriptbasename = __file__
        scriptbasename = os.path.basename(scriptbasename)
        scriptbasename = scriptbasename.replace('.py', '')

        defaults = {
            'vmware': {
                'server': '',
                'port': 443,
                'username': '',
                'password': '',
                'ini_path': os.path.join(os.path.dirname(__file__),
                                         '%s.ini' % scriptbasename),
                'cache_name': 'ansible-vmware',
                'cache_path': '~/.ansible/tmp',
                'cache_max_age': 3600,
                'max_object_level': 1,
                'alias_pattern': '{{ config.name + "_" + config.uuid }}',
                'host_pattern': '{{ guest.ipaddress }}',
                'host_filters': '{{ guest.gueststate == "running" }}',
                'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
                'lower_var_keys': True
            }
        }

        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                         defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(
            os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        # where is the cache?
        self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
        if self.cache_dir and not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        # set the cache filename and max age
        cache_name = config.get('vmware', 'cache_name')
        self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
        self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))

        # mark the connection info
        self.server = os.environ.get('VMWARE_SERVER',
                                     config.get('vmware', 'server'))
        self.port = int(
            os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
        self.username = os.environ.get('VMWARE_USERNAME',
                                       config.get('vmware', 'username'))
        self.password = os.environ.get('VMWARE_PASSWORD',
                                       config.get('vmware', 'password'))

        # behavior control
        self.maxlevel = int(config.get('vmware', 'max_object_level'))
        self.lowerkeys = config.get('vmware', 'lower_var_keys')
        if type(self.lowerkeys) != bool:
            if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
                self.lowerkeys = True
            else:
                self.lowerkeys = False

        self.host_filters = list(
            config.get('vmware', 'host_filters').split(','))
        self.groupby_patterns = list(
            config.get('vmware', 'groupby_patterns').split(','))

        # save the config
        self.config = config
Example #19
def _load():
    global SW_VERSION, nodetype, subfunctions
    # Read the build.info file
    build_info = '/etc/build.info'

    if not os.path.isfile(build_info):
        # Assume that we are in a test environment. Dirty, dirty, dirty...
        SW_VERSION = 'TEST.SW.VERSION'
        nodetype = 'controller'
        subfunctions = ['controller']
        return

    # The build.info file has no section headers, which causes problems
    # for ConfigParser. So we'll fake it out.
    ini_str = u'[build_info]\n' + open(build_info, 'r').read()
    ini_fp = io.StringIO(ini_str)

    config = configparser.SafeConfigParser()
    config.readfp(ini_fp)

    try:
        value = str(config.get('build_info', 'SW_VERSION'))

        SW_VERSION = value.strip('"')
    except configparser.Error:
        logging.exception("Failed to read SW_VERSION from /etc/build.info")
        return False

    # Read the platform.conf file

    # The platform.conf file has no section headers, which causes problems
    # for ConfigParser. So we'll fake it out.
    ini_str = u'[platform_conf]\n' + open(PLATFORM_CONF_FILE, 'r').read()
    ini_fp = io.StringIO(ini_str)
    config.readfp(ini_fp)

    try:
        value = str(config.get('platform_conf', 'nodetype'))

        nodetype = value

        value = str(config.get('platform_conf', 'subfunction'))

        subfunctions = value.split(",")

        global region_config
        if config.has_option('platform_conf', 'region_config'):
            region_config = str(config.get('platform_conf', 'region_config'))

        global region_1_name
        if config.has_option('platform_conf', 'region_1_name'):
            region_1_name = str(config.get('platform_conf', 'region_1_name'))

        global region_2_name
        if config.has_option('platform_conf', 'region_2_name'):
            region_2_name = str(config.get('platform_conf', 'region_2_name'))

        global vswitch_type
        if config.has_option('platform_conf', 'vswitch_type'):
            vswitch_type = str(config.get('platform_conf', 'vswitch_type'))

        global management_interface
        if config.has_option('platform_conf', 'management_interface'):
            management_interface = str(
                config.get('platform_conf', 'management_interface'))

        global oam_interface
        if config.has_option('platform_conf', 'oam_interface'):
            oam_interface = str(config.get('platform_conf', 'oam_interface'))

        global infrastructure_interface
        if config.has_option('platform_conf', 'infrastructure_interface'):
            infrastructure_interface = str(
                config.get('platform_conf', 'infrastructure_interface'))
        global sdn_enabled
        if config.has_option('platform_conf', 'sdn_enabled'):
            sdn_enabled = str(config.get('platform_conf', 'sdn_enabled'))

        global host_uuid
        if config.has_option('platform_conf', 'UUID'):
            host_uuid = str(config.get('platform_conf', 'UUID'))

        global install_uuid
        if config.has_option('platform_conf', 'INSTALL_UUID'):
            install_uuid = str(config.get('platform_conf', 'INSTALL_UUID'))

        global system_type
        if config.has_option('platform_conf', 'system_type'):
            system_type = str(config.get('platform_conf', 'system_type'))

        global system_mode
        if config.has_option('platform_conf', 'system_mode'):
            system_mode = str(config.get('platform_conf', 'system_mode'))

        global security_profile
        if config.has_option('platform_conf', 'security_profile'):
            security_profile = str(
                config.get('platform_conf', 'security_profile'))

        global distributed_cloud_role
        if config.has_option('platform_conf', 'distributed_cloud_role'):
            distributed_cloud_role = str(
                config.get('platform_conf', 'distributed_cloud_role'))

        global security_feature
        if config.has_option('platform_conf', 'security_feature'):
            security_feature = str(
                config.get('platform_conf', 'security_feature'))

        global http_port
        if config.has_option('platform_conf', 'http_port'):
            http_port = str(config.get('platform_conf', 'http_port'))

    except configparser.Error:
        logging.exception("Failed to read platform.conf")
        return False
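The "fake section header" trick used above for the header-less build.info and platform.conf files can be exercised on its own. The sketch below uses the modern read_string() instead of readfp(); the key/value content is illustrative only.

import configparser

raw = 'SW_VERSION="TEST.SW.VERSION"\nnodetype=controller\n'
config = configparser.ConfigParser()
config.read_string('[build_info]\n' + raw)           # prepend the missing section header
config.get('build_info', 'SW_VERSION').strip('"')    # -> 'TEST.SW.VERSION'
config.get('build_info', 'nodetype')                 # -> 'controller'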
Example #20
    def instances_to_inventory(self, instances):
        ''' Convert a list of vm objects into a json compliant inventory '''

        inventory = self._empty_inventory()
        inventory['all'] = {}
        inventory['all']['hosts'] = []
        last_idata = None
        total = len(instances)
        for idx, instance in enumerate(instances):

            # make a unique id for this object to avoid vmware's
            # numerous uuid's which aren't all unique.
            thisid = str(uuid.uuid4())
            idata = instance[1]

            # Put it in the inventory
            inventory['all']['hosts'].append(thisid)
            inventory['_meta']['hostvars'][thisid] = idata.copy()
            inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid

        # Make a map of the uuid to the name the user wants
        name_mapping = self.create_template_mapping(
            inventory, self.config.get('vmware', 'alias_pattern'))

        # Make a map of the uuid to the ssh hostname the user wants
        host_mapping = self.create_template_mapping(
            inventory, self.config.get('vmware', 'host_pattern'))

        # Reset the inventory keys
        for k, v in name_mapping.items():

            # set ansible_host (2.x)
            inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
            # 1.9.x backwards compliance
            inventory['_meta']['hostvars'][k][
                'ansible_ssh_host'] = host_mapping[k]

            if k == v:
                continue

            # add new key
            inventory['all']['hosts'].append(v)
            inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][
                k]

            # cleanup old key
            inventory['all']['hosts'].remove(k)
            inventory['_meta']['hostvars'].pop(k, None)

        self.debugl('PREFILTER_HOSTS:')
        for i in inventory['all']['hosts']:
            self.debugl(i)

        # Create special host filter removing all the hosts which
        # are not related to the configured cluster.
        if six.PY3:
            ocp_config = configparser.ConfigParser()
        else:
            ocp_config = configparser.SafeConfigParser()
        default_ocp_config = os.path.join(os.path.dirname(__file__),
                                          '../../../ocp-on-vmware.ini')
        ocp_ini_path = os.environ.get('VMWARE_INI_PATH', default_ocp_config)
        ocp_ini_path = os.path.expanduser(os.path.expandvars(ocp_ini_path))
        ocp_config.read(ocp_ini_path)
        cluster_id_filter = ("{{ config.annotation is not none and "
                             "'%s' in config.annotation }}") % ocp_config.get(
                                 'vmware', 'cluster_id')
        self.host_filters.append(cluster_id_filter)

        # Apply host filters
        for hf in self.host_filters:
            if not hf:
                continue
            self.debugl('FILTER: %s' % hf)
            filter_map = self.create_template_mapping(inventory,
                                                      hf,
                                                      dtype='boolean')
            for k, v in filter_map.items():
                if not v:
                    # delete this host
                    inventory['all']['hosts'].remove(k)
                    inventory['_meta']['hostvars'].pop(k, None)

        self.debugl('POSTFILTER_HOSTS:')
        for i in inventory['all']['hosts']:
            self.debugl(i)

        # Create groups
        for gbp in self.groupby_patterns:
            groupby_map = self.create_template_mapping(inventory, gbp)
            for k, v in groupby_map.items():
                if v not in inventory:
                    inventory[v] = {}
                    inventory[v]['hosts'] = []
                if k not in inventory[v]['hosts']:
                    inventory[v]['hosts'].append(k)

        return inventory
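For reference, the dict returned above follows the usual Ansible dynamic-inventory shape; a hand-written example of that shape (host names and addresses are illustrative) looks like this:

inventory = {
    '_meta': {
        'hostvars': {
            'web01': {'ansible_host': '10.0.0.5', 'ansible_ssh_host': '10.0.0.5'},
        }
    },
    'all': {'hosts': ['web01']},
    'guests': {'hosts': ['web01']},   # one group per groupby pattern match
}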
def run_test(testdir):
    # Compile each 'input.c', using 'script.py'
    # Assume success and empty stdout; compare against expected stderr, or empty if file not present
    inputfiles = get_source_files(testdir)
    outfile = os.path.join(testdir, 'output.o')
    script_py = os.path.join(testdir, 'script.py')
    out = TestStream(os.path.join(testdir, 'stdout.txt'))
    err = TestStream(os.path.join(testdir, 'stderr.txt'))

    cp = configparser.SafeConfigParser()
    metadatapath = os.path.join(testdir, 'metadata.ini')
    cp.read([metadatapath])

    if cp.has_section('WhenToRun'):
        if cp.has_option('WhenToRun', 'required_features'):
            required_features = cp.get('WhenToRun',
                                       'required_features').split()
            for feature in required_features:
                if feature not in features:
                    raise ValueError('%s in %s not found in %s' %
                                     (feature, metadatapath, config_h))
                if not features[feature]:
                    raise SkipTest('required feature %s not available in %s' %
                                   (feature, config_h))

    env = dict(os.environ)
    env['LC_ALL'] = 'C'

    # Generate the command-line for invoking gcc:
    args = [CC]
    if len(inputfiles) == 1:
        args += ['-c']  # (don't run the linker)
    else:
        args += ['-fPIC', '-shared']
        # Force LTO when there's more than one source file:
        args += ['-flto', '-flto-partition=none']

    if GCC_VERSION >= 4008:
        # GCC 4.8 started showing the source line where the problem is,
        # followed by another line showing a caret indicating column.
        # This is a great usability feature, but totally breaks our "gold"
        # output, so turn it off for running tests:
        args += ['-fno-diagnostics-show-caret']

        # Similarly, the macro expansion tracking is great for usability,
        # but breaks the "gold" output, so we disable it during tests:
        args += ['-ftrack-macro-expansion=0']

    args += ['-o', outfile]
    args += [
        '-fplugin=%s' % os.path.abspath('%s.so' % PLUGIN_NAME),
        '-fplugin-arg-%s-script=%s' % (PLUGIN_NAME, script_py)
    ]

    # Force the signedness of char so that the tests have consistent
    # behavior across all archs:
    args += ['-fsigned-char']

    # Special-case: add the python include dir (for this runtime) if the C code
    # uses Python.h:
    def uses_python_headers():
        for inputfile in inputfiles:
            with open(inputfile, 'r') as f:
                code = f.read()
            if '#include <Python.h>' in code:
                return True

    if uses_python_headers():
        args += ['-I' + get_python_inc()]

    # If there's a getopts.py, run it to get additional test-specific
    # command-line options:
    getopts_py = os.path.join(testdir, 'getopts.py')
    if os.path.exists(getopts_py):
        p = Popen([sys.executable, getopts_py], stdout=PIPE, stderr=PIPE)
        opts_out, opts_err = p.communicate()
        if six.PY3:
            opts_out = opts_out.decode()
            opts_err = opts_err.decode()
        c = p.wait()
        if c != 0:
            raise CommandError()
        args += opts_out.split()

    # and the source files go at the end:
    args += inputfiles

    if uses_dg_directives(inputfiles):
        dg_context = DgContext(inputfiles)
        dg_context.echo_results = True
        for inputfile in inputfiles:
            dg_context.parse_directives(inputfile)
        args += dg_context.get_args()
    else:
        dg_context = None

    if options.show:
        # Show the gcc invocation:
        print(' '.join(args))

    # Invoke the compiler:
    p = Popen(args, env=env, stdout=PIPE, stderr=PIPE)
    out.actual, err.actual = p.communicate()
    if six.PY3:
        out.actual = out.actual.decode()
        err.actual = err.actual.decode()
    #print 'out: %r' % out.actual
    #print 'err: %r' % err.actual
    exitcode_actual = p.wait()

    if options.show:
        # then the user wants to see the gcc invocation directly
        sys.stdout.write(out.actual)
        sys.stderr.write(err.actual)

    if dg_context:
        dg_context.check_result(out.actual, err.actual, exitcode_actual)
        if dg_context.num_failures() > 0:
            raise DejaGnuError(dg_context)
        return

    # Expected exit code
    # By default, we expect success if the expected stderr is empty, and
    # failure if it's non-empty.
    # This can be overridden if the test has a metadata.ini, by setting
    # exitcode within the [ExpectedBehavior] section:
    if err.expdata == '':
        exitcode_expected = 0
    else:
        exitcode_expected = 1
    if cp.has_section('ExpectedBehavior'):
        if cp.has_option('ExpectedBehavior', 'exitcode'):
            exitcode_expected = cp.getint('ExpectedBehavior', 'exitcode')

    # Check exit code:
    if exitcode_actual != exitcode_expected:
        sys.stderr.write(out.diff('stdout'))
        sys.stderr.write(err.diff('stderr'))
        raise CompilationError(out.actual, err.actual, p, args)

    if exitcode_expected == 0:
        assert os.path.exists(outfile)

    out.check_for_diff(out.actual, err.actual, p, args, 'stdout', WRITEBACK)
    err.check_for_diff(out.actual, err.actual, p, args, 'stderr', WRITEBACK)
Example #22
from collections import OrderedDict
import re

from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from six.moves import configparser

import glance.api.policy
from glance.common import exception
from glance.i18n import _, _LE

# NOTE(bourke): The default dict_type is collections.OrderedDict in py27, but
# we must set manually for compatibility with py26
CONFIG = configparser.SafeConfigParser(dict_type=OrderedDict)
LOG = logging.getLogger(__name__)

property_opts = [
    cfg.StrOpt('property_protection_file',
               help=_('The location of the property protection file. '
                      'This file contains the rules for property protections '
                      'and the roles/policies associated with it. If this '
                      'config value is not specified, by default, property '
                      'protections won\'t be enforced. If a value is '
                      'specified and the file is not found, then the '
                      'glance-api service will not start.')),
    cfg.StrOpt('property_protection_rule_format',
               default='roles',
               choices=('roles', 'policies'),
               help=_('This config value indicates whether "roles" or '
Example #23
    def read_settings(self):
        ''' Reads the settings from the packet_net.ini file '''
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()
        packet_default_ini_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
        packet_ini_path = os.path.expanduser(
            os.path.expandvars(
                os.environ.get('PACKET_NET_INI_PATH',
                               packet_default_ini_path)))
        config.read(packet_ini_path)

        # items per page
        self.items_per_page = 999
        if config.has_option(ini_section, 'items_per_page'):
            self.items_per_page = config.getint(ini_section, 'items_per_page')

        # Instance states to be gathered in inventory. Default is all of them.
        packet_valid_device_states = [
            'active', 'inactive', 'queued', 'provisioning'
        ]
        self.packet_device_states = []
        if config.has_option(ini_section, 'device_states'):
            for device_state in config.get(ini_section,
                                           'device_states').split(','):
                device_state = device_state.strip()
                if device_state not in packet_valid_device_states:
                    continue
                self.packet_device_states.append(device_state)
        else:
            self.packet_device_states = packet_valid_device_states

        # Cache related
        cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-packet.cache"
        self.cache_path_index = cache_dir + "/ansible-packet.index"
        self.cache_max_age = config.getint(ini_section, 'cache_max_age')

        # Configure nested groups instead of flat namespace.
        if config.has_option(ini_section, 'nested_groups'):
            self.nested_groups = config.getboolean(ini_section,
                                                   'nested_groups')
        else:
            self.nested_groups = False

        # Replace dash or not in group names
        if config.has_option(ini_section, 'replace_dash_in_groups'):
            self.replace_dash_in_groups = config.getboolean(
                ini_section, 'replace_dash_in_groups')
        else:
            self.replace_dash_in_groups = True

        # Configure which groups should be created.
        group_by_options = [
            'group_by_device_id',
            'group_by_facility',
            'group_by_project',
            'group_by_operating_system',
            'group_by_plan_type',
            'group_by_tags',
            'group_by_tag_none',
        ]
        for option in group_by_options:
            if config.has_option(ini_section, option):
                setattr(self, option, config.getboolean(ini_section, option))
            else:
                setattr(self, option, True)

        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get(ini_section, 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except configparser.NoOptionError:
            self.pattern_include = None

        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get(ini_section, 'pattern_exclude')
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except configparser.NoOptionError:
            self.pattern_exclude = None

        # Projects
        self.projects = []
        configProjects = config.get(ini_section, 'projects')
        configProjects_exclude = config.get(ini_section, 'projects_exclude')
        if (configProjects == 'all'):
            for projectInfo in self.get_projects():
                if projectInfo.name not in configProjects_exclude:
                    self.projects.append(projectInfo.name)
        else:
            self.projects = configProjects.split(",")
Example #24
def submit(self,
           job=None,
           no_batch=False,
           prereq=None,
           allow_fail=False,
           resubmit=False,
           resubmit_immediate=False,
           skip_pnl=False,
           mail_user=None,
           mail_type=None,
           batch_args=None):
    if resubmit_immediate and self.get_value("MACH") in ['mira', 'cetus']:
        logger.warning(
            "resubmit_immediate does not work on Mira/Cetus, submitting normally"
        )
        resubmit_immediate = False

    if self.get_value("TEST"):
        caseroot = self.get_value("CASEROOT")
        casebaseid = self.get_value("CASEBASEID")
        # This should take care of the race condition where the submitted job
        # begins immediately and tries to set RUN phase. We proactively assume
        # a passed SUBMIT phase. If this state is already PASS, don't set it again
        # because then we'll lose RUN phase info if it's there. This info is important
        # for system_tests_common to know if it needs to reinitialize the test or not.
        with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
            phase_status = ts.get_status(SUBMIT_PHASE)
            if phase_status != TEST_PASS_STATUS:
                ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)

    # If this is a resubmit check the hidden file .submit_options for
    # any submit options used on the original submit and use them again
    caseroot = self.get_value("CASEROOT")
    submit_options = os.path.join(caseroot, ".submit_options")
    if resubmit and os.path.exists(submit_options):
        config = configparser.SafeConfigParser()
        config.read(submit_options)
        if not skip_pnl and config.has_option('SubmitOptions', 'skip_pnl'):
            skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')
        if mail_user is None and config.has_option('SubmitOptions',
                                                   'mail_user'):
            mail_user = config.get('SubmitOptions', 'mail_user')
        if mail_type is None and config.has_option('SubmitOptions',
                                                   'mail_type'):
            mail_type = str(config.get('SubmitOptions',
                                       'mail_type')).split(',')
        if batch_args is None and config.has_option('SubmitOptions',
                                                    'batch_args'):
            batch_args = config.get('SubmitOptions', 'batch_args')

    try:
        functor = lambda: _submit(self,
                                  job=job,
                                  no_batch=no_batch,
                                  prereq=prereq,
                                  allow_fail=allow_fail,
                                  resubmit=resubmit,
                                  resubmit_immediate=resubmit_immediate,
                                  skip_pnl=skip_pnl,
                                  mail_user=mail_user,
                                  mail_type=mail_type,
                                  batch_args=batch_args)
        run_and_log_case_status(
            functor,
            "case.submit",
            caseroot=caseroot,
            custom_success_msg_functor=verbatim_success_msg)
    except:
        # If something failed in the batch system, make sure to mark
        # the test as failed if we are running a test.
        if self.get_value("TEST"):
            with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
                ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)

        raise
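The hidden .submit_options file read above is a plain INI file. A hedged sketch of what this code expects follows; the section and option names mirror the has_option() calls, and the values are illustrative.

import configparser

text = """
[SubmitOptions]
skip_pnl = True
mail_user = user@example.com
mail_type = begin,end
batch_args = --qos=debug
"""
config = configparser.ConfigParser()
config.read_string(text)
config.getboolean('SubmitOptions', 'skip_pnl')        # -> True
config.get('SubmitOptions', 'mail_type').split(',')   # -> ['begin', 'end']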
Example #25
    def read_settings(self):
        ''' Reads the settings from the vmware_inventory.ini file '''

        scriptbasename = __file__
        scriptbasename = os.path.basename(scriptbasename)
        scriptbasename = scriptbasename.replace('.py', '')

        defaults = {'vmware': {
            'server': '',
            'port': 443,
            'username': '',
            'password': '',
            'validate_certs': True,
            'ini_path': os.path.join(os.path.dirname(__file__), '%s.yml' % scriptbasename),
            #'ini_path': os.path.join(os.path.dirname(__file__), '../group_vars/vmware_inventory.ini'),
            'cache_name': 'ansible-vmware',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': 3600,
            'max_object_level': 1,
            'skip_keys': 'declaredalarmstate,'
                         'disabledmethod,'
                         'dynamicproperty,'
                         'dynamictype,'
                         'environmentbrowser,'
                         'managedby,'
                         'parent,'
                         'childtype,'
                         'resourceconfig',
            'alias_pattern': '{{ config.name + "_" + config.uuid }}',
            'host_pattern': '{{ guest.ipaddress }}',
            'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
            'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
            'lower_var_keys': True,
            'custom_field_group_prefix': 'vmware_tag_',
            'groupby_custom_field_excludes': '',
            'groupby_custom_field': False}
        }

        if PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        if 'vmware' not in config.sections():
            config.add_section('vmware')

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        # where is the cache?
        self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
        if self.cache_dir and not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        # set the cache filename and max age
        cache_name = config.get('vmware', 'cache_name')
        self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
        self.debugl('cache path is %s' % self.cache_path_cache)
        self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))

        # mark the connection info
        self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
        self.debugl('server is %s' % self.server)
        self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
        self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
        self.debugl('username is %s' % self.username)
        self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password', raw=True))
        self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
        if self.validate_certs in ['no', 'false', 'False', False]:
            self.validate_certs = False

        self.debugl('cert validation is %s' % self.validate_certs)

        # behavior control
        self.maxlevel = int(config.get('vmware', 'max_object_level'))
        self.debugl('max object level is %s' % self.maxlevel)
        self.lowerkeys = config.get('vmware', 'lower_var_keys')
        if type(self.lowerkeys) != bool:
            if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
                self.lowerkeys = True
            else:
                self.lowerkeys = False
        self.debugl('lower keys is %s' % self.lowerkeys)
        self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
        self.debugl('skip keys is %s' % self.skip_keys)
        temp_host_filters = list(config.get('vmware', 'host_filters').split('}},'))
        for host_filter in temp_host_filters:
            host_filter = host_filter.rstrip()
            if host_filter != "":
                if not host_filter.endswith("}}"):
                    host_filter += "}}"
                self.host_filters.append(host_filter)
        self.debugl('host filters are %s' % self.host_filters)

        temp_groupby_patterns = list(config.get('vmware', 'groupby_patterns').split('}},'))
        for groupby_pattern in temp_groupby_patterns:
            groupby_pattern = groupby_pattern.rstrip()
            if groupby_pattern != "":
                if not groupby_pattern.endswith("}}"):
                    groupby_pattern += "}}"
                self.groupby_patterns.append(groupby_pattern)
        self.debugl('groupby patterns are %s' % self.groupby_patterns)
        temp_groupby_custom_field_excludes = config.get('vmware', 'groupby_custom_field_excludes')
        self.groupby_custom_field_excludes = [x.strip('"') for x in [y.strip("'") for y in temp_groupby_custom_field_excludes.split(",")]]
        self.debugl('groupby exclude strings are %s' % self.groupby_custom_field_excludes)

        # Special feature to disable the brute force serialization of the
        # virtual machine objects. The key name for these properties does not
        # matter because the values are just items for a larger list.
        if config.has_section('properties'):
            self.guest_props = []
            for prop in config.items('properties'):
                self.guest_props.append(prop[1])

        # save the config
        self.config = config
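The host_filters / groupby_patterns parsing above splits on '}},' and then restores the '}}' that the split consumed. In isolation, with an illustrative filter string:

raw = '{{ runtime.powerstate == "poweredOn" }},{{ guest.gueststate == "running" }}'
host_filters = []
for host_filter in raw.split('}},'):
    host_filter = host_filter.rstrip()
    if host_filter:
        if not host_filter.endswith('}}'):
            host_filter += '}}'   # re-append the braces eaten by the split
        host_filters.append(host_filter)
# host_filters now holds two separate Jinja-style filter expressions.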
Example #26
    cfg.StrOpt('swift_store_config_file',
               secret=True,
               help=_('The config file that has the swift account(s) '
                      'configs.')),
]

_config_defaults = {
    'user_domain_id': 'default',
    'user_domain_name': None,
    'project_domain_id': 'default',
    'project_domain_name': None
}

# NOTE(bourke): The default dict_type is collections.OrderedDict in py27, but
# we must set manually for compatibility with py26
CONFIG = configparser.SafeConfigParser(defaults=_config_defaults,
                                       dict_type=OrderedDict)
LOG = logging.getLogger(__name__)


def is_multiple_swift_store_accounts_enabled(conf):
    if conf.glance_store.swift_store_config_file is None:
        return False
    return True


class SwiftParams(object):
    def __init__(self, conf):
        self.conf = conf
        if is_multiple_swift_store_accounts_enabled(self.conf):
            self.params = self._load_config()
        else:
            # Branch body missing in this excerpt.
            pass
    def read_ini_settings(self):
        ''' Read ini file settings '''

        scriptbasename = "ocp-on-vmware"
        defaults = {
            'vmware': {
                'ini_path': os.path.join(os.path.dirname(__file__),
                                         '%s.ini' % scriptbasename),
                'console_port': '8443',
                'container_storage': 'none',
                'container_storage_size': '300',
                'container_storage_disk_type': 'eagerZeroedThick',
                'deployment_type': 'openshift-enterprise',
                'openshift_vers': 'v3_4',
                'vcenter_username': '******',
                'vcenter_template_name': 'ocp-server-template-2.0.2',
                'vcenter_folder': 'ocp',
                'vcenter_resource_pool': '/Resources/OCP3',
                'app_dns_prefix': 'apps',
                'vm_network': 'VM Network',
                'rhel_subscription_pool': 'Red Hat OpenShift Container Platform, Premium*',
                'openshift_sdn': 'redhat/openshift-ovs-subnet',
                'byo_lb': 'no',
                'lb_host': 'haproxy-',
                'byo_nfs': 'no',
                'nfs_host': 'nfs-0',
                'nfs_registry_mountpoint': '/exports',
                'master_nodes': '3',
                'infra_nodes': '2',
                'app_nodes': '3',
                'storage_nodes': '0',
                'vm_ipaddr_start': '',
                'vm_ipaddr_allocation_type': 'static',
                'ocp_hostname_prefix': '',
                'auth_type': 'ldap',
                'ldap_user': '******',
                'ldap_user_password': '',
                'node_type': self.args.node_type,
                'node_number': self.args.node_number,
                'tag': self.args.tag,
                'ldap_fqdn': ''
            }
        }
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                         defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(
            os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        self.console_port = config.get('vmware', 'console_port')
        self.cluster_id = config.get('vmware', 'cluster_id')
        self.container_storage = config.get('vmware', 'container_storage')
        self.container_storage_size = config.get('vmware',
                                                 'container_storage_size')
        self.container_storage_disk_type = config.get(
            'vmware', 'container_storage_disk_type')
        self.deployment_type = config.get('vmware', 'deployment_type')
        self.openshift_vers = config.get('vmware', 'openshift_vers')
        self.vcenter_host = config.get('vmware', 'vcenter_host')
        self.vcenter_username = config.get('vmware', 'vcenter_username')
        self.vcenter_password = config.get('vmware', 'vcenter_password')
        self.vcenter_template_name = config.get('vmware',
                                                'vcenter_template_name')
        self.vcenter_folder = config.get('vmware', 'vcenter_folder')
        self.vcenter_datastore = config.get('vmware', 'vcenter_datastore')
        self.vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
        self.vcenter_cluster = config.get('vmware', 'vcenter_cluster')
        self.vcenter_resource_pool = config.get('vmware',
                                                'vcenter_resource_pool')
        self.dns_zone = config.get('vmware', 'dns_zone')
        self.app_dns_prefix = config.get('vmware', 'app_dns_prefix')
        self.vm_dns = config.get('vmware', 'vm_dns')
        self.vm_gw = config.get('vmware', 'vm_gw')
        self.vm_netmask = config.get('vmware', 'vm_netmask')
        self.vm_network = config.get('vmware', 'vm_network')
        self.rhel_subscription_user = config.get('vmware',
                                                 'rhel_subscription_user')
        self.rhel_subscription_pass = config.get('vmware',
                                                 'rhel_subscription_pass')
        self.rhel_subscription_server = config.get('vmware',
                                                   'rhel_subscription_server')
        self.rhel_subscription_pool = config.get('vmware',
                                                 'rhel_subscription_pool')
        self.rhsm_katello_url = config.get('vmware', 'rhsm_katello_url')
        self.rhsm_activation_key = config.get('vmware', 'rhsm_activation_key')
        self.rhsm_org_id = config.get('vmware', 'rhsm_org_id')
        self.openshift_sdn = config.get('vmware', 'openshift_sdn')
        self.byo_lb = config.get('vmware', 'byo_lb')
        self.lb_host = config.get('vmware', 'lb_host')
        self.byo_nfs = config.get('vmware', 'byo_nfs')
        self.nfs_host = config.get('vmware', 'nfs_host')
        self.nfs_registry_mountpoint = config.get('vmware',
                                                  'nfs_registry_mountpoint')
        self.master_nodes = config.get('vmware', 'master_nodes')
        self.infra_nodes = config.get('vmware', 'infra_nodes')
        self.app_nodes = config.get('vmware', 'app_nodes')
        self.storage_nodes = config.get('vmware', 'storage_nodes')
        self.vm_ipaddr_start = config.get('vmware', 'vm_ipaddr_start')
        self.vm_ipaddr_allocation_type = config.get(
            'vmware', 'vm_ipaddr_allocation_type')
        self.ocp_hostname_prefix = config.get('vmware',
                                              'ocp_hostname_prefix') or ''
        self.auth_type = config.get('vmware', 'auth_type')
        self.ldap_user = config.get('vmware', 'ldap_user')
        self.ldap_user_password = config.get('vmware', 'ldap_user_password')
        self.ldap_fqdn = config.get('vmware', 'ldap_fqdn')
        self.node_type = config.get('vmware', 'node_type')
        self.node_number = config.get('vmware', 'node_number')
        self.tag = config.get('vmware', 'tag')
        err_count = 0

        if 'storage' in self.node_type:
            self.node_number = 3
            if self.container_storage is None:
                print "Please specify crs or cns in container_storage in the %s." % vmware_ini_path
            if 'crs' in self.container_storage:
                self.rhel_subscription_pool = "Red Hat Gluster Storage , Standard (16 Nodes)"
                self.inventory_file = "crs-inventory.json"
            if 'cns' in self.container_storage:
                self.inventory_file = "cns-inventory.json"
        required_vars = {
            'cluster_id': self.cluster_id,
            'dns_zone': self.dns_zone,
            'vcenter_host': self.vcenter_host,
            'vcenter_password': self.vcenter_password,
            'vm_ipaddr_start': self.vm_ipaddr_start,
            'vm_ipaddr_allocation_type': self.vm_ipaddr_allocation_type,
            'ldap_fqdn': self.ldap_fqdn,
            'ldap_user_password': self.ldap_user_password,
            'vm_dns': self.vm_dns,
            'vm_gw': self.vm_gw,
            'vm_netmask': self.vm_netmask,
            'vcenter_datacenter': self.vcenter_datacenter,
        }
        for k, v in required_vars.items():
            if v == '':
                err_count += 1
                print "Missing %s " % k
        if required_vars['vm_ipaddr_allocation_type'] not in ('dhcp',
                                                              'static'):
            err_count += 1
            print(
                "'vm_ipaddr_allocation_type' can take only "
                "'dhcp' and 'static' values.")

        if err_count > 0:
            print "Please fill out the missing variables in %s " % vmware_ini_path
            exit(1)
        self.wildcard_zone = "%s.%s" % (self.app_dns_prefix, self.dns_zone)
        self.support_nodes = 0

        print('Configured inventory values:')
        for each_section in config.sections():
            for (key, val) in config.items(each_section):
                if 'pass' in key:
                    print('\t %s:  ******' % key)
                else:
                    print('\t %s:  %s' % (key, val))
        print('\n')
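
For reference, read_ini_settings() above expects everything in a single
[vmware] section; a minimal sketch of producing a skeleton ocp-on-vmware.ini
with configparser follows (every value is a hypothetical placeholder, not a
default taken from this script):

# Sketch: write a bare-bones [vmware] section that read_ini_settings() could
# consume. All values are hypothetical placeholders.
from six.moves import configparser

skeleton = configparser.RawConfigParser()
skeleton.add_section('vmware')
for key, value in (('vcenter_host', 'vcenter.example.com'),
                   ('vcenter_password', 'changeme'),
                   ('cluster_id', 'ocp-cluster'),
                   ('vcenter_datacenter', 'dc1'),
                   ('dns_zone', 'example.com'),
                   ('vm_dns', '10.0.0.2'),
                   ('vm_gw', '10.0.0.1'),
                   ('vm_netmask', '255.255.255.0'),
                   ('vm_ipaddr_start', '10.0.0.10'),
                   ('ldap_fqdn', 'ldap.example.com'),
                   ('ldap_user_password', 'changeme')):
    skeleton.set('vmware', key, value)

with open('ocp-on-vmware.ini', 'w') as ini_file:
    skeleton.write(ini_file)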
Example #28
    def read_settings(self):
        ''' Reads the settings from the alicloud.ini file '''

        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        ecs_default_ini_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'alicloud.ini')
        ecs_ini_path = os.path.expanduser(
            os.path.expandvars(
                os.environ.get('ALICLOUD_INI_PATH', ecs_default_ini_path)))
        config.read(ecs_ini_path)

        access_key = os.environ.get(
            'ALICLOUD_ACCESS_KEY',
            os.environ.get('ALICLOUD_ACCESS_KEY_ID', None))
        if not access_key:
            access_key = self.get_option(config, 'credentials',
                                         'alicloud_access_key')

        secret_key = os.environ.get(
            'ALICLOUD_SECRET_KEY',
            os.environ.get('ALICLOUD_SECRET_ACCESS_KEY', None))
        if not secret_key:
            secret_key = self.get_option(config, 'credentials',
                                         'alicloud_secret_key')

        security_token = os.environ.get('ALICLOUD_SECURITY_TOKEN', None)
        if not security_token:
            security_token = self.get_option(config, 'credentials',
                                             'alicloud_security_token')

        self.credentials = {
            'acs_access_key_id': access_key,
            'acs_secret_access_key': secret_key,
            'security_token': security_token,
        }

        # Regions
        config_regions = self.get_option(config, 'ecs', 'regions')
        if not config_regions or config_regions == 'all':
            all_regions = self.connect_to_ecs(footmark.ecs,
                                              "cn-beijing").get_all_regions()

            exclude_regions = []
            if self.get_option(config, 'ecs', 'regions_exclude'):
                exclude_regions = [
                    ex.strip() for ex in self.get_option(
                        config, 'ecs', 'regions_exclude').split(',')
                    if ex.strip()
                ]

            for region in all_regions:
                if region.id not in exclude_regions:
                    self.regions.append(region.id)
        else:
            self.regions = config_regions.split(",")

        # Destination addresses
        self.destination_variable = self.get_option(config, 'ecs',
                                                    'destination_variable', "")

        self.hostname_variable = self.get_option(config, 'ecs',
                                                 'hostname_variable', "")

        self.destination_format = self.get_option(config, 'ecs',
                                                  'destination_format', "")
        self.destination_format_tags = self.get_option(
            config, 'ecs', 'destination_format_tags', "")

        # Instance states to be gathered in inventory. Default is 'running'.
        ecs_valid_instance_states = [
            'pending', 'running', 'starting', 'stopping', 'stopped'
        ]

        if self.get_option(config, 'ecs', 'all_instances'):
            self.ecs_instance_states.extend(ecs_valid_instance_states)
        elif self.get_option(config, 'ecs', 'instance_states'):
            for instance_state in self.get_option(
                    config, 'ecs', 'instance_states').split(","):
                instance_state = instance_state.strip()
                if instance_state not in ecs_valid_instance_states:
                    continue
                self.ecs_instance_states.append(instance_state)
        else:
            self.ecs_instance_states.append('running')

        # Cache related
        cache_dir = os.path.expanduser(
            self.get_option(config, 'ecs', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        cache_name = 'ansible-alicloud'
        self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
        self.cache_path_index = cache_dir + "/%s.index" % cache_name
        self.cache_max_age = float(
            self.get_option(config, 'ecs', 'cache_max_age'))

        self.expand_csv_tags = self.get_option(config, 'ecs',
                                               'expand_csv_tags')

        # Configure nested groups instead of flat namespace.
        self.nested_groups = self.get_option(config, 'ecs', 'nested_groups')

        # Configure which groups should be created.
        group_by_options = [
            'group_by_instance_id', 'group_by_region',
            'group_by_availability_zone', 'group_by_instance_type',
            'group_by_image_id', 'group_by_vpc_id', 'group_by_vswitch_id',
            'group_by_security_group', 'group_by_tag_keys', 'group_by_tag_none'
        ]
        for option in group_by_options:
            setattr(self, option, self.get_option(config, 'ecs', option))

        # Do we need to just include hosts that match a pattern?
        pattern_include = self.get_option(config, 'ecs', 'pattern_include')
        if pattern_include and len(pattern_include) > 0:
            self.pattern_include = re.compile(pattern_include)

        # Do we need to exclude hosts that match a pattern?
        pattern_exclude = self.get_option(config, 'ecs', 'pattern_exclude')
        if pattern_exclude and len(pattern_exclude) > 0:
            self.pattern_exclude = re.compile(pattern_exclude)

        instance_filters = self.get_option(config, 'ecs', 'instance_filters')
        if instance_filters and len(instance_filters) > 0:
            tags = {}
            for field in instance_filters.split(','):
                field = field.strip()
                if not field or '=' not in field:
                    continue
                key, value = [x.strip() for x in field.split('=', 1)]
                if not key:
                    continue
                elif key.startswith("tag:"):
                    tags[key[4:]] = value
                    continue
                self.ecs_instance_filters[key] = value
            if tags:
                self.ecs_instance_filters['instance_tags'] = tags
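
read_settings() above relies on a self.get_option() helper that is not part of
this listing; a minimal sketch consistent with how it is called (config
object, section, option, optional default) might look like:

# Hypothetical helper matching the call sites above: return the option value
# when the section has it, otherwise the supplied default (None when omitted).
def get_option(self, config, section, option, default=None):
    if config.has_option(section, option):
        return config.get(section, option)
    return default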
Example #29
import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from six.moves import configparser

import glance.api.policy
from glance.common import exception
from glance.i18n import _, _LE, _LW

# SafeConfigParser was deprecated in Python 3.2
if sys.version_info >= (3, 2):
    CONFIG = configparser.ConfigParser()
else:
    CONFIG = configparser.SafeConfigParser()

LOG = logging.getLogger(__name__)

property_opts = [
    cfg.StrOpt('property_protection_file',
               help=_("""
The location of the property protection file.

Provide a valid path to the property protection file which contains
the rules for property protections and the roles/policies associated
with them.

A property protection file, when set, restricts the Glance image
properties to be created, read, updated and/or deleted by a specific
set of users that are identified by either roles or policies.
""")),  # NOTE: the help text above is truncated at this point in the listing
]
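
To make the roles-based format described above concrete, here is a small
sketch that parses one assumed rule with configparser and checks whether a
role may create matching properties (the section name and roles are
illustrative, not taken from this listing; Python 3 configparser):

# Sketch: one assumed roles-based property protection rule. Section and role
# names are hypothetical.
import configparser

rules = configparser.ConfigParser()
rules.read_string(u"""
[^x_owner_.*]
create = admin,member
read = admin,member
update = admin
delete = admin
""")

create_roles = [role.strip()
                for role in rules.get('^x_owner_.*', 'create').split(',')]
print('admin' in create_roles)  # -> True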
Example #30
    def _parse_args(self, args_str):
        """
        Sample usage.

        python provision_vrouter.py
                --host_name a3s30.contrail.juniper.net
                --host_ip 10.1.1.1
                --api_server_ip 127.0.0.1
                --api_server_port 8082
                --api_server_use_ssl False
                --oper <add | del>
                [--ip_fabric_subnet 192.168.10.0/24]
                [--dpdk-enabled]
                [--sriov-physnets physnet1=eth0 physnet2=eth1]
        """
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c",
                                 "--conf_file",
                                 help="Specify config file",
                                 metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        defaults = {
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
            'api_server_use_ssl': False,
            'oper': 'add',
            'control_names': [],
            'router_type': None,
            'dpdk_enabled': False,
            'disable_vhost_vmi': False,
            'enable_vhost_vmi_policy': False,
            'sub_cluster_name': None,
            'ip_fabric_subnet': None
        }
        ksopts = {
            'admin_user': '******',
            'admin_password': '******',
            'admin_tenant_name': 'default-domain'
        }

        if args.conf_file:
            config = configparser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))
            if 'KEYSTONE' in config.sections():
                ksopts.update(dict(config.items("KEYSTONE")))

        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        defaults.update(ksopts)
        parser.set_defaults(**defaults)

        parser.add_argument("--host_name",
                            help="hostname name of compute-node",
                            required=True)
        parser.add_argument("--host_ip",
                            help="IP address of compute-node",
                            required=True)
        parser.add_argument(
            "--control_names",
            help="List of control-node names compute node connects to")
        parser.add_argument("--api_server_port", help="Port of api server")
        parser.add_argument("--api_server_use_ssl",
                            help="Use SSL to connect with API server")
        parser.add_argument("--oper",
                            default='add',
                            help="Provision operation to be done(add or del)")
        parser.add_argument("--admin_user", help="Name of keystone admin user")
        parser.add_argument("--admin_password",
                            help="Password of keystone admin user")
        parser.add_argument("--admin_tenant_name",
                            help="Tenamt name for keystone admin user")
        parser.add_argument("--openstack_ip",
                            help="IP address of openstack node")
        parser.add_argument(
            "--router_type",
            help="Type of the virtual router (tor-service-node,embedded,none)")
        parser.add_argument(
            "--dpdk_enabled",
            action="store_true",
            help="Whether forwarding mode on vrouter is DPDK based")
        parser.add_argument("--disable_vhost_vmi",
                            action="store_true",
                            help="Do not create vhost0 vmi if flag is set")
        parser.add_argument("--enable_vhost_vmi_policy",
                            action="store_true",
                            help="Enable vhost0 vmi policy if flag is set")
        parser.add_argument("--sub_cluster_name",
                            help="Sub cluster this vrouter to be part of")
        parser.add_argument("--ip_fabric_subnet",
                            help="Add the ip_fabric_subnet")
        parser.add_argument("--sriov_physnets",
                            metavar="KEY=VALUE",
                            nargs='+',
                            action=SriovPhysNetAction,
                            help="physnet to vourter interface mapping")
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument("--api_server_ip",
                           help="IP address of api server",
                           nargs='+',
                           type=str)
        group.add_argument("--use_admin_api",
                           default=False,
                           help="Connect to local api-server on admin port",
                           action="store_true")

        self._args = parser.parse_args(remaining_argv)
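
The SriovPhysNetAction referenced for --sriov_physnets above is not included
in this listing; a minimal sketch of an argparse action consistent with its
KEY=VALUE usage (the dict-building behaviour is an assumption) could be:

# Hypothetical stand-in for the SriovPhysNetAction used above: collect
# repeated KEY=VALUE arguments into a dict on the parsed namespace.
import argparse


class SriovPhysNetAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        mapping = getattr(namespace, self.dest, None) or {}
        for item in values:
            key, _, value = item.partition('=')
            mapping[key.strip()] = value.strip()
        setattr(namespace, self.dest, mapping)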