def collect_storage_host(hosts):
    """
    Get a valid host for storage from the user and append it to the list of
    hosts.
    """
    message = """
Setting up high-availability masters requires a storage host. Please provide a
host that will be configured as a Registry Storage.

Note: Containerized storage hosts are not currently supported.
"""
    click.echo(message)
    host_props = {}

    first_master = next(host for host in hosts if host.is_master())

    hostname_or_ip = click.prompt('Enter hostname or IP address',
                                  value_proc=validate_prompt_hostname,
                                  default=first_master.connect_to)
    existing, existing_host = is_host_already_node_or_master(
        hostname_or_ip, hosts)
    if existing and existing_host.is_node():
        existing_host.roles.append('storage')
    else:
        host_props['connect_to'] = hostname_or_ip
        host_props['preconfigured'] = False
        host_props['roles'] = ['storage']
        storage = Host(**host_props)
        hosts.append(storage)
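collect_storage_host leans on an is_host_already_node_or_master helper that
none of these snippets define. A minimal sketch of what it might look like,
inferred from the call sites here rather than taken from upstream source:

def is_host_already_node_or_master(hostname, hosts):
    # Sketch: report whether a collected host already answers to this
    # hostname/IP as a master or node, returning (found, host_or_None)
    # to match how the callers unpack the result.
    for host in hosts:
        if host.connect_to == hostname and (host.is_master() or host.is_node()):
            return True, host
    return False, None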
def collect_storage_host(hosts):
    """
    Get a valid host for storage from the user and append it to the list of
    hosts.
    """
    message = """
Setting up High Availability Masters requires a storage host. Please provide a
host that will be configured as a Registry Storage.
"""
    click.echo(message)
    host_props = {}

    hostname_or_ip = click.prompt('Enter hostname or IP address',
                                  value_proc=validate_prompt_hostname)
    existing, existing_host = is_host_already_node_or_master(
        hostname_or_ip, hosts)
    if existing and existing_host.node:
        existing_host.storage = True
    else:
        host_props['connect_to'] = hostname_or_ip
        host_props['preconfigured'] = False
        host_props['master'] = False
        host_props['node'] = False
        host_props['storage'] = True
        storage = Host(**host_props)
        hosts.append(storage)
def collect_master_lb():
    """
    Get an HA proxy from the user
    """
    message = """
Setting up High Availability Masters requires a load balancing solution.
Please provide a host that will be configured as a proxy. This can either be
an existing load balancer configured to balance all masters on port 8443 or a
new host that will have HAProxy installed on it.

If the host provided is not yet configured, a reference HAProxy load
balancer will be installed.  It's important to note that while the rest of the
environment will be fault tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
    click.echo(message)
    host_props = {}
    host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
                                            default='',
                                            value_proc=validate_prompt_hostname)
    install_haproxy = click.confirm('Should the reference HAProxy load balancer be installed on this host?')
    host_props['preconfigured'] = not install_haproxy
    host_props['master'] = False
    host_props['node'] = False
    host_props['master_lb'] = True
    master_lb = Host(**host_props)

    return master_lb
Example #4
def generate_hosts(num_hosts, name_prefix, roles=None, new_host=False):
    hosts = []
    for num in range(1, num_hosts + 1):
        hosts.append(
            Host(connect_to=name_prefix + str(num),
                 roles=roles,
                 new_host=new_host))
    return hosts
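For reference, this test fixture names hosts by suffixing a counter onto the
prefix; a quick illustration of a call (assuming Host stores connect_to as
given):

# Sketch: three node hosts named node1, node2, node3, all flagged as new.
hosts = generate_hosts(3, 'node', roles=['node'], new_host=True)
print([h.connect_to for h in hosts])  # ['node1', 'node2', 'node3']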
Example #5
def collect_master_lb(hosts):
    """
    Get a valid load balancer from the user and append it to the list of
    hosts.

    Ensure user does not specify a system already used as a master/node as
    this is an invalid configuration.
    """
    message = """
Setting up high-availability masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.

If the host provided is not yet configured, a reference HAProxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault-tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
    click.echo(message)
    host_props = {}

    # Using an embedded function here so we have access to the hosts list:
    def validate_prompt_lb(hostname):
        # Run the standard hostname check first:
        hostname = validate_prompt_hostname(hostname)

        # Make sure this host wasn't already specified:
        for host in hosts:
            if host.connect_to == hostname and (host.is_master()
                                                or host.is_node()):
                raise click.BadParameter(
                    'Cannot re-use "%s" as a load balancer, '
                    'please specify a separate host' % hostname)
        return hostname

    lb_hostname = click.prompt('Enter hostname or IP address',
                               value_proc=validate_prompt_lb)
    if lb_hostname:
        host_props['connect_to'] = lb_hostname
        install_haproxy = \
            click.confirm('Should the reference HAProxy load balancer be installed on this host?')
        host_props['preconfigured'] = not install_haproxy
        host_props['roles'] = ['master_lb']
        return Host(**host_props)
    else:
        return None
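The embedded validate_prompt_lb works because click.prompt treats a
click.BadParameter (a UsageError subclass) raised from value_proc as a failed
conversion: it echoes the error and prompts again. A standalone sketch of that
pattern, using a hypothetical validator:

import click

def must_be_even(value):
    # Sketch: raise click.BadParameter so click.prompt prints the
    # error and re-prompts instead of aborting.
    try:
        number = int(value)
    except ValueError:
        raise click.BadParameter('%s is not an integer' % value)
    if number % 2:
        raise click.BadParameter('%s is not even' % value)
    return number

# click.prompt keeps asking until must_be_even returns without raising:
# even = click.prompt('Enter an even number', value_proc=must_be_even)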
Example #6
    def test_inventory_file_quotes_node_labels(self):
        """Verify a host entry wraps openshift_node_labels value in double quotes"""
        yaml_props = {
            'ip': '192.168.0.1',
            'hostname': 'a.example.com',
            'connect_to': 'a-private.example.com',
            'public_ip': '192.168.0.1',
            'public_hostname': 'a.example.com',
            'new_host': True,
            'roles': ['node'],
            'node_labels': {
                'region': 'infra'
            },
        }

        new_node = Host(**yaml_props)
        inventory = cStringIO()
        # This is what the 'write_host' function generates. write_host
        # has no return value; it writes directly to the 'inventory'
        # file, which in this test case is a StringIO object.
        ooinstall.openshift_ansible.write_host(new_node,
                                               'node',
                                               inventory,
                                               schedulable=True)
        # read the value of what was written to the inventory "file"
        legacy_inventory_line = inventory.getvalue()

        # Given the `yaml_props` above we should see a line like this:
        #     openshift_node_labels="{'region': 'infra'}"
        # Quotes around the hash
        node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"'''
        # No quotes around the hash
        node_labels_bad = '''openshift_node_labels={'region': 'infra'}'''

        # The good line is present in the written inventory line
        self.assertIn(node_labels_expected, legacy_inventory_line)
        # An unquoted version is not present
        self.assertNotIn(node_labels_bad, legacy_inventory_line)
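For context, the quoting behavior under test could be implemented along these
lines; this is an illustrative sketch of the idea, not the actual write_host
source:

def format_node_labels(labels):
    # Sketch: render the labels dict as an Ansible inventory variable,
    # wrapping the dict repr in double quotes so the whole hash parses
    # as a single value, e.g. openshift_node_labels="{'region': 'infra'}"
    return 'openshift_node_labels="%s"' % repr(labels)

print(format_node_labels({'region': 'infra'}))
# openshift_node_labels="{'region': 'infra'}"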
def collect_hosts(oo_cfg,
                  existing_env=False,
                  masters_set=False,
                  print_summary=True):
    """
        Collect host information from user. This will later be filled in using
        Ansible.

        Returns: a list of host information collected from the user
    """
    click.clear()
    click.echo('*** Host Configuration ***')
    message = """
You must now specify the hosts that will compose your OpenShift cluster.

Please enter an IP or hostname to connect to for each system in the cluster.
You will then be prompted to identify what role you would like this system to
serve in the cluster.

OpenShift Masters serve the API and web console and coordinate the jobs to run
across the environment.  If desired you can specify multiple Master systems for
an HA deployment, in which case you will be prompted to identify a *separate*
system to act as the load balancer for your cluster after all Masters and Nodes
are defined.

If only one Master is specified, an etcd instance embedded within the OpenShift
Master service will be used as the datastore.  This can be later replaced with a
separate etcd instance if desired.  If multiple Masters are specified, a
separate etcd cluster will be configured with each Master serving as a member.

Any Masters configured as part of this installation process will also be
configured as Nodes.  This is so that the Master will be able to proxy to Pods
from the API.  By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.

OpenShift Nodes provide the runtime environments for containers.  They will
host the required services to be managed by the Master.

http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
    """
    click.echo(message)

    hosts = []
    more_hosts = True
    num_masters = 0
    while more_hosts:
        host_props = {}
        host_props['connect_to'] = click.prompt(
            'Enter hostname or IP address',
            value_proc=validate_prompt_hostname)

        if not masters_set:
            if click.confirm('Will this host be an OpenShift Master?'):
                host_props['master'] = True
                num_masters += 1

                if oo_cfg.settings['variant_version'] == '3.0':
                    masters_set = True
        host_props['node'] = True

        # TODO: Re-enable this option once container installs are out of tech preview
        rpm_or_container = click.prompt(
            'Will this host be RPM or Container based (rpm/container)?',
            type=click.Choice(['rpm', 'container']),
            default='rpm')
        if rpm_or_container == 'container':
            host_props['containerized'] = True
        else:
            host_props['containerized'] = False

        if existing_env:
            host_props['new_host'] = True
        else:
            host_props['new_host'] = False

        host = Host(**host_props)

        hosts.append(host)

        if print_summary:
            print_installation_summary(hosts,
                                       oo_cfg.settings['variant_version'])

        # One master is enough for an all-in-one deployment, so we can ask
        # whether to add more hosts. With exactly two masters (and masters
        # still being collected) we skip the question: two masters is not a
        # valid HA configuration, so more hosts must be added.
        if masters_set or num_masters != 2:
            more_hosts = click.confirm('Do you want to add additional hosts?')

    if num_masters >= 3:
        collect_master_lb(hosts)

    return hosts
def collect_hosts(oo_cfg,
                  existing_env=False,
                  masters_set=False,
                  print_summary=True):
    """
        Collect host information from user. This will later be filled in using
        Ansible.

        Returns: a list of host information collected from the user
    """
    click.clear()
    click.echo('*** Host Configuration ***')
    message = """
You must now specify the hosts that will compose your OpenShift cluster.

Please enter an IP address or hostname to connect to for each system in the
cluster. You will then be prompted to identify what role you want this system to
serve in the cluster.

OpenShift masters serve the API and web console and coordinate the jobs to run
across the environment. Optionally, you can specify multiple master systems for
a high-availability (HA) deployment. If you choose an HA deployment, then you
are prompted to identify a *separate* system to act as the load balancer for
your cluster once you define all masters and nodes.

If only one master is specified, an etcd instance is embedded within the
OpenShift master service to use as the datastore. This can be later replaced
with a separate etcd instance, if required. If multiple masters are specified,
then a separate etcd cluster is configured with each master serving as a member.

Any masters configured as part of this installation process are also
configured as nodes. This enables the master to proxy to pods
from the API. By default, this node is unschedulable, but this can be changed
after installation with the 'oadm manage-node' command.

OpenShift nodes provide the runtime environments for containers. They host the
required services to be managed by the master.

http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
    """
    click.echo(message)

    hosts = []
    roles = set(['master', 'node', 'storage', 'etcd'])
    more_hosts = True
    num_masters = 0
    while more_hosts:
        host_props = {}
        host_props['roles'] = []
        host_props['connect_to'] = click.prompt(
            'Enter hostname or IP address',
            value_proc=validate_prompt_hostname)

        if not masters_set:
            if click.confirm('Will this host be an OpenShift master?'):
                host_props['roles'].append('master')
                host_props['roles'].append('etcd')
                num_masters += 1

                if oo_cfg.settings['variant_version'] == '3.0':
                    masters_set = True
        host_props['roles'].append('node')

        host_props['containerized'] = False
        if oo_cfg.settings['variant_version'] != '3.0':
            rpm_or_container = \
                click.prompt('Will this host be RPM or Container based (rpm/container)?',
                             type=click.Choice(['rpm', 'container']),
                             default='rpm')
            if rpm_or_container == 'container':
                host_props['containerized'] = True

        host_props['new_host'] = existing_env

        host = Host(**host_props)

        hosts.append(host)

        if print_summary:
            print_installation_summary(hosts,
                                       oo_cfg.settings['variant_version'])

        # One master is enough for an all-in-one deployment, so we can ask
        # whether to add more hosts. With exactly two masters (and masters
        # still being collected) we skip the question: two masters is not a
        # valid HA configuration, so more hosts must be added.
        if masters_set or num_masters != 2:
            more_hosts = click.confirm('Do you want to add additional hosts?')

    if num_masters >= 3:
        collect_master_lb(hosts)
        roles.add('master_lb')

    if not existing_env:
        collect_storage_host(hosts)

    return hosts, roles
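Note that unlike the earlier variant, this version returns a (hosts, roles)
tuple rather than a bare list, so a call site would unpack it; a minimal
sketch, where oo_cfg is whatever configuration object the installer passes in:

hosts, roles = collect_hosts(oo_cfg)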
def collect_hosts(version=None, masters_set=False, print_summary=True):
    """
        Collect host information from user. This will later be filled in using
        Ansible.

        Returns: a list of host information collected from the user
    """
    min_masters_for_ha = 3
    click.clear()
    click.echo('***Host Configuration***')
    message = """
The OpenShift Master serves the API and web console.  It also coordinates the
jobs that have to run across the environment.  It can even run the datastore.
For wizard-based installations the database will be embedded.  It's possible to
change this later using etcd from Red Hat Enterprise Linux 7.

Any Masters configured as part of this installation process will also be
configured as Nodes.  This is so that the Master will be able to proxy to Pods
from the API.  By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.

The OpenShift Node provides the runtime environments for containers.  It will
host the required services to be managed by the Master.

http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
    """
    click.echo(message)

    hosts = []
    more_hosts = True
    num_masters = 0
    while more_hosts:
        host_props = {}
        host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
                                                default='',
                                                value_proc=validate_prompt_hostname)

        if not masters_set:
            if click.confirm('Will this host be an OpenShift Master?'):
                host_props['master'] = True
                num_masters += 1

                if num_masters >= min_masters_for_ha or version == '3.0':
                    masters_set = True
        host_props['node'] = True

        # TODO: Re-enable this option once container installs are out of tech preview
        #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
        #                                type=click.Choice(['rpm', 'container']),
        #                                default='rpm')
        #if rpm_or_container == 'container':
        #    host_props['containerized'] = True
        #else:
        #    host_props['containerized'] = False
        host_props['containerized'] = False

        host = Host(**host_props)

        hosts.append(host)

        if print_summary:
            click.echo('')
            click.echo('Current Masters: {}'.format(num_masters))
            click.echo('Current Nodes: {}'.format(len(hosts)))
            click.echo('Additional Masters required for HA: {}'.format(max(min_masters_for_ha - num_masters, 0)))
            click.echo('')

        if num_masters <= 1 or num_masters >= min_masters_for_ha:
            more_hosts = click.confirm('Do you want to add additional hosts?')

    if num_masters > 1:
        hosts.append(collect_master_lb())

    return hosts
Example #10
def collect_hosts():
    """
        Collect host information from user. This will later be filled in using
        Ansible.

        Returns: a list of host information collected from the user
    """
    click.clear()
    click.echo('***Host Configuration***')
    message = """
The OpenShift Master serves the API and web console.  It also coordinates the
jobs that have to run across the environment.  It can even run the datastore.
For wizard-based installations the database will be embedded.  It's possible to
change this later using etcd from Red Hat Enterprise Linux 7.

Any Masters configured as part of this installation process will also be
configured as Nodes.  This is so that the Master will be able to proxy to Pods
from the API.  By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.

The OpenShift Node provides the runtime environments for containers.  It will
host the required services to be managed by the Master.

http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/3.0/architecture/infrastructure_components/kubernetes_infrastructure.html#node
    """
    click.echo(message)

    hosts = []
    more_hosts = True
    # Rough dotted-quad check; the dots must be escaped or they match any
    # character (see the stricter alternative sketched after this function).
    ip_regex = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')

    while more_hosts:
        host_props = {}
        hostname_or_ip = click.prompt('Enter hostname or IP address:',
                                      default='',
                                      value_proc=validate_prompt_hostname)

        if ip_regex.match(hostname_or_ip):
            host_props['ip'] = hostname_or_ip
        else:
            host_props['hostname'] = hostname_or_ip

        host_props['master'] = click.confirm(
            'Will this host be an OpenShift Master?')
        host_props['node'] = True

        rpm_or_container = click.prompt(
            'Will this host be RPM or Container based (rpm/container)?',
            type=click.Choice(['rpm', 'container']),
            default='rpm')
        if rpm_or_container == 'container':
            host_props['containerized'] = True
        else:
            host_props['containerized'] = False

        host = Host(**host_props)

        hosts.append(host)

        more_hosts = click.confirm('Do you want to add additional hosts?')
    return hosts
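The dotted-quad regex in this variant only checks shape, so a string like
999.999.999.999 would still match. A stricter alternative, assuming a Python 3
environment (these snippets are Python 2 era), is to let the standard library
do the validation:

import ipaddress

def looks_like_ip(value):
    # Sketch: accept only syntactically valid IPv4/IPv6 addresses,
    # rejecting shapes like 999.999.999.999 that the regex lets through.
    try:
        ipaddress.ip_address(value)
        return True
    except ValueError:
        return False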