Example #1
    def scp(self,
            name,
            user=None,
            source=None,
            destination=None,
            tunnel=False,
            download=False,
            recursive=False):
        """

        :param name:
        :param user:
        :param source:
        :param destination:
        :param tunnel:
        :param download:
        :param recursive:
        :return:
        """
        u, ip = self._ssh_credentials(name)
        if ip is None:
            return None
        if user is None:
            user = u
        scpcommand = common.scp(name,
                                ip=ip,
                                user=user,
                                source=source,
                                destination=destination,
                                recursive=recursive,
                                tunnel=tunnel,
                                debug=self.debug,
                                download=download)
        return scpcommand
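
This method only builds and returns the scp command string; nothing is executed. A minimal usage sketch, assuming a provider object that exposes the scp() method above (the helper name and paths are placeholders), mirroring how the cluster examples further down run the result with os.system():

import os

def copy_from_vm(provider, name, source, destination):
    """Run the scp command built by provider.scp() (sketch; 'provider' is any
    object exposing the scp() method shown above)."""
    cmd = provider.scp(name, source=source, destination=destination, download=True)
    if cmd is not None:        # scp() returns None when no ip could be resolved
        os.system(cmd)         # scp() only returns the command; the caller runs it
    return cmd
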
Example #2
 def scp(self, request, context):
     print("Handling scp call for:\n%s" % request)
     name = request.name
     recursive = request.recursive
     source = request.source
     destination = request.destination
     download = request.download
     user = request.user if request.user != '' else None
     config = Kconfig()
     k = config.k
     tunnel = config.tunnel
     tunnelhost = config.tunnelhost
     tunnelport = config.tunnelport
     tunneluser = config.tunneluser
     if tunnel and tunnelhost is None:
         common.pprint("Tunnel requested but invalid tunnelhost", color='red')
         os._exit(1)
     insecure = config.insecure
     u, ip = common._ssh_credentials(k, name)
     if ip is None:
         return
     if user is None:
         user = config.vmuser if config.vmuser is not None else u
     scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination,
                             tunnel=tunnel, tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
                             download=download, recursive=recursive, insecure=insecure)
     response = kcli_pb2.sshcmd(sshcmd=scpcommand)
     return response
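
The handler above wraps the generated command in a kcli_pb2.sshcmd message rather than executing it. A hedged sketch of what a caller can do with the reply once the RPC returns (only the sshcmd field used by the handler above is assumed):

import os

def run_scp_response(response):
    """Run the command carried in the sshcmd reply (sketch)."""
    if response.sshcmd:
        os.system(response.sshcmd)
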
Example #3
    def scp(self,
            name,
            user=None,
            source=None,
            destination=None,
            tunnel=False,
            download=False,
            recursive=False):
        """

        :param name:
        :param user:
        :param source:
        :param destination:
        :param tunnel:
        :param download:
        :param recursive:
        :return:
        """
        tunnel = False
        u, ip = self._ssh_credentials(name)
        scpcommand = common.scp(name,
                                ip=ip,
                                host=self.host,
                                user=u,
                                source=source,
                                destination=destination,
                                recursive=recursive,
                                tunnel=tunnel,
                                debug=self.debug,
                                download=False)
        if self.debug:
            print(scpcommand)
        return scpcommand
Example #4
 def scp(self,
         name,
         user=None,
         source=None,
         destination=None,
         tunnel=False,
         download=False,
         recursive=False):
     u, ip = self._ssh_credentials(name)
     if user is None:
         user = u
     tunnel = True
     scpcommand = common.scp(name,
                             ip=ip,
                             host=self.host,
                             port=self.port,
                             hostuser=self.user,
                             user=user,
                             source=source,
                             destination=destination,
                             recursive=recursive,
                             tunnel=tunnel,
                             debug=self.debug,
                             download=download)
     return scpcommand
Example #5
 def scp(self,
         name,
         user=None,
         source=None,
         destination=None,
         tunnel=False,
         tunnelhost=None,
         tunnelport=22,
         tunneluser='******',
         download=False,
         recursive=False,
         insecure=False):
     u, ip = common._ssh_credentials(self, name)
     if user is None:
         user = u
     scpcommand = common.scp(name,
                             ip=ip,
                             user=user,
                             source=source,
                             destination=destination,
                             recursive=recursive,
                             tunnel=tunnel,
                             tunnelhost=tunnelhost,
                             tunnelport=tunnelport,
                             tunneluser=tunneluser,
                             debug=self.debug,
                             download=download,
                             insecure=insecure)
     if self.debug:
         print(scpcommand)
     return scpcommand
Example #6
    def scp(self, name, user=None, source=None, destination=None, tunnel=False, tunnelhost=None, tunnelport=22,
            tunneluser='******', download=False, recursive=False, insecure=False):
        """

        :param name:
        :param user:
        :param source:
        :param destination:
        :param tunnel:
        :param download:
        :param recursive:
        :param insecure:
        :return:
        """
        u, ip = common._ssh_credentials(self, name)
        if ip is None:
            return None
        if user is None:
            user = u
        vmport = None
        if '.' not in ip:
            vmport = ip
            ip = '127.0.0.1'
        scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination, recursive=recursive,
                                tunnel=tunnel, tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
                                debug=self.debug, download=download, vmport=vmport, insecure=insecure)
        return scpcommand
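
The '.' check above covers the case where _ssh_credentials returns something that is not a dotted IPv4 address: the value is handed to common.scp as vmport and the copy targets 127.0.0.1 instead. A minimal sketch of that heuristic (the sample value is made up):

ip = '5910'            # illustrative value as returned by _ssh_credentials
vmport = None
if '.' not in ip:      # no dot, so treat it as a forwarded port rather than an address
    vmport = ip
    ip = '127.0.0.1'
print(ip, vmport)      # 127.0.0.1 5910
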
Example #7
def create(config, plandir, cluster, overrides, dnsconfig=None):
    k = config.k
    data = {'kubetype': 'kind'}
    data.update(overrides)
    if 'keys' not in overrides and get_ssh_pub_key() is None:
        error(
            "No usable public key found, which is required for the deployment")
        sys.exit(1)
    data['cluster'] = overrides.get(
        'cluster', cluster if cluster is not None else 'testk')
    plan = cluster if cluster is not None else data['cluster']
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    if masters == 0:
        error("Invalid number of masters")
        sys.exit(1)
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if os.path.exists(clusterdir):
        error("Please remove existing directory %s first..." % clusterdir)
        sys.exit(1)
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
        with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
            installparam = overrides.copy()
            installparam['plan'] = plan
            installparam['kubetype'] = 'kind'
            yaml.safe_dump(installparam,
                           p,
                           default_flow_style=False,
                           encoding='utf-8',
                           allow_unicode=True)
    result = config.plan(plan,
                         inputfile='%s/kcli_plan.yml' % plandir,
                         overrides=data)
    if result['result'] != 'success':
        sys.exit(1)
    kindnode = "%s-kind" % cluster
    kindnodeip, kindnodevmport = _ssh_credentials(k, kindnode)[1:]
    source, destination = data['KUBECONFIG'], "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(kindnode,
                 ip=kindnodeip,
                 user='******',
                 source=source,
                 destination=destination,
                 tunnel=config.tunnel,
                 tunnelhost=config.tunnelhost,
                 tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser,
                 download=True,
                 insecure=True,
                 vmport=kindnodevmport)
    os.system(scpcmd)
    success("Kubernetes cluster %s deployed!!!" % cluster)
    info2("export KUBECONFIG=$HOME/.kcli/clusters/%s/auth/kubeconfig" %
          cluster)
    info2("export PATH=$PWD:$PATH")
Example #8
def create(config, plandir, cluster, overrides):
    platform = config.type
    data = {'kubetype': 'generic', 'xip': False, 'domain': 'karmalabs.com'}
    data.update(overrides)
    data['cluster'] = overrides['cluster'] if 'cluster' in overrides else cluster
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    if masters == 0:
        pprint("Invalid number of masters", color='red')
        os._exit(1)
    network = data.get('network', 'default')
    xip = data['xip']
    api_ip = data.get('api_ip')
    if masters > 1:
        if platform in cloudplatforms:
            domain = data.get('domain', 'karmalabs.com')
            api_ip = "%s-master.%s" % (cluster, domain)
        elif api_ip is None:
            if network == 'default' and platform == 'kvm':
                pprint("Using 192.168.122.253 as api_ip", color='yellow')
                data['api_ip'] = "192.168.122.253"
                api_ip = "192.168.122.253"
            else:
                pprint("You need to define api_ip in your parameters file", color='red')
                os._exit(1)
        if xip and platform not in cloudplatforms:
            data['domain'] = "%s.xip.io" % api_ip
    version = data.get('version')
    if version is not None and not version.startswith('1.'):
        pprint("Invalid version %s" % version, color='red')
        os._exit(1)
    data['basedir'] = '/workdir' if os.path.exists("/i_am_a_container") else '.'
    cluster = data.get('cluster')
    image = data.get('image', 'centos7')
    data['ubuntu'] = True if image in UBUNTUS or 'ubuntu' in image.lower() else False
    clusterdir = pwd_path("clusters/%s" % cluster)
    firstmaster = "%s-master-0" % cluster
    if os.path.exists(clusterdir):
        pprint("Please remove existing directory %s first..." % clusterdir, color='red')
        sys.exit(1)
    if find_executable('kubectl') is None:
        get_kubectl()
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
    k = config.k
    result = config.plan(cluster, inputfile='%s/masters.yml' % plandir, overrides=data, wait=True)
    if result['result'] != "success":
        os._exit(1)
    source, destination = "/root/join.sh", "%s/join.sh" % clusterdir
    firstmasterip = k.info(firstmaster)['ip']
    scpcmd = scp(firstmaster, ip=firstmasterip, user='******', source=source, destination=destination,
                 tunnel=config.tunnel, tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser, download=True, insecure=True)
    os.system(scpcmd)
    source, destination = "/etc/kubernetes/admin.conf", "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(firstmaster, ip=firstmasterip, user='******', source=source, destination=destination,
                 tunnel=config.tunnel, tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser, download=True, insecure=True)
    os.system(scpcmd)
    workers = data.get('workers', 0)
    if workers > 0:
        pprint("Deploying workers", color='blue')
        if 'name' in data:
            del data['name']
        config.plan(cluster, inputfile='%s/workers.yml' % plandir, overrides=data)
    pprint("Kubernetes cluster %s deployed!!!" % cluster)
    masters = data.get('masters', 1)
    info("export KUBECONFIG=clusters/%s/auth/kubeconfig" % cluster)
    info("export PATH=$PWD:$PATH")
    prefile = 'pre_ubuntu.sh' if data['ubuntu'] else 'pre_el.sh'
    predata = config.process_inputfile(cluster, "%s/%s" % (plandir, prefile), overrides=data)
    with open("%s/pre.sh" % clusterdir, 'w') as f:
        f.write(predata)
    os.environ['KUBECONFIG'] = "%s/%s/auth/kubeconfig" % (os.getcwd(), clusterdir)
    apps = data.get('apps', [])
    if apps:
        for app in apps:
            appdir = "%s/apps/%s" % (plandir, app)
            if not os.path.exists(appdir):
                pprint("Skipping unsupported app %s" % app, color='yellow')
            else:
                pprint("Adding app %s" % app, color='blue')
                if '%s_version' % app not in overrides:
                    data['%s_version' % app] = 'latest'
                kube_create_app(config, appdir, overrides=data)
Example #9
def create(config, plandir, cluster, overrides):
    platform = config.type
    data = {'kubetype': 'k3s'}
    data.update(overrides)
    data['cluster'] = overrides[
        'cluster'] if 'cluster' in overrides else cluster
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    network = data.get('network', 'default')
    api_ip = data.get('api_ip')
    if masters > 1:
        if platform in cloudplatforms:
            domain = data.get('domain', 'karmalabs.com')
            api_ip = "%s-master.%s" % (cluster, domain)
        elif api_ip is None:
            if network == 'default' and platform == 'kvm':
                pprint("Using 192.168.122.253 as api_ip", color='yellow')
                data['api_ip'] = "192.168.122.253"
            else:
                pprint("You need to define api_ip in your parameters file",
                       color='red')
                os._exit(1)
    version = data.get('version')
    if version is not None and not version.startswith('1.'):
        pprint("Invalid version %s" % version, color='red')
        os._exit(1)
    data['basedir'] = '/workdir' if os.path.exists(
        "/i_am_a_container") else '.'
    cluster = data.get('cluster')
    clusterdir = pwd_path("clusters/%s" % cluster)
    firstmaster = "%s-master-0" % cluster
    if os.path.exists(clusterdir):
        pprint("Please remove existing directory %s first..." % clusterdir,
               color='red')
        sys.exit(1)
    if find_executable('kubectl') is None:
        get_kubectl()
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
    if masters > 1:
        datastore_endpoint = data.get('datastore_endpoint')
        if datastore_endpoint is None:
            result = config.plan(cluster,
                                 inputfile='%s/datastore.yml' % plandir,
                                 overrides=data,
                                 wait=True)
            if result['result'] != "success":
                os._exit(1)
            datastore_type = data['datastore_type']
            datastore_user = data['datastore_user']
            datastore_password = data['datastore_password']
            datastore_ip = config.k.info("%s-datastore" % cluster).get('ip')
            datastore_name = data['datastore_name']
            if datastore_type == 'mysql':
                datastore_name = "tcp(%s)" % datastore_name
            datastore_endpoint = "%s://%s:%s@%s/%s" % (
                datastore_type, datastore_user, datastore_password,
                datastore_ip, datastore_name)
        data['datastore_endpoint'] = datastore_endpoint
    k = config.k
    result = config.plan(cluster,
                         inputfile='%s/masters.yml' % plandir,
                         overrides=data,
                         wait=True)
    if result['result'] != "success":
        os._exit(1)
    source, destination = "/root/join.sh", "%s/join.sh" % clusterdir
    firstmasterip = k.info(firstmaster)['ip']
    scpcmd = scp(firstmaster,
                 ip=firstmasterip,
                 user='******',
                 source=source,
                 destination=destination,
                 tunnel=config.tunnel,
                 tunnelhost=config.tunnelhost,
                 tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser,
                 download=True,
                 insecure=True)
    os.system(scpcmd)
    source, destination = "/root/kubeconfig", "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(firstmaster,
                 ip=firstmasterip,
                 user='******',
                 source=source,
                 destination=destination,
                 tunnel=config.tunnel,
                 tunnelhost=config.tunnelhost,
                 tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser,
                 download=True,
                 insecure=True)
    os.system(scpcmd)
    workers = data.get('workers', 0)
    if workers > 0:
        pprint("Deploying workers", color='blue')
        if 'name' in data:
            del data['name']
        config.plan(cluster,
                    inputfile='%s/workers.yml' % plandir,
                    overrides=data)
    pprint("K3s cluster %s deployed!!!" % cluster)
    info("export KUBECONFIG=clusters/%s/auth/kubeconfig" % cluster)
    info("export PATH=$PWD:$PATH")
Example #10
def create(config, plandir, cluster, overrides):
    k = config.k
    client = config.client
    platform = config.type
    pprint("Deploying on client %s" % client, color='blue')
    data = {'helper_image': 'CentOS-7-x86_64-GenericCloud.qcow2',
            'domain': 'karmalabs.com',
            'network': 'default',
            'masters': 1,
            'workers': 0,
            'tag': DEFAULT_TAG,
            'ipv6': False,
            'pub_key': '%s/.ssh/id_rsa.pub' % os.environ['HOME'],
            'pull_secret': 'openshift_pull.json',
            'version': 'nightly',
            'macosx': False,
            'upstream': False,
            'baremetal': False,
            'fips': False,
            'apps': [],
            'minimal': False}
    data.update(overrides)
    data['cluster'] = overrides.get('cluster', cluster if cluster is not None else 'testk')
    plan = cluster if cluster is not None else data['cluster']
    overrides['kubetype'] = 'openshift'
    apps = overrides.get('apps', [])
    if ('localstorage' in apps or 'ocs' in apps) and 'extra_disks' not in overrides\
            and 'extra_master_disks' not in overrides and 'extra_worker_disks' not in overrides:
        pprint("Storage apps require extra disks to be set", color='yellow')
    data['cluster'] = overrides.get('cluster', cluster)
    overrides['kube'] = data['cluster']
    installparam = overrides.copy()
    masters = data.get('masters', 1)
    if masters == 0:
        pprint("Invalid number of masters", color='red')
        os._exit(1)
    network = data.get('network')
    ipv6 = data['ipv6']
    upstream = data.get('upstream')
    version = data.get('version')
    tag = data.get('tag')
    if os.path.exists('openshift-install'):
        pprint("Removing old openshift-install", color='blue')
        os.remove('openshift-install')
    baremetal = data.get('baremetal')
    minimal = data.get('minimal')
    if version not in ['ci', 'nightly']:
        pprint("Using stable version", color='blue')
    else:
        pprint("Using %s version" % version, color='blue')
    cluster = data.get('cluster')
    helper_image = data.get('helper_image')
    image = data.get('image')
    api_ip = data.get('api_ip')
    if platform in virtplatforms and api_ip is None:
        if network == 'default' and platform == 'kvm':
            pprint("Using 192.168.122.253 as api_ip", color='yellow')
            overrides['api_ip'] = "192.168.122.253"
            api_ip = "192.168.122.253"
        else:
            pprint("You need to define api_ip in your parameters file", color='red')
            os._exit(1)
    if platform in virtplatforms and baremetal and data.get('baremetal_machine_cidr') is None:
        pprint("You need to define baremetal_machine_cidr in your parameters file", color='red')
        os._exit(1)
    if api_ip is not None and ':' in api_ip:
        ipv6 = True
    ingress_ip = data.get('ingress_ip')
    if ingress_ip is None:
        ingress_ip = api_ip
    public_api_ip = data.get('public_api_ip')
    bootstrap_api_ip = data.get('bootstrap_api_ip')
    network = data.get('network')
    if platform == 'packet':
        if network == 'default':
            pprint("You need to indicate a specific vlan network", color='red')
            os._exit(1)
        else:
            facilities = [n['domain'] for n in k.list_networks().values() if str(n['cidr']) == network]
            if not facilities:
                pprint("Vlan network %s not found in any facility" % network, color='red')
                os._exit(1)
            elif k.facility not in facilities:
                pprint("Vlan network %s not found in facility %s" % (network, k.facility), color='red')
                os._exit(1)
    masters = data.get('masters')
    workers = data.get('workers')
    disconnected_deploy = data.get('disconnected_deploy', False)
    disconnected_url = data.get('disconnected_url')
    disconnected_user = data.get('disconnected_user')
    disconnected_password = data.get('disconnected_password')
    tag = data.get('tag')
    pub_key = data.get('pub_key')
    pull_secret = pwd_path(data.get('pull_secret')) if not upstream else "%s/fake_pull.json" % plandir
    pull_secret = os.path.expanduser(pull_secret)
    macosx = data.get('macosx')
    if macosx and not os.path.exists('/i_am_a_container'):
        macosx = False
    if platform == 'openstack' and (api_ip is None or public_api_ip is None):
        pprint("You need to define both api_ip and public_api_ip in your parameters file", color='red')
        os._exit(1)
    if not os.path.exists(pull_secret):
        pprint("Missing pull secret file %s" % pull_secret, color='red')
        sys.exit(1)
    if not os.path.exists(pub_key):
        if os.path.exists('%s/.kcli/id_rsa.pub' % os.environ['HOME']):
            pub_key = '%s/.kcli/id_rsa.pub' % os.environ['HOME']
        else:
            pprint("Missing public key file %s" % pub_key, color='red')
            sys.exit(1)
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if os.path.exists(clusterdir):
        if [v for v in config.k.list() if v['plan'] == cluster]:
            pprint("Please remove existing directory %s first..." % clusterdir, color='red')
            sys.exit(1)
        else:
            pprint("Removing directory %s" % clusterdir, color='blue')
            rmtree(clusterdir)
    os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
    if find_executable('oc') is None:
        get_oc(macosx)
    if version == 'ci':
        if '/' not in str(tag):
            basetag = 'ocp' if not upstream else 'origin'
            tag = 'registry.svc.ci.openshift.org/%s/release:%s' % (basetag, tag)
        os.environ['OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE'] = tag
        pprint("Setting OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE to %s" % tag, color='blue')
    if disconnected_url is not None:
        if '/' not in str(tag):
            tag = '%s/release:%s' % (disconnected_url, tag)
            os.environ['OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE'] = tag
        pprint("Setting OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE to %s" % tag, color='blue')
    if find_executable('openshift-install') is None:
        if version == 'ci':
            get_ci_installer(pull_secret, tag=tag, upstream=upstream)
        elif version == 'nightly':
            get_downstream_installer(nightly=True, tag=tag)
        elif upstream:
            get_upstream_installer(tag=tag)
        else:
            get_downstream_installer(tag=tag)
        pprint("Move downloaded openshift-install somewhere in your path if you want to reuse it", color='blue')
    INSTALLER_VERSION = get_installer_version()
    COMMIT_ID = os.popen('openshift-install version').readlines()[1].replace('built from commit', '').strip()
    if platform == 'packet' and not upstream:
        overrides['commit_id'] = COMMIT_ID
    pprint("Using installer version %s" % INSTALLER_VERSION, color='blue')
    OPENSHIFT_VERSION = INSTALLER_VERSION[0:3].replace('.', '')
    curl_header = "Accept: application/vnd.coreos.ignition+json; version=3.1.0"
    if upstream:
        curl_header = "User-Agent: Ignition/2.3.0"
    elif OPENSHIFT_VERSION.isdigit() and int(OPENSHIFT_VERSION) < 46:
        curl_header = "User-Agent: Ignition/0.35.0"
    overrides['curl_header'] = curl_header
    if image is None:
        if upstream:
            fcos_base = 'stable' if version == 'stable' else 'testing'
            fcos_url = "https://builds.coreos.fedoraproject.org/streams/%s.json" % fcos_base
            image_url = get_latest_fcos(fcos_url, _type=config.type)
        else:
            image_url = get_commit_rhcos(COMMIT_ID, _type=config.type)
        image = os.path.basename(os.path.splitext(image_url)[0])
        images = [v for v in k.volumes() if image in v]
        if not images:
            result = config.handle_host(pool=config.pool, image=image, download=True, update_profile=False,
                                        url=image_url)
            if result['result'] != 'success':
                os._exit(1)
        else:
            pprint("Using image %s" % image, color='blue')
    elif platform != 'packet':
        pprint("Checking if image %s is available" % image, color='blue')
        images = [v for v in k.volumes() if image in v]
        if not images:
            pprint("Missing %s. Indicate correct image in your parameters file..." % image, color='red')
            os._exit(1)
    else:
        pprint("Missing image in your parameters file. This is required for packet", color='red')
        os._exit(1)
    overrides['image'] = image
    overrides['cluster'] = cluster
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
            installparam['plan'] = plan
            yaml.safe_dump(installparam, p, default_flow_style=False, encoding='utf-8', allow_unicode=True)
    data['pub_key'] = open(pub_key).read().strip()
    if disconnected_url is not None and disconnected_user is not None and disconnected_password is not None:
        key = "%s:%s" % (disconnected_user, disconnected_password)
        key = str(b64encode(key.encode('utf-8')), 'utf-8')
        auths = {'auths': {disconnected_url: {'auth': key, 'email': '*****@*****.**'}}}
        data['pull_secret'] = json.dumps(auths)
    else:
        data['pull_secret'] = re.sub(r"\s", "", open(pull_secret).read())
    if 'network_type' not in data:
        default_sdn = 'OVNKubernetes' if ipv6 else 'OpenShiftSDN'
        data['network_type'] = default_sdn
    installconfig = config.process_inputfile(cluster, "%s/install-config.yaml" % plandir, overrides=data)
    with open("%s/install-config.yaml" % clusterdir, 'w') as f:
        f.write(installconfig)
    with open("%s/install-config.yaml.bck" % clusterdir, 'w') as f:
        f.write(installconfig)
    autoapprover = config.process_inputfile(cluster, "%s/autoapprovercron.yml" % plandir, overrides=data)
    with open("%s/autoapprovercron.yml" % clusterdir, 'w') as f:
        f.write(autoapprover)
    run = call('openshift-install --dir=%s create manifests' % clusterdir, shell=True)
    if run != 0:
        pprint("Leaving environment for debugging purposes", color='red')
        pprint("You can delete it with kcli delete kube --yes %s" % cluster, color='red')
        os._exit(run)
    if minimal:
        pprint("Deploying cvo overrides to provide a minimal install", color='yellow')
        with open("%s/cvo-overrides.yaml" % plandir) as f:
            cvo_override = f.read()
        with open("%s/manifests/cvo-overrides.yaml" % clusterdir, "a") as f:
            f.write(cvo_override)
    if baremetal:
        for f in glob("%s/openshift/99_openshift-cluster-api_master-machines-*.yaml" % clusterdir):
            os.remove(f)
        for f in glob("%s/openshift/99_openshift-cluster-api_worker-machineset-*.yaml" % clusterdir):
            os.remove(f)
        rhcos_image_url = get_rhcos_openstack_url()
        installconfig = config.process_inputfile(cluster, "%s/metal3-config.yaml" % plandir,
                                                 overrides={'rhcos_image_url': rhcos_image_url})
        with open("%s/openshift/99-metal3-config.yaml" % clusterdir, 'w') as f:
            f.write(installconfig)
    for f in glob("%s/customisation/*.yaml" % plandir):
        if '99-ingress-controller.yaml' in f:
            ingressrole = 'master' if workers == 0 else 'worker'
            replicas = masters if workers == 0 else workers
            ingressconfig = config.process_inputfile(cluster, f, overrides={'replicas': replicas, 'role': ingressrole})
            with open("%s/openshift/99-ingress-controller.yaml" % clusterdir, 'w') as f:
                f.write(ingressconfig)
        else:
            copy2(f, "%s/openshift" % clusterdir)
    manifestsdir = pwd_path("manifests")
    if os.path.exists(manifestsdir) and os.path.isdir(manifestsdir):
        for f in glob("%s/*.yaml" % manifestsdir):
            copy2(f, "%s/openshift" % clusterdir)
    if 'network_type' in data and data['network_type'] == 'Calico':
        for asset in calicoassets:
            fetch(asset, manifestsdir)
    call('openshift-install --dir=%s create ignition-configs' % clusterdir, shell=True)
    staticdata = gather_dhcp(data, platform)
    domain = data.get('domain')
    if staticdata:
        pprint("Deploying helper dhcp node" % image, color='green')
        staticdata.update({'network': network, 'dhcp_image': helper_image, 'prefix': cluster,
                          domain: '%s.%s' % (cluster, domain)})
        result = config.plan(plan, inputfile='%s/dhcp.yml' % plandir, overrides=staticdata)
        if result['result'] != 'success':
            os._exit(1)
    if platform in virtplatforms:
        if 'virtual_router_id' not in data:
            data['virtual_router_id'] = randint(1, 255)
        host_ip = ingress_ip if platform != "openstack" else public_api_ip
        pprint("Using %s for api vip...." % api_ip, color='blue')
        ignore_hosts = data.get('ignore_hosts', False)
        if ignore_hosts:
            pprint("Ignoring /etc/hosts", color='yellow')
        elif not os.path.exists("/i_am_a_container"):
            hosts = open("/etc/hosts").readlines()
            wronglines = [e for e in hosts if not e.startswith('#') and "api.%s.%s" % (cluster, domain) in e and
                          host_ip not in e]
            for wrong in wronglines:
                pprint("Cleaning duplicate entries for api.%s.%s in /etc/hosts" % (cluster, domain), color='blue')
                call("sudo sed -i '/api.%s.%s/d' /etc/hosts" % (cluster, domain), shell=True)
            hosts = open("/etc/hosts").readlines()
            correct = [e for e in hosts if not e.startswith('#') and "api.%s.%s" % (cluster, domain) in e and
                       host_ip in e]
            if not correct:
                entries = ["%s.%s.%s" % (x, cluster, domain) for x in ['api', 'console-openshift-console.apps',
                                                                       'oauth-openshift.apps',
                                                                       'prometheus-k8s-openshift-monitoring.apps']]
                entries = ' '.join(entries)
                call("sudo sh -c 'echo %s %s >> /etc/hosts'" % (host_ip, entries), shell=True)
        else:
            entries = ["%s.%s.%s" % (x, cluster, domain) for x in ['api', 'console-openshift-console.apps',
                                                                   'oauth-openshift.apps',
                                                                   'prometheus-k8s-openshift-monitoring.apps']]
            entries = ' '.join(entries)
            call("sh -c 'echo %s %s >> /etc/hosts'" % (host_ip, entries), shell=True)
            if os.path.exists('/etcdir/hosts'):
                call("sh -c 'echo %s %s >> /etcdir/hosts'" % (host_ip, entries), shell=True)
        if platform in ['kubevirt', 'openstack', 'vsphere'] or (platform == 'packet' and config.k.tunnelhost is None):
            # bootstrap ignition is too big in those platforms so we deploy a temporary web server to serve it
            helper_overrides = {}
            if platform == 'kubevirt':
                helper_overrides['helper_image'] = "kubevirt/fedora-cloud-container-disk-demo"
                iptype = "ip"
            else:
                if helper_image is None:
                    images = [v for v in k.volumes() if 'centos' in v.lower() or 'fedora' in v.lower()]
                    if images:
                        helper_image = os.path.basename(images[0])
                    else:
                        helper_image = "CentOS-7-x86_64-GenericCloud.qcow2"
                        pprint("Downloading centos helper image", color='blue')
                        result = config.handle_host(pool=config.pool, image="centos7", download=True,
                                                    update_profile=False)
                    pprint("Using helper image %s" % helper_image, color='blue')
                else:
                    images = [v for v in k.volumes() if helper_image in v]
                    if not images:
                        pprint("Missing image %s. Indicate correct helper image in your parameters file" % helper_image,
                               color='red')
                        os._exit(1)
                iptype = 'ip'
                if platform == 'openstack':
                    helper_overrides['flavor'] = "m1.medium"
                    iptype = "privateip"
            helper_overrides['nets'] = [network]
            helper_overrides['plan'] = cluster
            bootstrap_helper_name = "%s-bootstrap-helper" % cluster
            config.create_vm("%s-bootstrap-helper" % cluster, helper_image, overrides=helper_overrides)
            while bootstrap_api_ip is None:
                bootstrap_api_ip = k.info(bootstrap_helper_name).get(iptype)
                pprint("Waiting 5s for bootstrap helper node to get an ip...", color='blue')
                sleep(5)
            cmd = "iptables -F ; yum -y install httpd"
            if platform == 'packet':
                cmd += "; sed 's/apache/root/' /etc/httpd/conf/httpd.conf"
                status = 'provisioning'
                config.k.tunnelhost = bootstrap_api_ip
                while status != 'active':
                    status = k.info(bootstrap_helper_name).get('status')
                    pprint("Waiting 5s for bootstrap helper node to be fully provisioned...", color='blue')
                    sleep(5)
            sleep(5)
            cmd += "; systemctl start httpd"
            sshcmd = ssh(bootstrap_helper_name, ip=bootstrap_api_ip, user='******', tunnel=config.tunnel,
                         tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
                         tunneluser=config.tunneluser, insecure=True, cmd=cmd)
            os.system(sshcmd)
            source, destination = "%s/bootstrap.ign" % clusterdir, "/var/www/html/bootstrap"
            scpcmd = scp(bootstrap_helper_name, ip=bootstrap_api_ip, user='******', source=source,
                         destination=destination, tunnel=config.tunnel, tunnelhost=config.tunnelhost,
                         tunnelport=config.tunnelport, tunneluser=config.tunneluser, download=False, insecure=True)
            os.system(scpcmd)
            sedcmd = 'sed "s@https://api-int.%s.%s:22623/config/master@http://%s/bootstrap@" ' % (cluster, domain,
                                                                                                  bootstrap_api_ip)
            sedcmd += '%s/master.ign' % clusterdir
            sedcmd += ' > %s/bootstrap.ign' % clusterdir
            call(sedcmd, shell=True)
        if baremetal:
            new_api_ip = api_ip if not ipv6 else "[%s]" % api_ip
            sedcmd = 'sed -i "s@https://192.168.125.1:22623/config@http://%s@"' % new_api_ip
            sedcmd += ' %s/master.ign' % clusterdir
            call(sedcmd, shell=True)
        else:
            new_api_ip = api_ip if not ipv6 else "[%s]" % api_ip
            sedcmd = 'sed -i "s@https://api-int.%s.%s:22623/config@http://%s@"' % (cluster, domain, new_api_ip)
            sedcmd += ' %s/master.ign' % clusterdir
            call(sedcmd, shell=True)
    if platform in cloudplatforms:
        bootstrap_helper_name = "%s-bootstrap-helper" % cluster
        helper_overrides = {'reservedns': True, 'domain': '%s.%s' % (cluster, domain), 'tags': [tag], 'plan': cluster,
                            'nets': [network]}
        config.create_vm("%s-bootstrap-helper" % cluster, helper_image, overrides=helper_overrides)
        status = ""
        while status != "running":
            status = k.info(bootstrap_helper_name).get('status')
            pprint("Waiting 5s for bootstrap helper node to be running...", color='blue')
            sleep(5)
        sleep(5)
        bootstrap_helper_ip = _ssh_credentials(k, bootstrap_helper_name)[1]
        cmd = "iptables -F ; yum -y install httpd ; systemctl start httpd"
        sshcmd = ssh(bootstrap_helper_name, ip=bootstrap_helper_ip, user='******', tunnel=config.tunnel,
                     tunnelhost=config.tunnelhost, tunnelport=config.tunnelport, tunneluser=config.tunneluser,
                     insecure=True, cmd=cmd)
        os.system(sshcmd)
        source, destination = "%s/bootstrap.ign" % clusterdir, "/var/www/html/bootstrap"
        scpcmd = scp(bootstrap_helper_name, ip=bootstrap_helper_ip, user='******', source=source, destination=destination,
                     tunnel=config.tunnel, tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
                     tunneluser=config.tunneluser, download=False, insecure=True)
        os.system(scpcmd)
        sedcmd = 'sed "s@https://api-int.%s.%s:22623/config/master@' % (cluster, domain)
        sedcmd += 'http://%s-bootstrap-helper.%s.%s/bootstrap@" ' % (cluster, cluster, domain)
        sedcmd += '%s/master.ign' % clusterdir
        sedcmd += ' > %s/bootstrap.ign' % clusterdir
        call(sedcmd, shell=True)
    if masters == 1:
        version_match = re.match("4.([0-9]*).*", INSTALLER_VERSION)
        COS_VERSION = "4%s" % version_match.group(1) if version_match is not None else '45'
        if upstream or int(COS_VERSION) > 43:
            overrides['fix_ceo'] = True
    if platform in virtplatforms:
        if disconnected_deploy:
            disconnected_vm = "%s-disconnecter" % cluster
            cmd = "cat /opt/registry/certs/domain.crt"
            pprint("Deploying disconnected vm %s" % disconnected_vm, color='blue')
            result = config.plan(plan, inputfile='%s/disconnected' % plandir, overrides=overrides, wait=True)
            if result['result'] != 'success':
                os._exit(1)
            disconnected_ip = _ssh_credentials(k, disconnected_vm)[1]
            cacmd = ssh(disconnected_vm, ip=disconnected_ip, user='******', tunnel=config.tunnel,
                        tunnelhost=config.tunnelhost, tunnelport=config.tunnelport, tunneluser=config.tunneluser,
                        insecure=True, cmd=cmd)
            disconnected_ca = os.popen(cacmd).read()
            if 'ca' in overrides:
                overrides['ca'] += disconnected_ca
            else:
                overrides['ca'] = disconnected_ca
        pprint("Deploying masters", color='blue')
        result = config.plan(plan, inputfile='%s/masters.yml' % plandir, overrides=overrides)
        if result['result'] != 'success':
            os._exit(1)
        if platform == 'packet':
            allnodes = ["%s-bootstrap" % cluster] + ["%s-master-%s" % (cluster, num) for num in range(masters)]
            for node in allnodes:
                try:
                    k.add_nic(node, network)
                except Exception as e:
                    pprint("Hit %s. Continuing still" % str(e), color='red')
                    continue
        bootstrapcommand = 'openshift-install --dir=%s wait-for bootstrap-complete' % clusterdir
        bootstrapcommand += ' || %s' % bootstrapcommand
        run = call(bootstrapcommand, shell=True)
        if run != 0:
            pprint("Leaving environment for debugging purposes", color='red')
            pprint("You can delete it with kcli delete kube --yes %s" % cluster, color='red')
            os._exit(run)
        todelete = ["%s-bootstrap" % cluster]
        if platform in ['kubevirt', 'openstack', 'vsphere', 'packet']:
            todelete.append("%s-bootstrap-helper" % cluster)
    else:
        result = config.plan(plan, inputfile='%s/cloud_masters.yml' % plandir, overrides=overrides)
        if result['result'] != 'success':
            os._exit(1)
        call('openshift-install --dir=%s wait-for bootstrap-complete || exit 1' % clusterdir, shell=True)
        todelete = ["%s-bootstrap" % cluster, "%s-bootstrap-helper" % cluster]
    if platform in virtplatforms:
        ignitionworkerfile = "%s/worker.ign" % clusterdir
        os.remove(ignitionworkerfile)
        while not os.path.exists(ignitionworkerfile) or os.stat(ignitionworkerfile).st_size == 0:
            try:
                with open(ignitionworkerfile, 'w') as w:
                    workerdata = insecure_fetch("https://api.%s.%s:22623/config/worker" % (cluster, domain),
                                                headers=[curl_header])
                    w.write(workerdata)
            except:
                pprint("Waiting 5s before retrieving workers ignition data", color='blue')
                sleep(5)
        if workers > 0:
            pprint("Deploying workers", color='blue')
            if 'name' in overrides:
                del overrides['name']
            if platform in virtplatforms:
                result = config.plan(plan, inputfile='%s/workers.yml' % plandir, overrides=overrides)
            elif platform in cloudplatforms:
                result = config.plan(plan, inputfile='%s/cloud_workers.yml' % plandir, overrides=overrides)
            if result['result'] != 'success':
                os._exit(1)
            if platform == 'packet':
                allnodes = ["%s-worker-%s" % (cluster, num) for num in range(workers)]
                for node in allnodes:
                    k.add_nic(node, network)
    call("oc adm taint nodes -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-", shell=True)
    pprint("Deploying certs autoapprover cronjob", color='blue')
    call("oc create -f %s/autoapprovercron.yml" % clusterdir, shell=True)
    if not minimal:
        installcommand = 'openshift-install --dir=%s wait-for install-complete' % clusterdir
        installcommand += " || %s" % installcommand
        pprint("Launching install-complete step. It will be retried one extra time in case of timeouts",
               color='blue')
        call(installcommand, shell=True)
    else:
        kubeconf = os.environ['KUBECONFIG']
        kubepassword = open("%s/auth/kubeadmin-password" % clusterdir).read()
        info("Minimal Cluster ready to be used")
        info("INFO Install Complete")
        info("To access the cluster as the system:admin user when running 'oc', run export KUBECONFIG=%s" % kubeconf)
        info("Access the Openshift web-console here: https://console-openshift-console.apps.%s.%s" % (cluster, domain))
        info("Login to the console with user: kubeadmin, password: %s" % kubepassword)
    for vm in todelete:
        pprint("Deleting %s" % vm)
        k.delete(vm)
    os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
    if apps:
        overrides['openshift_version'] = INSTALLER_VERSION[0:3]
        for app in apps:
            appdir = "%s/apps/%s" % (plandir, app)
            if not os.path.exists(appdir):
                pprint("Skipping unsupported app %s" % app, color='yellow')
            else:
                pprint("Adding app %s" % app, color='blue')
                kube_create_app(config, appdir, overrides=overrides)
Example #11
def create(config, plandir, cluster, overrides):
    platform = config.type
    k = config.k
    data = {'kubetype': 'generic', 'xip': False, 'domain': 'karmalabs.com'}
    data.update(overrides)
    if 'keys' not in overrides and not os.path.exists(os.path.expanduser("~/.ssh/id_rsa.pub"))\
            and not os.path.exists(os.path.expanduser("~/.ssh/id_dsa.pub"))\
            and not os.path.exists(os.path.expanduser("~/.kcli/id_rsa.pub"))\
            and not os.path.exists(os.path.expanduser("~/.kcli/id_dsa.pub")):
        error(
            "No usable public key found, which is required for the deployment")
        os._exit(1)
    data['cluster'] = overrides.get(
        'cluster', cluster if cluster is not None else 'testk')
    plan = cluster if cluster is not None else data['cluster']
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    if masters == 0:
        error("Invalid number of masters")
        os._exit(1)
    network = data.get('network', 'default')
    xip = data['xip']
    api_ip = data.get('api_ip')
    if platform in cloudplatforms:
        domain = data.get('domain', 'karmalabs.com')
        api_ip = "%s-master.%s" % (cluster, domain)
    elif api_ip is None:
        if network == 'default' and platform == 'kvm':
            warning("Using 192.168.122.253 as api_ip")
            data['api_ip'] = "192.168.122.253"
            api_ip = "192.168.122.253"
        elif platform == 'kubevirt':
            selector = {'kcli/plan': plan, 'kcli/role': 'master'}
            api_ip = config.k.create_service("%s-api" % cluster,
                                             config.k.namespace,
                                             selector,
                                             _type="LoadBalancer",
                                             ports=[6443])
            if api_ip is None:
                os._exit(1)
            else:
                pprint("Using api_ip %s" % api_ip)
                data['api_ip'] = api_ip
        else:
            error("You need to define api_ip in your parameters file")
            os._exit(1)
    if xip and platform not in cloudplatforms:
        data['domain'] = "%s.xip.io" % api_ip
    if data.get('virtual_router_id') is None:
        data['virtual_router_id'] = hash(data['cluster']) % 254 + 1
    pprint("Using keepalived virtual_router_id %s" % data['virtual_router_id'])
    version = data.get('version')
    if version is not None and not str(version).startswith('1.'):
        error("Invalid version %s" % version)
        os._exit(1)
    data['basedir'] = '/workdir' if os.path.exists(
        "/i_am_a_container") else '.'
    cluster = data.get('cluster')
    image = data.get('image', 'centos7')
    data['ubuntu'] = True if 'ubuntu' in image.lower() or [
        entry for entry in UBUNTUS if entry in image
    ] else False
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    firstmaster = "%s-master-0" % cluster
    if os.path.exists(clusterdir):
        error("Please remove existing directory %s first..." % clusterdir)
        sys.exit(1)
    if find_executable('kubectl') is None:
        get_kubectl()
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
        with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
            installparam = overrides.copy()
            installparam['api_ip'] = api_ip
            installparam['virtual_router_id'] = data['virtual_router_id']
            installparam['plan'] = plan
            yaml.safe_dump(installparam,
                           p,
                           default_flow_style=False,
                           encoding='utf-8',
                           allow_unicode=True)
    result = config.plan(plan,
                         inputfile='%s/masters.yml' % plandir,
                         overrides=data)
    if result['result'] != "success":
        os._exit(1)
    source, destination = "/root/join.sh", "%s/join.sh" % clusterdir
    firstmasterip, firstmastervmport = _ssh_credentials(k, firstmaster)[1:]
    scpcmd = scp(firstmaster,
                 ip=firstmasterip,
                 user='******',
                 source=source,
                 destination=destination,
                 tunnel=config.tunnel,
                 tunnelhost=config.tunnelhost,
                 tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser,
                 download=True,
                 insecure=True,
                 vmport=firstmastervmport)
    os.system(scpcmd)
    source, destination = "/etc/kubernetes/admin.conf", "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(firstmaster,
                 ip=firstmasterip,
                 user='******',
                 source=source,
                 destination=destination,
                 tunnel=config.tunnel,
                 tunnelhost=config.tunnelhost,
                 tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser,
                 download=True,
                 insecure=True,
                 vmport=firstmastervmport)
    os.system(scpcmd)
    workers = data.get('workers', 0)
    if workers > 0:
        pprint("Deploying workers")
        if 'name' in data:
            del data['name']
        os.chdir(os.path.expanduser("~/.kcli"))
        config.plan(plan, inputfile='%s/workers.yml' % plandir, overrides=data)
    success("Kubernetes cluster %s deployed!!!" % cluster)
    masters = data.get('masters', 1)
    info2("export KUBECONFIG=$HOME/.kcli/clusters/%s/auth/kubeconfig" %
          cluster)
    info2("export PATH=$PWD:$PATH")
    prefile = 'pre_ubuntu.sh' if data['ubuntu'] else 'pre_el.sh'
    predata = config.process_inputfile(plan,
                                       "%s/%s" % (plandir, prefile),
                                       overrides=data)
    with open("%s/pre.sh" % clusterdir, 'w') as f:
        f.write(predata)
    os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
    apps = data.get('apps', [])
    if apps:
        os.environ["PATH"] += ":%s" % os.getcwd()
        for app in apps:
            appdir = "%s/apps/%s" % (plandir, app)
            if not os.path.exists(appdir):
                warning("Skipping unsupported app %s" % app)
            else:
                pprint("Adding app %s" % app)
                if '%s_version' % app not in overrides:
                    data['%s_version' % app] = 'latest'
                kube_create_app(config, appdir, overrides=data)
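
One detail worth flagging in the virtual_router_id fallback above: Python salts string hashes per interpreter process, so hash(data['cluster']) % 254 + 1 gives a different value on each run unless PYTHONHASHSEED is pinned; the example copes with that by persisting the chosen id in kcli_parameters.yml. A quick illustration (the cluster name is made up):

cluster_name = 'testk'                           # illustrative name
virtual_router_id = hash(cluster_name) % 254 + 1
print(virtual_router_id)                         # a value in 1..254 that varies between runs
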
Example #12
def create(config, plandir, cluster, overrides):
    platform = config.type
    data = {'kubetype': 'k3s'}
    data.update(overrides)
    data['cluster'] = overrides.get('cluster', cluster if cluster is not None else 'testk')
    plan = cluster if cluster is not None else data['cluster']
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    network = data.get('network', 'default')
    token = data.get('token', 'supersecret')
    api_ip = data.get('api_ip')
    if masters > 1:
        if platform in cloudplatforms:
            domain = data.get('domain', 'karmalabs.com')
            api_ip = "%s-master.%s" % (cluster, domain)
        elif api_ip is None:
            if network == 'default' and platform == 'kvm':
                pprint("Using 192.168.122.253 as api_ip", color='yellow')
                data['api_ip'] = "192.168.122.253"
            elif platform == 'kubevirt':
                selector = {'kcli/plan': plan, 'kcli/role': 'master'}
                api_ip = config.k.create_service("%s-api" % cluster, config.k.namespace, selector,
                                                 _type="LoadBalancer", port=6443)
                if api_ip is None:
                    os._exit(1)
                else:
                    pprint("Using api_ip %s" % api_ip, color='blue')
                    data['api_ip'] = api_ip
            else:
                pprint("You need to define api_ip in your parameters file", color='red')
                os._exit(1)
    version = data.get('version')
    if version not in ['stable', 'latest', 'testing']:
        pprint("Invalid version %s" % version, color='red')
        os._exit(1)
    data['basedir'] = '/workdir' if os.path.exists("/i_am_a_container") else '.'
    cluster = data.get('cluster')
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    firstmaster = "%s-master-0" % cluster
    if os.path.exists(clusterdir):
        pprint("Please remove existing directory %s first..." % clusterdir, color='red')
        sys.exit(1)
    if find_executable('kubectl') is None:
        get_kubectl()
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
        with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
            installparam = overrides.copy()
            installparam['plan'] = plan
            yaml.safe_dump(installparam, p, default_flow_style=False, encoding='utf-8', allow_unicode=True)
    k = config.k
    result = config.plan(cluster, inputfile='%s/bootstrap.yml' % plandir, overrides=data)
    if result['result'] != "success":
        os._exit(1)
    if masters > 1:
        pprint("Deploying extra masters", color='blue')
        if 'name' in data:
            del data['name']
        config.plan(cluster, inputfile='%s/masters.yml' % plandir, overrides=data)
    firstmasterip, firstmastervmport = _ssh_credentials(k, firstmaster)[1:]
    with open("%s/join.sh" % clusterdir, 'w') as f:
        if api_ip is None:
            api_ip = k.info(firstmaster)['ip']
        f.write("curl -sfL https://get.k3s.io | K3S_URL=https://%s:6443 K3S_TOKEN=%s sh -\n" % (api_ip, token))
    source, destination = "/root/kubeconfig", "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(firstmaster, ip=firstmasterip, user='******', source=source, destination=destination,
                 tunnel=config.tunnel, tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser, download=True, insecure=True, vmport=firstmastervmport)
    os.system(scpcmd)
    workers = data.get('workers', 0)
    if workers > 0:
        pprint("Deploying workers", color='blue')
        if 'name' in data:
            del data['name']
        os.chdir(os.path.expanduser("~/.kcli"))
        config.plan(cluster, inputfile='%s/workers.yml' % plandir, overrides=data)
    pprint("K3s cluster %s deployed!!!" % cluster)
    info("export KUBECONFIG=$HOME/.kcli/clusters/%s/auth/kubeconfig" % cluster)
    info("export PATH=$PWD:$PATH")
Example #13
def create(config, plandir, cluster, overrides):
    platform = config.type
    data = {'kubetype': 'k3s'}
    data.update(overrides)
    data['cluster'] = overrides.get(
        'cluster', cluster if cluster is not None else 'testk')
    plan = cluster if cluster is not None else data['cluster']
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    network = data.get('network', 'default')
    sdn = data.get('sdn')
    token = data.get('token', 'supersecret')
    api_ip = data.get('api_ip')
    if masters > 1:
        if platform in cloudplatforms:
            domain = data.get('domain', 'karmalabs.com')
            api_ip = "%s-master.%s" % (cluster, domain)
        elif api_ip is None:
            if network == 'default' and platform == 'kvm':
                warning("Using 192.168.122.253 as api_ip")
                data['api_ip'] = "192.168.122.253"
                api_ip = "192.168.122.253"
            elif platform == 'kubevirt':
                selector = {'kcli/plan': plan, 'kcli/role': 'master'}
                api_ip = config.k.create_service("%s-api" % cluster,
                                                 config.k.namespace,
                                                 selector,
                                                 _type="LoadBalancer",
                                                 ports=[6443])
                if api_ip is None:
                    os._exit(1)
                else:
                    pprint("Using api_ip %s" % api_ip)
                    data['api_ip'] = api_ip
            else:
                error("You need to define api_ip in your parameters file")
                os._exit(1)
    data['basedir'] = '/workdir' if os.path.exists(
        "/i_am_a_container") else '.'
    install_k3s_args = []
    for arg in data:
        if arg.startswith('install_k3s'):
            install_k3s_args.append("%s=%s" % (arg.upper(), data[arg]))
    cluster = data.get('cluster')
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    firstmaster = "%s-master-0" % cluster
    if os.path.exists(clusterdir):
        error("Please remove existing directory %s first..." % clusterdir)
        sys.exit(1)
    if find_executable('kubectl') is None:
        get_kubectl()
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
        with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
            installparam = overrides.copy()
            installparam['api_ip'] = api_ip
            installparam['plan'] = plan
            yaml.safe_dump(installparam,
                           p,
                           default_flow_style=False,
                           encoding='utf-8',
                           allow_unicode=True)
    if os.path.exists("manifests") and os.path.isdir("manifests"):
        data['files'] = [{
            "path": "/root/manifests",
            "currentdir": True,
            "origin": "manifests"
        }]
    k = config.k
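    # Two sets of installer args are derived from the common list below: unless sdn is
    # explicitly 'flannel', the bootstrap node disables the flannel backend and the
    # remaining nodes disable flannel and network policy, presumably so an external
    # SDN can be layered on afterwards.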
    bootstrap_overrides = data.copy()
    bootstrap_install_k3s_args = install_k3s_args.copy()
    if sdn is None or sdn != 'flannel':
        bootstrap_install_k3s_args.append(
            "INSTALL_K3S_EXEC='--flannel-backend=none'")
    bootstrap_install_k3s_args = ' '.join(bootstrap_install_k3s_args)
    bootstrap_overrides['install_k3s_args'] = bootstrap_install_k3s_args
    result = config.plan(plan,
                         inputfile='%s/bootstrap.yml' % plandir,
                         overrides=bootstrap_overrides)
    if result['result'] != "success":
        os._exit(1)
    nodes_overrides = data.copy()
    nodes_install_k3s_args = install_k3s_args.copy()
    if sdn is None or sdn != 'flannel':
        nodes_install_k3s_args.append(
            "INSTALL_K3S_EXEC='--disable-network-policy --no-flannel'")
    nodes_install_k3s_args = ' '.join(nodes_install_k3s_args)
    nodes_overrides['install_k3s_args'] = nodes_install_k3s_args
    if masters > 1:
        pprint("Deploying extra masters")
        config.plan(plan,
                    inputfile='%s/masters.yml' % plandir,
                    overrides=nodes_overrides)
    firstmasterip, firstmastervmport = _ssh_credentials(k, firstmaster)[1:]
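    # join.sh reuses the per-node installer args and appends any extra agent args, so
    # additional workers can join later by running it against the API endpoint with the token.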
    with open("%s/join.sh" % clusterdir, 'w') as f:
        if api_ip is None:
            api_ip = k.info(firstmaster)['ip']
        joincmd = "curl -sfL https://get.k3s.io | %s K3S_URL=https://%s:6443 K3S_TOKEN=%s" % (
            nodes_install_k3s_args, api_ip, token)
        extra_args = data['extra_worker_args'] if data.get(
            'extra_worker_args', []) else data.get('extra_args', [])
        extra_args = ' '.join(extra_args)
        f.write("%s sh -s - agent %s \n" % (joincmd, extra_args))
    source, destination = "/root/kubeconfig", "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(firstmaster,
                 ip=firstmasterip,
                 user='******',
                 source=source,
                 destination=destination,
                 tunnel=config.tunnel,
                 tunnelhost=config.tunnelhost,
                 tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser,
                 download=True,
                 insecure=True,
                 vmport=firstmastervmport)
    os.system(scpcmd)
    workers = data.get('workers', 0)
    if workers > 0:
        pprint("Deploying workers")
        if 'name' in data:
            del data['name']
        os.chdir(os.path.expanduser("~/.kcli"))
        config.plan(plan, inputfile='%s/workers.yml' % plandir, overrides=data)
    success("K3s cluster %s deployed!!!" % cluster)
    info2("export KUBECONFIG=$HOME/.kcli/clusters/%s/auth/kubeconfig" %
          cluster)
    info2("export PATH=$PWD:$PATH")
Exemple #14
0
def create(config, plandir, cluster, overrides):
    k = config.k
    bootstrap_helper_ip = None
    client = config.client
    platform = config.type
    pprint("Deploying on client %s" % client)
    data = {
        'helper_image': 'CentOS-7-x86_64-GenericCloud.qcow2',
        'domain': 'karmalabs.com',
        'network': 'default',
        'masters': 1,
        'workers': 0,
        'tag': DEFAULT_TAG,
        'ipv6': False,
        'pub_key': os.path.expanduser('~/.ssh/id_rsa.pub'),
        'pull_secret': 'openshift_pull.json',
        'version': 'nightly',
        'macosx': False,
        'upstream': False,
        'fips': False,
        'apps': [],
        'minimal': False,
        'dualstack': False,
        'sno': False,
        'sno_baremetal': False,
        'sno_disk': 'vda'
    }
    data.update(overrides)
    if 'cluster' in overrides:
        clustervalue = overrides.get('cluster')
    elif cluster is not None:
        clustervalue = cluster
    else:
        clustervalue = 'testk'
    data['cluster'] = clustervalue
    domain = data.get('domain')
    pprint("Deploying cluster %s" % clustervalue)
    plan = cluster if cluster is not None else clustervalue
    overrides['kubetype'] = 'openshift'
    apps = overrides.get('apps', [])
    if ('localstorage' in apps or 'ocs' in apps) and 'extra_disks' not in overrides\
            and 'extra_master_disks' not in overrides and 'extra_worker_disks' not in overrides:
        warning("Storage apps require extra disks to be set")
    overrides['kube'] = data['cluster']
    installparam = overrides.copy()
    sno = data.get('sno', False)
    ignore_hosts = data.get('ignore_hosts', False)
    if sno:
        sno_baremetal = data.get('sno_baremetal', False)
        sno_disk = data.get('sno_disk', 'vda')
        if sno_baremetal and 'vda' in sno_disk:
            error("You need to define correct sno_disk for baremetal")
            os._exit(1)
        # tag = 'registry.svc.ci.openshift.org/sno-dev/openshift-bip:0.5.0'
        masters = 1
        workers = 0
        data['mdns'] = False
        data['kubetype'] = 'openshift'
        data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    if masters == 0:
        error("Invalid number of masters")
        os._exit(1)
    network = data.get('network')
    ipv6 = data['ipv6']
    disconnected_deploy = data.get('disconnected_deploy', False)
    disconnected_reuse = data.get('disconnected_reuse', False)
    disconnected_url = data.get('disconnected_url')
    disconnected_user = data.get('disconnected_user')
    disconnected_password = data.get('disconnected_password')
    disconnected_prefix = data.get('disconnected_prefix', 'ocp4')
    dualstack = data.get('dualstack')
    upstream = data.get('upstream')
    version = data.get('version')
    tag = data.get('tag')
    if os.path.exists('openshift-install'):
        pprint("Removing old openshift-install")
        os.remove('openshift-install')
    minimal = data.get('minimal')
    if version not in ['ci', 'nightly']:
        pprint("Using stable version")
    else:
        pprint("Using %s version" % version)
    cluster = data.get('cluster')
    helper_image = data.get('helper_image')
    image = data.get('image')
    api_ip = data.get('api_ip')
    if platform in virtplatforms and not sno and api_ip is None:
        if network == 'default' and platform == 'kvm':
            warning("Using 192.168.122.253 as api_ip")
            overrides['api_ip'] = "192.168.122.253"
            api_ip = "192.168.122.253"
        else:
            error("You need to define api_ip in your parameters file")
            os._exit(1)
    if not sno and api_ip is not None and ':' in api_ip:
        ipv6 = True
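    # An IPv6 api_ip switches the cluster to the OVNKubernetes network type and, when no
    # disconnected_url was supplied, forces a disconnected deployment.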
    if ipv6:
        data['network_type'] = 'OVNKubernetes'
        data['ipv6'] = True
        overrides['ipv6'] = True
        if not disconnected_deploy and disconnected_url is None:
            warning(
                "Forcing disconnected_deploy to True as no disconnected_url was provided"
            )
            data['disconnected_deploy'] = True
            disconnected_deploy = True
    ingress_ip = data.get('ingress_ip')
    if ingress_ip is not None and api_ip is not None and ingress_ip == api_ip:
        ingress_ip = None
        overrides['ingress_ip'] = None
    public_api_ip = data.get('public_api_ip')
    network = data.get('network')
    if platform == 'packet':
        if network == 'default':
            error("You need to indicate a specific vlan network")
            os._exit(1)
        else:
            facilities = [
                n['domain'] for n in k.list_networks().values()
                if str(n['cidr']) == network
            ]
            if not facilities:
                error("Vlan network %s not found in any facility" % network)
                os._exit(1)
            elif k.facility not in facilities:
                error("Vlan network %s not found in facility %s" %
                      (network, k.facility))
                os._exit(1)
    masters = data.get('masters')
    workers = data.get('workers')
    tag = data.get('tag')
    pub_key = data.get('pub_key')
    pull_secret = pwd_path(data.get(
        'pull_secret')) if not upstream else "%s/fake_pull.json" % plandir
    pull_secret = os.path.expanduser(pull_secret)
    macosx = data.get('macosx')
    if macosx and not os.path.exists('/i_am_a_container'):
        macosx = False
    if platform == 'openstack' and (api_ip is None or public_api_ip is None):
        error(
            "You need to define both api_ip and public_api_ip in your parameters file"
        )
        os._exit(1)
    if not os.path.exists(pull_secret):
        error("Missing pull secret file %s" % pull_secret)
        sys.exit(1)
    if not os.path.exists(pub_key):
        if os.path.exists(os.path.expanduser('~/.kcli/id_rsa.pub')):
            pub_key = os.path.expanduser('~/.kcli/id_rsa.pub')
        else:
            error("Missing public key file %s" % pub_key)
            sys.exit(1)
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if os.path.exists(clusterdir):
        if [v for v in config.k.list() if v.get('plan', 'kvirt') == cluster]:
            error("Please remove existing directory %s first..." % clusterdir)
            sys.exit(1)
        else:
            pprint("Removing directory %s" % clusterdir)
            rmtree(clusterdir)
    os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
    if find_executable('oc') is None:
        get_oc(macosx=macosx)
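    # For 'ci' versions the release payload is pinned through the
    # OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE environment variable, which
    # openshift-install reads to select the exact release image to install.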
    if version == 'ci':
        if '/' not in str(tag):
            basetag = 'ocp' if not upstream else 'origin'
            tag = 'registry.ci.openshift.org/%s/release:%s' % (basetag, tag)
        os.environ['OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE'] = tag
        pprint("Setting OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE to %s" % tag)
    if find_executable('openshift-install') is None:
        if version == 'ci':
            run = get_ci_installer(pull_secret, tag=tag, upstream=upstream)
        elif version == 'nightly':
            run = get_downstream_installer(nightly=True, tag=tag)
        elif upstream:
            run = get_upstream_installer(tag=tag)
        else:
            run = get_downstream_installer(tag=tag)
        if run != 0:
            error("Couldn't download openshift-install")
            os._exit(run)
        pprint(
            "Move downloaded openshift-install somewhere in your path if you want to reuse it"
        )
    os.environ["PATH"] += ":%s" % os.getcwd()
    if disconnected_url is not None:
        if '/' not in str(tag):
            tag = '%s/%s/release:%s' % (disconnected_url, disconnected_prefix,
                                        tag)
            os.environ['OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE'] = tag
        pprint("Setting OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE to %s" % tag)
    INSTALLER_VERSION = get_installer_version()
    COMMIT_ID = os.popen('openshift-install version').readlines()[1].replace(
        'built from commit', '').strip()
    if platform == 'packet' and not upstream:
        overrides['commit_id'] = COMMIT_ID
    pprint("Using installer version %s" % INSTALLER_VERSION)
    OPENSHIFT_VERSION = INSTALLER_VERSION[0:3].replace('.', '')
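    # Pick the HTTP header later used when nodes fetch their ignition config: 4.6+ expects
    # the v3.1.0 Accept header, while upstream/OKD and OCP releases older than 4.6 are
    # matched by their respective Ignition user agents.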
    curl_header = "Accept: application/vnd.coreos.ignition+json; version=3.1.0"
    if upstream:
        curl_header = "User-Agent: Ignition/2.3.0"
    elif OPENSHIFT_VERSION.isdigit() and int(OPENSHIFT_VERSION) < 46:
        curl_header = "User-Agent: Ignition/0.35.0"
    overrides['curl_header'] = curl_header
    if sno:
        pass
    elif image is None:
        if upstream:
            fcos_base = 'stable' if version == 'stable' else 'testing'
            fcos_url = "https://builds.coreos.fedoraproject.org/streams/%s.json" % fcos_base
            image_url = get_latest_fcos(fcos_url, _type=config.type)
        else:
            image_url = get_commit_rhcos(COMMIT_ID, _type=config.type)
        image = os.path.basename(os.path.splitext(image_url)[0])
        images = [v for v in k.volumes() if image in v]
        if not images:
            result = config.handle_host(pool=config.pool,
                                        image=image,
                                        download=True,
                                        update_profile=False,
                                        url=image_url)
            if result['result'] != 'success':
                os._exit(1)
        else:
            pprint("Using image %s" % image)
    elif platform != 'packet':
        pprint("Checking if image %s is available" % image)
        images = [v for v in k.volumes() if image in v]
        if not images:
            error(
                "Missing %s. Indicate correct image in your parameters file..."
                % image)
            os._exit(1)
    else:
        error(
            "Missing image in your parameters file. This is required for packet"
        )
        os._exit(1)
    overrides['image'] = image
    overrides['cluster'] = cluster
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
            installparam['plan'] = plan
            yaml.safe_dump(installparam,
                           p,
                           default_flow_style=False,
                           encoding='utf-8',
                           allow_unicode=True)
    data['pub_key'] = open(pub_key).read().strip()
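    # For disconnected deployments on virt platforms, a throwaway "disconnecter" vm is
    # deployed (presumably hosting a local registry mirror); its CA certificate, registry
    # URL and mirrored release version are then read back over ssh and fed into the install.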
    if platform in virtplatforms and disconnected_deploy:
        if platform == 'kvm' and network in [
                n for n in k.list_networks()
                if k.list_networks()[n]['type'] == 'routed'
        ]:
            data['disconnected_dns'] = True
        disconnected_vm = "%s-disconnecter" % cluster
        pprint("Deploying disconnected vm %s" % disconnected_vm)
        data['pull_secret'] = re.sub(r"\s", "", open(pull_secret).read())
        disconnected_plan = "%s-reuse" % plan if disconnected_reuse else plan
        if version == 'ci' and 'disconnected_origin' not in overrides:
            warning("Forcing disconnected_origin to registry.ci.openshift.org")
            data['disconnected_origin'] = "registry.ci.openshift.org"
        result = config.plan(disconnected_plan,
                             inputfile='%s/disconnected.yml' % plandir,
                             overrides=data)
        if result['result'] != 'success':
            os._exit(1)
        disconnected_ip, disconnected_vmport = _ssh_credentials(
            k, disconnected_vm)[1:]
        cacmd = "cat /opt/registry/certs/domain.crt"
        cacmd = ssh(disconnected_vm,
                    ip=disconnected_ip,
                    user='******',
                    tunnel=config.tunnel,
                    tunnelhost=config.tunnelhost,
                    tunnelport=config.tunnelport,
                    tunneluser=config.tunneluser,
                    insecure=True,
                    cmd=cacmd,
                    vmport=disconnected_vmport)
        disconnected_ca = os.popen(cacmd).read().strip()
        if data.get('ca') is not None:
            data['ca'] += disconnected_ca
        else:
            data['ca'] = disconnected_ca
        urlcmd = "cat /root/url.txt"
        urlcmd = ssh(disconnected_vm,
                     ip=disconnected_ip,
                     user='******',
                     tunnel=config.tunnel,
                     tunnelhost=config.tunnelhost,
                     tunnelport=config.tunnelport,
                     tunneluser=config.tunneluser,
                     insecure=True,
                     cmd=urlcmd,
                     vmport=disconnected_vmport)
        disconnected_url = os.popen(urlcmd).read().strip()
        overrides['disconnected_url'] = disconnected_url
        data['disconnected_url'] = disconnected_url
        if disconnected_user is None:
            disconnected_user = '******'
        if disconnected_password is None:
            disconnected_password = '******'
        versioncmd = "cat /root/version.txt"
        versioncmd = ssh(disconnected_vm,
                         ip=disconnected_ip,
                         user='******',
                         tunnel=config.tunnel,
                         tunnelhost=config.tunnelhost,
                         tunnelport=config.tunnelport,
                         tunneluser=config.tunneluser,
                         insecure=True,
                         cmd=versioncmd,
                         vmport=disconnected_vmport)
        disconnected_version = os.popen(versioncmd).read().strip()
        os.environ[
            'OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE'] = disconnected_version
        pprint("Setting OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE to %s" %
               disconnected_version)
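    # When a disconnected registry is in use, a minimal pull secret containing only that
    # registry's base64-encoded credentials is generated; otherwise the user-provided
    # pull secret is used verbatim, stripped of whitespace.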
    if disconnected_url is not None and disconnected_user is not None and disconnected_password is not None:
        key = "%s:%s" % (disconnected_user, disconnected_password)
        key = str(b64encode(key.encode('utf-8')), 'utf-8')
        auths = {
            'auths': {
                disconnected_url: {
                    'auth': key,
                    'email': '*****@*****.**'
                }
            }
        }
        data['pull_secret'] = json.dumps(auths)
    else:
        data['pull_secret'] = re.sub(r"\s", "", open(pull_secret).read())
    installconfig = config.process_inputfile(cluster,
                                             "%s/install-config.yaml" %
                                             plandir,
                                             overrides=data)
    with open("%s/install-config.yaml" % clusterdir, 'w') as f:
        f.write(installconfig)
    with open("%s/install-config.yaml.bck" % clusterdir, 'w') as f:
        f.write(installconfig)
    autoapprover = config.process_inputfile(cluster,
                                            "%s/autoapprovercron.yml" %
                                            plandir,
                                            overrides=data)
    with open("%s/autoapprovercron.yml" % clusterdir, 'w') as f:
        f.write(autoapprover)
    run = call('openshift-install --dir=%s create manifests' % clusterdir,
               shell=True)
    if run != 0:
        error("Leaving environment for debugging purposes")
        error("You can delete it with kcli delete kube --yes %s" % cluster)
        os._exit(run)
    if minimal:
        warning("Deploying cvo overrides to provide a minimal install")
        with open("%s/cvo-overrides.yaml" % plandir) as f:
            cvo_override = f.read()
        with open("%s/manifests/cvo-overrides.yaml" % clusterdir, "a") as f:
            f.write(cvo_override)
    if ipv6:
        for role in ['master', 'worker']:
            blacklist = config.process_inputfile(cluster,
                                                 "%s/99-blacklist-ipi.yaml" %
                                                 plandir,
                                                 overrides={'role': role})
            with open(
                    "%s/openshift/99-blacklist-ipi-%s.yaml" %
                (clusterdir, role), 'w') as f:
                f.write(blacklist)
    for f in glob("%s/customisation/*.yaml" % plandir):
        if '99-ingress-controller.yaml' in f:
            ingressrole = 'master' if workers == 0 else 'worker'
            replicas = masters if workers == 0 else workers
            ingressconfig = config.process_inputfile(cluster,
                                                     f,
                                                     overrides={
                                                         'replicas': replicas,
                                                         'role': ingressrole
                                                     })
            with open("%s/openshift/99-ingress-controller.yaml" % clusterdir,
                      'w') as f:
                f.write(ingressconfig)
        else:
            copy2(f, "%s/openshift" % clusterdir)
    manifestsdir = pwd_path("manifests")
    if os.path.exists(manifestsdir) and os.path.isdir(manifestsdir):
        for f in glob("%s/*.yaml" % manifestsdir):
            copy2(f, "%s/openshift" % clusterdir)
    if 'network_type' in data and data['network_type'] == 'Calico':
        for asset in calicoassets:
            fetch(asset, manifestsdir)
    if 'network_type' in data and data['network_type'] == 'Contrail':
        pprint("Fetching contrail assets")
        for asset in contrail_manifests:
            fetch(asset, "%s/manifests" % clusterdir)
        for asset in contrail_openshifts:
            fetch(asset, "%s/openshift" % clusterdir)
        contrail_registry = data.get('contrail_registry', "hub.juniper.net")
        contrail_user = data.get('contrail_user')
        contrail_password = data.get('contrail_password')
        if contrail_user is None:
            error("Missing contrail_user")
            os._exit(1)
        if contrail_password is None:
            error("Missing contrail_password")
            os._exit(1)
        contrail_creds = "%s:%s" % (contrail_user, contrail_password)
        contrail_auth = b64encode(contrail_creds.encode()).decode("UTF-8")
        contrail_auth = {
            "auths": {
                contrail_registry: {
                    "username": contrail_user,
                    "password": contrail_password,
                    "auth": contrail_auth
                }
            }
        }
        contrail_auth = json.dumps(contrail_auth)
        contrail_data = {
            'contrail_auth': b64encode(contrail_auth.encode()).decode("UTF-8")
        }
        contrail_secret = config.process_inputfile(
            cluster,
            "%s/contrail_registry_secret.j2" % plandir,
            overrides=contrail_data)
        with open(
                "%s/manifests/00-contrail-02-registry-secret.yaml" %
                clusterdir, 'w') as f:
            f.write(contrail_secret)
    if dualstack:
        copy2("%s/dualstack.yml" % plandir, "%s/openshift" % clusterdir)
    if sno:
        sno_name = "%s-sno" % cluster
        sno_dns = data.get('sno_dns', True)
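        # Single-node flow: openshift-install's single-node-ignition-config target produces a
        # bootstrap-in-place ignition, which is renamed after the sno vm and then embedded,
        # together with optional DNS helper files, into an iso.ign for a live RHCOS ISO.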
        run = call(
            'openshift-install --dir=%s create single-node-ignition-config' %
            clusterdir,
            shell=True)
        if run != 0:
            error("Hit issue. Leaving")
            os._exit(run)
        os.rename("%s/bootstrap-in-place-for-live-iso.ign" % clusterdir,
                  "./%s.ign" % sno_name)
        with open("iso.ign", 'w') as f:
            if sno_dns:
                _files = [{
                    "path": "/root/sno-finish.service",
                    "origin": "%s/sno-finish.service" % plandir
                }, {
                    "path": "/usr/local/bin/sno-finish.sh",
                    "origin": "%s/sno-finish.sh" % plandir,
                    "mode": 700
                }, {
                    "path": "/root/coredns.yml",
                    "origin": "%s/staticpods/coredns.yml" % plandir
                }, {
                    "path": "/root/Corefile",
                    "origin": "%s/Corefile" % plandir
                }, {
                    "path": "/root/99-forcedns",
                    "origin": "%s/99-forcedns" % plandir
                }]
                iso_overrides = {'files': _files}
            else:
                iso_overrides = {}
            iso_overrides.update(data)
            result = config.create_vm(sno_name,
                                      'rhcos46',
                                      overrides=iso_overrides,
                                      onlyassets=True)
            pprint("Writing iso.ign to current dir")
            f.write(result['data'])
        if config.type != 'kvm':
            pprint("Additional workflow not available on %s" % config.type)
            pprint("Embed iso.ign in rhcos live iso")
            os._exit(0)
        else:
            generate_rhcos_iso(k, cluster, data['pool'])
            if not sno_baremetal:
                pprint("Deploying sno")
                result = config.plan(plan,
                                     inputfile='%s/sno.yml' % plandir,
                                     overrides=data)
                if result['result'] != 'success':
                    os._exit(1)
                if ignore_hosts:
                    warning("Not updating /etc/hosts as per your request")
                else:
                    while api_ip is None:
                        api_ip = k.info(sno_name).get('ip')
                        pprint("Waiting 5s to retrieve sno ip...")
                        sleep(5)
                    update_etc_hosts(cluster, domain, api_ip)
                installcommand = 'openshift-install --dir=%s --log-level=debug wait-for install-complete' % clusterdir
                installcommand += " || %s" % installcommand
                pprint(
                    "Launching install-complete step. It will be retried one extra time in case of timeouts"
                )
                call(installcommand, shell=True)
            else:
                warning(
                    "You might need to create manual entries in /etc/hosts to reach the sno installation"
                )
        os._exit(0)
    call('openshift-install --dir=%s create ignition-configs' % clusterdir,
         shell=True)
    for role in ['master', 'worker']:
        ori = "%s/%s.ign" % (clusterdir, role)
        copy2(ori, "%s.ori" % ori)
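    # With fewer than 3 masters (and a recent enough release), the bootstrap ignition is
    # patched with an extra script and service, presumably so the control plane can
    # converge without the usual three-member etcd quorum.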
    if masters < 3:
        version_match = re.match("4.([0-9]*).*", INSTALLER_VERSION)
        COS_VERSION = "4%s" % version_match.group(
            1) if version_match is not None else '45'
        if not upstream and int(COS_VERSION) > 43:
            bootstrap_patch = open('%s/bootstrap_patch.sh' % plandir).read()
            bootstrap_service = open('%s/bootstrap_patch.service' %
                                     plandir).read()
            patch_bootstrap("%s/bootstrap.ign" % clusterdir, bootstrap_patch,
                            bootstrap_service)
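    # gather_dhcp presumably collects static dhcp reservations from the parameters; when it
    # returns anything, a helper dhcp node is deployed on the cluster network using the
    # helper image.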
    staticdata = gather_dhcp(data, platform)
    if staticdata:
        pprint("Deploying helper dhcp node")
        staticdata.update({
            'network': network,
            'dhcp_image': helper_image,
            'prefix': cluster,
            'domain': '%s.%s' % (cluster, domain)
        })
        result = config.plan(plan,
                             inputfile='%s/dhcp.yml' % plandir,
                             overrides=staticdata)
        if result['result'] != 'success':
            os._exit(1)
    if platform in virtplatforms:
        if data.get('virtual_router_id') is None:
            overrides['virtual_router_id'] = hash(cluster) % 254 + 1
        pprint("Using keepalived virtual_router_id %s" %
               overrides['virtual_router_id'])
        pprint("Using %s for api vip...." % api_ip)
        host_ip = api_ip if platform != "openstack" else public_api_ip
        if ignore_hosts:
            warning("Ignoring /etc/hosts")
        else:
            update_etc_hosts(cluster, domain, host_ip, ingress_ip)
        if platform in [
                'openstack', 'vsphere'
        ] or (platform == 'packet' and config.k.tunnelhost is None):
            # bootstrap ignition is too big in those platforms so we deploy a temporary web server to serve it
            helper_overrides = {}
            if helper_image is None:
                images = [
                    v for v in k.volumes()
                    if 'centos' in v.lower() or 'fedora' in v.lower()
                ]
                if images:
                    helper_image = os.path.basename(images[0])
                else:
                    helper_image = "CentOS-7-x86_64-GenericCloud.qcow2"
                    pprint("Downloading centos helper image")
                    result = config.handle_host(pool=config.pool,
                                                image="centos7",
                                                download=True,
                                                update_profile=False)
                pprint("Using helper image %s" % helper_image)
            else:
                images = [v for v in k.volumes() if helper_image in v]
                if not images:
                    error(
                        "Missing image %s. Indicate correct helper image in your parameters file"
                        % helper_image)
                    os._exit(1)
            if platform == 'openstack':
                helper_overrides['flavor'] = "m1.medium"
            helper_overrides['nets'] = [network]
            helper_overrides['plan'] = cluster
            bootstrap_helper_name = "%s-bootstrap-helper" % cluster
            cmds = ["iptables -F", "yum -y install httpd", "setenforce 0"]
            if platform == 'packet':
                config.k.tunnelhost = bootstrap_helper_ip
                cmds.append(
                    "sed -i 's/apache/root/' /etc/httpd/conf/httpd.conf")
            cmds.append("systemctl enable --now httpd")
            helper_overrides['cmds'] = cmds
            config.create_vm("%s-bootstrap-helper" % cluster,
                             helper_image,
                             overrides=helper_overrides,
                             wait=True)
            bootstrap_helper_ip, bootstrap_helper_vmport = _ssh_credentials(
                k, bootstrap_helper_name)[1:]
            source, destination = "%s/bootstrap.ign" % clusterdir, "/var/www/html/bootstrap"
            scpcmd = scp(bootstrap_helper_name,
                         ip=bootstrap_helper_ip,
                         user='******',
                         source=source,
                         destination=destination,
                         tunnel=config.tunnel,
                         tunnelhost=config.tunnelhost,
                         tunnelport=config.tunnelport,
                         tunneluser=config.tunneluser,
                         download=False,
                         insecure=True,
                         vmport=bootstrap_helper_vmport)
            os.system(scpcmd)
            cmd = "chown apache.apache /var/www/html/bootstrap"
            sshcmd = ssh(bootstrap_helper_name,
                         ip=bootstrap_helper_ip,
                         user='******',
                         tunnel=config.tunnel,
                         tunnelhost=config.tunnelhost,
                         tunnelport=config.tunnelport,
                         tunneluser=config.tunneluser,
                         insecure=True,
                         cmd=cmd,
                         vmport=bootstrap_helper_vmport)
            os.system(sshcmd)
            sedcmd = 'sed "s@https://api-int.%s.%s:22623/config/master@http://%s/bootstrap@" ' % (
                cluster, domain, bootstrap_helper_ip)
            sedcmd += '%s/master.ign' % clusterdir
            sedcmd += ' > %s/bootstrap.ign' % clusterdir
            call(sedcmd, shell=True)
            sedcmd = 'sed "s@https://api-int.%s.%s:22623/config/master@http://%s/worker@" ' % (
                cluster, domain, bootstrap_helper_ip)
            sedcmd += '%s/master.ign' % clusterdir
            sedcmd += ' > %s/worker.ign' % clusterdir
            call(sedcmd, shell=True)
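        # Rewrite master.ign/worker.ign so nodes fetch their config from the api vip over
        # plain http on port 22624 instead of the internal https machine-config endpoint
        # (IPv6 addresses get bracketed first).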
        new_api_ip = api_ip if not ipv6 else "[%s]" % api_ip
        sedcmd = 'sed -i "s@https://api-int.%s.%s:22623/config@http://%s:22624/config@"' % (
            cluster, domain, new_api_ip)
        sedcmd += ' %s/master.ign %s/worker.ign' % (clusterdir, clusterdir)
        call(sedcmd, shell=True)
    if platform in cloudplatforms:
        bootstrap_helper_name = "%s-bootstrap-helper" % cluster
        helper_overrides = {
            'reservedns': True,
            'domain': '%s.%s' % (cluster, domain),
            'tags': [tag],
            'plan': cluster,
            'nets': [network]
        }
        config.create_vm("%s-bootstrap-helper" % cluster,
                         helper_image,
                         overrides=helper_overrides)
        status = ""
        while status != "running":
            status = k.info(bootstrap_helper_name).get('status')
            pprint("Waiting 5s for bootstrap helper node to be running...")
            sleep(5)
        sleep(5)
        bootstrap_helper_ip, bootstrap_helper_vmport = _ssh_credentials(
            k, bootstrap_helper_name)[1:]
        cmd = "iptables -F ; yum -y install httpd ; systemctl start httpd"
        sshcmd = ssh(bootstrap_helper_name,
                     ip=bootstrap_helper_ip,
                     user='******',
                     tunnel=config.tunnel,
                     tunnelhost=config.tunnelhost,
                     tunnelport=config.tunnelport,
                     tunneluser=config.tunneluser,
                     insecure=True,
                     cmd=cmd,
                     vmport=bootstrap_helper_vmport)
        os.system(sshcmd)
        source, destination = "%s/bootstrap.ign" % clusterdir, "/var/www/html/bootstrap"
        scpcmd = scp(bootstrap_helper_name,
                     ip=bootstrap_helper_ip,
                     user='******',
                     source=source,
                     destination=destination,
                     tunnel=config.tunnel,
                     tunnelhost=config.tunnelhost,
                     tunnelport=config.tunnelport,
                     tunneluser=config.tunneluser,
                     download=False,
                     insecure=True,
                     vmport=bootstrap_helper_vmport)
        os.system(scpcmd)
        sedcmd = 'sed "s@https://api-int.%s.%s:22623/config/master@' % (
            cluster, domain)
        sedcmd += 'http://%s-bootstrap-helper.%s.%s/bootstrap@" ' % (
            cluster, cluster, domain)
        sedcmd += '%s/master.ign' % clusterdir
        sedcmd += ' > %s/bootstrap.ign' % clusterdir
        call(sedcmd, shell=True)
    if platform in virtplatforms:
        pprint("Deploying bootstrap")
        result = config.plan(plan,
                             inputfile='%s/bootstrap.yml' % plandir,
                             overrides=overrides)
        if result['result'] != 'success':
            os._exit(1)
        pprint("Deploying masters")
        result = config.plan(plan,
                             inputfile='%s/masters.yml' % plandir,
                             overrides=overrides)
        if result['result'] != 'success':
            os._exit(1)
        if platform == 'packet':
            allnodes = ["%s-bootstrap" % cluster] + [
                "%s-master-%s" % (cluster, num) for num in range(masters)
            ]
            for node in allnodes:
                try:
                    k.add_nic(node, network)
                except Exception as e:
                    error("Hit %s. Continuing still" % str(e))
                    continue
        bootstrapcommand = 'openshift-install --dir=%s wait-for bootstrap-complete' % clusterdir
        bootstrapcommand += ' || %s' % bootstrapcommand
        run = call(bootstrapcommand, shell=True)
        if run != 0:
            error("Leaving environment for debugging purposes")
            error("You can delete it with kcli delete cluster --yes %s" %
                  cluster)
            os._exit(run)
        todelete = [] if 'network_type' in data and data[
            'network_type'] == 'Contrail' else ["%s-bootstrap" % cluster]
        if platform in ['openstack', 'vsphere', 'packet']:
            todelete.append("%s-bootstrap-helper" % cluster)
    else:
        pprint("Deploying bootstrap")
        result = config.plan(plan,
                             inputfile='%s/cloud_bootstrap.yml' % plandir,
                             overrides=overrides)
        if result['result'] != 'success':
            os._exit(1)
        pprint("Deploying masters")
        result = config.plan(plan,
                             inputfile='%s/cloud_masters.yml' % plandir,
                             overrides=overrides)
        if result['result'] != 'success':
            os._exit(1)
        call('openshift-install --dir=%s wait-for bootstrap-complete || exit 1'
             % clusterdir,
             shell=True)
        todelete = [] if 'network_type' in data and data[
            'network_type'] == 'Contrail' else ["%s-bootstrap" % cluster]
        todelete.append("%s-bootstrap-helper" % cluster)
    if platform in virtplatforms:
        if workers > 0:
            pprint("Deploying workers")
            if 'name' in overrides:
                del overrides['name']
            if platform in virtplatforms:
                result = config.plan(plan,
                                     inputfile='%s/workers.yml' % plandir,
                                     overrides=overrides)
            elif platform in cloudplatforms:
                result = config.plan(plan,
                                     inputfile='%s/cloud_workers.yml' %
                                     plandir,
                                     overrides=overrides)
            if result['result'] != 'success':
                os._exit(1)
            if platform == 'packet':
                allnodes = [
                    "%s-worker-%s" % (cluster, num) for num in range(workers)
                ]
                for node in allnodes:
                    k.add_nic(node, network)
    if 'network_type' in data and data['network_type'] == 'Contrail':
        pprint("Waiting 7 minutes for the install to stabilize")
        sleep(420)
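    # Allow regular workloads on the masters by removing their NoSchedule taint; this is
    # mainly relevant when the cluster was deployed without dedicated workers.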
    call(
        "oc adm taint nodes -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-",
        shell=True)
    pprint("Deploying certs autoapprover cronjob")
    autoapprovercmd = 'oc create -f %s/autoapprovercron.yml' % clusterdir
    call(autoapprovercmd, shell=True)
    if not minimal:
        installcommand = 'openshift-install --dir=%s wait-for install-complete' % clusterdir
        installcommand += " || %s" % installcommand
        pprint(
            "Launching install-complete step. It will be retried one extra time in case of timeouts"
        )
        call(installcommand, shell=True)
    else:
        kubeconf = os.environ['KUBECONFIG']
        kubepassword = open("%s/auth/kubeadmin-password" % clusterdir).read()
        success("Minimal Cluster ready to be used")
        success("INFO Install Complete")
        info2(
            "To access the cluster as the system:admin user when running 'oc', run export KUBECONFIG=%s"
            % kubeconf)
        info2(
            "Access the Openshift web-console here: https://console-openshift-console.apps.%s.%s"
            % (cluster, domain))
        info2("Login to the console with user: kubeadmin, password: %s" %
              kubepassword)
    for vm in todelete:
        pprint("Deleting %s" % vm)
        k.delete(vm)
    os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
    if apps:
        overrides['openshift_version'] = INSTALLER_VERSION[0:3]
        for app in apps:
            appdir = "%s/apps/%s" % (plandir, app)
            if not os.path.exists(appdir):
                warning("Skipping unsupported app %s" % app)
            else:
                pprint("Adding app %s" % app)
                kube_create_app(config, appdir, overrides=overrides)