def _node_calico(upd, with_testing, node_name, node_ip):
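    """Install and start Calico networking on the node.

    Installs the Calico CNI plugin and calicoctl packages, writes the etcd
    and Calico configs, switches kubelet to the CNI network plugin, pulls the
    calico-node image (with retries, to avoid calicoctl pull timeouts) and
    starts the calico-node container via calicoctl.
    """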
    helpers.remote_install(CALICO_CNI, with_testing)
    helpers.remote_install(CALICOCTL, with_testing)

    _create_etcd_config()
    _create_calico_config()

    run('python /var/lib/kuberdock/scripts/kubelet_args.py --network-plugin=')
    run('python /var/lib/kuberdock/scripts/kubelet_args.py '
        '--network-plugin=cni --network-plugin-dir=/etc/cni/net.d')

    # pull the image separately to get rid of calicoctl timeouts
    for i in range(3):
        run('sync')
        rv = run('docker pull kuberdock/calico-node:0.22.0-kd2')
        if not rv.failed:
            break
        upd.print_log("Pull calico-node failed. Doing retry {}".format(i))
        sleep(10)
    if rv.failed:
        raise helpers.UpgradeError(
            "Can't pull calico-node image after 3 retries: {}".format(rv))

    rv = run('ETCD_AUTHORITY="{0}:2379" '
             'ETCD_SCHEME=https '
             'ETCD_CA_CERT_FILE=/etc/pki/etcd/ca.crt '
             'ETCD_CERT_FILE=/etc/pki/etcd/etcd-client.crt '
             'ETCD_KEY_FILE=/etc/pki/etcd/etcd-client.key '
             'HOSTNAME="{1}" '
             '/opt/bin/calicoctl node '
             '--ip="{2}" '
             '--node-image=kuberdock/calico-node:0.22.0-kd2'.format(
                 MASTER_IP, node_name, node_ip))
    if rv.failed:
        raise helpers.UpgradeError("Can't start calico node: {}".format(rv))
def _update_00174_upgrade_node(upd, with_testing):
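    """Upgrade the kubernetes node packages and restart node services."""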
    upd.print_log("Upgrading kubernetes")
    helpers.remote_install(K8S_NODE, with_testing)
    service, res = helpers.restart_node_kubernetes()
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'.format(
            service, res))
def upgrade_node(upd, with_testing, env, *args, **kwargs):
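    """Set up the KuberDock network plugin on the node.

    Installs the plugin dependencies, re-orders --network-plugin=kuberdock
    after --register-node=false in KUBELET_ARGS, deploys the plugin files
    into PLUGIN_DIR, installs the kuberdock-watcher systemd unit and
    re-enables the watcher service.
    """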
    upd.print_log('Setup network plugin:')

    upd.print_log('Install packages...')
    upd.print_log(helpers.remote_install('python-requests', with_testing))
    upd.print_log(helpers.remote_install('python-ipaddress', with_testing))
    upd.print_log(helpers.remote_install('ipset', with_testing))

    upd.print_log(
        run("""sed -i '/^KUBELET_ARGS/ {s|"\(.*\) --network-plugin=kuberdock"|"\\1"|}' /etc/kubernetes/kubelet"""
            ))
    upd.print_log(
        run("""sed -i '/^KUBELET_ARGS/ {s|"\(.*\) --register-node=false"|"\\1 --register-node=false --network-plugin=kuberdock"|}' /etc/kubernetes/kubelet"""
            ))

    upd.print_log(run("mkdir -p {0}/data".format(PLUGIN_DIR)))
    upd.print_log(
        put('/var/opt/kuberdock/node_network_plugin.sh',
            PLUGIN_DIR + 'kuberdock',
            mode=0755))
    upd.print_log(
        put('/var/opt/kuberdock/node_network_plugin.py',
            PLUGIN_DIR + 'kuberdock.py',
            mode=0755))
    upd.print_log(run('chmod +x {0}'.format(PLUGIN_DIR + 'kuberdock')))

    upd.print_log(
        run("cat > /etc/systemd/system/kuberdock-watcher.service << 'EOF' {0}".
            format(SERVICE_FILE)))

    run('systemctl daemon-reload')
    upd.print_log(run('systemctl reenable kuberdock-watcher'))
def _upgrade_k8s_node(upd, with_testing):
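    """Upgrade kubernetes node packages and adjust the kubelet and kube-proxy
    configs (comment out KUBELET_HOSTNAME, force userspace proxy mode).
    """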
    upd.print_log("Upgrading kubernetes")
    helpers.remote_install(K8S_NODE, with_testing)
    upd.print_log("Updating kubelet config")
    run("sed -i '/^KUBELET_HOSTNAME/s/^/#/' /etc/kubernetes/kubelet")
    run("sed -i '/^KUBE_PROXY_ARGS/ {s|--kubeconfig=/etc/kubernetes/configfile|"
        "--kubeconfig=/etc/kubernetes/configfile --proxy-mode userspace|}' /etc/kubernetes/proxy")
def _node_flannel():
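    """Remove flannel, ipset and the old kuberdock-watcher unit from the node,
    keeping the KuberDock network plugin in place for its replacement.
    """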
    for cmd in RM_FLANNEL_COMMANDS_NODES:
        run(cmd)
    # disable kuberdock-watcher but do not remove Kuberdock Network Plugin
    # because it should be replaced by new one
    run('rm -f /etc/systemd/system/kuberdock-watcher.service')
    helpers.remote_install('flannel', action='remove')
    helpers.remote_install('ipset', action='remove')
    run('systemctl daemon-reload')
def downgrade_node(upd, with_testing, env, exception, *args, **kwargs):
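    """Remove kernel-devel and downgrade flannel back to 0.5.1 on the node."""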
    upd.print_log(
        helpers.remote_install('kernel-devel', with_testing, action='remove'))
    upd.print_log('Downgrade flannel...')
    upd.print_log(
        helpers.remote_install('flannel-0.5.1',
                               with_testing,
                               action='downgrade'))
    upd.print_log(run('systemctl daemon-reload'))
    upd.print_log(run('systemctl restart flanneld'))
def _update_00191_upgrade_node(upd, with_testing, env, **kwargs):
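    """Install conntrack, reconfigure kube-proxy, start Calico on the node if
    the calico-node container is not already running, then deploy the policy
    agent and move the node config.
    """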
    helpers.remote_install(CONNTRACK_PACKAGE)
    _node_kube_proxy()

    if run('docker ps --format "{{.Names}}" | grep "^calico-node$"'
           ) != 'calico-node':
        _node_calico(upd, with_testing, env.host_string, kwargs['node_ip'])

    _node_policy_agent(env.host_string)
    _node_move_config()
def _upgrade_docker(upd, with_testing):
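    """Upgrade docker packages and normalize its logging and timeouts.

    Rewrites OPTIONS in /etc/sysconfig/docker to force the json-file log
    driver at error level, installs a systemd drop-in with longer timeouts
    and restarts docker.
    """
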
    def alter_config(line):
        if not re.match(r'OPTIONS=.*', line):
            return line

        to_remove = (r'\s*(--log-level=[^\s\']+\s*)|(-l [^\s\']+\s*)',
                     r'\s*(--log-driver=[^\s\']+)')
        for pattern in to_remove:
            line = re.sub(pattern, '', line)

        return re.sub(
            r"OPTIONS='(.*)'",
            r"OPTIONS='\1 --log-driver=json-file --log-level=error'", line)

    upd.print_log("Docker before pkg upgrade " + run("docker --version"))
    helpers.remote_install(SELINUX, with_testing)
    helpers.remote_install(DOCKER, with_testing)
    upd.print_log("Docker after pkg upgrade " + run("docker --version"))

    docker_config = StringIO()
    get('/etc/sysconfig/docker', docker_config)
    current_config = docker_config.getvalue()
    new_config = '\n'.join(
        alter_config(l) for l in current_config.splitlines())

    run("cat << EOF > /etc/sysconfig/docker\n{}\nEOF".format(new_config))

    run("mkdir -p /etc/systemd/system/docker.service.d/")
    run("cat << EOF > /etc/systemd/system/docker.service.d/timeouts.conf\n"
        "{}\nEOF".format(DOCKER_TIMEOUTS_DROPIN))

    # If we restart docker here, the rest of the node upgrade code runs
    # against the freshly upgraded docker (it is not clear whether that is
    # good or bad), and all pods/containers are restarted at this point,
    # which produces lots of events and load on the node.
    # If we don't, docker stays old until the node reboot at the end of the
    # upgrade. So the restart below could probably be skipped (known to work
    # reasonably well either way).
    run("systemctl daemon-reload")
    start_time = time.time()
    # Because of a bug in our package, docker may be running again at this
    # point (possibly due to the rpm %systemd scriptlets), so make sure it is
    # stopped again before the restart to prevent timeouts.
    upd.print_log("===== Docker.service restart timeout has been increased to "
                  "10 min, please don't interrupt it before the timeout ======")
    res = run("bash -c 'for i in $(seq 1 5); do systemctl stop docker; done; "
              "sleep 1; systemctl restart docker;'")
    upd.print_log(
        "Docker second_stop/restart took: {} secs".format(time.time() -
                                                          start_time))
    if res.failed:
        raise helpers.UpgradeError('Failed to restart docker. {}'.format(res))
    upd.print_log(run("docker --version"))
def upgrade_node(upd, with_testing, env, *args, **kwargs):
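    """Combined 00110/00111 node upgrade: fix the node hostname in the
    rsyslog configuration and update kubernetes-node, appending the kubelet
    multipliers to KUBELET_ARGS.
    """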
    run('yum --enablerepo=kube,kube-testing clean metadata')
    # 00110_update.py
    upd.print_log('Fix node hostname in rsyslog configuration...')
    run("sed -i 's/^{0} .*/{0} {1}/' {2}".format(PARAM, env.host_string, CONF))
    run('systemctl restart rsyslog')

    # 00111_update.py
    res = helpers.remote_install('kubernetes-node-1.1.3-3.el7.cloudlinux',
                                 with_testing)
    upd.print_log(res)
    if res.failed:
        raise helpers.UpgradeError('Failed to update kubernetes on node')
    get(KUBELET_PATH, KUBELET_TEMP_PATH)
    lines = []
    with open(KUBELET_TEMP_PATH) as f:
        lines = f.readlines()
    with open(KUBELET_TEMP_PATH, 'w+') as f:
        for line in lines:
            if KUBELET_ARG in line and KUBELET_MULTIPLIERS not in line:
                s = line.split('"')
                s[1] += KUBELET_MULTIPLIERS
                line = '"'.join(s)
            f.write(line)
    put(KUBELET_TEMP_PATH, KUBELET_PATH)
    os.remove(KUBELET_TEMP_PATH)
    helpers.restart_node_kubernetes(with_enable=True)
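
# The upgrade_node() variants above and below share the same download/edit/
# upload pattern for /etc/kubernetes/kubelet. A minimal sketch of that
# pattern as a reusable helper (hypothetical, not part of the original
# upgrade scripts; assumes Fabric 1.x get/put and a caller-supplied
# edit_line callback):
def _edit_remote_file(remote_path, temp_path, edit_line):
    # download the remote config to a local temp file
    get(remote_path, temp_path)
    with open(temp_path) as f:
        lines = f.readlines()
    # rewrite it line by line through the callback
    with open(temp_path, 'w') as f:
        for line in lines:
            f.write(edit_line(line))
    # upload the result back and clean up the temp file
    put(temp_path, remote_path)
    os.remove(temp_path)
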
def upgrade_node(upd, with_testing, env, *args, **kwargs):
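    """Combined 00084/00086/00088 node upgrade: refresh the kernel packages,
    update kubernetes-node with cpu-cfs-quota enabled, restart running pods
    on this host, redeploy the network plugin and reboot the node.
    """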
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00084_update.py
    yum_base_no_kube = 'yum install --disablerepo=kube -y '

    run(yum_base_no_kube + 'kernel')
    run(yum_base_no_kube + 'kernel-tools')
    run(yum_base_no_kube + 'kernel-tools-libs')
    run(yum_base_no_kube + 'kernel-headers')
    run(yum_base_no_kube + 'kernel-devel')

    run('rpm -e -v --nodeps kernel-' + old_version)
    run('yum remove -y kernel-tools-' + old_version)
    run('yum remove -y kernel-tools-libs-' + old_version)
    run('yum remove -y kernel-headers-' + old_version)
    run('yum remove -y kernel-devel-' + old_version)

    # 00086_update.py
    res = helpers.remote_install('kubernetes-node-1.1.3', with_testing)
    upd.print_log(res)
    if res.failed:
        raise helpers.UpgradeError('Failed to update kubernetes on node')
    upd.print_log("Turn on cpu-cfs-quota in kubelet")

    get(KUBELET_PATH, KUBELET_TEMP_PATH)
    lines = []
    with open(KUBELET_TEMP_PATH) as f:
        lines = f.readlines()
    with open(KUBELET_TEMP_PATH, 'w+') as f:
        for line in lines:
            if KUBELET_ARG in line and KUBELET_CPUCFS_ENABLE not in line:
                s = line.split('"')
                s[1] += KUBELET_CPUCFS_ENABLE
                line = '"'.join(s)
            f.write(line)
    put(KUBELET_TEMP_PATH, KUBELET_PATH)
    os.remove(KUBELET_TEMP_PATH)
    helpers.restart_node_kubernetes(with_enable=True)
    upd.print_log("Restart pods to apply new limits")
    pc = PodCollection()
    pods = pc.get(as_json=False)
    for pod in pods:
        if (pod.get('host') == env.host_string and
            pod['status'] == POD_STATUSES.running):
            pc.update_container(pod['id'], None)

    # 00088_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh', PLUGIN_DIR + 'kuberdock')
    put('/var/opt/kuberdock/node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py')
    run('systemctl restart kuberdock-watcher')

    helpers.reboot_node(upd)
def upgrade_node(upd, with_testing, env, *args, **kwargs):
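    """Swap the kubernetes package for kubernetes-node and switch the node to
    kubeconfig-based auth against the master over https on port 6443.
    """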
    upd.print_log('Replacing kubernetes with new kubernetes-node...')
    upd.print_log(
        helpers.remote_install(
            'kubernetes kubernetes-node-0.20.2-0.4.git323fde5.el7.centos.2',
            with_testing, 'swap'))

    upd.print_log('Replacing auth config with new...')
    put('/etc/kubernetes/configfile_for_nodes', '/etc/kubernetes/configfile')
    run("""sed -i '/^KUBELET_ARGS/ {s|--auth_path=/var/lib/kubelet/kubernetes_auth|--kubeconfig=/etc/kubernetes/configfile --register-node=false|}' /etc/kubernetes/kubelet""")

    run("""sed -i '/^KUBE_MASTER/ {s|http://|https://|}' /etc/kubernetes/config""")
    run("""sed -i '/^KUBE_MASTER/ {s|7080|6443|}' /etc/kubernetes/config""")
    run("""sed -i '/^KUBE_PROXY_ARGS/ {s|""|"--kubeconfig=/etc/kubernetes/configfile"|}' /etc/kubernetes/proxy""")
    service, res = helpers.restart_node_kubernetes(with_enable=True)
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))
    else:
        upd.print_log(res)
        print run('rm -f /var/lib/kubelet/kubernetes_auth')
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    helpers.remote_install('kubernetes-node-1.0.3', with_testing)
    helpers.restart_node_kubernetes()
def _upgrade_node_213(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Updating Docker packages...')
    helpers.remote_install(DOCKER_SELINUX, with_testing)
    helpers.remote_install(DOCKER, with_testing)
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log(helpers.remote_install('kernel-devel', with_testing))
    upd.print_log('Updating flannel...')
    upd.print_log(helpers.remote_install('flannel-0.5.3', with_testing))
    upd.print_log(run('systemctl daemon-reload'))
    upd.print_log(run('systemctl restart flanneld'))
def downgrade(cls, upd, with_testing):
    upd.print_log('Restore kuberdock-cadvisor...')
    helpers.remote_install('kuberdock-cadvisor-0.19.5', with_testing)
    helpers.run('systemctl reenable kuberdock-cadvisor')
    helpers.run('systemctl restart kuberdock-cadvisor')
def upgrade(cls, upd):
    upd.print_log('Remove kuberdock-cadvisor...')
    helpers.remote_install('kuberdock-cadvisor', action='remove')
def add_kdtools_to_node(cls, with_testing):
    helpers.remote_install('kdtools', testing=with_testing)
def _downgrade_k8s_node(upd, with_testing):
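    """Downgrade kubernetes node packages and revert the kubelet and
    kube-proxy config changes.
    """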
    upd.print_log("Downgrading kubernetes")
    helpers.remote_install('kubernetes-node kubernetes-client', with_testing, action='downgrade')
    upd.print_log("Updating kubelet config")
    run("sed -i '/^#KUBELET_HOSTNAME/s/^#//' /etc/kubernetes/kubelet")
    run("sed -i '/^KUBE_PROXY_ARGS/ {s|--proxy-mode userspace||}' /etc/kubernetes/proxy")
def downgrade_node(upd, with_testing, env, exception, *args, **kwargs):
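    """Downgrade the kubernetes-node package and restart node services."""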
    helpers.remote_install('kubernetes-node',
                           action='downgrade',
                           testing=with_testing)
    helpers.restart_node_kubernetes()