def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Change log template in rsyslog configuration...')
    # Drop any existing PARAM line, then append "PARAM;TEMPLATE".
    run("sed -i '/^{0}/d; a{0};{1}' {2}".format(PARAM, TEMPLATE, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    # Find this node's logging pod; if it is absent there is nothing to do.
    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    # Recreate the logging pod with the same kube sizing as the old one.
    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)
    run('docker pull kuberdock/fluentd:1.4')
    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
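# A minimal sketch (not from the source) of how a node-upgrade callable like
# the one above can be driven with Fabric, assuming the update framework
# supplies Fabric's global `env` so that `env.host_string` names the node
# being upgraded. `run_upgrade_on_node` and `host` are hypothetical.
from fabric.api import env, settings

def run_upgrade_on_node(upd, host, with_testing=False):
    # `host` is a "user@hostname" string for the target node.
    with settings(host_string=host):
        upgrade_node(upd, with_testing, env)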
def _recreate_dns_pod(cls, upd, dns_pod_config):
    upd.print_log("Deleting current DNS pod.")
    user = User.filter_by(username=settings.KUBERDOCK_INTERNAL_USER).one()
    dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()
    if dns_pod:
        PodCollection(user).delete(dns_pod.id, force=True)

    # Since a usual upgrade is done with a healthcheck, we can assume all
    # nodes are in the running state.
    nodes = Node.query.all()
    if not nodes:
        upd.print_log(
            "No nodes found on the cluster. The new DNS pod will be "
            "added once the 1st node is added to the cluster.")
        return

    check_internal_pod_data(dns_pod_config, user)
    dns_pod = PodCollection(user).add(dns_pod_config, skip_check=True)
    PodCollection(user).update(dns_pod['id'], {
        'command': 'start',
        'async-pod-create': False
    })

    # Wait for the DNS pod for up to 10 minutes.
    upd.print_log(
        'Wait until DNS pod starts. It can take up to 10 minutes...')
    wait_pod_status(dns_pod['id'], POD_STATUSES.running, 30, 20)
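# A minimal sketch (an assumption, not the project's actual helper) of what a
# wait_pod_status poller with the shape used above could look like. The 30
# and 20 are read here as a 30-second interval and 20 retries, which matches
# the "up to 10 minutes" log message; the pod owner is passed explicitly, and
# the 'status' field of the pod dict is also an assumption.
import time

def wait_pod_status_sketch(user, pod_id, wait_status, interval, max_retries):
    for _ in range(max_retries):
        pod = PodCollection(user).get(pod_id, as_json=False)
        if pod['status'] == wait_status:
            return pod
        time.sleep(interval)
    raise RuntimeError(
        'Pod {0} did not reach status {1}'.format(pod_id, wait_status))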
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Add node hostname to rsyslog configuration...')
    # Drop any existing PARAM line, then insert "PARAM <hostname>".
    run("sed -i '/^{0}/d; i{0} {1}' {2}".format(PARAM, env.host_string, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    # Find this node's logging pod; if it is absent there is nothing to do.
    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)
    # Drop the old syslog indices so Elasticsearch starts clean.
    run('rm -fr /var/lib/elasticsearch/kuberdock/nodes/*/indices/syslog-*')
    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
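# A hedged post-upgrade check (not part of the script): once the rebuilt
# logging pod is up, the syslog-* indices removed above should no longer be
# listed by Elasticsearch. Port 9200 is the stock Elasticsearch HTTP port and
# is an assumption here.
run("curl -s localhost:9200/_cat/indices | grep syslog- "
    "|| echo 'no syslog indices left'")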
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00091_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("""rm -f /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | grep -v "docker-cleaner.sh" | crontab - """)

    # 00092_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh',
        '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/kuberdock')

    # 00093_update.py
    upd.print_log('Use custom log template with rsyslog...')
    run("sed -i '/^{0}/d' {1}".format(PARAM1, CONF))
    run("sed -i '/^{0}/d' {1}".format(PARAM2, CONF))
    run("sed -i '$ a{0} {1}' {2}".format(PARAM1, TEMPLATE, CONF))
    run("sed -i '$ a{0};{1}' {2}".format(PARAM2, TEMPLATE_NAME, CONF))
    run('systemctl restart rsyslog')

    # 00096_update.py
    upd.print_log(
        'Disabling swap and backing up fstab to {0}...'.format(FSTAB_BACKUP))
    run('swapoff -a')
    run('mkdir -p /var/lib/kuberdock/backups')
    run('test -f {0} && echo "{0} already exists" || '
        'cp /etc/fstab {0}'.format(FSTAB_BACKUP))
    run("sed -r -i '/[[:space:]]+swap[[:space:]]+/d' /etc/fstab")

    # 00097_update.py
    upd.print_log('Update elasticsearch for logs...')
    upd.print_log(put('/var/opt/kuberdock/make_elastic_config.py',
                      '/var/lib/elasticsearch',
                      mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    # Find this node's logging pod; if it is absent there is nothing to do.
    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)
    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.5')
    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
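# A hedged rollback sketch (not part of the upgrade): the fstab backup taken
# in the 00096 step above makes it possible to restore swap if a downgrade
# ever needs it.
run('test -f {0} && cp {0} /etc/fstab && swapon -a'.format(FSTAB_BACKUP))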
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    # 00076_update.py
    upd.print_log('Add kernel parameters to make pod isolation work...')
    run('sed -i "/net.bridge.bridge-nf-call-ip6\?tables/d" {0}'.format(CONF))
    run("echo net.bridge.bridge-nf-call-iptables = 1 >> {0}".format(CONF))
    run("echo net.bridge.bridge-nf-call-ip6tables = 1 >> {0}".format(CONF))
    run("sysctl -w net.bridge.bridge-nf-call-iptables=1")
    run("sysctl -w net.bridge.bridge-nf-call-ip6tables=1")

    # 00079_update.py
    upd.print_log('Copy Elasticsearch config maker...')
    upd.print_log(
        put('/var/opt/kuberdock/make_elastic_config.py',
            '/var/lib/elasticsearch',
            mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)
    run('docker pull kuberdock/elasticsearch:1.5')
    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})

    # 00082_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("cat > /var/lib/kuberdock/scripts/docker-cleaner.sh << 'EOF' {0}"
        .format(DOCKERCLEANER))
    run("""chmod +x /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | { cat; echo "0 */6 * * * /var/lib/kuberdock/scripts/docker-cleaner.sh"; } | crontab - """)
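# A hedged, idempotent variant (an assumption, not the source's approach) of
# the crontab append in the 00082 step above: filtering out any existing
# docker-cleaner.sh entry first keeps re-runs of this update from stacking
# duplicate cron jobs.
run('crontab -l | grep -v "docker-cleaner.sh" | '
    '{ cat; echo "0 */6 * * * /var/lib/kuberdock/scripts/docker-cleaner.sh"; }'
    ' | crontab -')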
def update_dns_pod(user):
    dns_pod = db.session.query(Pod).filter(
        Pod.name == 'kuberdock-dns',
        Pod.owner_id == user.id
    ).first()
    if dns_pod:
        pods = PodCollection(user)
        pods.delete(dns_pod.id, force=True)

    dns_config = get_dns_pod_config()
    check_internal_pod_data(dns_config, user)
    dns_pod = PodCollection(user).add(dns_config, skip_check=True)
    PodCollection(user).update(dns_pod['id'], {'command': 'start'})
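# A minimal usage sketch (an assumption): these helpers operate on the
# internal service user, matching the lookups used elsewhere in these
# scripts.
user = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
if user is not None:
    update_dns_pod(user)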
def update_log_pods(user):
    # Delete every existing logging pod; they are recreated per node below.
    for pod in PodCollection(user).get(as_json=False):
        if pod_name_pattern.match(pod['name']):
            PodCollection(user).delete(pod['id'], force=True)

    logs_kubes = 1
    logcollector_kubes = logs_kubes
    logstorage_kubes = logs_kubes
    node_resources = kubes_to_limits(
        logs_kubes, INTERNAL_SERVICE_KUBE_TYPE)['resources']
    logs_memory_limit = node_resources['limits']['memory']
    if logs_memory_limit < KUBERDOCK_LOGS_MEMORY_LIMIT:
        logs_kubes = int(math.ceil(
            float(KUBERDOCK_LOGS_MEMORY_LIMIT) / logs_memory_limit
        ))

    if logs_kubes > 1:
        # Allocate the total log kubes between the log collector and the
        # log storage/search containers in a 1 : 3 ratio.
        total_kubes = logs_kubes * 2
        logcollector_kubes = int(math.ceil(float(total_kubes) / 4))
        logstorage_kubes = total_kubes - logcollector_kubes

    for node in Node.query:
        hostname = node.hostname
        podname = get_kuberdock_logs_pod_name(hostname)
        logs_config = get_kuberdock_logs_config(
            hostname,
            podname,
            INTERNAL_SERVICE_KUBE_TYPE,
            logcollector_kubes,
            logstorage_kubes,
            MASTER_IP,
            user.token,
        )
        check_internal_pod_data(logs_config, user=user)
        logs_pod = PodCollection(user).add(logs_config)
        PodCollection(user).update(logs_pod['id'], {'command': 'start'})
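# A worked example of the sizing above (the 64MiB-per-kube figure is
# illustrative, not from the source): with a 64MiB kube memory limit and a
# KUBERDOCK_LOGS_MEMORY_LIMIT of 256MiB,
#   logs_kubes         = ceil(256 / 64) = 4
#   total_kubes        = 4 * 2          = 8
#   logcollector_kubes = ceil(8 / 4)    = 2
#   logstorage_kubes   = 8 - 2          = 6
# i.e. the collector and storage containers end up in the intended 1 : 3
# split.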
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Update dns pod...')
    # Issue and sign a certificate for the DNS service IP, then install the
    # exported pair under /etc/pki/etcd.
    local('etcd-ca --depot-path /root/.etcd-ca new-cert --ip "10.254.0.10" '
          '--passphrase "" etcd-dns')
    local('etcd-ca --depot-path /root/.etcd-ca sign --passphrase "" etcd-dns')
    local('etcd-ca --depot-path /root/.etcd-ca export etcd-dns --insecure '
          '--passphrase "" | tar -xf -')
    local('mv etcd-dns.crt /etc/pki/etcd/etcd-dns.crt')
    local('mv etcd-dns.key.insecure /etc/pki/etcd/etcd-dns.key')

    user = User.filter_by(username=KUBERDOCK_INTERNAL_USER).one()
    dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()
    if dns_pod:
        PodCollection(user).delete(dns_pod.id, force=True)

    dns_config = get_dns_pod_config()
    check_internal_pod_data(dns_config, user)
    dns_pod = PodCollection(user).add(dns_config, skip_check=True)
    PodCollection(user).update(dns_pod['id'], {'command': 'start'})
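# A hedged sanity check (not in the source): verify that the certificate and
# key installed above actually pair up, by comparing their moduli.
local('openssl x509 -noout -modulus -in /etc/pki/etcd/etcd-dns.crt | md5sum')
local('openssl rsa -noout -modulus -in /etc/pki/etcd/etcd-dns.key | md5sum')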