def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Change log template in rsyslog configuration...')

    run("sed -i '/^{0}/d; a{0};{1}' {2}".format(PARAM, TEMPLATE, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/fluentd:1.4')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Add node hostname to rsyslog configuration...')

    run("sed -i '/^{0}/d; i{0} {1}' {2}".format(PARAM, env.host_string, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('rm -fr /var/lib/elasticsearch/kuberdock/nodes/*/indices/syslog-*')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
def check_nodes():
    msg = []
    states = {}
    try:
        states = get_nodes_state()
        for node, state in states.items():
            node_msg = []
            # Use a distinct name for the per-check value so it does not
            # shadow the per-node 'state' dict being iterated.
            for key, value in state.items():
                if not value or not isinstance(value, bool):
                    node_msg.append(MESSAGES[key].format(value))
            if node_msg:
                msg.append("Node {} errors:".format(node))
                msg.extend(node_msg)
    except (SystemExit, Exception) as e:
        msg.append("Can't get nodes list because of {}".format(e.message))
    pendings = [
        get_kuberdock_logs_pod_name(node) for node, state in states.items()
        if NODE_STATUSES.pending in state
    ]
    if states and len(pendings) != len(states):
        try:
            pod_states = get_internal_pods_state()
            stopped = [
                pod for pod, status in pod_states.items()
                if pod not in pendings and not status
            ]
            if stopped:
                msg.append(MESSAGES['pods'].format(', '.join(stopped)))
        except (SystemExit, Exception) as e:
            msg.append("Can't get internal pods states because of {}".format(
                e.message))
    return os.linesep.join(msg)
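
A minimal sketch of the inputs check_nodes() consumes, with purely
illustrative check names and messages (the real MESSAGES, NODE_STATUSES and
get_nodes_state() are defined elsewhere in the module); a node whose state
dict contains the NODE_STATUSES.pending key is treated as still deploying:

# Hypothetical shapes only: per-node state maps a check name to True (OK)
# or to a falsy/diagnostic value, which is formatted into MESSAGES[key].
MESSAGES = {
    'ntp': "time is not synced: {}",
    'disk': "disk check failed: {}",
    'pods': "Internal pods {} are not running",
}

def get_nodes_state():
    return {
        'node1.example.com': {'ntp': True, 'disk': '91% used'},
        'node2.example.com': {'ntp': False, 'disk': True},
    }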
def _upgrade_node_202(upd, with_testing, env, *args, **kwargs):
    """Update log pod"""

    upd.print_log("Upgrading logs pod ...")
    ki = User.get_internal()
    pod_name = nodes.get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            PodCollection(ki).delete(pod['id'], force=True)
            break
    else:
        upd.print_log(u"Warning: logs pod '{}' not found".format(pod_name))

    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.8')
    log_pod = nodes.create_logs_pod(env.host_string, ki)

    # Also update the rsyslog config, because the log pod IP has changed.
    pod_ip = log_pod['podIP']
    put(StringIO('$LocalHostName {node_name}\n'
                 '$template LongTagForwardFormat,'
                 '"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% '
                 '%syslogtag%%msg:::sp-if-no-1st-sp%%msg%"\n'
                 '*.* @{pod_ip}:5140;LongTagForwardFormat\n'.format(
                     node_name=env.host_string, pod_ip=pod_ip)),
        RSYSLOG_CONF,
        mode=0644)
    run('systemctl restart rsyslog')

    upd.print_log("Logs pod successfully upgraded")
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00091_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("""rm -f /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | grep -v "docker-cleaner.sh" | crontab - """)

    # 00092_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh',
        '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/kuberdock')

    # 00093_update.py
    upd.print_log('Use custom log template with rsyslog...')
    run("sed -i '/^{0}/d' {1}".format(PARAM1, CONF))
    run("sed -i '/^{0}/d' {1}".format(PARAM2, CONF))
    run("sed -i '$ a{0} {1}' {2}".format(PARAM1, TEMPLATE, CONF))
    run("sed -i '$ a{0};{1}' {2}".format(PARAM2, TEMPLATE_NAME, CONF))
    run('systemctl restart rsyslog')

    # 00096_update.py
    upd.print_log('Disabling swap and backing up fstab to {0}...'.format(FSTAB_BACKUP))
    run('swapoff -a')
    run('mkdir -p /var/lib/kuberdock/backups')
    run('test -f {0} && echo "{0} is already exists" || cp /etc/fstab {0}'.format(FSTAB_BACKUP))
    run("sed -r -i '/[[:space:]]+swap[[:space:]]+/d' /etc/fstab")

    # 00097_update.py
    upd.print_log('Update elasticsearch for logs...')
    upd.print_log(put('/var/opt/kuberdock/make_elastic_config.py',
                      '/var/lib/elasticsearch',
                      mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.5')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    # 00076_update.py
    upd.print_log('Add kernel parameters to make pod isolation work...')

    run('sed -i "/net.bridge.bridge-nf-call-ip6\?tables/d" {0}'.format(CONF))

    run("echo net.bridge.bridge-nf-call-iptables = 1 >> {0}".format(CONF))
    run("echo net.bridge.bridge-nf-call-ip6tables = 1 >> {0}".format(CONF))

    run("sysctl -w net.bridge.bridge-nf-call-iptables=1")
    run("sysctl -w net.bridge.bridge-nf-call-ip6tables=1")

    # 00079_update.py
    upd.print_log('Copy Elasticsearch config maker...')
    upd.print_log(
        put('/var/opt/kuberdock/make_elastic_config.py',
            '/var/lib/elasticsearch',
            mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/elasticsearch:1.5')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})

    # 00082_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("cat > /var/lib/kuberdock/scripts/docker-cleaner.sh << 'EOF' {0}".
        format(DOCKERCLEANER))
    run("""chmod +x /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | { cat; echo "0 */6 * * * /var/lib/kuberdock/scripts/docker-cleaner.sh"; } | crontab - """
        )
def _update_00179_upgrade_node(env):
    nodename = env.host_string
    log_pod_name = get_kuberdock_logs_pod_name(nodename)
    internal_user = User.get_internal()
    pc = PodCollection(internal_user)
    dbpod = Pod.query.filter(Pod.owner_id == internal_user.id,
                             Pod.name == log_pod_name,
                             Pod.status != 'deleted').first()
    if not dbpod:
        raise Exception('Node {} has no logs pod. '
                        'Delete the node and try again'.format(nodename))
    pod = pc.get(dbpod.id, as_json=False)
    old_ip = '127.0.0.1'
    new_ip = pod['podIP']
    run('sed -i "s/@{old_ip}:/@{new_ip}:/g" {conf}'.format(old_ip=old_ip,
                                                           new_ip=new_ip,
                                                           conf=RSYSLOG_CONF))
    run('systemctl restart rsyslog')
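
The substitution retargets the forwarding rule in RSYSLOG_CONF from the old
pod IP to the new one; with an illustrative cluster IP it turns

*.* @127.0.0.1:5140;LongTagForwardFormat

into

*.* @10.254.1.5:5140;LongTagForwardFormat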
def update_log_pods(user):

    for pod in PodCollection(user).get(as_json=False):
        if pod_name_pattern.match(pod['name']):
            PodCollection(user).delete(pod['id'], force=True)

    logs_kubes = 1
    logcollector_kubes = logs_kubes
    logstorage_kubes = logs_kubes
    node_resources = kubes_to_limits(
        logs_kubes, INTERNAL_SERVICE_KUBE_TYPE)['resources']
    logs_memory_limit = node_resources['limits']['memory']
    if logs_memory_limit < KUBERDOCK_LOGS_MEMORY_LIMIT:
        logs_kubes = int(math.ceil(
            float(KUBERDOCK_LOGS_MEMORY_LIMIT) / logs_memory_limit
        ))

    if logs_kubes > 1:
        # split the total log kubes between the log collector and the log
        # storage/search containers in a 1 : 3 ratio
        total_kubes = logs_kubes * 2
        logcollector_kubes = int(math.ceil(float(total_kubes) / 4))
        logstorage_kubes = total_kubes - logcollector_kubes

    for node in Node.query:
        hostname = node.hostname
        podname = get_kuberdock_logs_pod_name(hostname)
        logs_config = get_kuberdock_logs_config(
            hostname,
            podname,
            INTERNAL_SERVICE_KUBE_TYPE,
            logcollector_kubes,
            logstorage_kubes,
            MASTER_IP,
            user.token,
        )
        check_internal_pod_data(logs_config, user=user)
        logs_pod = PodCollection(user).add(logs_config)
        PodCollection(user).update(logs_pod['id'], {'command': 'start'})
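
A worked example of the sizing above, assuming (hypothetically) that one
kube of INTERNAL_SERVICE_KUBE_TYPE maps to a 64 MiB memory limit and that
KUBERDOCK_LOGS_MEMORY_LIMIT is 256 MiB:

# logs_kubes         = ceil(256.0 / 64) = 4
# total_kubes        = 4 * 2            = 8
# logcollector_kubes = ceil(8 / 4.0)    = 2    # one share
# logstorage_kubes   = 8 - 2            = 6    # three shares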
    def test_execute_es_query(self, es_mock):
        """Test elasticsearch_utils.execute_es_query function."""

        # Add two log pod configs plus two corresponding nodes
        internal_user = User.get_internal()
        pod_id1 = str(uuid4())
        service1 = 'srv1'
        namespace1 = 'ns1'
        pod_id2 = str(uuid4())
        service2 = 'srv2'
        namespace2 = 'ns2'

        host1 = 'h1'
        host2 = 'h2'

        kube_id = Kube.get_default_kube_type()

        pod1 = Pod(id=pod_id1,
                   name=get_kuberdock_logs_pod_name(host1),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service1,
                       "namespace": namespace1
                   }),
                   status='RUNNING')
        pod2 = Pod(id=pod_id2,
                   name=get_kuberdock_logs_pod_name(host2),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service2,
                       "namespace": namespace2
                   }),
                   status='RUNNING')
        db.session.add_all([pod1, pod2])
        db.session.commit()

        node1 = Node(ip='123.123.123',
                     hostname=host1,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        node2 = Node(ip='123.123.124',
                     hostname=host2,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        db.session.add_all([node1, node2])
        db.session.commit()

        size = 123
        index = '1234qwerty'
        query = None
        sort = None
        search_result = {'hits': {'total': 333, 'hits': [1, 2, 3]}}
        search_mock = es_mock.return_value.search
        search_mock.return_value = search_result
        res = elasticsearch_utils.execute_es_query(index, query, size, sort)
        self.assertEqual(
            res, {
                'total': search_result['hits']['total'],
                'hits': search_result['hits']['hits'],
            })
        prefix1 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace1 + '/services/' + service1 + ':9200/'
        prefix2 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace2 + '/services/' + service2 + ':9200/'
        es_mock.assert_called_once_with([
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix1,
            },
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix2,
            },
        ])
        search_mock.assert_called_once_with(index=index, body={'size': size})

        query = {'a': 1}
        sort = {'b': 2}
        elasticsearch_utils.execute_es_query(index, query, size, sort)
        search_mock.assert_called_with(index=index,
                                       body={
                                           'size': size,
                                           'sort': sort,
                                           'query': query
                                       })
        search_mock.side_effect = RequestException('!!!')
        with self.assertRaises(elasticsearch_utils.LogsError):
            elasticsearch_utils.execute_es_query(index, query, size, sort)
# You should have received a copy of the GNU General Public License
# along with KuberDock; if not, see <http://www.gnu.org/licenses/>.

import re
import yaml

from kubedock.kapi.nodes import (
    get_kuberdock_logs_config,
    get_kuberdock_logs_pod_name,
)
from kubedock.kapi.podcollection import PodCollection
from kubedock.settings import KUBERDOCK_INTERNAL_USER, MASTER_IP
from kubedock.users.models import User
from kubedock.validation import check_new_pod_data

# Matches a logs pod name for any node: the hostname placeholder in the
# name template is replaced with a non-greedy wildcard.
pod_name_pattern = re.compile(get_kuberdock_logs_pod_name('.+?'))


def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Fix logging pods path...')
    with open('/etc/kubernetes/configfile_for_nodes') as node_configfile:
        node_config = yaml.safe_load(node_configfile)
    # Pick the kubelet user's token; only assign it once the right entry
    # is found, so a missing entry fails loudly instead of silently using
    # another user's token.
    for user in node_config['users']:
        if user['name'] == 'kubelet':
            token = user['user']['token']
            break
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    for pod in PodCollection(ki).get(as_json=False):
        if pod_name_pattern.match(pod['name']):
            PodCollection(ki).delete(pod['id'], force=True)
            logs_config = get_kuberdock_logs_config(