Example #1
    def _recreate_dns_pod(cls, upd, dns_pod_config):
        upd.print_log("Deleting current DNS pod.")
        user = User.filter_by(username=settings.KUBERDOCK_INTERNAL_USER).one()
        dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()
        if dns_pod:
            PodCollection(user).delete(dns_pod.id, force=True)

        # A normal upgrade runs with a healthcheck, so we can assume
        # all nodes are in the running state.
        nodes = Node.query.all()
        if not nodes:
            upd.print_log(
                "No nodes found on the cluster. The new DNS pod will be "
                "added once the 1st node is added to the cluster.")
            return

        check_internal_pod_data(dns_pod_config, user)
        dns_pod = PodCollection(user).add(dns_pod_config, skip_check=True)
        PodCollection(user).update(dns_pod['id'],
                                   {
                                       'command': 'start',
                                       'async-pod-create': False
                                   })
        # Wait for the DNS pod for up to 10 minutes.
        upd.print_log(
            'Wait until DNS pod starts. It can take up to 10 minutes...')
        wait_pod_status(dns_pod['id'], POD_STATUSES.running, 30, 20)
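
The final call above polls until the DNS pod reports the running status (a 30-second interval and 20 retries, i.e. roughly 10 minutes). The real wait_pod_status helper is not shown on this page; a minimal sketch under that assumption, reusing the PodCollection lookup seen elsewhere in these examples, could look like this:

import time


def wait_pod_status(pod_id, expected_status, interval=30, max_retries=20):
    """Hypothetical polling helper; the actual KuberDock code may differ."""
    for _ in range(max_retries):
        # Re-read the pod and stop as soon as it reaches the expected status.
        pod = PodCollection().get(pod_id, as_json=False)
        if pod['status'] == expected_status:
            return pod
        time.sleep(interval)
    raise APIError("Pod {0} did not reach status '{1}' in time".format(
        pod_id, expected_status))
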
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Add node hostname to rsyslog configuration...')

    run("sed -i '/^{0}/d; i{0} {1}' {2}".format(PARAM, env.host_string, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        # for/else: the loop finished without finding pod_name,
        # so there is nothing to update
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('rm -fr /var/lib/elasticsearch/kuberdock/nodes/*/indices/syslog-*')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Change log template in rsyslog configuration...')

    run("sed -i '/^{0}/d; a{0};{1}' {2}".format(PARAM, TEMPLATE, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/fluentd:1.4')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
Example #4
def _upgrade_node_202(upd, with_testing, env, *args, **kwargs):
    """Update log pod"""

    upd.print_log("Upgrading logs pod ...")
    ki = User.get_internal()
    pod_name = nodes.get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            PodCollection(ki).delete(pod['id'], force=True)
            break
    else:
        upd.print_log(u"Warning: logs pod '{}' not found".format(pod_name))

    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.8')
    log_pod = nodes.create_logs_pod(env.host_string, ki)

    # Also update the rsyslog config, because the log pod IP has changed.
    pod_ip = log_pod['podIP']
    put(StringIO('$LocalHostName {node_name}\n'
                 '$template LongTagForwardFormat,'
                 '"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% '
                 '%syslogtag%%msg:::sp-if-no-1st-sp%%msg%"\n'
                 '*.* @{pod_ip}:5140;LongTagForwardFormat\n'.format(
                     node_name=env.host_string, pod_ip=pod_ip)),
        RSYSLOG_CONF,
        mode=0644)
    run('systemctl restart rsyslog')

    upd.print_log("Logs pod successfully upgraded")
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00091_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("""rm -f /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | grep -v "docker-cleaner.sh" | crontab - """)

    # 00092_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh',
        '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/kuberdock')

    # 00093_update.py
    upd.print_log('Use custom log template with rsyslog...')
    run("sed -i '/^{0}/d' {1}".format(PARAM1, CONF))
    run("sed -i '/^{0}/d' {1}".format(PARAM2, CONF))
    run("sed -i '$ a{0} {1}' {2}".format(PARAM1, TEMPLATE, CONF))
    run("sed -i '$ a{0};{1}' {2}".format(PARAM2, TEMPLATE_NAME, CONF))
    run('systemctl restart rsyslog')

    # 00096_update.py
    upd.print_log('Disabling swap and backing up fstab to {0}...'.format(FSTAB_BACKUP))
    run('swapoff -a')
    run('mkdir -p /var/lib/kuberdock/backups')
    run('test -f {0} && echo "{0} already exists" || cp /etc/fstab {0}'.format(FSTAB_BACKUP))
    run("sed -r -i '/[[:space:]]+swap[[:space:]]+/d' /etc/fstab")

    # 00097_update.py
    upd.print_log('Update elasticsearch for logs...')
    upd.print_log(put('/var/opt/kuberdock/make_elastic_config.py',
                      '/var/lib/elasticsearch',
                      mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.5')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
Example #6
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00084_update.py
    yum_base_no_kube = 'yum install --disablerepo=kube -y '

    run(yum_base_no_kube + 'kernel')
    run(yum_base_no_kube + 'kernel-tools')
    run(yum_base_no_kube + 'kernel-tools-libs')
    run(yum_base_no_kube + 'kernel-headers')
    run(yum_base_no_kube + 'kernel-devel')

    run('rpm -e -v --nodeps kernel-' + old_version)
    run('yum remove -y kernel-tools-' + old_version)
    run('yum remove -y kernel-tools-libs-' + old_version)
    run('yum remove -y kernel-headers-' + old_version)
    run('yum remove -y kernel-devel-' + old_version)

    # 00086_update.py
    res = helpers.remote_install('kubernetes-node-1.1.3', with_testing)
    upd.print_log(res)
    if res.failed:
        raise helpers.UpgradeError('Failed to update kubernetes on node')
    upd.print_log("Turn on cpu-cfs-quota in kubelet")

    get(KUBELET_PATH, KUBELET_TEMP_PATH)
    lines = []
    with open(KUBELET_TEMP_PATH) as f:
        lines = f.readlines()
    with open(KUBELET_TEMP_PATH, 'w+') as f:
        for line in lines:
            if KUBELET_ARG in line and KUBELET_CPUCFS_ENABLE not in line:
                s = line.split('"')
                s[1] += KUBELET_CPUCFS_ENABLE
                line = '"'.join(s)
            f.write(line)
    put(KUBELET_TEMP_PATH, KUBELET_PATH)
    os.remove(KUBELET_TEMP_PATH)
    helpers.restart_node_kubernetes(with_enable=True)
    upd.print_log("Restart pods to apply new limits")
    pc = PodCollection()
    pods = pc.get(as_json=False)
    for pod in pods:
        if (pod.get('host') == env.host_string and
            pod['status'] == POD_STATUSES.running):
            pc.update_container(pod['id'], None)

    # 00088_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh', PLUGIN_DIR + 'kuberdock')
    put('/var/opt/kuberdock/node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py')
    run('systemctl restart kuberdock-watcher')

    helpers.reboot_node(upd)
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    # 00076_update.py
    upd.print_log('Add kernel parameters to make pod isolation work...')

    run('sed -i "/net.bridge.bridge-nf-call-ip6\?tables/d" {0}'.format(CONF))

    run("echo net.bridge.bridge-nf-call-iptables = 1 >> {0}".format(CONF))
    run("echo net.bridge.bridge-nf-call-ip6tables = 1 >> {0}".format(CONF))

    run("sysctl -w net.bridge.bridge-nf-call-iptables=1")
    run("sysctl -w net.bridge.bridge-nf-call-ip6tables=1")

    # 00079_update.py
    upd.print_log('Copy Elasticsearch config maker...')
    upd.print_log(
        put('/var/opt/kuberdock/make_elastic_config.py',
            '/var/lib/elasticsearch',
            mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/elasticsearch:1.5')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})

    # 00082_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("cat > /var/lib/kuberdock/scripts/docker-cleaner.sh << 'EOF' {0}".
        format(DOCKERCLEANER))
    run("""chmod +x /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | { cat; echo "0 */6 * * * /var/lib/kuberdock/scripts/docker-cleaner.sh"; } | crontab - """
        )
    def test_update_container(self, PodCollection):
        PodCollection().update_container.return_value = {}

        pod_id = str(uuid4())
        container_name = 'just name'

        response = self.user_open(
            PodAPIUrl.check_updates(pod_id, container_name), 'POST', {})

        self.assert200(response)

        PodCollection().update_container.assert_called_once_with(
            pod_id, container_name)
def _update_00203_upgrade(upd):
    def _update_ingress_container(config):
        config_map_cmd = "--nginx-configmap={}/{}".format(
            constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE,
            constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME)

        for c in config['containers']:
            if c['name'] == 'nginx-ingress':
                if config_map_cmd not in c['command']:
                    c['command'].append(config_map_cmd)
                    return True
        return False

    def _create_or_update_ingress_config():
        client = configmap.ConfigMapClient(KubeQuery())
        try:
            client.get(
                constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                namespace=constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)

            client.patch(
                constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                data={'server-name-hash-bucket-size': '128'},
                namespace=constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)

        except configmap.ConfigMapNotFound:
            ingress.create_ingress_nginx_configmap()

    owner = User.get_internal()
    pod = Pod.query.filter_by(name=constants.KUBERDOCK_INGRESS_POD_NAME,
                              owner=owner).first()

    if pod is None:
        upd.print_log('Ingress POD hasn\'t been created yet. Skipping')
        return

    _create_or_update_ingress_config()

    config = pod.get_dbconfig()
    if not _update_ingress_container(config):
        upd.print_log('Ingress controller RC is up-to-date. Skipping')
        return

    collection = PodCollection()

    replace_pod_config(pod, config)
    collection.patch_running_pod(
        pod.id, {'spec': {
            'containers': config['containers']
        }}, restart=True)
Example #10
def switch_app_package(billing_driver, pod_id, plan_id):
    owner = KubeUtils.get_current_user()

    # Compute the would-be pod state in a nested transaction (a dry run),
    # then roll it back; only the resulting JSON is handed to the billing
    # driver below.
    transaction = db.session.begin_nested()
    with atomic():
        old_pod = PodCollection(owner).get(pod_id, as_json=True)
        AppInstance(pod_id).update_plan(plan_id, async=False, dry_run=True)
        pod = PodCollection(owner).get(pod_id, as_json=True)
    transaction.rollback()

    data = KubeUtils._get_params()
    data['pod'] = pod
    data['oldPod'] = old_pod
    data['referer'] = data.get('referer') or ''
    return billing_driver.orderswitchapppackage(**data)
def upgrade(upd, with_testing, *args, **kwargs):
    pod_collection = PodCollection()
    for pod_dict in pod_collection.get(as_json=False):
        pod = pod_collection._get_by_id(pod_dict['id'])
        db_config = get_pod_config(pod.id)
        cluster_ip = db_config.pop('clusterIP', None)
        if cluster_ip is None:
            service_name = db_config.get('service')
            if service_name is None:
                continue
            namespace = db_config.get('namespace') or pod.id
            service = KubeQuery().get(['services', service_name], ns=namespace)
            cluster_ip = service.get('spec', {}).get('clusterIP')
            if cluster_ip is not None:
                db_config['podIP'] = cluster_ip
        replace_pod_config(pod, db_config)
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Apply new limits to nodes...')

    spaces = dict((i, (s, u)) for i, s, u in Kube.query.values(
        Kube.id, Kube.disk_space, Kube.disk_space_units))

    limits = []
    for user in User.query:
        for pod in PodCollection(user).get(as_json=False):
            if pod.get('host') != env.host_string:
                continue
            for container in pod['containers']:
                container_id = container['containerID']
                if container_id == container['name']:
                    continue
                space, unit = spaces.get(pod['kube_type'], (0, 'GB'))
                disk_space = space * container['kubes']
                disk_space_units = unit[0].lower() if unit else ''
                if disk_space_units not in ('', 'k', 'm', 'g', 't'):
                    disk_space_units = ''
                limits.append([container_id, disk_space, disk_space_units])

    if not limits:
        return

    lim_str = ' '.join(['{0}={1}{2}'.format(c, s, u) for c, s, u in limits])
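    # lim_str is a space-separated list of "<containerID>=<space><unit>"
    # pairs, e.g. "f3a9...=10g 7c1d...=5g" (container IDs are hypothetical).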
    upd.print_log(
        run('python /var/lib/kuberdock/scripts/fslimit.py {0}'.format(
            lim_str)))
    def test_delete(self, PodCollection):
        delete_id = randint(1, 1000)
        PodCollection().delete.return_value = delete_id

        response = self.user_open(PodAPIUrl.delete(delete_id), 'DELETE', {})

        self.assert200(response)
    def test_put(self, PodCollection):
        PodCollection().update.return_value = {}
        pod = self.fixtures.pod(status='unpaid', owner=self.user)
        pod_config = pod.get_dbconfig()

        response = self.user_open(PodAPIUrl.put(pod.id), 'PUT', {})
        self.assert200(response)

        # check fix-price users restrictions
        SystemSettings.set_by_name('billing_type', 'whmcs')
        self.user.count_type = 'fixed'
        self.db.session.commit()
        # only admin has permission to remove "unpaid" status
        set_paid = {'command': 'set', 'commandOptions': {'status': 'stopped'}}
        response = self.admin_open(PodAPIUrl.put(pod.id), 'PUT', set_paid)
        self.assert200(response)
        # only admin has permission to upgrade pod
        upgrade = {
            'command': 'redeploy',
            'containers': [dict(c, kubes=c['kubes'] + 1)
                           for c in pod_config['containers']]
        }
        response = self.admin_open(PodAPIUrl.put(pod.id), 'PUT', upgrade)
        self.assert200(response)
Example #15
    def _start_pod_if_needed(self, restored_pod_dict):
        saved_status = self.pod_dump['pod_data']['status']

        if saved_status == 'running':
            restored_pod_id = restored_pod_dict['id']
            restored_pod_dict = PodCollection(owner=self.owner).update(
                pod_id=restored_pod_id, data={'command': 'start'})
        return restored_pod_dict
Example #16
    def add_kdtools_to_master(cls, upd):
        # Patch RC specs
        upd.print_log('Restart pods to support ssh access...')
        pc = PodCollection()
        query = Pod.query.filter(Pod.status != 'deleted')
        user = User.filter_by(username=settings.KUBERDOCK_INTERNAL_USER).one()
        dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()
        if dns_pod:
            query = query.filter(Pod.id != dns_pod.id)
        for dbpod in query:
            pod = pc._get_by_id(dbpod.id)
            if pod.status in (POD_STATUSES.pending, POD_STATUSES.stopping,
                              POD_STATUSES.stopped):
                # Workaround for the AC-3386 issue: don't restart pending
                # pods, because that may lead to an error during pod start
                # and, as a result, a failed upgrade script.
                # Also do not restart already stopped pods.
                upd.print_log(
                    u'Skip restart of {} pod "{}". '
                    u'It may need manual restart to enable ssh access.'.format(
                        pod.status, dbpod.name))
                continue
            try:
                pc._stop_pod(pod, block=True)
            except APIError as e:
                upd.print_log(
                    u'Warning: failed to stop pod {}. It may need to be '
                    u'restarted manually.\n{}'.format(dbpod.name, e))
                continue
            pc._start_pod(pod, {'async_pod_create': False})
            upd.print_log(u'Restart pod: {}'.format(dbpod.name))
Example #17
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Upgrading db...')
    helpers.upgrade_db()

    # 00103_update.py
    upd.print_log('Enabling restart for ntpd.service on master')
    local('mkdir -p ' + SERVICE_DIR)
    local('echo -e "' + OVERRIDE_CONF + '" > ' + OVERRIDE_FILE)
    local('systemctl daemon-reload')
    local('systemctl restart ntpd')

    # 00104_update.py
    upd.print_log('Restart pods with persistent storage')
    pc = PodCollection()
    pods = Pod.query.with_entities(Pod.id).filter(Pod.persistent_disks).all()
    for pod_id in pods:
        p = pc._get_by_id(pod_id[0])
        if p.status == POD_STATUSES.running:
            pc._stop_pod(p)
            pc._collection.pop((pod_id[0], pod_id[0]))
            pc._merge()
            p = pc._get_by_id(pod_id[0])
            pc._start_pod(p)

    # 00105_update.py
    upd.print_log('Add roles {}, resources {} and its permissions...'.format(
        ROLES, RESOURCES))
    fixtures.add_permissions(roles=ROLES,
                             resources=RESOURCES,
                             permissions=PERMISSIONS)
    upd.print_log('Add MenuRoles...')
    PAUserRole = Role.query.filter(Role.rolename == 'LimitedUser').first()
    for menu_role in Role.query.filter(
            Role.rolename == 'User').first().menus_assocs:
        db.session.add(
            MenuItemRole(role=PAUserRole, menuitem_id=menu_role.menuitem_id))
    db.session.commit()

    # Fixes for celery workers launching
    upd.print_log('Updating uwsgi configuration ...')
    local('test -f "{0}" && cp "{0}" "{1}"'.format(UWSGI_KUBERDOCK_INI_DEST,
                                                   SAVE_KUBERDOCK_INI))
    local('cp "{0}" "{1}"'.format(UWSGI_KUBERDOCK_INI_SOURCE,
                                  UWSGI_KUBERDOCK_INI_DEST))
    local('chmod 644 "{0}"'.format(UWSGI_KUBERDOCK_INI_DEST))
def _update_00179_upgrade_node(env):
    nodename = env.host_string
    log_pod_name = get_kuberdock_logs_pod_name(nodename)
    internal_user = User.get_internal()
    pc = PodCollection(internal_user)
    dbpod = Pod.query.filter(Pod.owner_id == internal_user.id,
                             Pod.name == log_pod_name,
                             Pod.status != 'deleted').first()
    if not dbpod:
        raise Exception('Node {} has no logs pod. '
                        'Delete the node and try again'.format(nodename))
    pod = pc.get(dbpod.id, as_json=False)
    old_ip = '127.0.0.1'
    new_ip = pod['podIP']
    run('sed -i "s/@{old_ip}:/@{new_ip}:/g" {conf}'.format(old_ip=old_ip,
                                                           new_ip=new_ip,
                                                           conf=RSYSLOG_CONF))
    run('systemctl restart rsyslog')
Example #19
def _upgrade_199(upd, with_testing, *args, **kwargs):
    ku = User.get_internal()
    pod = db.session.query(Pod).filter_by(name=KUBERDOCK_DNS_POD_NAME,
                                          owner=ku).first()
    nodes = Node.query.all()

    if not nodes:
        upd.print_log('No nodes found, exiting')
        return

    for node in nodes:
        k8s_node = node_utils._get_k8s_node_by_host(node.hostname)
        status, _ = node_utils.get_status(node, k8s_node)
        if status == NODE_STATUSES.running:
            if pod:
                pc = PodCollection()
                pc.delete(pod.id, force=True)
            create_dns_pod(node.hostname, ku)
            return

    raise helpers.UpgradeError("Can't find any running node to run dns pod")
Example #20
def _recreate_ingress_pod_if_needed():
    kd_user = User.get_internal()
    ingress_pod = Pod.filter_by(name=KUBERDOCK_INGRESS_POD_NAME,
                                owner=kd_user).first()
    if ingress_pod or BaseDomain.query.first():
        if ingress_pod:
            PodCollection(kd_user).delete(ingress_pod.id, force=True)
        default_backend_pod = Pod.filter_by(name=KUBERDOCK_BACKEND_POD_NAME,
                                            owner=kd_user).first()
        if not default_backend_pod:
            raise Exception(
                'Nginx ingress controller pod exists, but default backend pod '
                'is not found. Something is wrong. Please contact support to '
                'help.')
        PodCollection(kd_user).delete(default_backend_pod.id, force=True)
        c = ConfigMapClient(KubeQuery())
        try:
            c.delete(name=KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                     namespace=KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)
        except ConfigMapNotFound:
            pass
        sleep(30)  # TODO: workaround; remove when AC-5470 is fixed
        ingress.prepare_ip_sharing()
Example #21
def update_log_pods(user):

    for pod in PodCollection(user).get(as_json=False):
        if pod_name_pattern.match(pod['name']):
            PodCollection(user).delete(pod['id'], force=True)

    logs_kubes = 1
    logcollector_kubes = logs_kubes
    logstorage_kubes = logs_kubes
    node_resources = kubes_to_limits(
        logs_kubes, INTERNAL_SERVICE_KUBE_TYPE)['resources']
    logs_memory_limit = node_resources['limits']['memory']
    if logs_memory_limit < KUBERDOCK_LOGS_MEMORY_LIMIT:
        logs_kubes = int(math.ceil(
            float(KUBERDOCK_LOGS_MEMORY_LIMIT) / logs_memory_limit
        ))

    if logs_kubes > 1:
        # allocate the total number of log kubes between the log collector
        # and the log storage/search containers in a 1:3 ratio
        total_kubes = logs_kubes * 2
        logcollector_kubes = int(math.ceil(float(total_kubes) / 4))
        logstorage_kubes = total_kubes - logcollector_kubes
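        # e.g. if logs_kubes has grown to 4: total_kubes = 8,
        # logcollector_kubes = 2 and logstorage_kubes = 6 (the 1:3 split)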

    for node in Node.query:
        hostname = node.hostname
        podname = get_kuberdock_logs_pod_name(hostname)
        logs_config = get_kuberdock_logs_config(
            hostname,
            podname,
            INTERNAL_SERVICE_KUBE_TYPE,
            logcollector_kubes,
            logstorage_kubes,
            MASTER_IP,
            user.token,
        )
        check_internal_pod_data(logs_config, user=user)
        logs_pod = PodCollection(user).add(logs_config)
        PodCollection(user).update(logs_pod['id'], {'command': 'start'})
    def test_start_pod_suspend(self, _run, mock_update):
        user = self.user
        url = self.item_url(self.user.id)

        ippool = IPPool(network='192.168.1.252/30')
        ippool.block_ip([u'192.168.1.252', u'192.168.1.255'])
        ippool.save()
        min_pod = {
            'restartPolicy': 'Always',
            'kube_type': 0,
            'containers': [{
                'image': 'nginx',
                'name': 'fk8i0gai',
                'args': ['nginx', '-g', 'daemon off;'],
                'ports': [{
                    'protocol': 'tcp',
                    'isPublic': True,
                    'containerPort': 80
                }],
            }]
        }

        # pod
        res = PodCollection(user).add(dict(min_pod, name='pod-1'),
                                      skip_check=False)

        data = {'command': 'start'}
        pod_url = '/'.join(['podapi', res['id']])
        response = self.open(url=pod_url,
                             method='PUT',
                             auth=self.userauth,
                             json=data)
        self.assert200(response)
        check_data = check_change_pod_data(data)
        mock_update.assert_called_once_with(res['id'], check_data)

        data = {'suspended': True}
        response = self.admin_open(url=url, method='PUT', json=data)
        self.assert200(response)

        mock_update.reset_mock()
        response = self.open(url=pod_url,
                             method='PUT',
                             auth=self.userauth,
                             json={'command': 'start'})
        self.assertAPIError(response, 403, "PermissionDenied")
        self.assertFalse(mock_update.called)
def _master_service_update():
    services = Services()
    all_svc = services.get_all()
    pc = PodCollection()
    for svc in all_svc:
        selector = svc['spec'].get('selector', {})
        labels = svc['metadata'].get('labels', {})
        if KUBERDOCK_POD_UID in selector and KUBERDOCK_TYPE not in labels:
            namespace = svc['metadata']['namespace']
            name = svc['metadata']['name']
            data = {
                'metadata': {
                    'labels': {
                        KUBERDOCK_TYPE: LOCAL_SVC_TYPE,
                        KUBERDOCK_POD_UID: namespace
                    }
                }
            }
            rv = services.patch(name, namespace, data)
            raise_if_failure(rv, "Couldn't patch local service: {}".format(rv))
            pod = pc._get_by_id(namespace)
            if pod.status == POD_STATUSES.running:
                run_service(pod)
def _remove_lifecycle_section_from_pods(upd, pods, pas):
    # PodCollection.update({'command': 'change_config'}) can't delete keys,
    # so replace the lifecycle section with a no-op command instead
    def _mock_lifecycle(config):
        for container in config['containers']:
            if not contains_origin_root(container):
                continue
            container['lifecycle'] = {
                'postStart': {
                    'exec': {
                        'command': ['/bin/true']
                    }
                }
            }
        return config

    def _set_prefill_flag(config, pod):
        prefilled_volumes = _extract_prefilled_volumes_from_pod(pas, pod)

        for container in config['containers']:
            for vm in container.get('volumeMounts', []):
                if vm['name'] in prefilled_volumes:
                    vm['kdCopyFromImage'] = True

    collection = PodCollection()

    for pod in pods:
        config = _mock_lifecycle(pod.get_dbconfig())
        _set_prefill_flag(config, pod)
        config['command'] = 'change_config'

        try:
            replace_pod_config(pod, config)
            collection.update(pod.id, config)
            upd.print_log('POD {} config patched'.format(pod.id))
        except PodNotFound:
            upd.print_log('Skipping POD {}. Not found in K8S'.format(pod.id))
def create_pod(template_id, plan_id, owner=None, start=True, **data):
    owner = check_owner(owner)

    app = PredefinedApp.get(template_id)

    if not owner.is_administrator() and has_billing() and owner.fix_price:
        raise PermissionDenied
    pod_data = app.get_filled_template_for_plan(plan_id, data)
    res = start_pod_from_yaml(pod_data, user=owner, template_id=template_id)
    if start:
        PodCollection(owner).update(res['id'], {
            'command': 'start',
            'commandOptions': {}
        })
    return res
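
A hypothetical call, just to show the shape of the API (the ids, owner and the extra keyword are made up; extra keyword arguments end up in the data passed to get_filled_template_for_plan):

new_pod = create_pod(template_id=7, plan_id=0, owner=some_user, start=True,
                     APP_NAME='my-blog')  # hypothetical template fill-in field
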
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Fix logging pods path...')
    with open('/etc/kubernetes/configfile_for_nodes') as node_configfile:
        node_config = yaml.load(node_configfile.read())
    for user in node_config['users']:
        token = user['user']['token']
        if user['name'] == 'kubelet':
            break
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    for pod in PodCollection(ki).get(as_json=False):
        if pod_name_pattern.match(pod['name']):
            PodCollection(ki).delete(pod['id'], force=True)
            logs_config = get_kuberdock_logs_config(
                pod['node'],
                pod['name'],
                pod['kube_type'],
                pod['containers'][0]['kubes'],
                pod['containers'][1]['kubes'],
                MASTER_IP,
                token,
            )
            check_new_pod_data(logs_config, user=ki)
            logs_pod = PodCollection(ki).add(logs_config)
            PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Update dns pod...')

    local(
        'etcd-ca --depot-path /root/.etcd-ca new-cert --ip "10.254.0.10" --passphrase "" etcd-dns'
    )
    local('etcd-ca --depot-path /root/.etcd-ca sign --passphrase "" etcd-dns')
    local(
        'etcd-ca --depot-path /root/.etcd-ca export etcd-dns --insecure --passphrase "" | tar -xf -'
    )
    local('mv etcd-dns.crt /etc/pki/etcd/etcd-dns.crt')
    local('mv etcd-dns.key.insecure /etc/pki/etcd/etcd-dns.key')

    user = User.filter_by(username=KUBERDOCK_INTERNAL_USER).one()

    dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()

    if dns_pod:
        PodCollection(user).delete(dns_pod.id, force=True)

    dns_config = get_dns_pod_config()
    check_internal_pod_data(dns_config, user)
    dns_pod = PodCollection(user).add(dns_config, skip_check=True)
    PodCollection(user).update(dns_pod['id'], {'command': 'start'})
def _update_pv_mount_paths(upd):
    """Migration k8s 1.1.3 -> 1.2.4 requires removing :Z from mount paths"""
    # Patch RC specs
    upd.print_log("Updating Pod PV mount paths")

    def remove_trailing_z(pod_config):
        updated_config = copy.deepcopy(pod_config)
        for container in updated_config['containers']:
            for mount in container['volumeMounts']:
                mp = mount['mountPath']
                if mp.endswith(":z") or mp.endswith(":Z"):
                    mount['mountPath'] = mount['mountPath'][:-2]
        return updated_config

    pc = PodCollection()
    query = Pod.query.filter(Pod.status != POD_STATUSES.deleted)
    for dbpod in query:
        updated_config = remove_trailing_z(dbpod.get_dbconfig())

        # Update config
        kapi_helpers.replace_pod_config(dbpod, updated_config)
        pc.patch_running_pod(dbpod.id, {'spec': updated_config}, restart=True)

        upd.print_log(u'Successfully updated pod: {}'.format(dbpod.name))
Example #29
def update_dns_pod(user):
    dns_pod = db.session.query(Pod).filter(
        Pod.name == 'kuberdock-dns',
        Pod.owner_id == user.id
    ).first()
    if dns_pod:
        pods = PodCollection(user)
        pods.delete(dns_pod.id, force=True)

    dns_config = get_dns_pod_config()
    check_internal_pod_data(dns_config, user)
    dns_pod = PodCollection(user).add(dns_config, skip_check=True)
    PodCollection(user).update(dns_pod['id'], {'command': 'start'})
Example #30
    def _restore_pod(self, pod_dump):
        pod_collection = PodCollection(owner=self.owner)
        restored_pod_dict = pod_collection.add_from_dump(pod_dump)
        return restored_pod_dict
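
Examples #15 and #30 look like two halves of the same restore flow. A minimal sketch of a driver that wires them together, assuming both methods live on the same restore helper class (the restore name is hypothetical):

    def restore(self):
        # Recreate the pod from its dump, then start it again only if it was
        # running when the dump was taken (see _start_pod_if_needed above).
        restored_pod_dict = self._restore_pod(self.pod_dump)
        return self._start_pod_if_needed(restored_pod_dict)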