Example #1
def _update_00191_upgrade(upd, calico_network):
    etcd1 = helpers.local('uname -n')
    _master_etcd_cert(etcd1)
    _master_etcd_conf(etcd1)
    helpers.restart_service('etcd')

    _master_docker(upd)
    _master_firewalld()
    _master_k8s_node()

    if helpers.local('docker ps --format "{{.Names}}" | grep "^calico-node$"'
                     ) != 'calico-node':
        _master_calico(upd, calico_network)

    _master_k8s_extensions()
    helpers.restart_master_kubernetes()
    helpers.local('echo "{0}" | kubectl create -f -'.format(_K8S_EXTENSIONS))
    # We need to restart here again, because kubernetes sometimes doesn't
    # accept extensions on the fly.
    helpers.restart_master_kubernetes()
    _master_network_policy(upd, calico_network)

    _master_dns_policy()
    _master_pods_policy()

    _master_service_update()
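
The calico-node check above assumes helpers.local returns the command's stdout with trailing whitespace stripped, so the result can be compared directly against 'calico-node'. A minimal sketch of such a wrapper (hypothetical; the project's real helper may behave differently):

import subprocess


def local(cmd):
    # Run a shell command and return its stripped stdout, matching the
    # contract the calico-node check above relies on.
    return subprocess.check_output(cmd, shell=True).strip()
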
Example #2
def do_cycle_updates(with_testing=False):
    """
    :return: False if no errors, otherwise the name of the script that failed
    """
    # TODO refactor to 'get next update'
    to_apply = get_available_updates()
    last = get_applied_updates()
    if last:
        # Start from last failed update
        to_apply = to_apply[to_apply.index(last[-1]) + 1:]
    if not to_apply:
        helpers.restart_service(settings.KUBERDOCK_SERVICE)
        helpers.set_maintenance(False)
        print 'There are no new upgrade scripts to apply. ' + \
              SUCCESSFUL_UPDATE_MESSAGE
        return False

    is_failed = False
    for upd in to_apply:
        if not run_script(upd, with_testing):
            is_failed = upd
            print >> sys.stderr, "Update {0} has failed.".format(is_failed)
            break

    if not is_failed:
        helpers.close_all_sessions()
        print 'All update scripts are applied.'
    return is_failed
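
The resume logic above continues from the element right after the last applied script. A minimal standalone illustration of that slicing, using made-up script names:

to_apply = ['00190_update.py', '00191_update.py', '00192_update.py']
last = ['00190_update.py', '00191_update.py']  # already applied
# Skip everything up to and including the last applied script.
to_apply = to_apply[to_apply.index(last[-1]) + 1:]
assert to_apply == ['00192_update.py']
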
Example #3
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Update nginx config...')

    copyfile(nginx_path, nginx_path + '.disabled')
    copystat(nginx_path, nginx_path + '.disabled')
    copyfile(kd_path, kd_path + '.disabled')
    copystat(kd_path, kd_path + '.disabled')

    copyfile('/var/opt/kuberdock/conf/nginx.conf', nginx_path)
    copyfile('/var/opt/kuberdock/conf/kuberdock-ssl.conf', kd_path)
    helpers.restart_service('nginx')
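
Each config is kept as a .disabled copy before being overwritten: copyfile duplicates the contents and copystat carries over mode and timestamps. A small helper capturing that pattern (a sketch, not part of the project):

from shutil import copyfile, copystat


def _backup(path):
    # Keep an exact copy (contents plus mode/timestamps) next to the original.
    backup = path + '.disabled'
    copyfile(path, backup)
    copystat(path, backup)
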
Example #4
def _master_docker(upd):
    helpers.local("mkdir -p /etc/systemd/system/docker.service.d/")
    helpers.local(
        "cat << EOF > /etc/systemd/system/docker.service.d/timeouts.conf\n"
        "{}\nEOF".format(DOCKER_TIMEOUTS_DROPIN))
    helpers.local('systemctl daemon-reload')
    upd.print_log(helpers.local('systemctl reenable docker'))
    upd.print_log(helpers.restart_service('docker'))
    # Just to be sure and see output in logs:
    upd.print_log(helpers.local('docker info'))
Example #5
def post_upgrade(for_successful=True, reason=None):  # teardown
    """
    Teardown after upgrade.
    :return: True if any error occurred, otherwise False
    """
    if helpers.set_evicting_timeout('5m0s'):
        print >> sys.stderr, "Can't bring back old pods evicting interval."
        for_successful = False
    if for_successful:
        helpers.restart_service(settings.KUBERDOCK_SERVICE)
        helpers.set_maintenance(False)
        redis = ConnectionPool.get_connection()
        # We should clear the cache for licensing info after a successful upgrade:
        redis.delete('KDCOLLECTION')
        print SUCCESSFUL_UPDATE_MESSAGE
        health_check(post_upgrade_check=True)
    else:
        if reason is not None:
            print >> sys.stderr, reason
        print >> sys.stderr, FAILED_MESSAGE
Example #6
def downgrade(upd, with_testing, exception, *args, **kwargs):
    _downgrade_k8s_master(upd, with_testing)
    service, res = helpers.restart_master_kubernetes()
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))

    _downgrade_etcd(upd)

    # Restart KD to make sure new libs are running
    res = helpers.restart_service(settings.KUBERDOCK_SERVICE)
    if res != 0:
        raise helpers.UpgradeError('Failed to restart KuberDock')
    helpers.downgrade_db(revision='3c832810a33c')
Example #7
def upgrade(upd, with_testing, *args, **kwargs):
    _upgrade_k8s_master(upd, with_testing)
    service, res = helpers.restart_master_kubernetes()
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))

    _upgrade_etcd(upd)

    # Restart KD to make sure new libs are running
    res = helpers.restart_service(settings.KUBERDOCK_SERVICE)
    if res != 0:
        raise helpers.UpgradeError('Failed to restart KuberDock')

    helpers.upgrade_db()
    _update_pv_mount_paths(upd)
Example #8
def _master_k8s_node():
    helpers.install_package(CONNTRACK_PACKAGE)
    helpers.local('systemctl reenable kube-proxy')
    helpers.restart_service('kube-proxy')
Example #9
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Restarting nginx...')
    helpers.restart_service('nginx')
Example #10
def upgrade(cls, upd, with_testing):
    helpers.restart_service('nginx')
Example #11
def downgrade(upd, with_testing, exception, *args, **kwargs):
    upd.print_log('Rollback nginx config...')
    copyfile(nginx_path + '.disabled', nginx_path)
    copyfile(kd_path + '.disabled', kd_path)
    helpers.restart_service('nginx')
Example #12
def upgrade(upd, with_testing, *args, **kwargs):
    # 00085_update.py
    upd.print_log('Add default Persistent Disks size in pods config...')
    pods = Pod.query.all()
    for pod in pods:
        upd.print_log('Processing pod {0}'.format(pod.name))
        config = pod.get_dbconfig()
        config['volumes_public'] = with_size(config.get('volumes_original', []), pod.owner_id)
        pod.set_dbconfig(config, save=False)
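    # Second pass: drop the old 'volumes_original' key only after every pod
    # has 'volumes_public' set; the single commit below persists both changes.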
    for pod in pods:
        config = pod.get_dbconfig()
        config.pop('volumes_original', None)
        pod.set_dbconfig(config, save=False)
    db.session.commit()

    # 00086_update.py
    upd.print_log('Update kubes to hard limits')
    internal = Kube.get_by_name('Internal service')
    if internal:
        internal.cpu = 0.02
        internal.save()
    small = Kube.get_by_name('Small')
    if small:
        small.cpu = 0.05
        small.save()
    standard = Kube.get_by_name('Standard')
    if standard:
        standard.cpu = 0.25
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.5
        high.save()
    upd.print_log('Setup k8s2etcd middleware service')
    upd.print_log(
        helpers.local(
            "cat > /etc/systemd/system/kuberdock-k8s2etcd.service << 'EOF' {0}"
            .format(SERVICE_FILE))
    )

    helpers.local('systemctl daemon-reload')
    upd.print_log(helpers.local('systemctl reenable kuberdock-k8s2etcd'))
    upd.print_log(helpers.local('systemctl restart kuberdock-k8s2etcd'))

    upd.print_log('Add an After=etcd.service dependency to the kube-apiserver service file')
    upd.print_log(
        helpers.local(
            "cat > /etc/systemd/system/kube-apiserver.service << 'EOF' {0}"
            .format(K8S_API_SERVICE_FILE))
    )
    upd.print_log('Turn off watch-cache in kube_apiserver')
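    # KUBE_API_SERVER_ARG names the quoted arguments line in the config file
    # and KUBE_API_WATCHCACHE_DISABLE is the flag to append (presumably
    # something like ' --watch-cache=false'). The loop below inserts it inside
    # the existing quotes, e.g. KUBE_API_ARGS="--foo" would become
    # KUBE_API_ARGS="--foo --watch-cache=false".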
    lines = []
    with open(KUBE_API_SERVER_PATH) as f:
        lines = f.readlines()
    with open(KUBE_API_SERVER_PATH, 'w+') as f:
        for line in lines:
            if (KUBE_API_SERVER_ARG in line and
                    KUBE_API_WATCHCACHE_DISABLE not in line):
                s = line.split('"')
                s[1] += KUBE_API_WATCHCACHE_DISABLE
                line = '"'.join(s)
            f.write(line)
    helpers.restart_master_kubernetes(with_enable=True)

    # 00087_update.py
    upd.print_log('Upgrade namespaces for PD...')
    config = ConfigParser.RawConfigParser()
    config.read(KUBERDOCK_SETTINGS_FILE)
    ns = MASTER_IP
    if not config.has_option('main', 'PD_NAMESPACE'):
        if CEPH:
            # Store the default CEPH pool as the namespace. It was already
            # used by the KD cluster, so we will not change it.
            ns = OLD_DEFAULT_CEPH_POOL
        config.set('main', 'PD_NAMESPACE', ns)
        with open(KUBERDOCK_SETTINGS_FILE, 'wb') as fout:
            config.write(fout)

    if CEPH:
        # Prefix all existing CEPH drive names with 'rbd', because it was the default pool
        PersistentDisk.query.filter(
            ~PersistentDisk.drive_name.contains(PD_NS_SEPARATOR)
        ).update(
            {PersistentDisk.drive_name:
                OLD_DEFAULT_CEPH_POOL + PD_NS_SEPARATOR +
                PersistentDisk.drive_name},
            synchronize_session=False
        )
        db.session.commit()
        try:
            pstorage.check_namespace_exists(namespace=ns)
        except pstorage.NoNodesError:
            # skip CEPH pool checking if there are no nodes with CEPH
            pass

    # Restart kuberdock to prevent loss of PD bind state, because the fix for
    # this is in the new version.
    helpers.restart_service('emperor.uwsgi')
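
For illustration only (assumed values, not taken from the project's settings): with OLD_DEFAULT_CEPH_POOL = 'rbd' and PD_NS_SEPARATOR = '/', the bulk update above turns a drive stored as 'disk1' into 'rbd/disk1', while drives that already contain the separator are left untouched.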