Example #1
def upgrade(upd, with_testing, *args, **kwargs):
    # 00109_update.py
    upd.print_log('Update system settings schema...')
    helpers.upgrade_db()

    redis = ConnectionPool.get_connection()
    old_settings = SystemSettings.get_all()

    # backup for downgrade
    if not redis.get('old_system_settings'):
        redis.set('old_system_settings',
                  json.dumps(old_settings),
                  ex=int(timedelta(days=7).total_seconds()))

    SystemSettings.query.delete()
    add_system_settings()
    for param in old_settings:
        SystemSettings.set_by_name(param.get('name'),
                                   param.get('value'),
                                   commit=False)
    db.session.commit()

    # 00111_update.py
    helpers.restart_master_kubernetes()

    # 00113_update.py
    upd.print_log('Adding "count_type" column to packages...')
    helpers.upgrade_db(revision='42b36be03945')
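
The Redis backup taken above exists so a later downgrade can restore the pre-upgrade values before the 7-day key expires. A minimal sketch of such a restore step, assuming the same helpers; this downgrade is illustrative and not part of the original script:

def downgrade(upd, with_testing, exception, *args, **kwargs):
    # Hypothetical counterpart to the upgrade above: re-apply the settings
    # saved under 'old_system_settings'.
    upd.print_log('Restoring previous system settings...')
    redis = ConnectionPool.get_connection()
    saved = redis.get('old_system_settings')
    if saved:
        for param in json.loads(saved):
            SystemSettings.set_by_name(param.get('name'),
                                       param.get('value'),
                                       commit=False)
        db.session.commit()
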
Example #2
def _update_00191_upgrade(upd, calico_network):
    etcd1 = helpers.local('uname -n')
    _master_etcd_cert(etcd1)
    _master_etcd_conf(etcd1)
    helpers.restart_service('etcd')

    _master_docker(upd)
    _master_firewalld()
    _master_k8s_node()

    # Start calico-node only if the container is not already running
    if helpers.local('docker ps --format "{{.Names}}" | grep "^calico-node$"'
                     ) != 'calico-node':
        _master_calico(upd, calico_network)

    _master_k8s_extensions()
    helpers.restart_master_kubernetes()
    helpers.local('echo "{0}" | kubectl create -f -'.format(_K8S_EXTENSIONS))
    # we need to restart here again, because Kubernetes sometimes doesn't
    # accept extensions on the fly
    helpers.restart_master_kubernetes()
    _master_network_policy(upd, calico_network)

    _master_dns_policy()
    _master_pods_policy()

    _master_service_update()
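
These scripts lean on helpers.local to run shell commands and compare the output against string literals (e.g. the calico-node check above), which implies the helper returns stripped stdout. A rough sketch of such a wrapper, purely as an assumption about its behavior; the real KuberDock implementation may differ:

import subprocess

def local(command):
    # Run a shell command and return its stdout with surrounding whitespace
    # stripped, so results compare cleanly against literals like 'calico-node'.
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    return out.strip()
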
Example #3
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Generating new auth config file for nodes...')
    with open('/etc/kubernetes/kubelet_token.dat') as f:
        data = json.load(f)
    token = data['BearerToken']
    with open('/etc/kubernetes/configfile_for_nodes', 'w') as f:
        f.write(configfile.format(token, MASTER_IP))

    upd.print_log('Changing config files...')
    upd.print_log('1) controller-manager',
                  helpers.local('mv /etc/kubernetes/controller-manager.rpmnew '
                                '/etc/kubernetes/controller-manager'))
    upd.print_log('2) kube-apiserver')
    with open('/etc/kubernetes/apiserver') as f:
        # Rename deprecated flags and extend the admission-control plugin list
        data = f.read().replace('--portal_net', '--service-cluster-ip-range')
        data = data.replace(
            'AutoProvision,LimitRanger',
            'Lifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,'
            'ServiceAccount')
        data = data.replace('--public_address_override', '--bind-address')
    with open('/etc/kubernetes/apiserver', 'w') as f:
        f.write(data)
    upd.print_log('Done.')

    upd.print_log('Trying to restart master kubernetes...')
    service, code = helpers.restart_master_kubernetes(with_enable=True)
    if code != 0:
        raise helpers.UpgradeError('Kubernetes not restarted. '
                                   'Service {0} code {1}'.format(service, code))
    upd.print_log('Deleting old token file',
                  helpers.local('rm -f /etc/kubernetes/kubelet_token.dat'))
    helpers.local('rm -f /etc/kubernetes/apiserver.rpmnew')
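
The apiserver edit above follows a read-modify-write pattern: load the whole config, apply ordered string substitutions, write it back. Factored out, it might look like the sketch below; replace_in_file is an illustrative name, not an existing KuberDock helper:

def replace_in_file(path, replacements):
    # Apply (old, new) substitution pairs, in order, to a whole file.
    with open(path) as f:
        data = f.read()
    for old, new in replacements:
        data = data.replace(old, new)
    with open(path, 'w') as f:
        f.write(data)

replace_in_file('/etc/kubernetes/apiserver', [
    ('--portal_net', '--service-cluster-ip-range'),
    ('--public_address_override', '--bind-address'),
])
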
Example #4
def downgrade(upd, with_testing, exception, *args, **kwargs):
    _downgrade_k8s_master(upd, with_testing)
    service, res = helpers.restart_master_kubernetes()
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))

    _downgrade_etcd(upd)

    # Restart KD to make sure new libs are running
    res = helpers.restart_service(settings.KUBERDOCK_SERVICE)
    if res != 0:
        raise helpers.UpgradeError('Failed to restart KuberDock')
    helpers.downgrade_db(revision='3c832810a33c')
Example #5
def upgrade(upd, with_testing, *args, **kwargs):
    _upgrade_k8s_master(upd, with_testing)
    service, res = helpers.restart_master_kubernetes()
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))

    _upgrade_etcd(upd)

    # Restart KD to make sure new libs are running
    res = helpers.restart_service(settings.KUBERDOCK_SERVICE)
    if res != 0:
        raise helpers.UpgradeError('Failed to restart KuberDock')

    helpers.upgrade_db()
    _update_pv_mount_paths(upd)
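
Examples #4 and #5 repeat the same restart-and-check idiom. It could be factored into a single guard; restart_or_fail below is a hypothetical name for such a helper:

def restart_or_fail():
    # Restart master Kubernetes services and abort the update on any failure.
    service, res = helpers.restart_master_kubernetes()
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))
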
Example #6
def upgrade(upd, with_testing, *args, **kwargs):
    # 00085_update.py
    upd.print_log('Add default Persistent Disks size in pods config...')
    pods = Pod.query.all()
    for pod in pods:
        upd.print_log('Processing pod {0}'.format(pod.name))
        config = pod.get_dbconfig()
        config['volumes_public'] = with_size(
            config.get('volumes_original', []), pod.owner_id)
        pod.set_dbconfig(config, save=False)
    for pod in pods:
        config = pod.get_dbconfig()
        config.pop('volumes_original', None)
        pod.set_dbconfig(config, save=False)
    db.session.commit()

    # 00086_update.py
    upd.print_log('Update kubes to hard limits')
    internal = Kube.get_by_name('Internal service')
    if internal:
        internal.cpu = 0.02
        internal.save()
    small = Kube.get_by_name('Small')
    if small:
        small.cpu = 0.05
        small.save()
    standard = Kube.get_by_name('Standard')
    if standard:
        standard.cpu = 0.25
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.5
        high.save()
    upd.print_log('Setup k8s2etcd middleware service')
    upd.print_log(
        helpers.local(
            "cat > /etc/systemd/system/kuberdock-k8s2etcd.service << 'EOF' {0}"
            .format(SERVICE_FILE))
    )

    helpers.local('systemctl daemon-reload')
    upd.print_log(helpers.local('systemctl reenable kuberdock-k8s2etcd'))
    upd.print_log(helpers.local('systemctl restart kuberdock-k8s2etcd'))

    upd.print_log('Add after etcd.service to kube-apiserver service file')
    upd.print_log(
        helpers.local(
            "cat > /etc/systemd/system/kube-apiserver.service << 'EOF' {0}"
            .format(K8S_API_SERVICE_FILE))
    )
    upd.print_log('Turn off watch-cache in kube_apiserver')
    with open(KUBE_API_SERVER_PATH) as f:
        lines = f.readlines()
    with open(KUBE_API_SERVER_PATH, 'w') as f:
        for line in lines:
            if (KUBE_API_SERVER_ARG in line and
                    KUBE_API_WATCHCACHE_DISABLE not in line):
                # Append the watch-cache switch inside the quoted value
                s = line.split('"')
                s[1] += KUBE_API_WATCHCACHE_DISABLE
                line = '"'.join(s)
            f.write(line)
    helpers.restart_master_kubernetes(with_enable=True)

    # 00087_update.py
    upd.print_log('Upgrade namespaces for PD...')
    config = ConfigParser.RawConfigParser()
    config.read(KUBERDOCK_SETTINGS_FILE)
    ns = MASTER_IP
    if not config.has_option('main', 'PD_NAMESPACE'):
        if CEPH:
            # Store default CEPH pool as namespace. It already was used
            # by KD cluster, so we will not change it.
            ns = OLD_DEFAULT_CEPH_POOL
        config.set('main', 'PD_NAMESPACE', ns)
        with open(KUBERDOCK_SETTINGS_FILE, 'wb') as fout:
            config.write(fout)

    if CEPH:
        # Set 'rbd' for all existing ceph drives, because it was a default pool
        PersistentDisk.query.filter(
            ~PersistentDisk.drive_name.contains(PD_NS_SEPARATOR)
        ).update(
            {PersistentDisk.drive_name: (
                OLD_DEFAULT_CEPH_POOL + PD_NS_SEPARATOR +
                PersistentDisk.drive_name)},
            synchronize_session=False
        )
        db.session.commit()
        try:
            pstorage.check_namespace_exists(namespace=ns)
        except pstorage.NoNodesError:
            # skip CEPH pool checking if there are no nodes with CEPH
            pass

    # Restart kuberdock to prevent loss of PD bind state, because the fix for
    # this is in the new version.
    helpers.restart_service('emperor.uwsgi')
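
To make the quote-splitting edit in the watch-cache block above concrete: the flag is appended inside the quoted value of the sysconfig line. A standalone demonstration, assuming KUBE_API_SERVER_ARG is 'KUBE_API_ARGS' and KUBE_API_WATCHCACHE_DISABLE is ' --watch-cache=false' (both assumed values; the real constants are defined elsewhere in the update script):

line = 'KUBE_API_ARGS="--runtime-config=api/v1"\n'
parts = line.split('"')   # ['KUBE_API_ARGS=', '--runtime-config=api/v1', '\n']
parts[1] += ' --watch-cache=false'
line = '"'.join(parts)
# line == 'KUBE_API_ARGS="--runtime-config=api/v1 --watch-cache=false"\n'
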
Example #7
def _upgrade_212(upd, with_testing, *args, **kwargs):
    helpers.restart_master_kubernetes()
Example #8
def downgrade(upd, with_testing, exception, *args, **kwargs):
    helpers.local('yum downgrade kubernetes-master --enablerepo=kube')
    helpers.restart_master_kubernetes()
Example #9
def upgrade(upd, with_testing, *args, **kwargs):
    helpers.install_package('kubernetes-master-1.0.3', with_testing)
    helpers.restart_master_kubernetes()