Example #1
def add_kdtools_to_master(cls, upd):
    """Restart user pods (except the internal DNS pod) to enable ssh access."""
    # Patch RC specs
    upd.print_log('Restart pods to support ssh access...')
    pc = PodCollection()
    query = Pod.query.filter(Pod.status != 'deleted')
    user = User.filter_by(username=settings.KUBERDOCK_INTERNAL_USER).one()
    dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()
    if dns_pod:
        # filter() returns a new query, so keep the result; otherwise the
        # DNS pod would not actually be excluded.
        query = query.filter(Pod.id != dns_pod.id)
    for dbpod in query:
        pod = pc._get_by_id(dbpod.id)
        if pod.status in (POD_STATUSES.pending, POD_STATUSES.stopping,
                          POD_STATUSES.stopped):
            # Workaround for AC-3386: do not restart pending pods, because
            # that may cause an error during pod start and make the upgrade
            # script fail. Also do not restart already stopped pods.
            upd.print_log(
                u'Skipping restart of {} pod "{}". '
                u'It may need a manual restart to enable ssh access.'.format(
                    pod.status, dbpod.name))
            continue
        try:
            pc._stop_pod(pod, block=True)
        except APIError as e:
            upd.print_log(
                u'Warning: failed to stop pod {}. It may need to be '
                u'restarted manually.\n{}'.format(dbpod.name, e))
            continue
        pc._start_pod(pod, {'async_pod_create': False})
        upd.print_log(u'Restart pod: {}'.format(dbpod.name))
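The function above is not self-contained: it expects PodCollection, Pod, User, POD_STATUSES, APIError and settings to be in scope. A minimal import block is sketched below; the module paths are guesses at the KuberDock layout, shown only to make the example readable, not as the project's actual structure.

# Hypothetical import paths; the actual KuberDock package layout may differ.
from kubedock.exceptions import APIError
from kubedock.kapi.podcollection import PodCollection
from kubedock.pods.models import Pod
from kubedock.users.models import User
from kubedock.utils import POD_STATUSES
from kubedock import settings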
Example #2
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Upgrading db...')
    helpers.upgrade_db()

    # 00103_update.py
    upd.print_log('Enabling restart for ntpd.service on master')
    local('mkdir -p ' + SERVICE_DIR)
    local('echo -e "' + OVERRIDE_CONF + '" > ' + OVERRIDE_FILE)
    local('systemctl daemon-reload')
    local('systemctl restart ntpd')

    # 00104_update.py
    upd.print_log('Restart pods with persistent storage')
    pc = PodCollection()
    pods = Pod.query.with_entities(Pod.id).filter(Pod.persistent_disks).all()
    for pod_id in pods:
        p = pc._get_by_id(pod_id[0])
        if p.status == POD_STATUSES.running:
            pc._stop_pod(p)
            pc._collection.pop((pod_id[0], pod_id[0]))
            pc._merge()
            p = pc._get_by_id(pod_id[0])
            pc._start_pod(p)

    # 00105_update.py
    upd.print_log('Add roles {}, resources {} and their permissions...'.format(
        ROLES, RESOURCES))
    fixtures.add_permissions(roles=ROLES,
                             resources=RESOURCES,
                             permissions=PERMISSIONS)
    upd.print_log('Add MenuRoles...')
    PAUserRole = Role.query.filter(Role.rolename == 'LimitedUser').first()
    for menu_role in Role.query.filter(
            Role.rolename == 'User').first().menus_assocs:
        db.session.add(
            MenuItemRole(role=PAUserRole, menuitem_id=menu_role.menuitem_id))
    db.session.commit()

    # Fixes for celery workers launching
    upd.print_log('Updating uwsgi configuration ...')
    local('test -f "{0}" && cp "{0}" "{1}"'.format(UWSGI_KUBERDOCK_INI_DEST,
                                                   SAVE_KUBERDOCK_INI))
    local('cp "{0}" "{1}"'.format(UWSGI_KUBERDOCK_INI_SOURCE,
                                  UWSGI_KUBERDOCK_INI_DEST))
    local('chmod 644 "{0}"'.format(UWSGI_KUBERDOCK_INI_DEST))
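The 00103 step above writes a systemd drop-in for ntpd but relies on constants defined elsewhere in the script. The values below are a plausible sketch, assuming the standard drop-in layout; the real SERVICE_DIR, OVERRIDE_FILE and OVERRIDE_CONF may differ.

# Assumed values, for illustration only.
SERVICE_DIR = '/etc/systemd/system/ntpd.service.d'
OVERRIDE_FILE = SERVICE_DIR + '/restart.conf'
# Written through `echo -e`, so the escaped "\n" sequences become real
# newlines in the drop-in file.
OVERRIDE_CONF = '[Service]\\nRestart=always\\nRestartSec=1s'

With such a drop-in in place, `systemctl daemon-reload` picks up the override and systemd restarts ntpd automatically whenever the service exits.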
Example #3
def upgrade(upd, with_testing, *args, **kwargs):
    pod_collection = PodCollection()
    for pod_dict in pod_collection.get(as_json=False):
        pod = pod_collection._get_by_id(pod_dict['id'])
        db_config = get_pod_config(pod.id)
        cluster_ip = db_config.pop('clusterIP', None)
        if cluster_ip is None:
            service_name = db_config.get('service')
            if service_name is None:
                continue
            namespace = db_config.get('namespace') or pod.id
            service = KubeQuery().get(['services', service_name], ns=namespace)
            cluster_ip = service.get('spec', {}).get('clusterIP')
            if cluster_ip is not None:
                db_config['podIP'] = cluster_ip
        replace_pod_config(pod, db_config)
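`KubeQuery().get(['services', service_name], ns=namespace)` is evidently expected to return the Kubernetes Service object as a parsed dict, which is why the chained `.get()` calls guard against a missing spec. The relevant shape of a v1 Service looks roughly like this (values invented for illustration):

# Sketch of the part of the Service object the code above reads.
service = {
    'metadata': {'name': 'service-abc123', 'namespace': 'pod-namespace'},
    'spec': {
        'clusterIP': '10.254.12.34',  # copied into db_config['podIP'] above
        'ports': [{'port': 80, 'targetPort': 80, 'protocol': 'TCP'}],
    },
}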
Example #4
def _master_service_update():
    services = Services()
    all_svc = services.get_all()
    pc = PodCollection()
    for svc in all_svc:
        selector = svc['spec'].get('selector', {})
        labels = svc['metadata'].get('labels', {})
        if KUBERDOCK_POD_UID in selector and KUBERDOCK_TYPE not in labels:
            namespace = svc['metadata']['namespace']
            name = svc['metadata']['name']
            data = {
                'metadata': {
                    'labels': {
                        KUBERDOCK_TYPE: LOCAL_SVC_TYPE,
                        KUBERDOCK_POD_UID: namespace
                    }
                }
            }
            rv = services.patch(name, namespace, data)
            raise_if_failure(rv, "Couldn't patch local service: {}".format(rv))
            pod = pc._get_by_id(namespace)
            if pod.status == POD_STATUSES.running:
                run_service(pod)
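Because the patch body only adds labels, it amounts to an ordinary Kubernetes merge patch of the service metadata. For reference, the same change could be made with kubectl; the label keys below are placeholders, since the actual values of KUBERDOCK_TYPE, LOCAL_SVC_TYPE and KUBERDOCK_POD_UID are defined elsewhere in KuberDock:

# kubectl patch service <name> --namespace <namespace> \
#     -p '{"metadata": {"labels": {"<KUBERDOCK_TYPE>": "<LOCAL_SVC_TYPE>",
#                                  "<KUBERDOCK_POD_UID>": "<namespace>"}}}'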
Example #5
def upgrade(upd, with_testing, *args, **kwargs):
    upgrade_db()

    # === 00124_update.py ===
    # Move index file of k8s2etcd service from / to /var/lib/kuberdock
    try:
        stop_service(u124_service_name)
        if os.path.isfile(u124_old) and not os.path.isfile(u124_new):
            shutil.move(u124_old, u124_new)
    finally:
        start_service(u124_service_name)

    # === 00126_update.py ===

    pod_collection = PodCollection()
    for pod_dict in pod_collection.get(as_json=False):
        pod = pod_collection._get_by_id(pod_dict['id'])
        db_config = get_pod_config(pod.id)
        cluster_ip = db_config.pop('clusterIP', None)
        if cluster_ip is None:
            service_name = db_config.get('service')
            if service_name is None:
                continue
            namespace = db_config.get('namespace') or pod.id
            service = KubeQuery().get(['services', service_name],
                                      ns=namespace)
            cluster_ip = service.get('spec', {}).get('clusterIP')
            if cluster_ip is not None:
                db_config['podIP'] = cluster_ip
        replace_pod_config(pod, db_config)

    # === 00127_update.py ===

    upd.print_log('Upgrading menu...')
    MenuItemRole.query.delete()
    MenuItem.query.delete()
    Menu.query.delete()
    generate_menu()

    # === 00130_update.py ===

    upd.print_log('Update permissions...')
    Permission.query.delete()
    Resource.query.delete()
    add_permissions()
    db.session.commit()

    # === 00135_update.py ===
    # upd.print_log('Changing session_data schema...')
    # upgrade_db(revision='220dacf65cba')


    # === 00137_update.py ===
    upd.print_log('Upgrading db...')
    # upgrade_db(revision='3c832810a33c')
    upd.print_log('Raise max kubes to 64')
    max_kubes = 'max_kubes_per_container'
    old_value = SystemSettings.get_by_name(max_kubes)
    if old_value == '10':
        SystemSettings.set_by_name(max_kubes, 64)
    upd.print_log('Update kubes')
    small = Kube.get_by_name('Small')
    standard = Kube.get_by_name('Standard')
    if small:
        small.cpu = 0.12
        small.name = 'Tiny'
        small.memory = 64
        if small.is_default and standard:
            small.is_default = False
            standard.is_default = True
        small.save()
    if standard:
        standard.cpu = 0.25
        standard.memory = 128
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.25
        high.memory = 256
        high.disk_space = 3
        high.save()

    # === 00138_update.py ===

    if not (CEPH or AWS):
        upgrade_localstorage_paths(upd)

    # === added later ===

    secret_key = SystemSettings.query.filter(
        SystemSettings.name == 'sso_secret_key').first()
    if not secret_key.value:
        secret_key.value = randstr(16)
    secret_key.description = (
        'Used for Single sign-on. Must be shared between '
        'Kuberdock and billing system or other 3rd party '
        'application.')
    db.session.commit()

    upd.print_log('Close all sessions...')
    close_all_sessions()
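`randstr` is used above to generate the SSO secret but is not shown. A minimal stand-in, assuming a plain alphanumeric secret (the real helper may use a different alphabet or a cryptographic randomness source):

import random
import string

def randstr(length, symbols=string.ascii_letters + string.digits):
    # Illustrative stand-in for the helper used in the example above.
    return ''.join(random.choice(symbols) for _ in range(length))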