Example #1
0
 def add_kdtools_to_master(cls, upd):
     """Restart user pods so they pick up kdtools/ssh-access support.

     Iterates over all non-deleted pods (skipping the internal DNS pod),
     stopping and starting each one so its spec is re-applied. Pods that
     are pending/stopping/stopped are skipped and only logged, since
     restarting them could fail the whole upgrade (AC-3386).

     :param upd: upgrade-script logger/driver object providing print_log().
     """
     # Patch RC specs
     upd.print_log('Restart pods to support ssh access...')
     pc = PodCollection()
     query = Pod.query.filter(Pod.status != 'deleted')
     user = User.filter_by(username=settings.KUBERDOCK_INTERNAL_USER).one()
     dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()
     if dns_pod:
         # Bug fix: Query.filter() returns a NEW query object; the result
         # must be reassigned, otherwise the DNS pod was never excluded
         # and would have been restarted as well.
         query = query.filter(Pod.id != dns_pod.id)
     for dbpod in query:
         pod = pc._get_by_id(dbpod.id)
         if pod.status in (POD_STATUSES.pending, POD_STATUSES.stopping,
                           POD_STATUSES.stopped):
             # Workaround for AC-3386 issue: just don't restart
             # pending pods, because it may lead to error during pod start,
             # and upgrade script fail as a result.
             # Also do not restart already stopped pods.
             upd.print_log(
                 u'Skip restart of {} pod "{}". '
                 u'It may need manual restart to enable ssh access.'.format(
                     pod.status, dbpod.name))
             continue
         try:
             # Block until the pod is fully stopped before starting again.
             pc._stop_pod(pod, block=True)
         except APIError as e:
             # Best-effort: log and move on so one bad pod does not
             # abort the whole upgrade.
             upd.print_log(
                 u'Warning: Failed to stop pod {}. It may be needed to '
                 u'manual restart the pod.\n{}'.format(dbpod.name, e))
             continue
         pc._start_pod(pod, {'async_pod_create': False})
         upd.print_log(u'Restart pod: {}'.format(dbpod.name))
Example #2
0
def upgrade(upd, with_testing, *args, **kwargs):
    """Run the combined 00103-00105 upgrade steps plus uwsgi config fixes.

    Steps: migrate the DB schema, enable auto-restart for ntpd on the
    master, bounce running pods that use persistent storage, install new
    roles/permissions and menu items, and refresh the uwsgi kuberdock.ini
    (saving a backup of the old one first).

    :param upd: upgrade-script logger/driver object providing print_log().
    :param with_testing: unused here; kept for the upgrade-script interface.
    """
    upd.print_log('Upgrading db...')
    helpers.upgrade_db()

    # 00103_update.py — install a systemd override so ntpd restarts itself.
    upd.print_log('Enabling restart for ntpd.service on master')
    for command in ('mkdir -p ' + SERVICE_DIR,
                    'echo -e "' + OVERRIDE_CONF + '" > ' + OVERRIDE_FILE,
                    'systemctl daemon-reload',
                    'systemctl restart ntpd'):
        local(command)

    # 00104_update.py — restart running pods that have persistent disks.
    upd.print_log('Restart pods with persistent storage')
    collection = PodCollection()
    rows = Pod.query.with_entities(Pod.id).filter(Pod.persistent_disks).all()
    for (pod_id,) in rows:
        pod = collection._get_by_id(pod_id)
        if pod.status != POD_STATUSES.running:
            continue
        collection._stop_pod(pod)
        # Drop the cached entry and rebuild so the restart sees fresh state.
        collection._collection.pop((pod_id, pod_id))
        collection._merge()
        collection._start_pod(collection._get_by_id(pod_id))

    # 00105_update.py — new roles/resources/permissions and menu wiring.
    upd.print_log('Add roles {}, resources {} and its permissions...'.format(
        ROLES, RESOURCES))
    fixtures.add_permissions(roles=ROLES, resources=RESOURCES,
                             permissions=PERMISSIONS)
    upd.print_log('Add MenuRoles...')
    limited_user_role = Role.query.filter(
        Role.rolename == 'LimitedUser').first()
    user_role = Role.query.filter(Role.rolename == 'User').first()
    # Mirror every 'User' menu association onto the 'LimitedUser' role.
    for assoc in user_role.menus_assocs:
        db.session.add(MenuItemRole(role=limited_user_role,
                                    menuitem_id=assoc.menuitem_id))
    db.session.commit()

    # Fixes for celery workers launching
    upd.print_log('Updating uwsgi configuration ...')
    local('test -f "{0}" && cp "{0}" "{1}"'.format(UWSGI_KUBERDOCK_INI_DEST,
                                                   SAVE_KUBERDOCK_INI))
    local('cp "{0}" "{1}"'.format(UWSGI_KUBERDOCK_INI_SOURCE,
                                  UWSGI_KUBERDOCK_INI_DEST))
    local('chmod 644 "{0}"'.format(UWSGI_KUBERDOCK_INI_DEST))