def _update_00203_upgrade(upd):
    """Ensure the ingress controller pod uses the nginx ConfigMap.

    Steps (order matters):
      1. Skip entirely if the internal ingress pod does not exist yet.
      2. Create the ingress nginx ConfigMap, or patch the existing one
         with 'server-name-hash-bucket-size': '128'.
      3. Append the --nginx-configmap=<ns>/<name> flag to the
         'nginx-ingress' container command (if not already present),
         persist the new DB config, and live-patch the running pod.

    :param upd: updater object used for progress logging via print_log
    """

    def _update_ingress_container(config):
        # Returns True if the container command was modified,
        # False if the flag was already present (config is up-to-date).
        config_map_cmd = "--nginx-configmap={}/{}".format(
            constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE,
            constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME)
        for c in config['containers']:
            if c['name'] == 'nginx-ingress':
                if config_map_cmd not in c['command']:
                    c['command'].append(config_map_cmd)
                    return True
        return False

    def _create_or_update_ingress_config():
        # EAFP: try to fetch the ConfigMap; patch it when found,
        # create it from scratch when missing.
        client = configmap.ConfigMapClient(KubeQuery())
        try:
            client.get(
                constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                namespace=constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)
            client.patch(
                constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                data={'server-name-hash-bucket-size': '128'},
                namespace=constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)
        except configmap.ConfigMapNotFound:
            ingress.create_ingress_nginx_configmap()

    # The ingress pod belongs to the internal service user.
    owner = User.get_internal()
    pod = Pod.query.filter_by(name=constants.KUBERDOCK_INGRESS_POD_NAME,
                              owner=owner).first()
    if pod is None:
        upd.print_log('Ingress POD hasn\'t been created yet. Skipping')
        return

    _create_or_update_ingress_config()

    config = pod.get_dbconfig()
    if not _update_ingress_container(config):
        # NOTE(review): "contoller" typo is in the original log message;
        # left untouched because it is runtime output.
        upd.print_log('Ingress contoller RC is up-to-date. Skipping')
        return

    # Persist the changed config first, then live-patch the running pod
    # with the updated container spec (restart required for the new flag).
    collection = PodCollection()
    replace_pod_config(pod, config)
    collection.patch_running_pod(
        pod.id,
        {'spec': {
            'containers': config['containers']
        }},
        restart=True)
def upgrade(upd, with_testing, *args, **kwargs):
    """Rename 'clusterIP' to 'podIP' in every pod's stored DB config.

    For configs that never recorded a clusterIP, the value is looked up
    from the pod's Kubernetes service object instead. Pods that have no
    associated service are left untouched.
    """
    pods = PodCollection()
    for entry in pods.get(as_json=False):
        pod = pods._get_by_id(entry['id'])
        config = get_pod_config(pod.id)

        # Remove the legacy key; keep its value (if any) for migration.
        ip = config.pop('clusterIP', None)
        if ip is None:
            svc_name = config.get('service')
            if svc_name is None:
                # Nothing to migrate and nowhere to look it up.
                continue
            ns = config.get('namespace') or pod.id
            svc = KubeQuery().get(['services', svc_name], ns=ns)
            ip = svc.get('spec', {}).get('clusterIP')

        if ip is not None:
            config['podIP'] = ip
        replace_pod_config(pod, config)
def _remove_lifecycle_section_from_pods(upd, pods, pas):
    """Neutralize lifecycle hooks and mark prefilled volume mounts.

    For every pod: replace the postStart hook of containers that contain
    origin root with a no-op '/bin/true' command, set kdCopyFromImage on
    mounts backed by prefilled volumes, then persist and push the config.
    """
    # PodCollection.update({'command': 'change_config'}) can't delete keys
    # thus mocking instead
    def _neutralize_lifecycle(config):
        for c in config['containers']:
            if contains_origin_root(c):
                # Build a fresh dict per container — do not share state.
                c['lifecycle'] = {
                    'postStart': {
                        'exec': {
                            'command': ['/bin/true']
                        }
                    }
                }
        return config

    def _flag_prefilled_mounts(config, pod):
        prefilled = _extract_prefilled_volumes_from_pod(pas, pod)
        for c in config['containers']:
            for mount in c.get('volumeMounts', []):
                if mount['name'] in prefilled:
                    mount['kdCopyFromImage'] = True

    pc = PodCollection()
    for pod in pods:
        cfg = _neutralize_lifecycle(pod.get_dbconfig())
        _flag_prefilled_mounts(cfg, pod)
        cfg['command'] = 'change_config'
        try:
            replace_pod_config(pod, cfg)
            pc.update(pod.id, cfg)
        except PodNotFound:
            upd.print_log('Skipping POD {}. Not found in K8S'.format(pod.id))
        else:
            upd.print_log('POD {} config patched'.format(pod.id))
def _update_pv_mount_paths(upd):
    """Migration k8s 1.1.3 -> 1.2.4 requires removing :Z from mount paths.

    Strips a trailing SELinux relabel suffix (':z' or ':Z') from every
    container volume mountPath, persists the cleaned config, and patches
    each non-deleted running pod with a restart.

    :param upd: updater object used for progress logging via print_log
    """
    # Patch RC specs
    upd.print_log("Updating Pod PV mount paths")

    def remove_trailing_z(pod_config):
        # Deep copy so the caller's config dict is never mutated in place.
        updated_config = copy.deepcopy(pod_config)
        for container in updated_config['containers']:
            # FIX: containers without volumes may lack the 'volumeMounts'
            # key entirely — subscripting raised KeyError before.
            for mount in container.get('volumeMounts', []):
                # endswith accepts a tuple of suffixes: one call covers
                # both ':z' and ':Z'.
                if mount['mountPath'].endswith((":z", ":Z")):
                    mount['mountPath'] = mount['mountPath'][:-2]
        return updated_config

    pc = PodCollection()
    query = Pod.query.filter(Pod.status != POD_STATUSES.deleted)
    for dbpod in query:
        updated_config = remove_trailing_z(dbpod.get_dbconfig())
        # Persist the cleaned config, then push it to the live pod.
        kapi_helpers.replace_pod_config(dbpod, updated_config)
        pc.patch_running_pod(dbpod.id, {'spec': updated_config}, restart=True)
        upd.print_log(u'Successfully updated pod: {}'.format(dbpod.name))
def upgrade(upd, with_testing, *args, **kwargs):
    """Consolidated upgrade: replays updates 00124 through 00138 plus
    later additions (SSO secret key seeding, session invalidation).

    Order is significant: schema upgrade runs first, then file moves,
    pod config migration, menu/permission rebuilds, kube/plan tweaks,
    local storage migration, and finally session cleanup.

    :param upd: updater object used for progress logging via print_log
    :param with_testing: accepted for updater-interface compatibility;
        not used in this function's visible body
    """
    upgrade_db()

    # === 00124_update.py ===
    # Move index file of k8s2etcd service from / to /var/lib/kuberdock
    try:
        stop_service(u124_service_name)
        if os.path.isfile(u124_old) and not os.path.isfile(u124_new):
            shutil.move(u124_old, u124_new)
    finally:
        # Always restart the service, even if the move failed.
        start_service(u124_service_name)

    # === 00126_update.py ===
    # Rename 'clusterIP' -> 'podIP' in each pod's DB config; fall back
    # to the service object in K8S when the config never stored it.
    pod_collection = PodCollection()
    for pod_dict in pod_collection.get(as_json=False):
        pod = pod_collection._get_by_id(pod_dict['id'])
        db_config = get_pod_config(pod.id)
        cluster_ip = db_config.pop('clusterIP', None)
        if cluster_ip is None:
            service_name = db_config.get('service')
            if service_name is None:
                # No service: nothing to migrate for this pod.
                continue
            namespace = db_config.get('namespace') or pod.id
            service = KubeQuery().get(['services', service_name], ns=namespace)
            cluster_ip = service.get('spec', {}).get('clusterIP')
        if cluster_ip is not None:
            db_config['podIP'] = cluster_ip
        replace_pod_config(pod, db_config)

    # === 00127_update.py ===
    # Rebuild the whole menu tree from scratch.
    upd.print_log('Upgrading menu...')
    MenuItemRole.query.delete()
    MenuItem.query.delete()
    Menu.query.delete()
    generate_menu()

    # === 00130_update.py ===
    # Rebuild the RBAC permission/resource tables from scratch.
    upd.print_log('Update permissions...')
    Permission.query.delete()
    Resource.query.delete()
    add_permissions()
    db.session.commit()

    # === 00135_update.py ===
    # upd.print_log('Changing session_data schema...')
    # upgrade_db(revision='220dacf65cba')

    # === 00137_update.py ===
    upd.print_log('Upgrading db...')
    # upgrade_db(revision='3c832810a33c')

    upd.print_log('Raise max kubes to 64')
    max_kubes = 'max_kubes_per_container'
    old_value = SystemSettings.get_by_name(max_kubes)
    # Only bump the limit if the admin never changed the old default.
    # NOTE(review): the getter returned the string '10' but the setter is
    # given the int 64 — presumably set_by_name stringifies; confirm.
    if old_value == '10':
        SystemSettings.set_by_name(max_kubes, 64)

    # Retune built-in kube types; 'Small' is also renamed to 'Tiny'.
    upd.print_log('Update kubes')
    small = Kube.get_by_name('Small')
    standard = Kube.get_by_name('Standard')
    if small:
        small.cpu = 0.12
        small.name = 'Tiny'
        small.memory = 64
        # Move the default flag from Tiny to Standard when possible.
        if small.is_default and standard:
            small.is_default = False
            standard.is_default = True
        small.save()
    if standard:
        standard.cpu = 0.25
        standard.memory = 128
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.25
        high.memory = 256
        high.disk_space = 3
        high.save()

    # === 00138_update.py ===
    # Local storage path migration only applies to plain local storage.
    if not (CEPH or AWS):
        upgrade_localstorage_paths(upd)

    # === added later ===
    # Seed the SSO shared secret if it is still empty.
    # NOTE(review): .first() may return None if the setting row is
    # missing, which would raise AttributeError below — confirm the row
    # is guaranteed to exist by an earlier migration.
    secret_key = SystemSettings.query.filter(
        SystemSettings.name == 'sso_secret_key').first()
    if not secret_key.value:
        secret_key.value = randstr(16)
        secret_key.description = (
            'Used for Single sign-on. Must be shared between '
            'Kuberdock and billing system or other 3rd party '
            'application.')
    db.session.commit()

    # Force all users to re-authenticate after the upgrade.
    upd.print_log('Close all sessions...')
    close_all_sessions()