def align_sessions_data():
    """Reconcile session records with the services visible in Kubernetes.

    Fetches the current ``services`` resources and marks as *stopped* every
    session that no longer has a matching service (matched by the session
    name appearing in the service's ``metadata.name``). Sessions that still
    have a live service are left untouched.

    Side effects: updates and saves ``Session`` rows.
    """
    data = K8S().fetch_resource('services')
    # Tolerate a response without an 'items' key (or with items=None).
    items = data.get('items') or []
    for session in Session.objects.all():
        for item in items:
            metadata = item.get('metadata')
            name = metadata.get('name') if metadata else None
            if name and session.name in name:
                # A service for this session still exists: keep it running.
                # (The original code used `continue` here, which never
                # prevented the stop below — every session got stopped.)
                break
        else:
            # No service matched this session — mark it stopped.
            session.status = choices.StatusChoices.stopped
            session.save()
def stop_session(session_uuid):
    """Stop the session identified by ``session_uuid``.

    Runs the final test collection, deletes the Kubernetes deployment for
    every exposed endpoint that has no external URL, and marks the session
    as stopped. A session that is already stopped is left untouched.
    """
    session = Session.objects.get(uuid=session_uuid)
    if session.status == choices.StatusChoices.stopped:
        # Nothing to do — teardown already happened.
        return
    run_tests(session.uuid)
    # Endpoints without an explicit URL were deployed by us: tear them down.
    for exposed in ExposedUrl.objects.filter(session=session):
        if exposed.vng_endpoint.url is None:
            K8S(app_name=session.name).delete()
    session.status = choices.StatusChoices.stopped
    session.save()
def deploy_db(session, data=None):
    """Deploy a PostGIS database pod scoped to *session*.

    :param session: session whose ``name`` namespaces the deployment
        (pod and labels are named ``db-<session.name>``).
    :param data: optional initialisation data for the database container;
        defaults to an empty list. (Was a mutable default argument ``[]``,
        which is shared across calls — replaced with the ``None`` sentinel.)
    :return: tuple ``(pod_ip, db_k8s)``. ``pod_ip`` is ``None`` when the pod
        never reported an IP within the polling window; returning a 2-tuple
        even on failure keeps callers that unpack two values from crashing.
    """
    db_k8s = K8S(app_name='db-{}'.format(session.name))
    db_k8s.initialize()
    db = copy.deepcopy(postgis)
    db.name = 'db-{}'.format(session.name)
    db.data = [] if data is None else data
    Deployment(
        name='db-{}'.format(session.name),
        labels='db-{}'.format(session.name),
        containers=[db]
    ).execute()
    # The new pod may not be visible immediately — poll for up to ~30 s.
    for _attempt in range(10):
        time.sleep(3)
        try:
            return db_k8s.get_pod_status()['status']['podIP'], db_k8s
        except Exception:
            # Status not available / podIP not assigned yet — keep polling.
            # Kept deliberately broad (best-effort poll), but no longer a
            # bare `except:` that would also swallow KeyboardInterrupt.
            pass
    # Pod never reported an IP: explicit failure marker.
    return None, db_k8s
def ZGW_deploy(session):
    """Deploy the full ZGW component stack for *session* on Kubernetes.

    Sequence: deploy the PostGIS DB pod, group all ZGW containers in one
    pod, expose them through a load balancer, wait for an external IP,
    wait for every uWSGI container to spawn, then load the preconfigured
    SQL dump into the DB. Updates the session's status/progress throughout
    and saves an error status if any stage fails.
    """
    update_session_status(session, _('Connecting to Kubernetes'), 1)
    k8s = K8S(app_name=session.name)
    k8s.initialize()
    # create deployment DB
    db_IP_address, k8s_db = deploy_db(session, postgis.data)
    # group all the other containers in the same pod
    containers = [
        copy.deepcopy(ZRC),
        copy.deepcopy(NRC),
        copy.deepcopy(ZTC),
        copy.deepcopy(BRC),
        copy.deepcopy(DRC),
        copy.deepcopy(AC),
        copy.deepcopy(NRC_CELERY),
        copy.deepcopy(rabbitMQ),
    ]
    # All but the last two (celery worker and rabbitMQ) are uWSGI apps
    # whose logs are polled below to detect readiness.
    uwsgi_containers = containers[:-2]
    exposed_urls = []
    for c in containers:
        # Match the container to its VNG endpoint (if any) by name.
        vng_endpoint = VNGEndpoint.objects.filter(
            session_type=session.session_type
        ).filter(name__icontains=c.name)
        c.variables['DB_HOST'] = db_IP_address
        if len(vng_endpoint) != 0:
            # Expose this container under a fresh random subdomain.
            bind_url = ExposedUrl.objects.create(
                session=session,
                vng_endpoint=vng_endpoint[0],
                subdomain=uuid.uuid4(),
                port=c.public_port
            )
            exposed_urls.append(bind_url)
        # Namespace the container name with the session name.
        c.name = '{}-{}'.format(session.name, c.name)
    Deployment(
        name=session.name,
        labels=session.name,
        containers=containers
    ).execute()
    update_session_status(session, _('Deployment of the pod'), 5)
    # Crete the service forwarding the right ports
    LoadBalancer(
        name='{}-loadbalancer'.format(session.name),
        app=session.name,
        containers=containers
    ).execute()
    ip = external_ip_pooling(k8s, session, n_trial=150, max_percentage=40)
    if ip is None:
        update_session_status(session, _('Impossible to deploy successfully, IP address not allocated'))
        session.status = choices.StatusChoices.error_deploy
        session.save()
        return
    for ex in exposed_urls:
        ex.docker_url = ip
        ex.save()
    # check migrations status: poll each uWSGI container's log for the
    # "spawned uWSGI" marker, up to 60 times with a 5 s pause.
    for i in range(60):
        spawned = [c for c in uwsgi_containers if 'spawned uWSGI' in k8s.get_pod_log(c.name)]
        # NOTE(review): (6 - len(uwsgi_containers)) is always 0 here, so the
        # reported progress stays at 40 — possibly `len(spawned)` was meant;
        # confirm before changing.
        update_session_status(session, _('Check migration status'), int(40 + (6 - len(uwsgi_containers)) * 45 / 6))
        if len(spawned) == len(uwsgi_containers):
            break
        time.sleep(5)
    if len(spawned) != len(uwsgi_containers):
        update_session_status(session, _('Not all uWSGI containers have spawned'))
        session.status = choices.StatusChoices.error_deploy
        session.save()
        return
    update_session_status(session, _('Loading preconfigured models'), 85)
    # Render the SQL dump with the allocated IP into a uniquely named
    # temporary file, copy it into the DB pod and execute it with psql.
    filename = str(uuid.uuid4())
    file_location = os.path.join(os.path.dirname(__file__), 'kubernetes/data/dump.sql')
    new_file = os.path.join(os.path.dirname(__file__), 'kubernetes/data/{}'.format(filename))
    with open(file_location) as in_file:
        content = in_file.read()
        content = content.replace('BASE_IP', ip)
    with open(new_file, 'w') as out_file:
        out_file.write(content)
    # running the eventual migrations
    k8s_db.copy_to(new_file, 'dump.sql')
    os.remove(new_file)
    k8s_db.exec([
        'psql',
        '-f',
        'dump.sql',
        '-U',
        'postgres'
    ])
    update_session_status(session, _('Installation successful'), 100)
    session.status = choices.StatusChoices.running
    session.save()
def bootstrap_session(session_uuid, purged=False):
    '''
    Create all the necessary endpoint and exposes it so they can be used as proxy
    In case there is one or multiple docker images linked, it starts all of them

    For ZGW session types the whole stack deployment is delegated to
    ``ZGW_deploy``. Otherwise an ``ExposedUrl`` is created per endpoint,
    Docker-backed endpoints are deployed behind a load balancer, and the
    session is marked running on success.

    NOTE(review): the ``purged`` parameter is never used in this body —
    confirm whether callers rely on it before removing.
    '''
    session = Session.objects.get(uuid=session_uuid)
    if session.session_type.ZGW_images:
        # Full ZGW stack: handled by the dedicated deployment routine.
        ZGW_deploy(session)
        return
    update_session_status(session, _('Connecting to Kubernetes'), 1)
    endpoint = VNGEndpoint.objects.filter(session_type=session.session_type)
    k8s = K8S(app_name=session.name)
    # Init of the procedure
    containers = []
    exposed_urls = []
    db_IP_address = None
    if session.session_type.database:
        data = session.session_type.db_data or []
        db_IP_address, __ = deploy_db(session, data)
        if not db_IP_address:
            update_session_status(session, _('An error within the image prevented from a correct deployment'))
            return
    # collecting all the containers
    for ep in endpoint:
        # Every endpoint gets an exposed URL with a random subdomain,
        # whether or not it is backed by a docker image.
        bind_url = ExposedUrl.objects.create(
            session=session,
            vng_endpoint=ep,
            subdomain=uuid.uuid4(),
            port=ep.port
        )
        exposed_urls.append(bind_url)
        if ep.docker_image:
            k8s.initialize()
            # Endpoint-specific environment variables for the container.
            env_var = bind_url.vng_endpoint.environmentalvariables_set.all()
            variables = {v.key: v.value for v in env_var}
            if db_IP_address:
                variables['DB_HOST'] = db_IP_address
            container = Container(
                name=session.name,
                image=ep.docker_image,
                public_port=ep.port,
                private_port=ep.port,
                variables=variables
            )
            containers.append(container)
    if len(containers) != 0:
        update_session_status(session, _('Docker image installation on Kubernetes'), 10)
        # NOTE(review): `deployment` and `lb` are unused — the calls are kept
        # for their side effects only.
        deployment = Deployment(
            name=session.name,
            labels=session.name,
            containers=containers
        ).execute()
        lb = LoadBalancer(
            name='{}-loadbalancer'.format(session.name),
            app=session.name,
            containers=containers
        ).execute()
        ip = external_ip_pooling(k8s, session)
        if not ip:
            update_session_status(session, _('An error within the image prevented from a correct deployment'))
            return
        for ex in exposed_urls:
            ex.docker_url = ip
            ex.save()
    update_session_status(session, _('Installation performed successfully'), 100)
    session.status = choices.StatusChoices.running
    session.save()