Example #1
    def autoscale(self, proc_type, autoscale):
        """
        Set autoscale rules for the application
        """
        name = '{}-{}'.format(self.id, proc_type)
        # basically fake out a Deployment object (only thing we use) to assign to the HPA
        target = {'apiVersion': 'apps/v1',
                  'kind': 'Deployment',
                  'metadata': {'name': name}}

        try:
            # get the target for autoscaler, in this case Deployment
            self._scheduler.hpa.get(self.id, name)
            if autoscale is None:
                self._scheduler.hpa.delete(self.id, name)
            else:
                self._scheduler.hpa.update(
                    self.id, name, proc_type, target, **autoscale
                )
        except KubeHTTPException as e:
            if e.response.status_code == 404:
                self._scheduler.hpa.create(
                    self.id, name, proc_type, target, **autoscale
                )
            else:
                # let the user know about any other errors
                raise ServiceUnavailable(str(e)) from e
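
Every example on this page wraps scheduler failures in ServiceUnavailable so the API answers with HTTP 503. A minimal sketch of what that exception class typically looks like, assuming the stock Django REST Framework custom-exception recipe (the project's exact detail string may differ):

from rest_framework import status
from rest_framework.exceptions import APIException


class ServiceUnavailable(APIException):
    # assumed definition following the standard DRF pattern
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = 'Service temporarily unavailable, try again later.'
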
Example #2
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        release = self.release_set.latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        # TODO: add support for interactive shell
        entrypoint, command = self._get_command_run(command)

        name = self._get_job_id('run') + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        kwargs = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': release.config.values,
            'registry': release.config.registry,
            'version': "v{}".format(release.version),
            'build_type': release.build.type,
        }

        try:
            exit_code, output = self._scheduler.run(self.id, name,
                                                    release.image, entrypoint,
                                                    command, **kwargs)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
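
The raise ... from e idiom above chains the original scheduler error onto the 503 wrapper so it survives for logging and debugging. A self-contained illustration with stand-in names (scheduler_run is hypothetical, not the real scheduler call):

class ServiceUnavailable(Exception):
    pass


def scheduler_run():
    # stand-in for self._scheduler.run failing
    raise TimeoutError('pod did not start')


try:
    try:
        scheduler_run()
    except Exception as e:
        raise ServiceUnavailable('myapp-run-x1y2z (run): {}'.format(e)) from e
except ServiceUnavailable as wrapped:
    # the original error is still attached as the cause
    assert isinstance(wrapped.__cause__, TimeoutError)
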
Example #3
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        release = self.release_set.filter(failed=False).latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        app_settings = self.appsettings_set.latest()
        # use slugrunner image for app if buildpack app otherwise use normal image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        data = self._gather_app_settings(release,
                                         app_settings,
                                         process_type='run',
                                         replicas=1)

        # create application config and build the pod manifest
        self.set_application_config(release)

        scale_type = 'run'
        name = self._get_job_id(scale_type) + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        try:
            exit_code, output = self._scheduler.run(
                self.id, name, image, self._get_entrypoint(scale_type),
                [command], **data)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #4
    def _update_application_service(self, namespace, app_type, port, routable=False, annotations=None):  # noqa
        """Update application service with all the various required information"""
        # avoid the shared-mutable-default pitfall of `annotations={}`
        annotations = annotations if annotations is not None else {}
        service = self._fetch_service_config(namespace)
        old_service = service.copy()  # in case anything fails for rollback

        try:
            # Update service information
            for key, value in annotations.items():
                if value is not None:
                    service['metadata']['annotations']['router.deis.io/%s' % key] = str(value)
                else:
                    service['metadata']['annotations'].pop('router.deis.io/%s' % key, None)
            if routable:
                service['metadata']['labels']['router.deis.io/routable'] = 'true'
            else:
                # delete the routable label if present
                service['metadata']['labels'].pop('router.deis.io/routable', None)

            # Set app type selector
            service['spec']['selector']['type'] = app_type

            # Find if target port exists already, update / create as required
            if routable:
                for pos, item in enumerate(service['spec']['ports']):
                    if item['port'] == 80 and port != item['targetPort']:
                        # port 80 is the only one we care about right now
                        service['spec']['ports'][pos]['targetPort'] = int(port)

            self._scheduler.svc.update(namespace, namespace, data=service)
        except Exception as e:
            # Fix service to old port and app type
            self._scheduler.svc.update(namespace, namespace, data=old_service)
            raise ServiceUnavailable(str(e)) from e
Example #5
    def detach(self, *args, **kwargs):
        # remove the certificate from the domain
        domain = get_object_or_404(Domain, domain=kwargs['domain'])
        domain.certificate = None
        domain.save()

        name = '%s-cert' % self.name
        app = domain.app

        # only delete if it exists and if no other domains depend on secret
        if len(self.domains) == 0:
            try:
                # We raise an exception when a secret doesn't exist
                self._scheduler.get_secret(app, name)
                self._scheduler.delete_secret(app, name)
            except KubeException as e:
                raise ServiceUnavailable(
                    "Could not delete certificate secret {} for application {}"
                    .format(name, app)) from e  # noqa

        # get config for the service
        config = self._load_service_config(app, 'router')

        # See if certificates are available
        if 'certificates' not in config:
            config['certificates'] = ''

        # convert from string to list to work with and filter out empty strings
        cert = '{}:{}'.format(domain.domain, self.name)
        certificates = [_f for _f in config['certificates'].split(',') if _f]
        if cert in certificates:
            certificates.remove(cert)
        config['certificates'] = ','.join(certificates)

        self._save_service_config(app, 'router', config)
Example #6
    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        self.log("deleting environment")
        try:
            # check if namespace exists
            self._scheduler.ns.get(self.id)

            try:
                self._scheduler.ns.delete(self.id)

                # wait up to 30 seconds for termination, polling once per second
                for _ in range(30):
                    try:
                        self._scheduler.ns.get(self.id)
                    except KubeHTTPException as e:
                        # only break out on a 404
                        if e.response.status_code == 404:
                            break
                    time.sleep(1)  # without this pause the loop would not span 30 seconds
            except KubeException as e:
                raise ServiceUnavailable(
                    'Could not delete Kubernetes Namespace {} within 30 seconds'
                    .format(self.id)) from e  # noqa
        except KubeHTTPException:
            # it's fine if the namespace does not exist - delete app from the DB
            pass

        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)
Example #7
    def _scale_pods(self, scale_types):
        release = self.release_set.filter(failed=False).latest()
        app_settings = self.appsettings_set.latest()

        # use slugrunner image for app if buildpack app otherwise use normal image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        tasks = []
        for scale_type, replicas in scale_types.items():
            data = self._gather_app_settings(release, app_settings, scale_type,
                                             replicas)  # noqa

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(self._scheduler.scale,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **data))

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)

            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
Example #8
    def get(self, request):
        try:
            import django.db
            with django.db.connection.cursor() as c:
                c.execute("SELECT 0")
        except django.db.Error as e:
            raise ServiceUnavailable("Database health check failed") from e

        return HttpResponse("OK")
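
The view doubles as a liveness endpoint: one trivial query proves the database connection works. A hypothetical way to wire it into a URLconf on Django 2.0+ (the path and view names are illustrative, not taken from the source project):

from django.urls import path

from .views import HealthCheckView  # hypothetical module layout

urlpatterns = [
    path('healthz', HealthCheckView.as_view()),
]
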
Example #9
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        scale_type = 'run'
        release = self.release_set.latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        image = release.image
        registry = release.config.registry
        version = "v{}".format(release.version)
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        name = self._get_job_id(scale_type) + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        kwargs = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': envs,
            'registry': registry,
            'version': version,
            'build_type': release.build.type,
            'deploy_timeout': deploy_timeout,
            'pod_termination_grace_period_seconds':
            pod_termination_grace_period_seconds,
            'image_pull_secret_name': image_pull_secret_name,
        }

        try:
            exit_code, output = self._scheduler.run(
                self.id, name, image, self._get_entrypoint(scale_type),
                [command], **kwargs)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #10
    def list_pods(self, *args, **kwargs):
        """Used to list basic information about pods running for a given application"""
        try:
            labels = self._scheduler_filter(**kwargs)

            # in case a singular pod is requested
            if 'name' in kwargs:
                pods = [
                    self._scheduler.pod.get(self.id, kwargs['name']).json()
                ]
            else:
                pods = self._scheduler.pod.get(self.id,
                                               labels=labels).json()['items']
                if not pods:
                    pods = []

            data = []
            for p in pods:
                labels = p['metadata']['labels']
                # specifically ignore run pods
                if labels['type'] == 'run':
                    continue

                state = str(self._scheduler.pod.state(p))

                # follows the kubectl convention - these are hidden unless show-all is set
                if state in ['down', 'crashed']:
                    continue

                # hide pod if it is past the graceful termination period
                if self._scheduler.pod.deleted(p):
                    continue

                item = Pod()
                item['name'] = p['metadata']['name']
                item['state'] = state
                item['release'] = labels['version']
                item['type'] = labels['type']
                if 'startTime' in p['status']:
                    started = p['status']['startTime']
                else:
                    started = str(datetime.utcnow().strftime(
                        settings.DEIS_DATETIME_FORMAT))
                item['started'] = started

                data.append(item)

            # sorting so latest start date is first
            data.sort(key=lambda x: x['started'], reverse=True)
            return data
        except KubeHTTPException:
            pass
        except Exception as e:
            err = '(list pods): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
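
The final sort works on plain strings because fixed-format ISO-8601 timestamps order lexicographically the same way they order chronologically. A quick self-contained check:

data = [
    {'name': 'web-1', 'started': '2017-03-01T10:00:00Z'},
    {'name': 'web-2', 'started': '2017-03-02T09:00:00Z'},
]
data.sort(key=lambda x: x['started'], reverse=True)
assert [p['name'] for p in data] == ['web-2', 'web-1']  # latest start first
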
Example #11
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        envs = release.config.values

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        tasks = []
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in ['web', 'cmd'] else False
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                'version': "v{}".format(release.version),
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
            }

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(self._scheduler.scale,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=release.image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs))

        try:
            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
Example #12
    def create(self, *args, **kwargs):  # noqa
        # create required minimum service in k8s for the application
        namespace = self._namespace()
        svc_name = self._svc_name()
        self.log('creating Service: {}'.format(svc_name), level=logging.DEBUG)
        try:
            try:
                self._scheduler.svc.get(namespace, svc_name)
            except KubeException:
                self._scheduler.svc.create(namespace, svc_name)
        except KubeException as e:
            raise ServiceUnavailable(
                'Kubernetes service could not be created') from e
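
The nested try blocks implement get-or-create: the inner get raises when the Service is missing, create fills the gap, and a failure of either escapes to the outer handler. The same idiom reduced to a runnable sketch with stub objects (nothing here is the real scheduler API):

class KubeException(Exception):
    pass


services = set()


def get_svc(namespace, name):
    if (namespace, name) not in services:
        raise KubeException('not found')


def create_svc(namespace, name):
    services.add((namespace, name))


def ensure_service(namespace, name):
    # get-or-create: only create when the get raises
    try:
        get_svc(namespace, name)
    except KubeException:
        create_svc(namespace, name)


ensure_service('myapp', 'myapp')
assert ('myapp', 'myapp') in services
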
Example #13
    def routable(self, routable):
        """
        Turn on/off whether an application is publicly routable
        """
        service = self._fetch_service_config(self.id)
        old_service = service.copy()  # in case anything fails for rollback

        try:
            service['metadata']['labels']['router.deis.io/routable'] = str(routable).lower()
            self._scheduler.svc.update(self.id, self.id, data=service)
        except KubeException as e:
            self._scheduler.svc.update(self.id, self.id, data=old_service)
            raise ServiceUnavailable(str(e)) from e
Example #14
    def maintenance_mode(self, mode):
        """
        Turn application maintenance mode on/off
        """
        service = self._fetch_service_config(self.id)
        old_service = service.copy()  # in case anything fails for rollback

        try:
            service['metadata']['annotations'][
                'router.deis.io/maintenance'] = str(mode).lower()
            self._scheduler.svc.update(self.id, self.id, data=service)
        except KubeException as e:
            self._scheduler.svc.update(self.id, self.id, data=old_service)
            raise ServiceUnavailable(str(e)) from e
Example #15
    def create(self, *args, **kwargs):  # noqa
        # create required minimum service in k8s for the application
        namespace = self._namespace()
        svc_name = self._svc_name()
        self.log('creating Service: {}'.format(svc_name), level=logging.DEBUG)
        try:
            try:
                self._scheduler.svc.get(namespace, svc_name)
            except KubeException:
                self._scheduler.svc.create(namespace, svc_name)
        except KubeException as e:
            raise ServiceUnavailable('Kubernetes service could not be created') from e
        # config service
        annotations = self._gather_settings()
        routable = annotations.pop('routable')
        self._update_service(namespace, self.procfile_type, routable, annotations)
Example #16
    def whitelist(self, whitelist):
        """
        Add or delete addresses in the application whitelist
        """
        service = self._fetch_service_config(self.id)

        try:
            if whitelist:
                addresses = ",".join(address for address in whitelist)
                service['metadata']['annotations']['router.deis.io/whitelist'] = addresses
            elif 'router.deis.io/whitelist' in service['metadata']['annotations']:
                service['metadata']['annotations'].pop('router.deis.io/whitelist', None)
            else:
                return
            self._scheduler.svc.update(self.id, self.id, data=service)
        except KubeException as e:
            raise ServiceUnavailable(str(e)) from e
Example #17
    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        self.log("deleting environment")
        try:
            self._scheduler.delete_namespace(self.id)

            # wait up to 30 seconds for termination, polling once per second
            for _ in range(30):
                try:
                    self._scheduler.get_namespace(self.id)
                except KubeException:
                    break
                time.sleep(1)  # pause between polls so the wait really spans seconds
        except KubeException as e:
            raise ServiceUnavailable(
                'Could not delete Kubernetes Namespace {}'.format(
                    self.id)) from e  # noqa

        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)
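
The deletion wait above is a bounded poll. Factored into a reusable helper it looks like this (a sketch; the interval argument is what makes the attempt count translate into seconds):

import time


def wait_until_gone(check, attempts=30, interval=1.0):
    """Poll until check() raises (resource gone) or attempts run out."""
    for _ in range(attempts):
        try:
            check()
        except Exception:
            return True  # resource no longer found
        time.sleep(interval)
    return False
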
Example #18
    def detach(self, *args, **kwargs):
        # remove the certificate from the domain
        domain = get_object_or_404(Domain, domain=kwargs['domain'])
        domain.certificate = None
        domain.save()

        name = '%s-certificate' % self.name
        namespace = domain.app.id

        # only delete if it exists and if no other domains depend on secret
        if len(self.domains) == 0:
            try:
                # We raise an exception when a secret doesn't exist
                self._scheduler.secret.get(namespace, name)
                self._scheduler.secret.delete(namespace, name)
            except KubeException as e:
                raise ServiceUnavailable(
                    "Could not delete certificate secret {} for application {}"
                    .format(name, namespace)) from e  # noqa
Example #19
    def attach_in_kubernetes(self, domain):
        """Creates the certificate as a kubernetes secret"""
        # the get below raises when the secret doesn't exist; create it in that case
        try:
            name = '%s-certificate' % self.name
            namespace = domain.app.id
            data = {'tls.crt': self.certificate, 'tls.key': self.key}

            secret = self._scheduler.secret.get(namespace, name).json()['data']
        except KubeException:
            self._scheduler.secret.create(namespace, name, data)
        else:
            # update cert secret to the TLS Ingress format if required
            if secret != data:
                try:
                    self._scheduler.secret.update(namespace, name, data)
                except KubeException as e:
                    msg = 'There was a problem updating the certificate secret ' \
                          '{} for {}'.format(name, namespace)
                    raise ServiceUnavailable(msg) from e
Example #20
    def maintenance_mode(self, mode):
        """
        Turn service maintenance mode on/off
        """
        namespace = self._namespace()
        svc_name = self._svc_name()

        try:
            service = self._fetch_service_config(namespace, svc_name)
        except (ServiceUnavailable, KubeException):
            # ignore non-existing services
            return

        old_service = service.copy()  # in case anything fails for rollback

        try:
            service['metadata']['annotations']['router.deis.io/maintenance'] = str(mode).lower()
            self._scheduler.svc.update(namespace, svc_name, data=service)
        except KubeException as e:
            self._scheduler.svc.update(namespace, svc_name, data=old_service)
            raise ServiceUnavailable(str(e)) from e
Example #21
    def attach_in_kubernetes(self, domain):
        """Creates the certificate as a kubernetes secret"""
        # the get below raises when the secret doesn't exist; create it in that case
        try:
            name = '%s-cert' % self.name
            namespace = domain.app.id
            data = {
                'tls.crt': self.certificate,
                'tls.key': self.key
            }

            secret = self._scheduler.secret.get(namespace, name).json()['data']
        except KubeException:
            self._scheduler.secret.create(namespace, name, data)
        else:
            # update cert secret to the TLS Ingress format if required
            if secret != data:
                try:
                    self._scheduler.secret.update(namespace, name, data)
                except KubeException as e:
                    msg = 'There was a problem updating the certificate secret ' \
                          '{} for {}'.format(name, namespace)
                    raise ServiceUnavailable(msg) from e

        # get config for the service
        config = self._load_service_config(namespace, 'router')

        # See if certificates are available
        if 'certificates' not in config:
            config['certificates'] = ''

        # convert from string to list to work with and filter out empty strings
        cert = '{}:{}'.format(domain.domain, self.name)
        certificates = [_f for _f in config['certificates'].split(',') if _f]
        if cert not in certificates:
            certificates.append(cert)
        config['certificates'] = ','.join(certificates)

        self._save_service_config(namespace, 'router', config)
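
attach and detach keep the router's certificate list as a single comma-separated string and round-trip it through a Python list to add or remove a domain:cert entry. The round trip in isolation:

config = {'certificates': 'example.com:mycert,other.org:mycert'}

# string -> list, dropping empty strings left by stray commas
certificates = [c for c in config['certificates'].split(',') if c]

cert = '{}:{}'.format('other.org', 'mycert')
if cert in certificates:
    certificates.remove(cert)

config['certificates'] = ','.join(certificates)
assert config['certificates'] == 'example.com:mycert'
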
Example #22
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        scale_type = 'run'
        release = self.release_set.latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        name = self._get_job_id(scale_type) + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        kwargs = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': release.config.values,
            'registry': release.config.registry,
            'version': "v{}".format(release.version),
            'build_type': release.build.type,
            'deploy_timeout': deploy_timeout
        }

        try:
            exit_code, output = self._scheduler.run(
                self.id, name, release.image, self._get_entrypoint(scale_type),
                [command], **kwargs)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #23
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        envs = release.config.values
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in ['web', 'cmd'] else False
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port(routable)
            if port:
                envs['PORT'] = port

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                'version': "v{}".format(release.version),
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable
            }

            command = self._get_command(scale_type)
            try:
                self._scheduler.scale(namespace=self.id,
                                      name=self._get_job_id(scale_type),
                                      image=release.image,
                                      command=command,
                                      **kwargs)
            except Exception as e:
                err = '{} (scale): {}'.format(self._get_job_id(scale_type), e)
                self.log(err, logging.ERROR)
                raise ServiceUnavailable(err) from e
Example #24
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        app_settings = self.appsettings_set.latest()
        version = "v{}".format(release.version)
        image = release.image
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)
        registry = release.config.registry

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        tasks = []
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in [
                'web', 'cmd'
            ] and app_settings.routable else False
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get(
                    'web/cmd', {})

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': registry,
                'version': version,
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'pod_termination_grace_period_seconds':
                pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(self._scheduler.scale,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs))

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
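
Each process type's scale call is captured as a functools.partial and the whole batch is handed to async_run. A minimal stand-in for that helper (the project's real helper runs the tasks concurrently; this sketch runs them in order):

import functools


def async_run(tasks):
    # sequential stand-in; the real helper executes these concurrently
    for task in tasks:
        task()


def scale(namespace, name, replicas):
    print('scaling {}/{} to {}'.format(namespace, name, replicas))


tasks = [
    functools.partial(scale, namespace='myapp', name='myapp-web', replicas=3),
    functools.partial(scale, namespace='myapp', name='myapp-worker', replicas=1),
]
async_run(tasks)
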
Example #25
    def deploy(self, release, force_deploy=False):
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get(
            'KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT',
            settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        envs = release.config.values
        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in ['web', 'cmd'] else False
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                # only used if there is no previous RC
                'replicas': replicas,
                'version': "v{}".format(release.version),
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_history_limit': deployment_history,
                'release_summary': release.summary
            }

        # Sort deploys so routable comes first (booleans sort False first,
        # hence reverse=True)
        deploys = OrderedDict(
            sorted(deploys.items(),
                   key=lambda d: d[1].get('routable'), reverse=True))

        # Check if any proc type has a Deployment in progress
        for scale_type, kwargs in deploys.items():
            # Is there an existing deployment in progress?
            name = self._get_job_id(scale_type)
            if not force_deploy and release.deployment_in_progress(
                    self.id, name):
                raise AlreadyExists(
                    'Deployment for {} is already in progress'.format(name))

        try:
            # gather all proc types to be deployed
            tasks = [
                functools.partial(self._scheduler.deploy,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=release.image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs)
                for scale_type, kwargs in deploys.items()
            ]

            async_run(tasks)
        except Exception as e:
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        # Wait until application is available in the router
        # Only run when there is no previous build / release
        old = release.previous()
        if old is None or old.build is None:
            self.verify_application_health(**kwargs)

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #26
    def create(self, *args, **kwargs):  # noqa
        """
        Create a application with an initial config, settings, release, domain
        and k8s resource if needed
        """
        try:
            cfg = self.config_set.latest()
        except Config.DoesNotExist:
            cfg = Config.objects.create(owner=self.owner, app=self)

        # Only create if no release can be found
        try:
            rel = self.release_set.latest()
        except Release.DoesNotExist:
            rel = Release.objects.create(version=1,
                                         owner=self.owner,
                                         app=self,
                                         config=cfg,
                                         build=None)

        # create required minimum resources in k8s for the application
        namespace = self.id
        service = self.id
        try:
            self.log('creating Namespace {} and services'.format(namespace),
                     level=logging.DEBUG)
            # Create essential resources
            try:
                self._scheduler.ns.get(namespace)
            except KubeException:
                self._scheduler.ns.create(namespace)

            try:
                self._scheduler.svc.get(namespace, service)
            except KubeException:
                self._scheduler.svc.create(namespace, service)
        except KubeException as e:
            # Blow it all away only if something horrible happens
            try:
                self._scheduler.ns.delete(namespace)
            except KubeException as e:
                # Just feed into the item below
                raise ServiceUnavailable(
                    'Could not delete the Namespace in Kubernetes') from e

            raise ServiceUnavailable(
                'Kubernetes resources could not be created') from e

        try:
            self.appsettings_set.latest()
        except AppSettings.DoesNotExist:
            AppSettings.objects.create(owner=self.owner, app=self)
        try:
            self.tls_set.latest()
        except TLS.DoesNotExist:
            TLS.objects.create(owner=self.owner, app=self)
        # Attach the platform-specific application subdomain to the k8s service
        # Only attach it on the first release in case a customer has removed the app domain
        if rel.version == 1 and not Domain.objects.filter(
                domain=self.id).exists():
            Domain(owner=self.owner, app=self, domain=self.id).save()
Example #27
    def deploy(self,
               release,
               force_deploy=False,
               rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        app_settings = self.appsettings_set.latest()
        addresses = ",".join(address for address in app_settings.whitelist)
        service_annotations = {
            'maintenance': app_settings.maintenance,
            'whitelist': addresses
        }

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        image = release.image
        registry = release.config.registry
        version = "v{}".format(release.version)
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)
        tags = release.config.tags

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get(
            'KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT',
            settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        # deploy application to k8s. Also handles initial scaling
        deploys = {}

        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in [
                'web', 'cmd'
            ] and app_settings.routable else False
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get(
                    'web/cmd', {})

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': tags,
                'envs': envs,
                'registry': registry,
                'replicas': replicas,
                'version': version,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_revision_history_limit': deployment_history,
                'release_summary': release.summary,
                'pod_termination_grace_period_seconds':
                pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

        # Sort deploys so routable comes first (booleans sort False first,
        # hence reverse=True)
        deploys = OrderedDict(
            sorted(deploys.items(),
                   key=lambda d: d[1].get('routable'), reverse=True))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            # gather all proc types to be deployed
            tasks = [
                functools.partial(self._scheduler.deploy,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs)
                for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                if rollback_on_failure:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format(
                        version,
                        "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(),
                                force_deploy=True,
                                rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port.
        # Done after the fact to let the initial deploy settle before routing
        # traffic to the application
        if deploys and app_type:
            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable,
                                             service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
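
The rollback path re-enters deploy with rollback_on_failure=False, so a failing rollback re-raises instead of recursing forever. The guard in miniature (hypothetical, self-contained):

def deploy(version, fail=True, rollback_on_failure=True):
    if not fail:
        print('deployed v{}'.format(version))
        return
    if rollback_on_failure:
        # one level of rollback only; the rollback itself can never roll back
        deploy(version - 1, fail=False, rollback_on_failure=False)
    raise RuntimeError('deploy of v{} failed'.format(version))


try:
    deploy(5)
except RuntimeError as e:
    print(e)  # 'deploy of v5 failed', printed after reverting to v4
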
Example #28
    def deploy(self, release):
        """Deploy a new release to this application"""
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        envs = release.config.values
        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in ['web', 'cmd'] else False
            # fetch application port and inject into ENV vars as needed
            port = release.get_port(routable)
            if port:
                envs['PORT'] = port

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                # only used if there is no previous RC
                'replicas': replicas,
                'version': "v{}".format(release.version),
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'batches': batches
            }

        # Sort deploys so routable comes first (booleans sort False first,
        # hence reverse=True)
        deploys = OrderedDict(
            sorted(deploys.items(),
                   key=lambda d: d[1].get('routable'), reverse=True))

        for scale_type, kwargs in deploys.items():
            try:
                self._scheduler.deploy(namespace=self.id,
                                       name=self._get_job_id(scale_type),
                                       image=release.image,
                                       command=self._get_command(scale_type),
                                       **kwargs)

                # Wait until application is available in the router
                # Only run when there is no previous build / release
                old = release.previous()
                if old is None or old.build is None:
                    self.verify_application_health(**kwargs)

            except Exception as e:
                err = '{} (app::deploy): {}'.format(
                    self._get_job_id(scale_type), e)
                self.log(err, logging.ERROR)
                raise ServiceUnavailable(err) from e

        # cleanup old releases from kubernetes
        release.cleanup_old()
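
Booleans sort False before True, so keying the sort on d[1].get('routable') alone would put routable process types last; reverse=True is what puts them first. A quick check:

deploys = {
    'worker': {'routable': False},
    'web': {'routable': True},
}
ordered = sorted(deploys.items(),
                 key=lambda d: d[1].get('routable'), reverse=True)
assert [name for name, _ in ordered] == ['web', 'worker']
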
Example #29
    def create(self, *args, **kwargs):  # noqa
        """
        Create a application with an initial config, settings, release, domain
        and k8s resource if needed
        """
        try:
            cfg = self.config_set.latest()
        except Config.DoesNotExist:
            cfg = Config.objects.create(owner=self.owner, app=self)

        # Only create if no release can be found
        try:
            rel = self.release_set.latest()
        except Release.DoesNotExist:
            rel = Release.objects.create(
                version=1, owner=self.owner, app=self,
                config=cfg, build=None
            )

        # create required minimum resources in k8s for the application
        namespace = self.id
        ingress = self.id
        service = self.id
        quota_name = '{}-quota'.format(self.id)
        try:
            self.log('creating Namespace {} and services'.format(namespace), level=logging.DEBUG)
            # Create essential resources
            try:
                self._scheduler.ns.get(namespace)
            except KubeException:
                try:
                    self._scheduler.ns.create(namespace, settings.ENABLE_ISTIO_INJECTION)
                except KubeException as e:
                    raise ServiceUnavailable('Could not create the Namespace in Kubernetes') from e

            if settings.KUBERNETES_NAMESPACE_DEFAULT_QUOTA_SPEC != '':
                quota_spec = json.loads(settings.KUBERNETES_NAMESPACE_DEFAULT_QUOTA_SPEC)
                self.log('creating Quota {} for namespace {}'.format(quota_name, namespace),
                         level=logging.DEBUG)
                try:
                    self._scheduler.quota.get(namespace, quota_name)
                except KubeException:
                    self._scheduler.quota.create(namespace, quota_name, data=quota_spec)

            try:
                self._scheduler.svc.get(namespace, service)
            except KubeException:
                self._scheduler.svc.create(namespace, service)
        except KubeException as e:
            # Blow it all away only if something horrible happens
            try:
                self._scheduler.ns.delete(namespace)
            except KubeException as e:
                # Just feed into the item below
                raise ServiceUnavailable('Could not delete the Namespace in Kubernetes') from e

            raise ServiceUnavailable('Kubernetes resources could not be created') from e

        try:
            # In order to create an ingress, we must first have a namespace.
            if settings.EXPERIMENTAL_NATIVE_INGRESS:
                if ingress == "":
                    raise ServiceUnavailable('Empty hostname')
                try:
                    self._scheduler.ingress.get(ingress)
                except KubeException:
                    self.log("creating Ingress {}".format(namespace), level=logging.INFO)
                    self._scheduler.ingress.create(ingress,
                                                   namespace,
                                                   settings.EXPERIMENTAL_NATIVE_INGRESS_HOSTNAME,
                                                   settings.EXPERIMENTAL_NATIVE_INGRESS_CLASS)
        except KubeException as e:
            raise ServiceUnavailable('Could not create Ingress in Kubernetes') from e
        try:
            self.appsettings_set.latest()
        except AppSettings.DoesNotExist:
            AppSettings.objects.create(owner=self.owner, app=self)
        try:
            self.tls_set.latest()
        except TLS.DoesNotExist:
            TLS.objects.create(owner=self.owner, app=self)
        # Attach the platform-specific application subdomain to the k8s service
        # Only attach it on the first release in case a customer has removed the app domain
        if rel.version == 1 and not Domain.objects.filter(domain=self.id).exists():
            Domain(owner=self.owner, app=self, domain=self.id).save()
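
KUBERNETES_NAMESPACE_DEFAULT_QUOTA_SPEC arrives as a JSON string and is parsed into a ResourceQuota spec before being handed to the scheduler. A hedged example of what such a value might contain (the keys follow the Kubernetes ResourceQuota schema; the concrete limits are illustrative):

import json

raw = '{"spec": {"hard": {"pods": "10", "limits.cpu": "4", "limits.memory": "8Gi"}}}'
quota_spec = json.loads(raw)
assert quota_spec['spec']['hard']['pods'] == '10'
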
Example #30
    def deploy(self, release, force_deploy=False, rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        # set processes structure to default if app is new.
        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.procfile_structure = self._default_structure(release)
            self.save()
        # reset canonical process types if build type has changed.
        else:
            # find the previous release's build type
            prev_release = release.previous()
            if prev_release and prev_release.build:
                if prev_release.build.type != release.build.type:
                    structure = self.structure.copy()
                    # zero out canonical pod counts
                    for proctype in ['cmd', 'web']:
                        if proctype in structure:
                            structure[proctype] = 0
                    # update with the default process type.
                    structure.update(self._default_structure(release))
                    self.structure = structure
                    # if procfile structure exists then we use it
                    if release.build.procfile and \
                       release.build.sha and not \
                       release.build.dockerfile:
                        self.procfile_structure = release.build.procfile
                    self.save()

        # always set the procfile structure for any new release
        if release.build.procfile:
            self.procfile_structure = release.build.procfile
            self.save()

        # deploy application to k8s. Also handles initial scaling
        app_settings = self.appsettings_set.latest()
        deploys = {}
        for scale_type, replicas in self.structure.items():
            deploys[scale_type] = self._gather_app_settings(release, app_settings, scale_type, replicas)  # noqa

        # Sort deploys so routable comes first (booleans sort False first, hence reverse=True)
        deploys = OrderedDict(
            sorted(deploys.items(), key=lambda d: d[1].get('routable'), reverse=True))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        # use slugrunner image for app if buildpack app otherwise use normal image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)
            # only buildpack apps need access to object storage
            if release.build.type == 'buildpack':
                self.create_object_store_secret()

            # gather all proc types to be deployed
            tasks = [
                functools.partial(
                    self._scheduler.deploy,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                ) for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                # Don't rollback if the previous release doesn't have a build which means
                # this is the first build and all the previous releases are just config changes.
                if rollback_on_failure and release.previous().build is not None:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format('v{}'.format(release.version), "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(), force_deploy=True, rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port.
        # Done after the fact to let the initial deploy settle before routing
        # traffic to the application
        if deploys and app_type:
            app_settings = self.appsettings_set.latest()
            if app_settings.whitelist:
                addresses = ",".join(address for address in app_settings.whitelist)
            else:
                addresses = None
            service_annotations = {
                'maintenance': app_settings.maintenance,
                'whitelist': addresses
            }

            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable, service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()