Example #1
    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        # use create to make sure minimum resources are created
        self.create()

        release = self.release_set.filter(failed=False).latest()
        if release.build is None:
            raise DeisException('No build associated with this release')

        # Validate structure
        try:
            for target, count in structure.copy().items():
                structure[target] = int(count)
            validate_app_structure(structure)
        except (TypeError, ValueError, ValidationError) as e:
            raise DeisException('Invalid scaling format: {}'.format(e))

        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in structure:
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source

            if container_type not in available_process_types:
                raise NotFound(
                    'Container type {} does not exist in application'.format(
                        container_type))

        # merge current structure and the new items together
        old_structure = self.structure
        new_structure = old_structure.copy()
        new_structure.update(structure)

        if new_structure != self.structure:
            # save new structure to the database
            self.structure = new_structure
            self.procfile_structure = release.build.procfile
            self.save()

            try:
                self._scale_pods(structure)
            except ServiceUnavailable:
                # scaling failed, go back to old scaling numbers
                self._scale_pods(old_structure)
                raise

            msg = '{} scaled pods '.format(user.username) + ' '.join(
                "{}={}".format(k, v) for k, v in list(structure.items()))
            self.log(msg)

            return True

        return False
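
A minimal usage sketch (hypothetical; `app` as an App model instance and `user` as a Django user are assumptions, not shown in the example above):

    # hypothetical call: request three web pods and one worker pod
    changed = app.scale(user, {'web': 3, 'worker': 1})
    # returns True if the structure changed, False if it was already at that scale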
Example #2
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        release = self.release_set.latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        # TODO: add support for interactive shell
        entrypoint, command = self._get_command_run(command)

        name = self._get_job_id('run') + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        kwargs = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': release.config.values,
            'registry': release.config.registry,
            'version': "v{}".format(release.version),
            'build_type': release.build.type,
        }

        try:
            exit_code, output = self._scheduler.run(self.id, name,
                                                    release.image, entrypoint,
                                                    command, **kwargs)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #3
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        release = self.release_set.filter(failed=False).latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        app_settings = self.appsettings_set.latest()
        # use slugrunner image for app if buildpack app otherwise use normal image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        data = self._gather_app_settings(release,
                                         app_settings,
                                         process_type='run',
                                         replicas=1)

        # create application config and build the pod manifest
        self.set_application_config(release)

        scale_type = 'run'
        name = self._get_job_id(scale_type) + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        try:
            exit_code, output = self._scheduler.run(
                self.id, name, image, self._get_entrypoint(scale_type),
                [command], **data)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #4
 def post_save(self, config):
     release = config.app.release_set.filter(failed=False).latest()
     latest_version = config.app.release_set.latest().version
     try:
         self.release = release.new(self.request.user,
                                    config=config,
                                    build=release.build)
         # It's possible to set config values before a build
         if self.release.build is not None:
             config.app.deploy(self.release)
     except Exception as e:
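          # release.new() may have persisted a release before raising;
          # recover it below so it can be marked as failed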
         if (not hasattr(self, 'release')
                 and config.app.release_set.latest().version
                 == latest_version + 1):
             self.release = config.app.release_set.latest()
         if hasattr(self, 'release'):
             self.release.failed = True
             self.release.summary = "{} deployed a config that failed".format(
                 self.request.user)  # noqa
              # Get the exception that has occurred
             self.release.exception = "error: {}".format(str(e))
             self.release.save()
         else:
             config.delete()
         if isinstance(e, AlreadyExists):
             raise
         raise DeisException(str(e)) from e
Example #5
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        scale_type = 'run'
        release = self.release_set.latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        image = release.image
        registry = release.config.registry
        version = "v{}".format(release.version)
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        name = self._get_job_id(scale_type) + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        kwargs = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': envs,
            'registry': registry,
            'version': version,
            'build_type': release.build.type,
            'deploy_timeout': deploy_timeout,
            'pod_termination_grace_period_seconds':
            pod_termination_grace_period_seconds,
            'image_pull_secret_name': image_pull_secret_name,
        }

        try:
            exit_code, output = self._scheduler.run(
                self.id, name, image, self._get_entrypoint(scale_type),
                [command], **kwargs)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #6
    def attach(self, request, *args, **kwargs):
        try:
            if kwargs['domain'] is None and not request.data.get('domain'):
                raise DeisException("domain is a required field")
            elif request.data.get('domain'):
                kwargs['domain'] = request.data['domain']

            self.get_object().attach(*args, **kwargs)
        except Http404:
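            # re-raise so DRF's exception handling turns this into a 404 response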
            raise

        return Response(status=status.HTTP_201_CREATED)
Example #7
 def post_save(self, config):
     release = config.app.release_set.latest()
     self.release = release.new(self.request.user,
                                config=config,
                                build=release.build)
     try:
         # It's possible to set config values before a build
         if self.release.build is not None:
             config.app.deploy(self.release)
     except Exception as e:
         self.release.delete()
         raise DeisException(str(e)) from e
Example #8
    def passwd(self, request, **kwargs):
        if not request.data.get('new_password'):
            raise DeisException("new_password is a required field")

        caller_obj = self.get_object()
        target_obj = self.get_object()
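        # non-superusers may only change their own password; superusers may target any user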
        if request.data.get('username'):
            # if you "accidentally" target yourself, that should be fine
            if caller_obj.username == request.data['username'] or caller_obj.is_superuser:
                target_obj = get_object_or_404(User, username=request.data['username'])
            else:
                raise PermissionDenied()

        if not caller_obj.is_superuser:
            if not request.data.get('password'):
                raise DeisException("password is a required field")
            if not target_obj.check_password(request.data['password']):
                raise AuthenticationFailed('Current password does not match')

        target_obj.set_password(request.data['new_password'])
        target_obj.save()
        return Response({'status': 'password set'})
Example #9
    def create(self, user, *args, **kwargs):
        latest_release = self.app.release_set.latest()
        new_release = latest_release.new(
            user,
            build=self,
            config=latest_release.config,
            source_version=self.version
        )

        try:
            self.app.deploy(new_release)
            return new_release
        except Exception as e:
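            # roll back: remove the new release (if it was created) and this build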
            if 'new_release' in locals():
                new_release.delete()
            self.delete()

            raise DeisException(str(e)) from e
Example #10
    def _build_env_vars(self, release):
        """
        Build a dict of env vars, setting default vars based on app type
        and then combining with the user set ones
        """
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        # mix in default environment information deis may require
        default_env = {
            'DEIS_APP': self.id,
            'WORKFLOW_RELEASE': 'v{}'.format(release.version),
            'WORKFLOW_RELEASE_SUMMARY': release.summary,
            'WORKFLOW_RELEASE_CREATED_AT': str(
                release.created.strftime(settings.DEIS_DATETIME_FORMAT))
        }

        # Check if it is a slug builder image.
        if release.build.type == 'buildpack':
            # overwrite image so slugrunner image is used in the container
            default_env['SLUG_URL'] = release.image
            default_env['BUILDER_STORAGE'] = settings.APP_STORAGE
            default_env['DEIS_MINIO_SERVICE_HOST'] = settings.MINIO_HOST
            default_env['DEIS_MINIO_SERVICE_PORT'] = settings.MINIO_PORT

        if release.build.sha:
            default_env['SOURCE_VERSION'] = release.build.sha

        # fetch application port and inject into ENV vars as needed
        port = release.get_port()
        if port:
            default_env['PORT'] = port

        # merge envs on top of default to make envs win
        default_env.update(release.config.values)
        return default_env
Example #11
    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        def pod_name(size=5, chars=string.ascii_lowercase + string.digits):
            return ''.join(random.choice(chars) for _ in range(size))

        scale_type = 'run'
        release = self.release_set.latest()
        if release.build is None:
            raise DeisException(
                'No build associated with this release to run this command')

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        name = self._get_job_id(scale_type) + '-' + pod_name()
        self.log("{} on {} runs '{}'".format(user.username, name, command))

        kwargs = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': release.config.values,
            'registry': release.config.registry,
            'version': "v{}".format(release.version),
            'build_type': release.build.type,
            'deploy_timeout': deploy_timeout
        }

        try:
            exit_code, output = self._scheduler.run(
                self.id, name, release.image, self._get_entrypoint(scale_type),
                [command], **kwargs)

            return exit_code, output
        except Exception as e:
            err = '{} (run): {}'.format(name, e)
            raise ServiceUnavailable(err) from e
Example #12
    def deploy(self, release, force_deploy=False):
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get(
            'KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT',
            settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        envs = release.config.values
        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in ['web', 'cmd'] else False
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                # only used if there is no previous RC
                'replicas': replicas,
                'version': "v{}".format(release.version),
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_history_limit': deployment_history,
                'release_summary': release.summary
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(
            sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        for scale_type, kwargs in deploys.items():
            # Is there an existing deployment in progress?
            name = self._get_job_id(scale_type)
            if not force_deploy and release.deployment_in_progress(
                    self.id, name):
                raise AlreadyExists(
                    'Deployment for {} is already in progress'.format(name))

        try:
            # gather all proc types to be deployed
            tasks = [
                functools.partial(self._scheduler.deploy,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=release.image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs)
                for scale_type, kwargs in deploys.items()
            ]

            async_run(tasks)
        except Exception as e:
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        # Wait until application is available in the router
        # Only run when there is no previous build / release
        old = release.previous()
        if old is None or old.build is None:
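            # NOTE: kwargs still holds the last proc type's settings from the deployment-in-progress loop above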
            self.verify_application_health(**kwargs)

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #13
    def deploy(self,
               release,
               force_deploy=False,
               rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        app_settings = self.appsettings_set.latest()
        addresses = ",".join(address for address in app_settings.whitelist)
        service_annotations = {
            'maintenance': app_settings.maintenance,
            'whitelist': addresses
        }

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        image = release.image
        registry = release.config.registry
        version = "v{}".format(release.version)
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)
        tags = release.config.tags

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get(
            'KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT',
            settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        # deploy application to k8s. Also handles initial scaling
        deploys = {}

        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = bool(scale_type in ['web', 'cmd'] and app_settings.routable)
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get(
                    'web/cmd', {})

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': tags,
                'envs': envs,
                'registry': registry,
                'replicas': replicas,
                'version': version,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_revision_history_limit': deployment_history,
                'release_summary': release.summary,
                'pod_termination_grace_period_seconds':
                pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(
            sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            # gather all proc types to be deployed
            tasks = [
                functools.partial(self._scheduler.deploy,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs)
                for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                if rollback_on_failure:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format(
                        version,
                        "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(),
                                force_deploy=True,
                                rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port. Done after the fact to
        # let initial deploy settle before routing traffic to the application
        if deploys and app_type:
            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable,
                                             service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #14
    def restart(self, **kwargs):  # noqa
        """
        Restart found pods by deleting them (RC / Deployment will recreate).
        Wait until they are all drained away and RC / Deployment has gotten to a good state
        """
        try:
            # Resolve single pod name if short form (cmd-1269180282-1nyfz) is passed
            if 'name' in kwargs and kwargs['name'].count('-') == 2:
                kwargs['name'] = '{}-{}'.format(kwargs['id'], kwargs['name'])

            # Iterate over RCs / RSs to get total desired count if not a single item
            desired = 1
            if 'name' not in kwargs:
                desired = 0
                labels = self._scheduler_filter(**kwargs)
                # fetch RS (which represent Deployments)
                controllers = self._scheduler.get_replicasets(kwargs['id'],
                                                              labels=labels)

                for controller in controllers.json()['items']:
                    desired += controller['spec']['replicas']
        except KubeException:
            # Nothing was found
            return []

        try:
            tasks = [
                functools.partial(self._scheduler.delete_pod, self.id,
                                  pod['name'])
                for pod in self.list_pods(**kwargs)
            ]

            async_run(tasks)
        except Exception as e:
            err = "warning, some pods failed to stop:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Wait for pods to start
        try:
            timeout = 300  # 5 minutes
            elapsed = 0
            while True:
                # timed out
                if elapsed >= timeout:
                    raise DeisException(
                        'timeout - 5 minutes have passed and pods are not up')

                # restarting a single pod behaves differently, fetch the *newest* pod
                # and hope it is the right one. Comes back sorted
                if 'name' in kwargs:
                    del kwargs['name']
                    pods = self.list_pods(**kwargs)
                    # Add in the latest name
                    if len(pods) == 0:
                        # if pod is not even scheduled wait for it and pass dummy kwargs
                        # to indicate restart of a single pod
                        kwargs['name'] = "dummy"
                        continue
                    kwargs['name'] = pods[0]['name']
                    pods = pods[0]

                actual = 0
                for pod in self.list_pods(**kwargs):
                    if pod['state'] == 'up':
                        actual += 1

                if desired == actual:
                    break

                elapsed += 5
                time.sleep(5)
        except Exception as e:
            err = "warning, some pods failed to start:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Return the new pods
        pods = self.list_pods(**kwargs)
        return pods
Example #15
    def restart(self, **kwargs):  # noqa
        """
        Restart found pods by deleting them (RC will recreate).
        Wait until they are all drained away and RC has gotten to a good state
        """
        try:
            # Resolve single pod name if short form (worker-asdfg) is passed
            if 'name' in kwargs and kwargs['name'].count('-') == 1:
                if 'release' not in kwargs or kwargs['release'] is None:
                    release = self.release_set.latest()
                else:
                    release = self.release_set.get(version=kwargs['release'])

                version = "v{}".format(release.version)
                kwargs['name'] = '{}-{}-{}'.format(kwargs['id'], version,
                                                   kwargs['name'])

            # Iterate over RCs to get total desired count if not a single item
            desired = 1
            if 'name' not in kwargs:
                desired = 0
                labels = self._scheduler_filter(**kwargs)
                controllers = self._scheduler.get_rcs(
                    kwargs['id'], labels=labels).json()['items']
                for controller in controllers:
                    desired += controller['spec']['replicas']
        except KubeException:
            # Nothing was found
            return []

        try:
            for pod in self.list_pods(**kwargs):
                # This function verifies the delete. Gives pod 30 seconds
                self._scheduler.delete_pod(self.id, pod['name'])
        except Exception as e:
            err = "warning, some pods failed to stop:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Wait for pods to start
        try:
            timeout = 300  # 5 minutes
            elapsed = 0
            while True:
                # timed out
                if elapsed >= timeout:
                    raise DeisException(
                        'timeout - 5 minutes have passed and pods are not up')

                # restarting a single pod behaves differently, fetch the *newest* pod
                # and hope it is the right one. Comes back sorted
                if 'name' in kwargs:
                    del kwargs['name']
                    pods = self.list_pods(**kwargs)
                    # Add in the latest name
                    kwargs['name'] = pods[0]['name']
                    pods = pods[0]

                actual = 0
                for pod in self.list_pods(**kwargs):
                    if pod['state'] == 'up':
                        actual += 1

                if desired == actual:
                    break

                elapsed += 5
                time.sleep(5)
        except Exception as e:
            err = "warning, some pods failed to start:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Return the new pods
        pods = self.list_pods(**kwargs)
        return pods
Example #16
    def deploy(self, release):
        """Deploy a new release to this application"""
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        envs = release.config.values
        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = True if scale_type in ['web', 'cmd'] else False
            # fetch application port and inject into ENV vars as needed
            port = release.get_port(routable)
            if port:
                envs['PORT'] = port

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                # only used if there is no previous RC
                'replicas': replicas,
                'version': "v{}".format(release.version),
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'batches': batches
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(
            sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        for scale_type, kwargs in deploys.items():
            try:
                self._scheduler.deploy(namespace=self.id,
                                       name=self._get_job_id(scale_type),
                                       image=release.image,
                                       command=self._get_command(scale_type),
                                       **kwargs)

                # Wait until application is available in the router
                # Only run when there is no previous build / release
                old = release.previous()
                if old is None or old.build is None:
                    self.verify_application_health(**kwargs)

            except Exception as e:
                err = '{} (app::deploy): {}'.format(
                    self._get_job_id(scale_type), e)
                self.log(err, logging.ERROR)
                raise ServiceUnavailable(err) from e

        # cleanup old releases from kubernetes
        release.cleanup_old()
Example #17
    def deploy(self, release, force_deploy=False, rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        # set processes structure to default if app is new.
        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.procfile_structure = self._default_structure(release)
            self.save()
        # reset canonical process types if build type has changed.
        else:
            # find the previous release's build type
            prev_release = release.previous()
            if prev_release and prev_release.build:
                if prev_release.build.type != release.build.type:
                    structure = self.structure.copy()
                    # zero out canonical pod counts
                    for proctype in ['cmd', 'web']:
                        if proctype in structure:
                            structure[proctype] = 0
                    # update with the default process type.
                    structure.update(self._default_structure(release))
                    self.structure = structure
                    # if procfile structure exists then we use it
                    if release.build.procfile and \
                       release.build.sha and not \
                       release.build.dockerfile:
                        self.procfile_structure = release.build.procfile
                    self.save()

        # always set the procfile structure for any new release
        if release.build.procfile:
            self.procfile_structure = release.build.procfile
            self.save()

        # deploy application to k8s. Also handles initial scaling
        app_settings = self.appsettings_set.latest()
        deploys = {}
        for scale_type, replicas in self.structure.items():
            deploys[scale_type] = self._gather_app_settings(release, app_settings, scale_type, replicas)  # noqa

        # Sort deploys so routable comes first
        deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        # use slugrunner image for app if buildpack app otherwise use normal image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)
            # only buildpack apps need access to object storage
            if release.build.type == 'buildpack':
                self.create_object_store_secret()

            # gather all proc types to be deployed
            tasks = [
                functools.partial(
                    self._scheduler.deploy,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                ) for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                # Don't rollback if the previous release doesn't have a build which means
                # this is the first build and all the previous releases are just config changes.
                if rollback_on_failure and release.previous().build is not None:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format('v{}'.format(release.version), "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(), force_deploy=True, rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port. Done after the fact to
        # let initial deploy settle before routing traffic to the application
        if deploys and app_type:
            app_settings = self.appsettings_set.latest()
            if app_settings.whitelist:
                addresses = ",".join(address for address in app_settings.whitelist)
            else:
                addresses = None
            service_annotations = {
                'maintenance': app_settings.maintenance,
                'whitelist': addresses
            }

            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable, service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #18
 def run(self, request, **kwargs):
     app = self.get_object()
     if not request.data.get('command'):
         raise DeisException("command is a required field")
     rc, output = app.run(self.request.user, request.data['command'])
     return Response({'exit_code': rc, 'output': str(output)})
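
For context, this view backs the controller's one-off command endpoint; a rough client-side sketch follows (the controller URL, app name, and token are illustrative assumptions, not part of the example above):

    import requests

    # hypothetical request against a Deis Workflow controller's run endpoint
    resp = requests.post(
        'http://deis.example.com/v2/apps/myapp/run',
        json={'command': 'ls -la'},
        headers={'Authorization': 'token <api-token>'},
    )
    print(resp.json())  # e.g. {'exit_code': 0, 'output': '...'}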