Example #1
    def _scale_pods(self, scale_types):
        release = self.release_set.filter(failed=False).latest()
        app_settings = self.appsettings_set.latest()

        # use the slugrunner image for buildpack apps, otherwise use the release image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        tasks = []
        for scale_type, replicas in scale_types.items():
            data = self._gather_app_settings(release, app_settings, scale_type, replicas)  # noqa

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(
                    self._scheduler.scale,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **data
                )
            )

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)

            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
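The example above builds a list of fully bound functools.partial callables and hands it to async_run (Example #2 below is identical). The helper's implementation is not part of these snippets; the sketch below only assumes it executes the callables concurrently and surfaces the first failure, which matches how the surrounding try/except treats it. The thread-pool implementation and fake_scale are illustrative assumptions, not the controller's actual code.

    import functools
    from concurrent.futures import ThreadPoolExecutor

    def async_run(tasks):
        """Run a list of zero-argument callables concurrently and re-raise the
        first exception. A sketch only; the real helper may use a different
        concurrency primitive."""
        with ThreadPoolExecutor(max_workers=len(tasks) or 1) as pool:
            futures = [pool.submit(task) for task in tasks]
            for future in futures:
                # result() re-raises any exception captured inside the worker
                future.result()

    def fake_scale(namespace, name, replicas):
        print('scaling {}/{} to {} replicas'.format(namespace, name, replicas))

    # each task is a fully bound call, mirroring how _scale_pods builds its list
    tasks = [
        functools.partial(fake_scale, namespace='myapp', name='myapp-web', replicas=3),
        functools.partial(fake_scale, namespace='myapp', name='myapp-worker', replicas=1),
    ]
    async_run(tasks)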
Example #2
    def _scale_pods(self, scale_types):
        release = self.release_set.filter(failed=False).latest()
        app_settings = self.appsettings_set.latest()

        # use the slugrunner image for buildpack apps, otherwise use the release image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        tasks = []
        for scale_type, replicas in scale_types.items():
            data = self._gather_app_settings(release, app_settings, scale_type, replicas)  # noqa

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(
                    self._scheduler.scale,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **data
                )
            )

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)

            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
Example #3
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        envs = release.config.values

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES', settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        tasks = []
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd']
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                'version': "v{}".format(release.version),
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
            }

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(
                    self._scheduler.scale,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=release.image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                )
            )

        try:
            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
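This variant reads per-app overrides such as DEIS_DEPLOY_BATCHES from the release's config values and falls back to the platform-wide Django setting when the key is absent (Example #4 below differs only in line wrapping). A minimal sketch of that lookup pattern, with an illustrative stand-in for the settings module and made-up default values:

    class Settings:
        # platform-wide defaults, normally provided by Django settings
        DEIS_DEPLOY_BATCHES = 10
        DEIS_DEPLOY_TIMEOUT = 120

    settings = Settings()

    # config values stored on the app's release
    config_values = {'DEIS_DEPLOY_TIMEOUT': 300}

    # the per-app value wins when present, otherwise the global default applies
    batches = config_values.get('DEIS_DEPLOY_BATCHES', settings.DEIS_DEPLOY_BATCHES)
    deploy_timeout = config_values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)

    assert batches == 10          # falls back to the global default
    assert deploy_timeout == 300  # overridden by the app config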
Example #4
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        envs = release.config.values

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        tasks = []
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd']
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                'version': "v{}".format(release.version),
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
            }

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(self._scheduler.scale,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=release.image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs))

        try:
            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
Example #5
    def deploy(self, release, force_deploy=False):
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get(
            'KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT',
            settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        envs = release.config.values
        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd']
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                # only used if there is no previous RC
                'replicas': replicas,
                'version': "v{}".format(release.version),
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_history_limit': deployment_history,
                'release_summary': release.summary
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(
            sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        for scale_type, kwargs in deploys.items():
            # Is there an existing deployment in progress?
            name = self._get_job_id(scale_type)
            if not force_deploy and release.deployment_in_progress(
                    self.id, name):
                raise AlreadyExists(
                    'Deployment for {} is already in progress'.format(name))

        try:
            # gather all proc types to be deployed
            tasks = [
                functools.partial(self._scheduler.deploy,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=release.image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs)
                for scale_type, kwargs in deploys.items()
            ]

            async_run(tasks)
        except Exception as e:
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        # Wait until application is available in the router
        # Only run when there is no previous build / release
        old = release.previous()
        if old is None or old.build is None:
            # NOTE: kwargs still holds the settings of the last proc type iterated above
            self.verify_application_health(**kwargs)

        # cleanup old release objects from kubernetes
        release.cleanup_old()
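The deploys mapping in this example is reordered by its boolean 'routable' flag before the tasks are built. Worth noting: sorted() places False before True, so an ascending sort on that key yields the non-routable process types first; reverse=True (or a negated key) would be required if the routable types are meant to lead. A small demonstration of the mechanics:

    from collections import OrderedDict

    deploys = {
        'worker': {'routable': False},
        'web': {'routable': True},
    }

    ascending = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))
    print(list(ascending))    # ['worker', 'web'] -- False sorts first

    descending = OrderedDict(
        sorted(deploys.items(), key=lambda d: d[1].get('routable'), reverse=True))
    print(list(descending))   # ['web', 'worker'] -- routable types first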
Example #6
    def restart(self, **kwargs):  # noqa
        """
        Restart found pods by deleting them (RC / Deployment will recreate).
        Wait until they are all drained away and RC / Deployment has gotten to a good state
        """
        try:
            # Resolve single pod name if short form (cmd-1269180282-1nyfz) is passed
            if 'name' in kwargs and kwargs['name'].count('-') == 2:
                kwargs['name'] = '{}-{}'.format(kwargs['id'], kwargs['name'])

            # Iterate over RCs / RSs to get total desired count if not a single item
            desired = 1
            if 'name' not in kwargs:
                desired = 0
                labels = self._scheduler_filter(**kwargs)
                # fetch RS (which represent Deployments)
                controllers = self._scheduler.get_replicasets(kwargs['id'],
                                                              labels=labels)

                for controller in controllers.json()['items']:
                    desired += controller['spec']['replicas']
        except KubeException:
            # Nothing was found
            return []

        try:
            tasks = [
                functools.partial(self._scheduler.delete_pod, self.id,
                                  pod['name'])
                for pod in self.list_pods(**kwargs)
            ]

            async_run(tasks)
        except Exception as e:
            err = "warning, some pods failed to stop:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Wait for pods to start
        try:
            timeout = 300  # 5 minutes
            elapsed = 0
            while True:
                # timed out
                if elapsed >= timeout:
                    raise DeisException(
                        'timeout - 5 minutes have passed and pods are not up')

                # restarting a single pod behaves differently, fetch the *newest* pod
                # and hope it is the right one. Comes back sorted
                if 'name' in kwargs:
                    del kwargs['name']
                    pods = self.list_pods(**kwargs)
                    # Add in the latest name
                    if len(pods) == 0:
                        # if pod is not even scheduled wait for it and pass dummy kwargs
                        # to indicate restart of a single pod
                        kwargs['name'] = "dummy"
                        continue
                    kwargs['name'] = pods[0]['name']
                    pods = pods[0]

                actual = 0
                for pod in self.list_pods(**kwargs):
                    if pod['state'] == 'up':
                        actual += 1

                if desired == actual:
                    break

                elapsed += 5
                time.sleep(5)
        except Exception as e:
            err = "warning, some pods failed to start:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Return the new pods
        pods = self.list_pods(**kwargs)
        return pods
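The second half of restart() is a poll-until-ready loop: every five seconds it recounts the pods in the 'up' state and gives up after five minutes. The generic shape of that pattern, extracted for clarity (the predicate below is a stand-in for the self.list_pods() check, not the controller's API):

    import time

    def wait_until(predicate, timeout=300, interval=5):
        """Poll predicate() every `interval` seconds until it returns True,
        raising TimeoutError once `timeout` seconds have elapsed."""
        elapsed = 0
        while True:
            if predicate():
                return
            if elapsed >= timeout:
                raise TimeoutError('condition not met after {} seconds'.format(timeout))
            elapsed += interval
            time.sleep(interval)

    # usage sketch: wait until the number of 'up' pods matches the desired count
    # wait_until(lambda: count_up_pods() >= desired, timeout=300, interval=5)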
Example #7
    def deploy(self, release, force_deploy=False, rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        # set processes structure to default if app is new.
        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.procfile_structure = self._default_structure(release)
            self.save()
        # reset canonical process types if build type has changed.
        else:
            # find the previous release's build type
            prev_release = release.previous()
            if prev_release and prev_release.build:
                if prev_release.build.type != release.build.type:
                    structure = self.structure.copy()
                    # zero out canonical pod counts
                    for proctype in ['cmd', 'web']:
                        if proctype in structure:
                            structure[proctype] = 0
                    # update with the default process type.
                    structure.update(self._default_structure(release))
                    self.structure = structure
                    # if procfile structure exists then we use it
                    if release.build.procfile and \
                       release.build.sha and not \
                       release.build.dockerfile:
                        self.procfile_structure = release.build.procfile
                    self.save()

        # always set the procfile structure for any new release
        if release.build.procfile:
            self.procfile_structure = release.build.procfile
            self.save()

        # deploy application to k8s. Also handles initial scaling
        app_settings = self.appsettings_set.latest()
        deploys = {}
        for scale_type, replicas in self.structure.items():
            deploys[scale_type] = self._gather_app_settings(release, app_settings, scale_type, replicas)  # noqa

        # Sort deploys so routable comes first
        deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        # use the slugrunner image for buildpack apps, otherwise use the release image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)
            # only buildpack apps need access to object storage
            if release.build.type == 'buildpack':
                self.create_object_store_secret()

            # gather all proc types to be deployed
            tasks = [
                functools.partial(
                    self._scheduler.deploy,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                ) for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                # Don't roll back if the previous release doesn't have a build, which means
                # this is the first build and all the previous releases are just config changes.
                if rollback_on_failure and release.previous().build is not None:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format('v{}'.format(release.version), "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(), force_deploy=True, rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port. This is done after
        # the fact to let the initial deploy settle before routing traffic to the application
        if deploys and app_type:
            app_settings = self.appsettings_set.latest()
            if app_settings.whitelist:
                addresses = ",".join(address for address in app_settings.whitelist)
            else:
                addresses = None
            service_annotations = {
                'maintenance': app_settings.maintenance,
                'whitelist': addresses
            }

            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable, service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
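The rollback branch in this deploy() is the key difference from the earlier versions: on a KubeException it re-enters deploy() with the previous release, passing force_deploy=True (skip the in-progress check) and rollback_on_failure=False (so a failed rollback cannot recurse again), and it only does so when the previous release actually has a build. A stripped-down sketch of that control flow; push and the release arguments are illustrative stand-ins, not the controller's API:

    class KubeException(Exception):
        """Stand-in for the scheduler error raised on a failed rollout."""

    def deploy(release, previous_release, push, force_deploy=False, rollback_on_failure=True):
        try:
            push(release)
        except KubeException as e:
            if rollback_on_failure and previous_release is not None:
                # revert to the previous release, but never roll back a rollback:
                # rollback_on_failure=False breaks the recursion
                deploy(previous_release, None, push,
                       force_deploy=True, rollback_on_failure=False)
                raise RuntimeError('deploy of {} failed, rolled back to {}'.format(
                    release, previous_release)) from e
            raise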
Example #8
    def deploy(self, release, force_deploy=False, rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        app_settings = self.appsettings_set.latest()

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        for scale_type, replicas in self.structure.items():
            deploys[scale_type] = self._gather_app_settings(release, app_settings, scale_type, replicas)  # noqa

        # Sort deploys so routable comes first
        deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        # use the slugrunner image for buildpack apps, otherwise use the release image
        image = settings.SLUGRUNNER_IMAGE if release.build.type == 'buildpack' else release.image

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self.set_application_config(release)
            # only buildpack apps need access to object storage
            if release.build.type == 'buildpack':
                self.create_object_store_secret()

            # gather all proc types to be deployed
            tasks = [
                functools.partial(
                    self._scheduler.deploy,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                ) for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                # Don't roll back if the previous release doesn't have a build, which means
                # this is the first build and all the previous releases are just config changes.
                if rollback_on_failure and release.previous().build is not None:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format('v{}'.format(release.version), "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(), force_deploy=True, rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port. This is done after
        # the fact to let the initial deploy settle before routing traffic to the application
        if deploys and app_type:
            app_settings = self.appsettings_set.latest()
            if app_settings.whitelist:
                addresses = ",".join(address for address in app_settings.whitelist)
            else:
                addresses = None
            service_annotations = {
                'maintenance': app_settings.maintenance,
                'whitelist': addresses
            }

            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable, service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #9
    def restart(self, **kwargs):  # noqa
        """
        Restart found pods by deleting them (RC / Deployment will recreate).
        Wait until they are all drained away and RC / Deployment has gotten to a good state
        """
        try:
            # Resolve single pod name if short form (cmd-1269180282-1nyfz) is passed
            if 'name' in kwargs and kwargs['name'].count('-') == 2:
                kwargs['name'] = '{}-{}'.format(kwargs['id'], kwargs['name'])

            # Iterate over RCs / RSs to get total desired count if not a single item
            desired = 1
            if 'name' not in kwargs:
                desired = 0
                labels = self._scheduler_filter(**kwargs)
                # fetch RS (which represent Deployments)
                controllers = self._scheduler.rs.get(kwargs['id'], labels=labels)

                for controller in controllers.json()['items']:
                    desired += controller['spec']['replicas']
        except KubeException:
            # Nothing was found
            return []

        try:
            tasks = [
                functools.partial(
                    self._scheduler.pod.delete,
                    self.id,
                    pod['name']
                ) for pod in self.list_pods(**kwargs)
            ]

            async_run(tasks)
        except Exception as e:
            err = "warning, some pods failed to stop:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Wait for pods to start
        try:
            timeout = 300  # 5 minutes
            elapsed = 0
            while True:
                # timed out
                if elapsed >= timeout:
                    raise DeisException('timeout - 5 minutes have passed and pods are not up')

                # restarting a single pod behaves differently, fetch the *newest* pod
                # and hope it is the right one. Comes back sorted
                if 'name' in kwargs:
                    del kwargs['name']
                    pods = self.list_pods(**kwargs)
                    # Add in the latest name
                    if len(pods) == 0:
                        # if pod is not even scheduled wait for it and pass dummy kwargs
                        # to indicate restart of a single pod
                        kwargs['name'] = "dummy"
                        continue
                    kwargs['name'] = pods[0]['name']
                    pods = pods[0]

                actual = 0
                for pod in self.list_pods(**kwargs):
                    if pod['state'] == 'up':
                        actual += 1

                if desired == actual:
                    break

                elapsed += 5
                time.sleep(5)
        except Exception as e:
            err = "warning, some pods failed to start:\n{}".format(str(e))
            self.log(err, logging.WARNING)

        # Return the new pods
        pods = self.list_pods(**kwargs)
        return pods
Example #10
    def deploy(self, release, force_deploy=False):
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES', settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get('KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT', settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # deploy application to k8s. Also handles initial scaling
        deploys = {}
        envs = release.config.values
        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd']
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': release.config.registry,
                # only used if there is no previous RC
                'replicas': replicas,
                'version': "v{}".format(release.version),
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': release.config.healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_history_limit': deployment_history,
                'release_summary': release.summary
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        for scale_type, kwargs in deploys.items():
            # Is there an existing deployment in progress?
            name = self._get_job_id(scale_type)
            if not force_deploy and release.deployment_in_progress(self.id, name):
                raise AlreadyExists('Deployment for {} is already in progress'.format(name))

        try:
            # gather all proc types to be deployed
            tasks = [
                functools.partial(
                    self._scheduler.deploy,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=release.image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                ) for scale_type, kwargs in deploys.items()
            ]

            async_run(tasks)
        except Exception as e:
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        # Wait until application is available in the router
        # Only run when there is no previous build / release
        old = release.previous()
        if old is None or old.build is None:
            # NOTE: kwargs still holds the settings of the last proc type iterated above
            self.verify_application_health(**kwargs)

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #11
    def deploy(self, release, force_deploy=False, rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        app_settings = self.appsettings_set.latest()
        addresses = ",".join(address for address in app_settings.whitelist)
        service_annotations = {'maintenance': app_settings.maintenance, 'whitelist': addresses}

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        image = release.image
        registry = release.config.registry
        version = "v{}".format(release.version)
        envs = self._build_env_vars(release.build.type, version, image, release.config.values)
        tags = release.config.tags

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES', settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get('KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT', settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get('KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS', settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(self.id, registry, image)

        # deploy application to k8s. Also handles initial scaling
        deploys = {}

        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd'] and app_settings.routable
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get('web/cmd', {})

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': tags,
                'envs': envs,
                'registry': registry,
                'replicas': replicas,
                'version': version,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_revision_history_limit': deployment_history,
                'release_summary': release.summary,
                'pod_termination_grace_period_seconds': pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            # gather all proc types to be deployed
            tasks = [
                functools.partial(
                    self._scheduler.deploy,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                ) for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                if rollback_on_failure:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format(version, "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(), force_deploy=True, rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port. This is done after
        # the fact to let the initial deploy settle before routing traffic to the application
        if deploys and app_type:
            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable, service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
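This revision resolves healthchecks per process type and falls back to a shared 'web/cmd' entry for the default types. A minimal sketch of that lookup, using a plain dict in place of release.config.get_healthcheck() and a made-up probe definition:

    healthchecks = {
        'web/cmd': {'livenessProbe': {'httpGet': {'path': '/', 'port': 5000}}},
        'worker': {},
    }

    def resolve_healthcheck(scale_type, healthchecks):
        # prefer an exact per-type entry, then the shared web/cmd entry for the
        # default process types, otherwise no healthcheck at all
        healthcheck = healthchecks.get(scale_type, {})
        if not healthcheck and scale_type in ['web', 'cmd']:
            healthcheck = healthchecks.get('web/cmd', {})
        return healthcheck

    assert resolve_healthcheck('web', healthchecks)['livenessProbe']['httpGet']['port'] == 5000
    assert resolve_healthcheck('worker', healthchecks) == {}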
Example #12
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        app_settings = self.appsettings_set.latest()
        version = "v{}".format(release.version)
        image = release.image
        envs = self._build_env_vars(release.build.type, version, image, release.config.values)
        registry = release.config.registry

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES', settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get('KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS', settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(self.id, registry, image)

        tasks = []
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd'] and app_settings.routable
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get('web/cmd', {})

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': registry,
                'version': version,
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'pod_termination_grace_period_seconds': pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(
                    self._scheduler.scale,
                    namespace=self.id,
                    name=self._get_job_id(scale_type),
                    image=image,
                    entrypoint=self._get_entrypoint(scale_type),
                    command=self._get_command(scale_type),
                    **kwargs
                )
            )

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e
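Here the release's env vars are written to Kubernetes before the scale tasks run, via self._scheduler.set_application_config(self.id, envs, version), which the comment describes as creating a Secret. The controller's scheduler client is not shown in these snippets; purely as an illustration of the underlying operation, an equivalent call with the official kubernetes Python client might look like the sketch below (the secret name and labels are assumptions, not the controller's conventions):

    from kubernetes import client, config

    def set_application_config(namespace, envs, version):
        """Store release env vars as a Secret named after the app and version.
        Illustrative only; the controller's scheduler wraps the API differently."""
        config.load_incluster_config()  # or load_kube_config() outside the cluster
        secret = client.V1Secret(
            metadata=client.V1ObjectMeta(
                name='{}-{}-env'.format(namespace, version),
                labels={'app': namespace, 'version': version},
            ),
            string_data={k: str(v) for k, v in envs.items()},
        )
        client.CoreV1Api().create_namespaced_secret(namespace=namespace, body=secret)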
Example #13
    def deploy(self,
               release,
               force_deploy=False,
               rollback_on_failure=True):  # noqa
        """
        Deploy a new release to this application

        force_deploy can be used when a deployment is broken, such as for Rollback
        """
        if release.build is None:
            raise DeisException('No build associated with this release')

        app_settings = self.appsettings_set.latest()
        addresses = ",".join(address for address in app_settings.whitelist)
        service_annotations = {
            'maintenance': app_settings.maintenance,
            'whitelist': addresses
        }

        # use create to make sure minimum resources are created
        self.create()

        if self.structure == {}:
            self.structure = self._default_structure(release)
            self.save()

        image = release.image
        registry = release.config.registry
        version = "v{}".format(release.version)
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)
        tags = release.config.tags

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        deployment_history = release.config.values.get(
            'KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT',
            settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        # deploy application to k8s. Also handles initial scaling
        deploys = {}

        for scale_type, replicas in self.structure.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd'] and app_settings.routable
            # fetch application port and inject into ENV vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get(
                    'web/cmd', {})

            deploys[scale_type] = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': tags,
                'envs': envs,
                'registry': registry,
                'replicas': replicas,
                'version': version,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'deployment_revision_history_limit': deployment_history,
                'release_summary': release.summary,
                'pod_termination_grace_period_seconds':
                pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

        # Sort deploys so routable comes first
        deploys = OrderedDict(
            sorted(deploys.items(), key=lambda d: d[1].get('routable')))

        # Check if any proc type has a Deployment in progress
        self._check_deployment_in_progress(deploys, force_deploy)

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            # gather all proc types to be deployed
            tasks = [
                functools.partial(self._scheduler.deploy,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs)
                for scale_type, kwargs in deploys.items()
            ]

            try:
                async_run(tasks)
            except KubeException as e:
                if rollback_on_failure:
                    err = 'There was a problem deploying {}. Rolling back process types to release {}.'.format(
                        version,
                        "v{}".format(release.previous().version))  # noqa
                    # This goes in the log before the rollback starts
                    self.log(err, logging.ERROR)
                    # revert all process types to old release
                    self.deploy(release.previous(),
                                force_deploy=True,
                                rollback_on_failure=False)
                    # let it bubble up
                    raise DeisException('{}\n{}'.format(err, str(e))) from e

                # otherwise just re-raise
                raise
        except Exception as e:
            # This gets shown to the end user
            err = '(app::deploy): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e

        app_type = 'web' if 'web' in deploys else 'cmd' if 'cmd' in deploys else None
        # Make sure the application is routable and uses the correct port. This is done after
        # the fact to let the initial deploy settle before routing traffic to the application
        if deploys and app_type:
            routable = deploys[app_type].get('routable')
            port = deploys[app_type].get('envs', {}).get('PORT', None)
            self._update_application_service(self.id, app_type, port, routable,
                                             service_annotations)  # noqa

            # Wait until application is available in the router
            # Only run when there is no previous build / release
            old = release.previous()
            if old is None or old.build is None:
                self.verify_application_health(**deploys[app_type])

        # cleanup old release objects from kubernetes
        release.cleanup_old()
Example #14
    def _scale_pods(self, scale_types):
        release = self.release_set.latest()
        app_settings = self.appsettings_set.latest()
        version = "v{}".format(release.version)
        image = release.image
        envs = self._build_env_vars(release.build.type, version, image,
                                    release.config.values)
        registry = release.config.registry

        # see if the app config has deploy batch preference, otherwise use global
        batches = release.config.values.get('DEIS_DEPLOY_BATCHES',
                                            settings.DEIS_DEPLOY_BATCHES)

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = release.config.values.get(
            'DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa

        # get application level pod termination grace period
        pod_termination_grace_period_seconds = release.config.values.get(
            'KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS',
            settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS)  # noqa

        # create image pull secret if needed
        image_pull_secret_name = self.image_pull_secret(
            self.id, registry, image)

        tasks = []
        for scale_type, replicas in scale_types.items():
            # only web / cmd are routable
            # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
            routable = scale_type in ['web', 'cmd'] and app_settings.routable
            # fetch application port and inject into ENV Vars as needed
            port = release.get_port()
            if port:
                envs['PORT'] = port

            healthcheck = release.config.get_healthcheck().get(scale_type, {})
            if not healthcheck and scale_type in ['web', 'cmd']:
                healthcheck = release.config.get_healthcheck().get(
                    'web/cmd', {})

            kwargs = {
                'memory': release.config.memory,
                'cpu': release.config.cpu,
                'tags': release.config.tags,
                'envs': envs,
                'registry': registry,
                'version': version,
                'replicas': replicas,
                'app_type': scale_type,
                'build_type': release.build.type,
                'healthcheck': healthcheck,
                'routable': routable,
                'deploy_batches': batches,
                'deploy_timeout': deploy_timeout,
                'pod_termination_grace_period_seconds':
                pod_termination_grace_period_seconds,
                'image_pull_secret_name': image_pull_secret_name,
            }

            # gather all proc types to be deployed
            tasks.append(
                functools.partial(self._scheduler.scale,
                                  namespace=self.id,
                                  name=self._get_job_id(scale_type),
                                  image=image,
                                  entrypoint=self._get_entrypoint(scale_type),
                                  command=self._get_command(scale_type),
                                  **kwargs))

        try:
            # create the application config in k8s (secret in this case) for all deploy objects
            self._scheduler.set_application_config(self.id, envs, version)

            async_run(tasks)
        except Exception as e:
            err = '(scale): {}'.format(e)
            self.log(err, logging.ERROR)
            raise ServiceUnavailable(err) from e