def save(self, *args, **kwargs):
    """Persist the application, generating a unique app ID on first save.

    For a brand-new app (no releases yet), also verify that the generated
    ID is not already taken as a namespace in the Kubernetes cluster.

    Raises:
        AlreadyExists: if the app ID already exists as a k8s namespace.
    """
    if not self.id:
        self.id = generate_app_name()
        # regenerate until the ID is unique among existing apps
        while App.objects.filter(id=self.id).exists():
            self.id = generate_app_name()

    # verify the application name doesn't exist as a k8s namespace
    # only check for it if there have been no releases
    try:
        self.release_set.latest()
    except Release.DoesNotExist:
        try:
            if self._scheduler.get_namespace(self.id).status_code == 200:
                # Namespace already exists
                err = "{} already exists as a namespace in this kubernetes setup".format(self.id)  # noqa
                self.log(err, logging.INFO)
                raise AlreadyExists(err)
        except KubeHTTPException:
            # namespace lookup failed -> the name is free to use
            pass

    application = super(App, self).save(**kwargs)

    # create all the required resources
    self.create(*args, **kwargs)

    return application
def attach(self, *args, **kwargs):
    """Attach this certificate to the domain named in ``kwargs['domain']``.

    Raises:
        AlreadyExists: if the domain already carries a certificate.
    """
    # look up the target domain; 404s for the caller if it does not exist
    target = get_object_or_404(Domain, domain=kwargs['domain'])

    # refuse to overwrite an existing attachment
    if target.certificate is not None:
        raise AlreadyExists(
            "Domain already has a certificate attached to it")

    # materialize the attachment in kubernetes before persisting it
    self.attach_in_kubernetes(target)

    target.certificate = self
    target.save()
def destroy(self, request, **kwargs):
    """Delete a user account.

    By default the caller deletes themself; a superuser (or the user
    themself, redundantly) may name another account via
    ``request.data['username']``.

    Raises:
        PermissionDenied: if a non-superuser targets another user.
        AlreadyExists: if the target still owns applications, or the
            delete is blocked by protected related objects.
    """
    calling_obj = self.get_object()
    target_obj = calling_obj

    if request.data.get('username'):
        # if you "accidentally" target yourself, that should be fine
        if calling_obj.username == request.data['username'] or calling_obj.is_superuser:
            target_obj = get_object_or_404(User, username=request.data['username'])
        else:
            raise PermissionDenied()

    # A user can not be removed without apps changing ownership first.
    # .exists() issues a cheap EXISTS query instead of fetching every row
    # just to take len() of the result.
    if models.App.objects.filter(owner=target_obj).exists():
        msg = '{} still has applications assigned. Delete or transfer ownership'.format(str(target_obj))  # noqa
        raise AlreadyExists(msg)

    try:
        target_obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    except ProtectedError as e:
        # chain the cause so the original ProtectedError is not lost
        raise AlreadyExists(e) from e
def _check_deployment_in_progress(self, deploys, force_deploy=False): if force_deploy: return for scale_type, kwargs in deploys.items(): # Is there an existing deployment in progress? name = self._get_job_id(scale_type) in_progress, deploy_okay = self._scheduler.deployment.in_progress( self.id, name, kwargs.get("deploy_timeout"), kwargs.get("deploy_batches"), kwargs.get("replicas"), kwargs.get("tags") ) # throw a 409 if things are in progress but we do not want to let through the deploy if in_progress and not deploy_okay: raise AlreadyExists('Deployment for {} is already in progress'.format(name))
def deploy(self, release, force_deploy=False):
    """ Deploy a new release to this application

    force_deploy can be used when a deployment is broken, such as for Rollback
    """
    # a release without a build has nothing to deploy
    if release.build is None:
        raise DeisException('No build associated with this release')

    # use create to make sure minimum resources are created
    self.create()

    # first deploy ever: derive the proc-type structure from the release
    if self.structure == {}:
        self.structure = self._default_structure(release)
        self.save()

    # see if the app config has deploy batch preference, otherwise use global
    batches = release.config.values.get('DEIS_DEPLOY_BATCHES', settings.DEIS_DEPLOY_BATCHES)
    # see if the app config has deploy timeout preference, otherwise use global
    deploy_timeout = release.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa
    deployment_history = release.config.values.get('KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT', settings.KUBERNETES_DEPLOYMENTS_REVISION_HISTORY_LIMIT)  # noqa

    # deploy application to k8s. Also handles initial scaling
    deploys = {}
    # NOTE(review): envs aliases release.config.values -- the PORT writes
    # below mutate that shared dict; confirm this is intended
    envs = release.config.values
    for scale_type, replicas in self.structure.items():
        # only web / cmd are routable
        # http://docs.deis.io/en/latest/using_deis/process-types/#web-vs-cmd-process-types
        routable = True if scale_type in ['web', 'cmd'] else False
        # fetch application port and inject into ENV vars as needed
        port = release.get_port()
        if port:
            envs['PORT'] = port

        # per-proc-type options passed through to the scheduler
        deploys[scale_type] = {
            'memory': release.config.memory,
            'cpu': release.config.cpu,
            'tags': release.config.tags,
            'envs': envs,
            'registry': release.config.registry,
            # only used if there is no previous RC
            'replicas': replicas,
            'version': "v{}".format(release.version),
            'app_type': scale_type,
            'build_type': release.build.type,
            'healthcheck': release.config.healthcheck,
            'routable': routable,
            'deploy_batches': batches,
            'deploy_timeout': deploy_timeout,
            'deployment_history_limit': deployment_history,
            'release_summary': release.summary
        }

    # Sort deploys so routable comes first
    # NOTE(review): ascending sort on the boolean puts routable=False
    # first, which contradicts the comment above -- confirm intended order
    deploys = OrderedDict(sorted(deploys.items(), key=lambda d: d[1].get('routable')))

    # Check if any proc type has a Deployment in progress
    for scale_type, kwargs in deploys.items():
        # Is there an existing deployment in progress?
        name = self._get_job_id(scale_type)
        # throw a 409 unless the caller explicitly forced the deploy
        if not force_deploy and release.deployment_in_progress(self.id, name):
            raise AlreadyExists('Deployment for {} is already in progress'.format(name))

    try:
        # gather all proc types to be deployed
        tasks = [
            functools.partial(
                self._scheduler.deploy,
                namespace=self.id,
                name=self._get_job_id(scale_type),
                image=release.image,
                entrypoint=self._get_entrypoint(scale_type),
                command=self._get_command(scale_type),
                **kwargs
            ) for scale_type, kwargs in deploys.items()
        ]

        # run all proc-type deploys concurrently
        async_run(tasks)
    except Exception as e:
        # wrap any scheduler failure as a 503 for the caller
        err = '(app::deploy): {}'.format(e)
        self.log(err, logging.ERROR)
        raise ServiceUnavailable(err) from e

    # Wait until application is available in the router
    # Only run when there is no previous build / release
    # NOTE(review): **kwargs here is the loop variable leaked from the
    # in-progress check loop above (last proc type iterated); if deploys
    # is empty this raises NameError -- confirm intended
    old = release.previous()
    if old is None or old.build is None:
        self.verify_application_health(**kwargs)

    # cleanup old release objects from kubernetes
    release.cleanup_old()