Example #1
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    # User that owns this application.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Slug identifier; defaults to a generated app name and must be both
    # docker-compatible and not a reserved name.
    id = models.SlugField(max_length=64, unique=True, default=select_app_name,
                          validators=[validate_id_is_docker_compatible,
                                      validate_reserved_names])
    # Mapping of container type -> desired container count.
    # NOTE(review): default={} is a shared mutable default; Django convention
    # is a callable (default=dict) — confirm before changing (migrations).
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    @property
    def _scheduler(self):
        """Instantiate the configured scheduler client.

        Re-imports and re-constructs the client on every access; the module
        is chosen at runtime via settings.SCHEDULER_MODULE.
        """
        mod = importlib.import_module(settings.SCHEDULER_MODULE)
        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    def __str__(self):
        return self.id

    @property
    def url(self):
        """Return the app's public hostname under the platform domain."""
        return self.id + '.' + settings.DEIS_DOMAIN

    def _get_job_id(self, container_type):
        """Build the scheduler job id: "<app>_v<version>.<container_type>"."""
        app = self.id
        release = self.release_set.latest()
        version = "v{}".format(release.version)
        job_id = "{app}_{version}.{container_type}".format(**locals())
        return job_id

    def _get_command(self, container_type):
        """Return the command to run for the given container type.

        Procfile-based apps use the slugrunner 'start <type>' entrypoint;
        Dockerfile/image deploys run the procfile entry via bash.
        """
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            release = self.release_set.latest()
            if release.build.dockerfile or not release.build.sha:
                return "bash -c '{}'".format(release.build.procfile[container_type])
            else:
                return 'start {}'.format(container_type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            # 'cmd' means "use the image's own default command"
            return '' if container_type == 'cmd' else 'start {}'.format(container_type)

    def log(self, message, level=logging.INFO):
        """Logs a message in the context of this application.

        This prefixes log messages with an application "tag" that the customized deis-logspout will
        be on the lookout for.  When it's seen, the message-- usually an application event of some
        sort like releasing or scaling, will be considered as "belonging" to the application
        instead of the controller and will be handled accordingly.
        """
        logger.log(level, "[{}]: {}".format(self.id, message))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        try:
            # attempt to remove containers from the scheduler
            # 'run' containers are one-off and not tracked by the scheduler here
            self._destroy_containers([c for c in self.container_set.exclude(type='run')])
        except RuntimeError:
            # best-effort: proceed with deletion even if some containers
            # could not be destroyed on the scheduler side
            pass
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def restart(self, **kwargs):
        """Restart containers, optionally filtered by 'type' and/or 'num'.

        Returns the queryset of containers that were restarted.
        """
        to_restart = self.container_set.all()
        if kwargs.get('type'):
            to_restart = to_restart.filter(type=kwargs.get('type'))
        if kwargs.get('num'):
            to_restart = to_restart.filter(num=kwargs.get('num'))
        self._restart_containers(to_restart)
        return to_restart

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        try:
            url = 'http://{}:{}/{}/'.format(settings.LOGGER_HOST, settings.LOGGER_PORT, self.id)
            requests.delete(url)
        except Exception as e:
            # Ignore errors deleting application logs.  An error here should not interfere with
            # the overall success of deleting an application, but we should log it.
            err = 'Error deleting existing application logs: {}'.format(e)
            log_event(self, err, logging.WARNING)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure.

        :param user: the user requesting the scale (used for event logging)
        :param structure: dict of container type -> desired count
        :raises EnvironmentError: if no build exists or a type is unknown
        :returns: True if any container counts changed
        """
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure:
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        scale_types = {}

        # iterate on a copy of the container_type keys
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            scale_types[container_type] = requested
            # scale down: remove the most recently created containers first
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1

        if changed:
            # prefer the scheduler's native scale primitive when available
            if "scale" in dir(self._scheduler):
                self._scale_containers(scale_types, to_remove)
            else:
                if to_add:
                    self._start_containers(to_add)
                if to_remove:
                    self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.exclude(type='run').values(
            'type').annotate(Count('pk')).order_by()
        new_structure = structure.copy()
        new_structure.update({v['type']: v['pk__count'] for v in vals})
        self.structure = new_structure
        self.save()
        return changed

    def _scale_containers(self, scale_types, to_remove):
        """Scale each container type via the scheduler's scale() API,
        then delete database records for containers scaled away."""
        release = self.release_set.latest()
        for scale_type in scale_types:
            image = release.image
            version = "v{}".format(release.version)
            kwargs = {'memory': release.config.memory,
                      'cpu': release.config.cpu,
                      'tags': release.config.tags,
                      'version': version,
                      'aname': self.id,
                      'num': scale_types[scale_type]}
            job_id = self._get_job_id(scale_type)
            command = self._get_command(scale_type)
            try:
                self._scheduler.scale(
                    name=job_id,
                    image=image,
                    command=command,
                    **kwargs)
            except Exception as e:
                err = '{} (scale): {}'.format(job_id, e)
                log_event(self, err, logging.ERROR)
                raise
        # remove DB rows only after the scheduler call succeeded
        [c.delete() for c in to_remove]

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        if not to_add:
            return
        # create all containers in parallel, then start them in parallel;
        # start only happens if every create reached the 'created' state
        create_threads = [Thread(target=c.create) for c in to_add]
        start_threads = [Thread(target=c.start) for c in to_add]
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if any(c.state != 'created' for c in to_add):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            # roll back everything we tried to create
            self._destroy_containers(to_add)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set(['up']):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)
        # if the user specified a health check, try checking to see if it's running
        try:
            config = self.config_set.latest()
            if 'HEALTHCHECK_URL' in config.values.keys():
                self._healthcheck(to_add, config.values)
        except Config.DoesNotExist:
            pass

    def _healthcheck(self, containers, config):
        """Poll newly started containers' health endpoints, retrying with
        backoff; destroys the containers and raises if all attempts fail."""
        # if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY
        intervals = [1.0, 0.1, 0.5, 1.0]
        # HACK (bacongobbler): we need to wait until publisher has a chance to publish each
        # service to etcd, which can take up to 20 seconds.
        time.sleep(20)
        for i in xrange(len(intervals)):
            delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
            try:
                # sleep until the initial timeout is over
                if delay > 0:
                    time.sleep(delay * intervals[i])
                # only routable container types are health-checked
                to_healthcheck = [c for c in containers if c.type in ['web', 'cmd']]
                self._do_healthcheck(to_healthcheck, config)
                break
            except exceptions.HealthcheckException as e:
                try:
                    next_delay = delay * intervals[i+1]
                    msg = "{}; trying again in {} seconds".format(e, next_delay)
                    log_event(self, msg, logging.WARNING)
                except IndexError:
                    # last attempt: nothing left to retry
                    log_event(self, e, logging.WARNING)
        else:
            # for/else: loop exhausted without a successful break
            self._destroy_containers(containers)
            msg = "aborting, app containers failed to respond to health check"
            log_event(self, msg, logging.ERROR)
            raise RuntimeError(msg)

    def _do_healthcheck(self, containers, config):
        """Hit each container's published address once; raise
        HealthcheckException on any non-200 or connection failure."""
        path = config.get('HEALTHCHECK_URL', '/')
        timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
        if not _etcd_client:
            raise exceptions.HealthcheckException('no etcd client available')
        for container in containers:
            try:
                # container addresses are published to etcd by the publisher
                key = "/deis/services/{self}/{container.job_id}".format(**locals())
                url = "http://{}{}".format(_etcd_client.get(key).value, path)
                response = requests.get(url, timeout=timeout)
                if response.status_code != requests.codes.OK:
                    raise exceptions.HealthcheckException(
                        "app failed health check (got '{}', expected: '200')".format(
                            response.status_code))
            except (requests.Timeout, requests.ConnectionError, KeyError) as e:
                raise exceptions.HealthcheckException(
                    'failed to connect to container ({})'.format(e))

    def _restart_containers(self, to_restart):
        """Restarts containers via the scheduler"""
        if not to_restart:
            return
        # stop everything in parallel, then start everything in parallel
        stop_threads = [Thread(target=c.stop) for c in to_restart]
        start_threads = [Thread(target=c.start) for c in to_restart]
        [t.start() for t in stop_threads]
        [t.join() for t in stop_threads]
        # NOTE(review): a successfully stopped container is apparently back in
        # the 'created' state — confirm against the Container state machine
        if any(c.state != 'created' for c in to_restart):
            err = 'warning, some containers failed to stop'
            log_event(self, err, logging.WARNING)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if any(c.state != 'up' for c in to_restart):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        if not to_destroy:
            return
        destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        # delete DB rows only for containers the scheduler actually destroyed
        [c.delete() for c in to_destroy if c.state == 'destroyed']
        if any(c.state != 'destroyed' for c in to_destroy):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release):
        """Deploy a new release to this application"""
        # clone each existing (non one-off) container onto the new release
        existing = self.container_set.exclude(type='run')
        new = []
        scale_types = set()
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)
            scale_types.add(e.type)

        # prefer the scheduler's native deploy primitive when available
        if new and "deploy" in dir(self._scheduler):
            self._deploy_app(scale_types, release, existing)
        else:
            self._start_containers(new)

            # destroy old containers
            if existing:
                self._destroy_containers(existing)

        # perform default scaling if necessary
        if self.structure == {} and release.build is not None:
            self._default_scale(user, release)

    def _deploy_app(self, scale_types, release, existing):
        """Deploy each container type via the scheduler's deploy() API,
        then delete database records for the superseded containers."""
        for scale_type in scale_types:
            image = release.image
            version = "v{}".format(release.version)
            kwargs = {'memory': release.config.memory,
                      'cpu': release.config.cpu,
                      'tags': release.config.tags,
                      'aname': self.id,
                      'num': 0,
                      'version': version}
            job_id = self._get_job_id(scale_type)
            command = self._get_command(scale_type)
            try:
                self._scheduler.deploy(
                    name=job_id,
                    image=image,
                    command=command,
                    **kwargs)
            except Exception as e:
                err = '{} (deploy): {}'.format(job_id, e)
                log_event(self, err, logging.ERROR)
                raise
        # remove DB rows only after the scheduler call succeeded
        [c.delete() for c in existing]

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)

    def logs(self, log_lines=str(settings.LOG_LINES)):
        """Return aggregated log data for this application.

        :param log_lines: number of lines to request (string, evaluated once
            at import time from settings.LOG_LINES)
        :raises EnvironmentError: if logs are missing or the logger errors
        """
        try:
            url = "http://{}:{}/{}?log_lines={}".format(settings.LOGGER_HOST, settings.LOGGER_PORT,
                                                        self.id, log_lines)
            r = requests.get(url)
        # Handle HTTP request errors
        except requests.exceptions.RequestException as e:
            logger.error("Error accessing deis-logger using url '{}': {}".format(url, e))
            raise e
        # Handle logs empty or not found
        if r.status_code == 204 or r.status_code == 404:
            logger.info("GET {} returned a {} status code".format(url, r.status_code))
            raise EnvironmentError('Could not locate logs')
        # Handle unanticipated status codes
        if r.status_code != 200:
            logger.error("Error accessing deis-logger: GET {} returned a {} status code"
                         .format(url, r.status_code))
            raise EnvironmentError('Error accessing deis-logger')
        return r.content

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        # one-off containers get the next number after the highest 'run' container
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            # a registry hostname is present when the first path segment
            # contains a dot (e.g. "registry.example.com/app")
            repo, tag = dockerutils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)
Example #2
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    # User that owns this application.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Slug identifier for the app.
    id = models.SlugField(max_length=64, unique=True)
    # Mapping of container type -> desired container count.
    # NOTE(review): default={} is a shared mutable default; Django convention
    # is a callable (default=dict) — confirm before changing (migrations).
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    def _get_scheduler(self, *args, **kwargs):
        """Instantiate the configured scheduler client.

        Imports 'scheduler.<SCHEDULER_MODULE>' at call time so the backend
        can be swapped via settings.
        """
        module_name = 'scheduler.' + settings.SCHEDULER_MODULE
        mod = importlib.import_module(module_name)

        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    # expose the scheduler as a read-only property (fresh client per access)
    _scheduler = property(_get_scheduler)

    @property
    def url(self):
        """Return the app's public hostname under the platform domain."""
        return self.id + '.' + settings.DEIS_DOMAIN

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
            msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
                                              message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        # destroy each non one-off container sequentially, then remove logs
        for c in self.container_set.exclude(type='run'):
            c.destroy()
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure.

        :param user: the user requesting the scale (used for event logging)
        :param structure: dict of container type -> desired count
        :raises EnvironmentError: if no build exists or a type is unknown
        :returns: True if any container counts changed
        """
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            # scale down: remove the most recently created containers first
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.values('type').annotate(Count('pk')).order_by()
        self.structure = {v['type']: v['pk__count'] for v in vals}
        self.save()
        return changed

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        # create all containers in parallel, then start them in parallel;
        # start only happens if every create reached Container.CREATED
        create_threads = []
        start_threads = []
        for c in to_add:
            create_threads.append(threading.Thread(target=c.create))
            start_threads.append(threading.Thread(target=c.start))
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if set([c.state for c in to_add]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set([Container.UP]):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        destroy_threads = []
        for c in to_destroy:
            destroy_threads.append(threading.Thread(target=c.destroy))
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        # delete DB rows only for containers the scheduler actually destroyed
        [c.delete() for c in to_destroy if c.state == Container.DESTROYED]
        if set([c.state for c in to_destroy]) != set([Container.DESTROYED]):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application

        :param initial: when True, apply default scaling after the deploy
        """
        # clone each existing (non one-off) container onto the new release
        existing = self.container_set.exclude(type='run')
        new = []
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)

        # create new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.create))
        [t.start() for t in threads]
        [t.join() for t in threads]

        # check for containers that failed to create
        if len(new) > 0 and set([c.state for c in new]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            # roll back the partially created set
            self._destroy_containers(new)
            raise RuntimeError(err)

        # start new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.start))
        [t.start() for t in threads]
        [t.join() for t in threads]

        # check for containers that didn't come up correctly
        if len(new) > 0 and set([c.state for c in new]) != set([Container.UP]):
            # report the deploy error
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

        # destroy old containers
        if existing:
            self._destroy_containers(existing)

        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)

    def logs(self):
        """Return aggregated log data for this application.

        :raises EnvironmentError: if the app's log file does not exist
        """
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        # delegate tailing to the system 'tail' for efficiency on large files
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        # one-off containers get the next number after the highest 'run' container
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            # a registry hostname is present when the first path segment
            # contains a dot (e.g. "registry.example.com/app")
            repo, tag = utils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)
Example #3
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    # User that owns this application.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Slug identifier for the app.
    id = models.SlugField(max_length=64, unique=True)
    # Cluster this app is deployed to.
    cluster = models.ForeignKey('Cluster')
    # Mapping of container type -> desired container count.
    # NOTE(review): default here is the string '{}', unlike the dict default
    # used by the other App variants — verify JSONField deserializes it.
    structure = JSONField(default='{}', blank=True)

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    def create(self, *args, **kwargs):
        """Seed the app with an initial config, default build and release."""
        config = Config.objects.create(owner=self.owner, app=self, values={})
        build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)

    def destroy(self, *args, **kwargs):
        """Destroy all of this app's containers (does not delete the model)."""
        for c in self.container_set.all():
            c.destroy()

    def deploy(self, release):
        """Deploy a release via the async task queue, blocking on completion;
        scale 'web' to 1 on the first deploy of an unscaled app."""
        tasks.deploy_release.delay(self, release).get()
        if self.structure == {}:
            # scale the web process by 1 initially
            self.structure = {'web': 1}
            self.save()
            self.scale()

    def scale(self, **kwargs):
        """Scale containers up or down to match requested."""
        requested_containers = self.structure.copy()
        release = self.release_set.latest()
        # increment new container nums off the most recent container
        all_containers = self.container_set.all().order_by('-created')
        container_num = 1 if not all_containers else all_containers[0].num + 1
        msg = 'Containers scaled ' + ' '.join(
            "{}={}".format(k, v) for k, v in requested_containers.items())
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        for container_type in requested_containers.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            requested = requested_containers.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            # scale down: remove the most recently created containers first
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record for each new container
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            # run start/stop as a parallel celery group and block until done
            subtasks = []
            if to_add:
                subtasks.append(tasks.start_containers.s(to_add))
            if to_remove:
                subtasks.append(tasks.stop_containers.s(to_remove))
            group(*subtasks).apply_async().join()
            log_event(self, msg)
        return changed

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        # delegate tailing to the system 'tail' for efficiency on large files
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        log_event(self, "deis run '{}'".format(command))
        # one-off containers get the next number after the highest 'admin' container
        c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='admin',
                                     num=c_num)
        # block on the async task and return its (return_code, output) pair
        rc, output = tasks.run_command.delay(c, command).get()
        return rc, output
Example #4
class LoggedRequest(models.Model):
    """Audit-log record of one HTTP request/response pair."""

    # coarse result buckets derived from the response status code
    FINE = 1
    WARNING = 2
    ERROR = 3

    STATUS_CHOICES = ((FINE, _('Fine')), (WARNING, _('Warning')), (ERROR,
                                                                   _('Error')))

    # how the request was classified by the middleware
    COMMON_REQUEST = 1
    THROTTLED_REQUEST = 2
    SUCCESSFUL_LOGIN_REQUEST = 3
    UNSUCCESSFUL_LOGIN_REQUEST = 4

    TYPE_CHOICES = ((COMMON_REQUEST, _('Common request')),
                    (THROTTLED_REQUEST, _('Throttled request')),
                    (SUCCESSFUL_LOGIN_REQUEST, _('Successful login request')),
                    (UNSUCCESSFUL_LOGIN_REQUEST,
                     _('Unsuccessful login request')))

    objects = LoggedRequestManager()

    # Request information
    request_timestamp = models.DateTimeField(_('Request timestamp'),
                                             null=False,
                                             blank=False,
                                             db_index=True)
    method = models.CharField(_('Method'),
                              max_length=7,
                              null=False,
                              blank=False)
    path = models.CharField(_('URL path'),
                            max_length=255,
                            null=False,
                            blank=False)
    queries = JSONField(_('Queries'), null=True, blank=True)
    headers = JSONField(_('Headers'), null=True, blank=True)
    request_body = models.TextField(_('Request body'), null=False, blank=True)
    is_secure = models.BooleanField(_('HTTPS connection'),
                                    default=False,
                                    null=False,
                                    blank=False)

    # Response information
    response_timestamp = models.DateTimeField(_('Response timestamp'),
                                              null=False,
                                              blank=False)
    response_code = models.PositiveSmallIntegerField(_('Response code'),
                                                     null=False,
                                                     blank=False)
    status = models.PositiveSmallIntegerField(_('Status'),
                                              choices=STATUS_CHOICES,
                                              null=False,
                                              blank=False)
    type = models.PositiveSmallIntegerField(_('Request type'),
                                            choices=TYPE_CHOICES,
                                            default=COMMON_REQUEST,
                                            null=False,
                                            blank=False)
    response_body = models.TextField(_('Response body'),
                                     null=False,
                                     blank=True)
    error_description = models.TextField(_('Error description'),
                                         null=True,
                                         blank=True)

    # User information
    user = models.ForeignKey(AUTH_USER_MODEL,
                             null=True,
                             blank=True,
                             on_delete=models.SET_NULL)
    ip = models.GenericIPAddressField(_('IP address'), null=False, blank=False)

    def get_status(self, response):
        """Map a Django response to ERROR (5xx), WARNING (4xx) or FINE."""
        if response.status_code >= 500:
            return LoggedRequest.ERROR
        elif response.status_code >= 400:
            return LoggedRequest.WARNING
        else:
            return LoggedRequest.FINE

    def update_from_response(self, response):
        """Fill the response-side fields from a finished Django response."""
        self.response_timestamp = timezone.now()
        self.status = self.get_status(response)
        self.response_code = response.status_code

        if not response.streaming and response.get(
                'content-type',
                '').split(';')[0] in LOG_RESPONSE_BODY_CONTENT_TYPES:
            # slice one char past the limit so truncatechars can detect the
            # overflow and append its ellipsis
            response_body = truncatechars(
                force_text(response.content[:LOG_RESPONSE_BODY_LENGTH + 1],
                           errors='replace'), LOG_RESPONSE_BODY_LENGTH)
        else:
            response_body = ''

        self.response_body = response_body

    def response_time(self):
        """Request-to-response latency rendered in milliseconds.

        Uses timedelta.total_seconds(): the previous ``.microseconds``
        attribute only holds the sub-second fraction of the delta, so any
        response slower than one second reported a wrapped, wildly wrong
        value (e.g. 2.5 s showed as 500 ms).
        """
        return '%s ms' % (
            (self.response_timestamp -
             self.request_timestamp).total_seconds() * 1000)

    response_time.short_description = _('Response time')

    def short_path(self):
        """Path truncated to 20 characters for admin list displays."""
        return truncatechars(self.path, 20)

    short_path.short_description = _('Path')
    short_path.filter_by = 'path'
    short_path.order_by = 'path'

    def __str__(self):
        return self.path

    class Meta:
        ordering = ('-request_timestamp', )
        verbose_name = _('Logged request')
        verbose_name_plural = _('Logged requests')
# Beispiel #5
# 0
    def mark_as_read(self):
        """Flag this notification as read, persisting only when it flips.

        Skipping the save for already-read notifications avoids a redundant
        database write.
        """
        if not self.unread:
            return
        self.unread = False
        self.save()


# Whether an optional JSON ``data`` attribute was attached to Notification.
EXTRA_DATA = False
if getattr(settings, 'NOTIFY_USE_JSONFIELD', False):
    try:
        from json_field.fields import JSONField
    except ImportError:
        raise ImproperlyConfigured(
            "You must have a suitable JSONField installed")

    # Dynamically bolt the field onto the already-declared Notification model.
    JSONField(blank=True, null=True).contribute_to_class(Notification, 'data')
    EXTRA_DATA = True


def notify_handler(verb, **kwargs):
    """
    Handler function to create Notification instance upon action signal call.
    """

    kwargs.pop('signal', None)
    recipient = kwargs.pop('recipient')
    actor = kwargs.pop('sender')
    newnotify = Notification(
        recipient=recipient,
        actor_content_type=ContentType.objects.get_for_model(actor),
        actor_object_id=actor.pk,
# Beispiel #6
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    formation = models.ForeignKey('Formation')
    # NOTE(review): string default for a JSON field -- presumably the JSONField
    # implementation parses it; confirm before changing to a dict/callable
    containers = JSONField(default='{}', blank=True)

    class Meta:
        permissions = (('use_app', 'Can use app'), )

    def __str__(self):
        return self.id

    def flat(self):
        """Flat dict representation consumed by configuration management."""
        return {
            'id': self.id,
            'formation': self.formation.id,
            'containers': dict(self.containers)
        }

    def build(self):
        """Create the initial config/build/release triple and publish."""
        config = Config.objects.create(version=1,
                                       owner=self.owner,
                                       app=self,
                                       values={})
        build = Build.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=build)
        self.formation.publish()

    def destroy(self):
        """Purge the app from configuration management, then delete it."""
        CM.purge_app(self.flat())
        self.delete()
        self.formation.publish()

    def publish(self):
        """Publish the application to configuration management"""
        data = self.calculate()
        CM.publish_app(self.flat(), data)
        return data

    def converge(self):
        """Publish this app, then converge the whole formation."""
        databag = self.publish()
        self.formation.converge()
        return databag

    def calculate(self):
        """Return a representation for configuration management"""
        d = {}
        d['id'] = self.id
        d['release'] = {}
        releases = self.release_set.all().order_by('-created')
        if releases:
            release = releases[0]
            d['release']['version'] = release.version
            d['release']['config'] = release.config.values
            d['release']['build'] = {
                'image': release.build.image + ":v{}".format(release.version)
            }
            if release.build.url:
                d['release']['build']['url'] = release.build.url
                d['release']['build']['procfile'] = release.build.procfile
        d['containers'] = {}
        containers = self.container_set.all()
        if containers:
            for c in containers:
                d['containers'].setdefault(c.type, {})[str(c.num)] = c.status
        d['domains'] = []
        if self.formation.domain:
            d['domains'].append('{}.{}'.format(self.id, self.formation.domain))
        else:
            # no formation-wide domain: expose every proxy node's FQDN instead
            for n in self.formation.node_set.filter(layer__proxy=True):
                d['domains'].append(n.fqdn)
        # add proper sharing and access controls
        d['users'] = {self.owner.username: '******'}
        for u in (get_users_with_perms(self)):
            d['users'][u.username] = 'user'
        return d

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(
            ['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container.

        Picks a random runtime node and executes the command inside the
        release's docker image via the node's run channel.
        """
        # TODO: add support for interactive shell
        nodes = self.formation.node_set.filter(
            layer__runtime=True).order_by('?')
        if not nodes:
            raise EnvironmentError('No nodes available to run command')
        node = nodes[0]
        release = self.release_set.order_by('-created')[0]
        # prepare ssh command
        image = release.build.image + ":v{}".format(release.version)
        docker_args = ' '.join(['-a', 'stdout', '-a', 'stderr', '-rm', image])
        env_args = ' '.join([
            "-e '{k}={v}'".format(**locals())
            for k, v in release.config.values.items()
        ])
        log_event(self, "deis run '{}'".format(command))
        # SECURITY: `command` is interpolated into the shell string unescaped;
        # a crafted command can break out of the docker invocation
        command = "sudo docker run {env_args} {docker_args} {command}".format(
            **locals())
        return node.run(command)
# Beispiel #7
# 0
class Formation(UuidAuditedModel):
    """
    Formation of nodes used to host applications
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    domain = models.CharField(max_length=128, blank=True, null=True)
    nodes = JSONField(default='{}', blank=True)

    class Meta:
        unique_together = (('owner', 'id'), )

    def __str__(self):
        return self.id

    def flat(self):
        """Flat dict representation used by configuration management."""
        return {'id': self.id, 'domain': self.domain, 'nodes': self.nodes}

    def build(self):
        """No-op: a formation has nothing to build."""
        return

    def destroy(self, *args, **kwargs):
        """Destroy all apps, then nodes, then layers, and purge from CM."""
        for app in self.app_set.all():
            app.destroy()
        group([tasks.destroy_node.si(node)
               for node in self.node_set.all()]).apply_async().join()
        group([tasks.destroy_layer.si(layer)
               for layer in self.layer_set.all()]).apply_async().join()
        CM.purge_formation(self.flat())
        self.delete()

    def publish(self):
        """Publish this formation's databag to configuration management."""
        databag = self.calculate()
        CM.publish_formation(self.flat(), databag)
        return databag

    def converge(self, **kwargs):
        """Publish, then converge every node in parallel."""
        databag = self.publish()
        converge_tasks = [tasks.converge_node.si(node)
                          for node in self.node_set.all()]
        group(*converge_tasks).apply_async().join()
        return databag

    def calculate(self):
        """Return a representation of this formation for config management"""
        node_map = {}
        proxies = []
        for node in self.node_set.all():
            node_map[node.id] = {'fqdn': node.fqdn,
                                 'runtime': node.layer.runtime,
                                 'proxy': node.layer.proxy}
            if node.layer.proxy is True:
                proxies.append(node.fqdn)
        app_map = {}
        for app in self.app_set.all():
            entry = app.calculate()
            entry['proxy'] = {'nodes': proxies,
                              'algorithm': 'round_robin',
                              'port': 80,
                              'backends': []}
            containers = {}
            entry['containers'] = containers
            for c in app.container_set.all().order_by('created'):
                containers.setdefault(c.type, {})
                containers[c.type][c.num] = "{0}:{1}".format(c.node.id, c.port)
                if c.type == 'web':
                    entry['proxy']['backends'].append(
                        "{0}:{1}".format(c.node.fqdn, c.port))
            app_map[app.id] = entry
        return {'id': self.id,
                'domain': self.domain,
                'nodes': node_map,
                'apps': app_map}
# Beispiel #8
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    formation = models.ForeignKey('Formation')
    # heartbeat counter and donation-credit balance for this app
    heartbeats = models.PositiveIntegerField(default=0)
    credits = models.FloatField(default=0)

    # NOTE(review): string default for a JSON field -- presumably parsed by
    # the JSONField implementation; confirm before changing
    containers = JSONField(default='{}', blank=True)

    class Meta:
        permissions = (('use_app', 'Can use app'), )

    def __str__(self):
        return self.id

    def github_uri(self):
        """
        TODO: Need to look at what happens to Github URIs that already have double hyphens in them.

        The app id encodes a GitHub repo path with '-' as the separator;
        '--' escapes a literal hyphen (e.g. 'user-my--repo' -> 'user/my-repo').
        """
        path = self.id\
            .replace('--', '*placeholder*')\
            .replace('-', '/')\
            .replace('*placeholder*', '-')
        return 'https://github.com/{}.git'.format(path)

    def flat(self):
        """Flat dict representation consumed by configuration management."""
        return {
            'id': self.id,
            'formation': self.formation.id,
            'containers': dict(self.containers)
        }

    def build(self):
        """Create the initial config/build/release triple and publish."""
        config = Config.objects.create(version=1,
                                       owner=self.owner,
                                       app=self,
                                       values={})
        build = Build.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=build)
        self.formation.publish()

    def destroy(self):
        """Purge the app from configuration management, then delete it."""
        CM.purge_app(self.flat())
        self.delete()
        self.formation.publish()

    def publish(self):
        """Publish the application to configuration management"""
        data = self.calculate()
        CM.publish_app(self.flat(), data)
        return data

    def converge(self):
        """Publish this app, then converge the whole formation."""
        databag = self.publish()
        self.formation.converge()
        return databag

    def calculate(self):
        """Return a representation for configuration management"""
        d = {}
        d['id'] = self.id
        d['release'] = {}
        releases = self.release_set.all().order_by('-created')
        if releases:
            release = releases[0]
            d['release']['version'] = release.version
            d['release']['config'] = release.config.values
            d['release']['build'] = {'image': release.build.image}
            if release.build.url:
                d['release']['build']['url'] = release.build.url
                d['release']['build']['procfile'] = release.build.procfile
        d['containers'] = {}
        containers = self.container_set.all()
        if containers:
            for c in containers:
                d['containers'].setdefault(c.type, {})[str(c.num)] = c.status
        d['domains'] = []
        if self.formation.domain:
            d['domains'].append('{}.{}'.format(self.id, self.formation.domain))
        else:
            # no formation-wide domain: expose every proxy node's FQDN instead
            for n in self.formation.node_set.filter(layer__proxy=True):
                d['domains'].append(n.fqdn)
        # add proper sharing and access controls
        d['users'] = {self.owner.username: '******'}
        for u in (get_users_with_perms(self)):
            d['users'][u.username] = 'user'
        return d

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(
            ['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        nodes = self.formation.node_set.filter(
            layer__runtime=True).order_by('?')
        if not nodes:
            raise EnvironmentError('No nodes available to run command')
        app_id, node = self.id, nodes[0]
        release = self.release_set.order_by('-created')[0]
        # prepare ssh command
        # app_id and version are consumed below via format(**locals())
        version = release.version
        docker_args = ' '.join([
            '-a', 'stdout', '-a', 'stderr', '-rm', '-v',
            '/opt/deis/runtime/slugs/{app_id}-v{version}:/app'.format(
                **locals()), 'deis/slugrunner'
        ])
        env_args = ' '.join([
            "-e '{k}={v}'".format(**locals())
            for k, v in release.config.values.items()
        ])
        # NOTE(review): `command` is interpolated into the shell string
        # unescaped -- a crafted command can break out of the docker invocation
        command = "sudo docker run {env_args} {docker_args} {command}".format(
            **locals())
        return node.run(command)

    def scaleoff(self):
        """
        When an app runs out of credits scale all its containers down.
        """
        # TODO: spin down *all* container types
        Container.objects.scale(self, {'web': 0, 'worker': 0})
        self.converge()

    def addCredits(self, credits):
        """
        Add credits to app. Also take a cut for Danabox and spread the rest around the other apps.
        We hope that apps are receiving multiple simultaneous donations, so let's use transactions
        to make sure updates are atomic.
        """
        remaining = 1 - settings.DANABOX_CUT - settings.POOL_CUT
        danabox_credits = credits * settings.DANABOX_CUT
        pool_credits = credits * settings.POOL_CUT
        remaining_credits = credits * remaining

        # Give the majority to the chosen app
        app = App.objects.select_for_update().filter(
            id=self.id)[0]  # Use transaction
        app.credits += remaining_credits
        app.save()
        # Keep track of credits going to Danabox
        # NOTE(review): assumes a Formation row with id 'swanson' and a
        # danabox_credits field exists -- confirm against the Formation model
        swanson = Formation.objects.select_for_update().filter(
            id='swanson')[0]  # Use transaction
        swanson.danabox_credits += danabox_credits
        swanson.save()

        # Give a slice to all the other apps
        # TODO; don't give to apps that aren't being used
        # NOTE(review): the share divisor counts this app too, even though it
        # is skipped below -- presumably intentional, verify
        apps = App.objects.select_for_update().all()  # Use transaction
        share = pool_credits / len(apps)
        for app in apps:
            if app.id == self.id:
                continue  # Skip the app that acually received the main donation.
            app.credits += share
            app.save()
# Beispiel #9
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    cluster = models.ForeignKey('Cluster')
    # NOTE(review): mutable dict default is shared across field instances;
    # consider default=dict if this JSONField supports callables
    structure = JSONField(default={},
                          blank=True,
                          validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'), )

    def __str__(self):
        return self.id

    @property
    def url(self):
        """Public URL of the app under its cluster's domain."""
        return self.id + '.' + self.cluster.domain

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        # append in *binary* mode: the message is encoded by hand below, and
        # writing bytes to a text-mode handle raises TypeError on Python 3
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'),
                  'ab') as f:
            msg = "{} deis[api]: {}\n".format(
                time.strftime('%Y-%m-%d %H:%M:%S'), message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial release"""
        config = Config.objects.create(owner=self.owner, app=self)
        build = Build.objects.create(owner=self.owner,
                                     app=self,
                                     image=settings.DEFAULT_BUILD)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=build)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        for c in self.container_set.all():
            c.destroy()
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application"""
        containers = self.container_set.all()
        self._deploy_containers(containers, release)
        # update release in database
        for c in containers:
            c.release = release
            c.save()
        self.release = release
        self.save()
        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}
        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}
        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}
        # default to heroku workflow
        else:
            structure = {'web': 1}
        self.scale(user, structure)

    def _deploy_containers(self, to_deploy, release, **kwargs):
        """Deploys containers via the scheduler"""
        threads = []
        for c in to_deploy:
            threads.append(threading.Thread(target=c.deploy, args=(release, )))
        [t.start() for t in threads]
        [t.join() for t in threads]

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(
                        container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        self.log(msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        # iterate over a *snapshot* of the keys: the body pops entries from
        # requested_structure, and mutating a dict while iterating its live
        # key view raises RuntimeError on Python 3
        for container_type in list(requested_structure):
            containers = list(
                self.container_set.filter(
                    type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(
                Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
                # remove the database record
                for c in to_remove:
                    c.delete()
        # save new structure to the database
        self.structure = structure
        self.save()
        return changed

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        create_threads = []
        start_threads = []
        for c in to_add:
            create_threads.append(threading.Thread(target=c.create))
            start_threads.append(threading.Thread(target=c.start))
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        destroy_threads = []
        for c in to_destroy:
            destroy_threads.append(threading.Thread(target=c.destroy))
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(
            ['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        self.log(msg)
        c_num = max([c.num for c in self.container_set.filter(type='admin')]
                    or [0]) + 1
        # create the admin container record *before* the try block: the old
        # code created it inside the try, so a creation failure made the
        # finally clause raise NameError on the unbound `c`, masking the
        # original exception
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='admin',
                                     num=c_num)
        try:
            image = c.release.image + ':v' + str(c.release.version)

            # check for backwards compatibility
            def _has_hostname(image):
                repo, tag = utils.parse_repository_tag(image)
                return True if '/' in repo and '.' in repo.split(
                    '/')[0] else False

            if not _has_hostname(image):
                image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                          settings.REGISTRY_PORT, image)
            # SECURITY: shell-escape user input
            escaped_command = command.replace("'", "'\\''")
            return c.run(escaped_command)
        # always cleanup admin containers
        finally:
            c.delete()
# Beispiel #10
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    cluster = models.ForeignKey('Cluster')
    # NOTE(review): the mutable default `{}` is shared across field instances;
    # prefer `default=dict` if this JSONField implementation accepts a callable.
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    @property
    def url(self):
        """Public URL for the app: <app-id>.<cluster-domain>."""
        return self.id + '.' + self.cluster.domain

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
            msg = "{} deis[api]: {}\n".format(time.strftime('%Y-%m-%d %H:%M:%S'), message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Seed the app with its initial config, default build, and v1 release."""
        config = Config.objects.create(owner=self.owner, app=self)
        build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)

    def delete(self, *args, **kwargs):
        """Destroy all containers and remove on-disk logs, then delete the row."""
        for c in self.container_set.all():
            c.destroy()
        # delete application logs stored by deis/logger
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)
        return super(App, self).delete(*args, **kwargs)

    def deploy(self, release, initial=False):
        """Deploy a release; on the first deploy, infer and apply a default structure."""
        tasks.deploy_release.delay(self, release).get()
        if initial:
            # if there is no SHA, assume a docker image is being promoted
            if not release.build.sha:
                self.structure = {'cmd': 1}
            # if a dockerfile exists without a procfile, assume docker workflow
            elif release.build.dockerfile and not release.build.procfile:
                self.structure = {'cmd': 1}
            # if a procfile exists without a web entry, assume docker workflow
            elif release.build.procfile and 'web' not in release.build.procfile:
                self.structure = {'cmd': 1}
            # default to heroku workflow
            else:
                self.structure = {'web': 1}
            self.save()
            self.scale()

    def destroy(self, *args, **kwargs):
        """Alias for delete()."""
        return self.delete(*args, **kwargs)

    def scale(self, **kwargs):  # noqa
        """Scale containers up or down to match requested.

        Compares self.structure (requested counts per process type) against the
        live container records, creates/removes the difference, and dispatches
        start/stop celery tasks. Returns True if anything changed.
        """
        requested_containers = self.structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_containers.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = 'containers scaled ' + ' '.join(
            "{}={}".format(k, v) for k, v in requested_containers.items())
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        # iterate over a snapshot of the keys: the loop body pops entries from
        # requested_containers, which breaks key-view iteration on Python 3
        # (Python 2's .keys() happened to return a list copy)
        for container_type in list(requested_containers):
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_containers.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            # scale down: remove the newest containers first
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            # scale up: create records with strictly increasing numbers
            while diff > 0:
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            subtasks = []
            if to_add:
                subtasks.append(tasks.start_containers.s(to_add))
            if to_remove:
                subtasks.append(tasks.stop_containers.s(to_remove))
            # run starts and stops in parallel and wait for all to finish
            group(*subtasks).apply_async().join()
            log_event(self, msg)
            self.log(msg)
        return changed

    def logs(self):
        """Return aggregated log data for this application.

        Raises EnvironmentError if no log file exists for this app.
        """
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container.

        Creates a temporary 'admin' container record and dispatches the command
        via celery, returning the (return_code, output) tuple.
        """
        # TODO: add support for interactive shell
        msg = "deis run '{}'".format(command)
        log_event(self, msg)
        self.log(msg)
        # next admin container number (1 if none exist yet)
        c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='admin',
                                     num=c_num)
        rc, output = tasks.run_command.delay(c, command).get()
        return rc, output