Ejemplo n.º 1
0
class Build(UuidAuditedModel):
    """
    Record of a single software build consumed by runtime nodes.
    """

    # ownership and provenance
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField('SHA', max_length=255, blank=True)
    output = models.TextField(blank=True)

    image = models.CharField(max_length=256, default='deis/slugbuilder')

    procfile = JSONField(blank=True)
    dockerfile = models.TextField(blank=True)
    config = JSONField(blank=True)

    url = models.URLField('URL')
    size = models.IntegerField(blank=True, null=True)
    checksum = models.CharField(max_length=255, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'), )

    def __str__(self):
        # Label builds as "<app id>-<short sha>", e.g. "myapp-a1b2c3d".
        short_sha = self.sha[:7]
        return "%s-%s" % (self.app.id, short_sha)
Ejemplo n.º 2
0
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes.

    Builds are created when a git push is received; each new build sends
    a release signal and may scale up an initial web container.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField('SHA', max_length=255, blank=True)
    output = models.TextField(blank=True)

    # Docker image used to execute this build
    image = models.CharField(max_length=256, default='deis/buildstep')

    procfile = JSONField(blank=True)
    dockerfile = models.TextField(blank=True)
    config = JSONField(blank=True)

    url = models.URLField('URL')
    size = models.IntegerField(blank=True, null=True)
    checksum = models.CharField(max_length=255, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'), )

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha)

    @classmethod
    def push(cls, push):
        """Process a push from a local Git server.

        Creates a new Build and returns the application's
        databag for processing by the git-receive hook.
        """
        # SECURITY:
        # we assume the first part of the ssh key name
        # is the authenticated user because we trust gitosis
        username = push.pop('username').split('_')[0]
        # retrieve the user and app instances
        user = User.objects.get(username=username)
        app = App.objects.get(owner=user, id=push.pop('app'))
        # merge the push with the required model instances
        push['owner'] = user
        push['app'] = app
        # create the build
        new_build = cls.objects.create(**push)
        # send a release signal
        release_signal.send(sender=push, build=new_build, app=app, user=user)
        # see if we need to scale an initial web container;
        # .exists() issues a cheap EXISTS query instead of len(), which
        # would fetch and materialize every matching row
        if app.formation.node_set.filter(layer__runtime=True).exists() and \
           not app.container_set.filter(type='web').exists():
            # scale an initial web container
            Container.objects.scale(app, {'web': 1})
        # publish and converge the application
        return app.converge()
Ejemplo n.º 3
0
class Provider(UuidAuditedModel):
    """Cloud provider settings for a user.

    Available as `user.provider_set`.
    """

    objects = ProviderManager()

    # (stored slug, human-readable label)
    PROVIDERS = (
        ('ec2', 'Amazon Elastic Compute Cloud (EC2)'),
        ('mock', 'Mock Reference Provider'),
        ('rackspace', 'Rackspace Open Cloud'),
        ('static', 'Static Node'),
        ('digitalocean', 'Digital Ocean'),
    )

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64)
    type = models.SlugField(max_length=16, choices=PROVIDERS)
    creds = JSONField(blank=True)

    class Meta:
        unique_together = (('owner', 'id'), )

    def __str__(self):
        # Render as "<slug>-<human readable provider type>".
        label = self.get_type_display()
        return "%s-%s" % (self.id, label)
Ejemplo n.º 4
0
class Service(models.Model):
    """A deployable service and the number of instances it should run."""

    service_name = models.CharField(max_length=512)
    instance_amount = models.IntegerField()
    image_name = models.CharField(max_length=512)
    # NOTE(review): stored as CharField, not DateTimeField -- presumably a
    # timestamp string supplied by an external system; confirm before changing.
    created_at = models.CharField(max_length=512, null=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Use a callable default: `default={}` shares one mutable dict across
    # every model instance (flagged by Django's fields.W010 system check).
    details = JSONField(default=dict, blank=True)
    finished_at = models.IntegerField(default=0)
Ejemplo n.º 5
0
class Node(UuidAuditedModel):
    """
    Node used to host containers

    List of nodes available as `formation.nodes`
    """

    objects = NodeManager()

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.CharField(max_length=64)
    formation = models.ForeignKey('Formation')
    layer = models.ForeignKey('Layer')
    num = models.PositiveIntegerField()

    provider_id = models.SlugField(max_length=64, blank=True, null=True)
    fqdn = models.CharField(max_length=256, blank=True, null=True)
    # TODO: add celery beat tasks for monitoring node health
    # NOTE: a CharField named `status` (default='up') declared earlier in the
    # class body was silently shadowed by this JSONField of the same name and
    # never took effect; the dead duplicate declaration has been removed.
    status = JSONField(blank=True, null=True)

    class Meta:
        unique_together = (('formation', 'id'), )

    def __str__(self):
        return self.id

    def flat(self):
        """Return a flat dict of this node and its layer/flavor settings,
        suitable for passing to provider / configuration-management tasks."""
        return {
            'id': self.id,
            'provider_type': self.layer.flavor.provider.type,
            'formation': self.formation.id,
            'layer': self.layer.id,
            'creds': dict(self.layer.flavor.provider.creds),
            'params': dict(self.layer.flavor.params),
            'runtime': self.layer.runtime,
            'proxy': self.layer.proxy,
            'ssh_username': self.layer.ssh_username,
            'ssh_public_key': self.layer.ssh_public_key,
            'ssh_private_key': self.layer.ssh_private_key,
            'ssh_port': self.layer.ssh_port,
            'config': dict(self.layer.config),
            'provider_id': self.provider_id,
            'fqdn': self.fqdn
        }

    def build(self):
        """Provision this node via the async task queue (blocking)."""
        return tasks.build_node.delay(self).wait()

    def destroy(self):
        """Destroy this node via the async task queue (blocking)."""
        return tasks.destroy_node.delay(self).wait()

    def converge(self):
        """Converge this node's configuration (blocking)."""
        return tasks.converge_node.delay(self).wait()

    def run(self, command, **kwargs):
        """Run a shell command on this node (blocking); kwargs are ignored."""
        return tasks.run_node.delay(self, command).wait()
Ejemplo n.º 6
0
File: models.py — Project: taoy/deis
class Limit(UuidAuditedModel):
    """
    Set of resource limits applied by the scheduler
    during runtime execution of the Application.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    memory = JSONField(default='{}', blank=True)
    cpu = JSONField(default='{}', blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        # Render as "<app id>-<first 7 chars of the uuid>".
        short_uuid = self.uuid[:7]
        return "%s-%s" % (self.app.id, short_uuid)
Ejemplo n.º 7
0
class Layer(UuidAuditedModel):
    """
    Layer of nodes used by the formation

    All nodes in a layer share the same flavor and configuration.

    The layer stores SSH settings used to trigger node convergence,
    as well as other configuration used during node bootstrapping
    (e.g. Chef Run List, Chef Environment)
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64)

    formation = models.ForeignKey('Formation')
    flavor = models.ForeignKey('Flavor')

    # role flags: a layer can serve as a proxy tier, a runtime tier, or both
    proxy = models.BooleanField(default=False)
    runtime = models.BooleanField(default=False)

    # SSH credentials used to bootstrap and converge nodes in this layer
    ssh_username = models.CharField(max_length=64, default='ubuntu')
    ssh_private_key = models.TextField()
    ssh_public_key = models.TextField()
    ssh_port = models.SmallIntegerField(default=22)

    # example: {'run_list': ['deis::runtime'], 'environment': 'dev'}
    config = JSONField(default='{}', blank=True)

    class Meta:
        unique_together = (('formation', 'id'), )

    def __str__(self):
        return self.id

    def flat(self):
        # Flat dict of this layer plus its flavor/provider settings, in the
        # shape expected by provisioning tasks.
        return {
            'id': self.id,
            'provider_type': self.flavor.provider.type,
            'creds': dict(self.flavor.provider.creds),
            'formation': self.formation.id,
            'flavor': self.flavor.id,
            'params': dict(self.flavor.params),
            'proxy': self.proxy,
            'runtime': self.runtime,
            'ssh_username': self.ssh_username,
            'ssh_private_key': self.ssh_private_key,
            'ssh_public_key': self.ssh_public_key,
            'ssh_port': self.ssh_port,
            'config': dict(self.config)
        }

    def build(self):
        # Provision the layer via the async task queue; .wait() blocks
        # until the task completes.
        return tasks.build_layer.delay(self).wait()

    def destroy(self):
        # Tear down the layer via the async task queue (blocking).
        return tasks.destroy_layer.delay(self).wait()
Ejemplo n.º 8
0
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.

    Saving a new Config merges it on top of the app's most recent
    Config, so every row is a complete snapshot.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # Callable defaults: `default={}` would share a single mutable dict
    # across every model instance (Django system check fields.W010).
    values = JSONField(default=dict, blank=True)
    memory = JSONField(default=dict, blank=True)
    cpu = JSONField(default=dict, blank=True)
    tags = JSONField(default=dict, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'), )

    def __str__(self):
        return "{}-{}".format(self.app.id, self.uuid[:7])

    def save(self, **kwargs):
        """Merge the old config with the new before saving.

        A key set to None in the new config removes that key from the
        merged result -- this is how config values are unset.
        """
        try:
            previous_config = self.app.config_set.latest()
            for attr in ['cpu', 'memory', 'tags', 'values']:
                # Guard against migrations from older apps without fixes to
                # JSONField encoding.
                try:
                    data = getattr(previous_config, attr).copy()
                except AttributeError:
                    data = {}
                try:
                    new_data = getattr(self, attr).copy()
                except AttributeError:
                    new_data = {}
                data.update(new_data)
                # remove config keys if we provided a null value; a plain
                # loop (not a discarded list comprehension) makes the side
                # effect explicit
                for key, value in new_data.items():
                    if value is None:
                        data.pop(key)
                setattr(self, attr, data)
        except Config.DoesNotExist:
            pass
        return super(Config, self).save(**kwargs)
Ejemplo n.º 9
0
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # Callable defaults: `default={}` would share a single mutable dict
    # across every model instance (Django system check fields.W010).
    values = JSONField(default=dict, blank=True)
    memory = JSONField(default=dict, blank=True)
    cpu = JSONField(default=dict, blank=True)
    tags = JSONField(default=dict, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        # "<app id>-<short uuid>"
        return "{}-{}".format(self.app.id, self.uuid[:7])
Ejemplo n.º 10
0
class NotificationType(MultilingualModel):
    """A kind of notification: its templates, defaults and activation state."""

    # plurality markers used in template-name construction (see
    # get_template_prefix and the `plural` field below)
    PLURAL_OBJ = 'obj'
    PLURAL_SUB = 'sub'
    PLURAL_BOTH = 'both'

    slug = models.SlugField(
        max_length=255,
        help_text='Used for generating template names for this notification '
        'type.')
    default_preferences = JSONField(
        help_text='Default preferences that will be used if user has no '
        'NotificationPreference or NotificationPreference has no '
        'info about that kind of notification.')
    plural = models.CharField(max_length=32,
                              null=True,
                              blank=True,
                              choices=((PLURAL_SUB, 'Subject'),
                                       (PLURAL_OBJ, 'Object'), (PLURAL_BOTH,
                                                                'Both')))
    # content types constraining which models may act as the notification's
    # subject and object
    s_ct = models.ForeignKey(ContentType,
                             verbose_name='Subject Type',
                             related_name='s_ct',
                             null=True,
                             blank=True)
    o_ct = models.ForeignKey(ContentType,
                             verbose_name='Object Type',
                             related_name='o_ct',
                             null=True,
                             blank=True)
    collecting_period = models.PositiveSmallIntegerField(
        default=10,
        help_text='Amount of minutes that kind notifications collected and '
        'grouped.')
    is_active = models.BooleanField(
        default=False,
        help_text='You can stop that kind of of notifications by '
        'unchecking this.')

    def get_template_prefix(self, multiple_subs=False, multiple_objs=False):
        """
        Return template name prefix based on plurality of subject and
        objects.
        """
        # e.g. slug='comment' -> 'notification/comment/sub_sing_obj_plur/'
        SINGULAR_KEY = 'sing'
        PLURAL_KEY = 'plur'
        pfx = self.slug  # prefix
        sub = 'sub_%s' % (PLURAL_KEY if multiple_subs else SINGULAR_KEY)
        obj = 'obj_%s' % (PLURAL_KEY if multiple_objs else SINGULAR_KEY)
        return 'notification/%s/%s_%s/' % (pfx, sub, obj)

    def __unicode__(self):
        # Python 2 style string method -- this snippet predates __str__.
        return self.slug
Ejemplo n.º 11
0
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    # Callable default: `default={}` shares one mutable dict across every
    # model instance (Django system check fields.W010).
    procfile = JSONField(default=dict, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'), )

    def create(self, user, *args, **kwargs):
        """Create a new release from this build and deploy it.

        The release is deleted again if deployment fails, so a failed
        deploy leaves no dangling release behind.
        """
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        # first deploy of an app (no process structure yet) is special-cased
        initial = self.app.structure == {}
        try:
            self.app.deploy(user, new_release, initial=initial)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def save(self, **kwargs):
        """Destroy containers whose process type vanished from the Procfile."""
        try:
            previous_build = self.app.build_set.latest()
            to_destroy = []
            # dict membership works directly; no need for .keys()
            for proctype in previous_build.procfile:
                if proctype not in self.procfile:
                    for c in self.app.container_set.filter(type=proctype):
                        to_destroy.append(c)
            self.app._destroy_containers(to_destroy)
        except Build.DoesNotExist:
            pass
        return super(Build, self).save(**kwargs)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])
Ejemplo n.º 12
0
class Instance(models.Model):
    """A single running container instance of a Service on a host Agent."""

    name = models.CharField(max_length=512)
    service = models.ForeignKey(Service, null=True)
    instance_id = models.CharField(max_length=20, null=True)
    # NOTE(review): "continer" is a typo for "container", but renaming the
    # fields would require a schema migration and break existing callers,
    # so the names are kept as-is.
    continer_id = models.CharField(max_length=512, null=True)
    continer_ip = models.ForeignKey(IpInfo, null=True)
    # image_name = models.CharField(max_length=512)
    created_at = models.CharField(max_length=512, null=True)
    updated_at = models.DateTimeField(auto_now=True)
    host = models.ForeignKey(Agent, null=True)
    # Callable default: `default={}` shares one mutable dict across every
    # model instance (Django system check fields.W010).
    details = JSONField(default=dict, blank=True)
    command = models.CharField(max_length=512, null=True)
    hostname = models.CharField(max_length=512, null=True)
    volumes = models.CharField(max_length=512, null=True)
    environment = models.CharField(max_length=512, null=True)
    state = models.CharField(max_length=20, null=True)
Ejemplo n.º 13
0
class NotificationPreference(models.Model):
    """A user's preferences for one NotificationType, cached per (user, slug)."""

    user = models.ForeignKey(User)
    ntype = models.ForeignKey(NotificationType)
    # free-form preference payload; presumably mirrors the schema of
    # NotificationType.default_preferences -- TODO confirm
    preferences = JSONField()

    @staticmethod
    def cache_key(user_id, preference_slug):
        """Return the cache key under which these preferences are stored."""
        return 'user:%s:preferences:%s' % (user_id, preference_slug)

    def save(self, *args, **kwargs):
        # invalidate the cached copy whenever the preferences change
        super(NotificationPreference, self).save(*args, **kwargs)
        cache.delete(self.cache_key(self.user_id, self.ntype.slug))

    def __unicode__(self):
        # Python 2 style string method -- this snippet predates __str__.
        return '%s\'s preferences on "%s" notifications' % (
            self.user, self.ntype)
Ejemplo n.º 14
0
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()

    values = JSONField(default='{}', blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'), )

    def __str__(self):
        # Render as "<app id>-v<version>", e.g. "myapp-v3".
        return "%s-v%s" % (self.app.id, self.version)
Ejemplo n.º 15
0
class Flavor(UuidAuditedModel):
    """
    Virtual machine flavors associated with a Provider

    Params is a JSON field including unstructured data
    for provider API calls, like region, zone, and size.
    """
    objects = FlavorManager()

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64)
    provider = models.ForeignKey('Provider')
    # unstructured provider-API parameters (region, zone, size, ...)
    params = JSONField(blank=True)

    class Meta:
        # flavor slugs are unique per owner, not globally
        unique_together = (('owner', 'id'), )

    def __str__(self):
        return self.id
Ejemplo n.º 16
0
class Cluster(UuidAuditedModel):
    """
    Cluster used to run jobs.
    """

    CLUSTER_TYPES = (('mock', 'Mock Cluster'), ('coreos', 'CoreOS Cluster'),
                     ('chaos', 'Chaos Cluster'))

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.CharField(max_length=128, unique=True)
    type = models.CharField(max_length=16,
                            choices=CLUSTER_TYPES,
                            default='coreos')

    domain = models.CharField(max_length=128, validators=[validate_domain])
    hosts = models.CharField(max_length=256,
                             validators=[validate_comma_separated])
    auth = models.TextField()
    # Callable default: `default={}` shares one mutable dict across every
    # model instance (Django system check fields.W010).
    options = JSONField(default=dict, blank=True)

    def __str__(self):
        return self.id

    def _get_scheduler(self, *args, **kwargs):
        """Instantiate the scheduler backend matching this cluster's type."""
        # scheduler implementations live under the `scheduler` package,
        # one module per cluster type (scheduler.mock, scheduler.coreos, ...)
        module_name = 'scheduler.' + self.type
        mod = importlib.import_module(module_name)
        return mod.SchedulerClient(self.id, self.hosts, self.auth, self.domain,
                                   self.options)

    _scheduler = property(_get_scheduler)

    def create(self):
        """
        Initialize a cluster's router and log aggregator
        """
        return self._scheduler.setUp()

    def destroy(self):
        """
        Destroy a cluster's router and log aggregator
        """
        return self._scheduler.tearDown()
Ejemplo n.º 17
0
File: models.py — Project: taoy/deis
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    procfile = JSONField(default='{}', blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        # Render as "<app id>-<first 7 chars of the uuid>".
        short_uuid = self.uuid[:7]
        return "%s-%s" % (self.app.id, short_uuid)
Ejemplo n.º 18
0
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    # Callable default: `default={}` shares one mutable dict across every
    # model instance (Django system check fields.W010).
    procfile = JSONField(default=dict, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def create(self, user, *args, **kwargs):
        """Create a new release from this build and deploy it.

        The release is deleted again if deployment fails, so a failed
        deploy leaves no dangling release behind.
        """
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        # first deploy of an app (no process structure yet) is special-cased
        initial = self.app.structure == {}
        try:
            self.app.deploy(user, new_release, initial=initial)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])
Ejemplo n.º 19
0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    formation = models.ForeignKey('Formation')

    # mapping of container type -> count (JSON-encoded)
    containers = JSONField(default='{}', blank=True)

    def __str__(self):
        return self.id

    def flat(self):
        """Return a flat dict representation used when publishing to CM."""
        return {
            'id': self.id,
            'formation': self.formation.id,
            'containers': dict(self.containers)
        }

    def build(self):
        """Create the app's initial config, build and v1 release, then
        publish the formation and build the app via the task queue."""
        config = Config.objects.create(version=1,
                                       owner=self.owner,
                                       app=self,
                                       values={})
        build = Build.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=build)
        self.formation.publish()
        tasks.build_app.delay(self).wait()

    def destroy(self):
        """Destroy the application via the async task queue (blocking)."""
        tasks.destroy_app.delay(self).wait()

    def publish(self):
        """Publish the application to configuration management"""
        data = self.calculate()
        CM.publish_app(self.flat(), data)
        return data

    def converge(self):
        """Publish the app, converge its formation and return the databag."""
        databag = self.publish()
        self.formation.converge()
        return databag

    def calculate(self):
        """Return a representation for configuration management"""
        d = {}
        d['id'] = self.id
        d['release'] = {}
        # use the newest release, if any exists yet
        releases = self.release_set.all().order_by('-created')
        if releases:
            release = releases[0]
            d['release']['version'] = release.version
            d['release']['config'] = release.config.values
            d['release']['build'] = {'image': release.build.image}
            if release.build.url:
                d['release']['build']['url'] = release.build.url
                d['release']['build']['procfile'] = release.build.procfile
        d['containers'] = {}
        containers = self.container_set.all()
        if containers:
            for c in containers:
                d['containers'].setdefault(c.type, {})[str(c.num)] = c.status
        d['domains'] = []
        if self.formation.domain:
            d['domains'].append('{}.{}'.format(self.id, self.formation.domain))
        else:
            # no formation-wide domain: fall back to the proxy nodes' FQDNs
            for n in self.formation.node_set.filter(layer__proxy=True):
                d['domains'].append(n.fqdn)
        # TODO: add proper sharing and access controls
        d['users'] = {}
        for u in (self.owner.username, ):
            d['users'][u] = 'admin'
        return d

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(
            ['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container."""
        # pick a random runtime node to host the one-off container
        nodes = self.formation.node_set.filter(
            layer__runtime=True).order_by('?')
        if not nodes:
            raise EnvironmentError('No nodes available to run command')
        app_id, node = self.id, nodes[0]
        release = self.release_set.order_by('-created')[0]
        # prepare ssh command
        version = release.version
        docker_args = ' '.join([
            '-v', '/opt/deis/runtime/slugs/{app_id}-{version}/app:/app'.format(
                **locals()), release.build.image
        ])
        base_cmd = "export HOME=/app; cd /app && for profile in " \
                   "`find /app/.profile.d/*.sh -type f`; do . $profile; done"
        # NOTE(review): `command` is interpolated into a shell string without
        # any escaping -- command injection risk if user-controlled; confirm
        # the trust boundary before exposing this to untrusted input.
        command = "/bin/sh -c '{base_cmd} && {command}'".format(**locals())
        command = "sudo docker run {docker_args} {command}".format(**locals())
        return node.run(command)
Ejemplo n.º 20
0
class LoggedRequest(models.Model):
    """Audit record of a single HTTP request/response pair."""

    # severity derived from the response status code (see get_status)
    FINE = 1
    WARNING = 2
    ERROR = 3

    STATUS_CHOICES = ((FINE, _('Fine')), (WARNING, _('Warning')), (ERROR,
                                                                   _('Error')))

    COMMON_REQUEST = 1
    THROTTLED_REQUEST = 2
    SUCCESSFUL_LOGIN_REQUEST = 3
    UNSUCCESSFUL_LOGIN_REQUEST = 4

    TYPE_CHOICES = ((COMMON_REQUEST, _('Common request')),
                    (THROTTLED_REQUEST, _('Throttled request')),
                    (SUCCESSFUL_LOGIN_REQUEST, _('Successful login request')),
                    (UNSUCCESSFUL_LOGIN_REQUEST,
                     _('Unsuccessful login request')))

    objects = LoggedRequestManager()

    # Request information
    request_timestamp = models.DateTimeField(_('Request timestamp'),
                                             null=False,
                                             blank=False,
                                             db_index=True)
    method = models.CharField(_('Method'),
                              max_length=7,
                              null=False,
                              blank=False)
    path = models.CharField(_('URL path'),
                            max_length=255,
                            null=False,
                            blank=False)
    queries = JSONField(_('Queries'), null=True, blank=True)
    headers = JSONField(_('Headers'), null=True, blank=True)
    request_body = models.TextField(_('Request body'), null=False, blank=True)
    is_secure = models.BooleanField(_('HTTPS connection'),
                                    default=False,
                                    null=False,
                                    blank=False)

    # Response information
    response_timestamp = models.DateTimeField(_('Response timestamp'),
                                              null=False,
                                              blank=False)
    response_code = models.PositiveSmallIntegerField(_('Response code'),
                                                     null=False,
                                                     blank=False)
    status = models.PositiveSmallIntegerField(_('Status'),
                                              choices=STATUS_CHOICES,
                                              null=False,
                                              blank=False)
    type = models.PositiveSmallIntegerField(_('Request type'),
                                            choices=TYPE_CHOICES,
                                            default=COMMON_REQUEST,
                                            null=False,
                                            blank=False)
    response_body = models.TextField(_('Response body'),
                                     null=False,
                                     blank=True)
    error_description = models.TextField(_('Error description'),
                                         null=True,
                                         blank=True)

    # User information
    user = models.ForeignKey(AUTH_USER_MODEL,
                             null=True,
                             blank=True,
                             on_delete=models.SET_NULL)
    ip = models.GenericIPAddressField(_('IP address'), null=False, blank=False)

    def get_status(self, response):
        """Map the HTTP status code onto a FINE/WARNING/ERROR severity."""
        if response.status_code >= 500:
            return LoggedRequest.ERROR
        elif response.status_code >= 400:
            return LoggedRequest.WARNING
        else:
            return LoggedRequest.FINE

    def update_from_response(self, response):
        """Fill response fields from a (possibly streaming) HTTP response."""
        self.response_timestamp = timezone.now()
        self.status = self.get_status(response)
        self.response_code = response.status_code

        # only capture the body for whitelisted content types, truncated to
        # LOG_RESPONSE_BODY_LENGTH characters
        if not response.streaming and response.get(
                'content-type',
                '').split(';')[0] in LOG_RESPONSE_BODY_CONTENT_TYPES:
            response_body = truncatechars(
                force_text(response.content[:LOG_RESPONSE_BODY_LENGTH + 1],
                           errors='replace'), LOG_RESPONSE_BODY_LENGTH)
        else:
            response_body = ''

        self.response_body = response_body

    def response_time(self):
        """Return the request's total duration as a millisecond string.

        BUGFIX: `timedelta.microseconds` is only the sub-second component
        (0..999999), so any request taking >= 1 second was drastically
        under-reported; total_seconds() covers the whole elapsed time.
        """
        delta = self.response_timestamp - self.request_timestamp
        return '%s ms' % (delta.total_seconds() * 1000)

    response_time.short_description = _('Response time')

    def short_path(self):
        """Return the path truncated to 20 characters for admin listings."""
        return truncatechars(self.path, 20)

    short_path.short_description = _('Path')
    short_path.filter_by = 'path'
    short_path.order_by = 'path'

    def __str__(self):
        return self.path

    class Meta:
        ordering = ('-request_timestamp', )
        verbose_name = _('Logged request')
        verbose_name_plural = _('Logged requests')
Ejemplo n.º 21
0
class Story(BaseModel):
    """User-generated story tied to a question.

    Like relations live in redis (``LIKE_SET_PATTERN`` keys the per-story
    set of liker usernames); ``like_count``/``slot_count``/``comment_count``
    are denormalized counters refreshed by the ``update_*_count`` helpers.
    """

    # Lifecycle states.
    DRAFT = 0
    PUBLISHED = 1
    DELETED_BY_OWNER = 2
    DELETED_BY_ADMINS = 3

    # Audience options.
    VISIBLE_FOR_EVERYONE = 0
    VISIBLE_FOR_FOLLOWERS = 1

    STATUS_CHOICES = ((DRAFT, _('Draft')), (PUBLISHED, _('Published')),
                      (DELETED_BY_OWNER, _('Deleted by Owner')),
                      (DELETED_BY_ADMINS, _('Deleted by Admins')))

    VISIBLE_FOR_CHOICES = ((VISIBLE_FOR_EVERYONE, _('Everyone')),
                           (VISIBLE_FOR_FOLLOWERS, _('Followers')))

    # Redis key template holding the set of usernames who liked this story.
    LIKE_SET_PATTERN = 'answer:%s:likes'
    question = models.ForeignKey('question.Question', null=True, blank=True)
    question_meta = models.ForeignKey('question.QuestionMeta')
    title = models.CharField(_('Title'), max_length=255, null=True, blank=True)
    cover_img = JSONField(null=True, blank=True)
    description = models.TextField(_('Description'), null=True, blank=True)
    is_featured = models.BooleanField(_('Featured'), default=False)
    is_nsfw = models.BooleanField(_('NSFW'), default=False)
    is_anonymouse = models.BooleanField(_('Hide my name'), default=False)
    is_playble = models.BooleanField(default=False)
    like_count = models.PositiveIntegerField(default=0)
    slot_count = models.PositiveIntegerField(null=True, blank=True)
    comment_count = models.PositiveIntegerField(null=True, blank=True)
    status = models.PositiveSmallIntegerField(default=DRAFT,
                                              choices=STATUS_CHOICES)
    visible_for = models.PositiveSmallIntegerField(
        default=VISIBLE_FOR_EVERYONE,
        verbose_name=_('Visible For'),
        choices=VISIBLE_FOR_CHOICES)

    objects = StoryManager()

    @property
    def humanized_order(self):
        """1-based position for display (``order`` is stored 0-based)."""
        return self.order + 1

    @property
    def is_deleted(self):
        # NOTE(review): any non-DRAFT status is truthy, so PUBLISHED (1)
        # also reports True here -- confirm this naming is intentional.
        return bool(self.status)

    def is_liked_by(self, user):
        """True if *user*'s username is in this story's redis like-set."""
        return redis.sismember(self._like_set_key(), user.username)

    def is_visible_for(self, user, blocked_user_ids=None):
        """Decide whether *user* may see this story.

        ``blocked_user_ids`` may be supplied by callers that already
        computed it (avoids one query per story); when omitted or empty it
        is computed on demand.  The default used to be a shared mutable
        ``[]`` -- the classic Python mutable-default pitfall -- and is now
        ``None``.
        """
        if user.is_superuser:
            return True

        if user.is_authenticated():

            if self.owner_id == user.id:
                return True

            if not blocked_user_ids:
                from apps.follow.models import compute_blocked_user_ids_for
                blocked_user_ids = compute_blocked_user_ids_for(user)

            if self.owner_id in blocked_user_ids or \
               user.id in blocked_user_ids:
                return False

            if self.status == self.PUBLISHED and \
               self.visible_for == Story.VISIBLE_FOR_FOLLOWERS and \
               user.id in self.owner.follower_user_ids:
                return True

        if self.status == Story.PUBLISHED and self.visible_for == \
           Story.VISIBLE_FOR_EVERYONE:
            return True

        return False

    def _like_set_key(self):
        """Redis key for this story's like-set."""
        return self.LIKE_SET_PATTERN % self.id

    def set_like(self, user, liked=True):
        """Add or remove *user*'s like; return the resulting liked state.

        Keeps the owner's scoreboard score in sync and emits
        ``story_like_changed`` only when the redis set actually changed.

        @type liked: object
        """
        from apps.account.models import UserProfile
        from apps.question.signals import story_like_changed

        is_liked = False

        if liked:
            result = redis.sadd(self._like_set_key(), user.username)
            if result:
                redis.zincrby(UserProfile.scoreboard_key(),
                              self.owner.username, 1)
                story_like_changed.send(sender=self)
                is_liked = True
        else:
            result = redis.srem(self._like_set_key(), user.username)
            if result:
                redis.zincrby(UserProfile.scoreboard_key(),
                              self.owner.username, -1)
                story_like_changed.send(sender=self)
            else:
                # nothing was removed, so the user never liked it; report
                # the state as still "liked" to mirror the original logic
                is_liked = True
        return is_liked

    def get_slot_count(self):
        """Return the slot count, lazily computing and caching it once."""
        if not self.slot_count:
            self.slot_count = self.slot_set.all().count()
            self.save(update_fields=['slot_count'])
        return self.slot_count

    def get_cover_img(self):
        """Return (and lazily build from the first slot) the cover image dict."""
        if not self.cover_img:
            try:
                slot = self.slot_set.first()
                thmb = get_thumbnail(slot.content.image, '220')
                self.cover_img = {
                    'url': thmb.url,
                    'width': thmb.width,
                    'height': thmb.height
                }
                self.save(update_fields=['cover_img'])
            except Exception:
                # was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit; narrow it to Exception
                logger.error('Couldn\'t generate cover image '
                             'for Story: %s' % self.id)
        return self.cover_img

    def get_absolute_url(self):
        return reverse('story', kwargs={'base62_id': self.base62_id})

    def get_update_images_url(self):
        return reverse('update-images-of-story',
                       kwargs={'base62_id': self.base62_id})

    def get_update_details_url(self):
        return reverse('update-details-of-story',
                       kwargs={'base62_id': self.base62_id})

    def get_likers_from_redis(self):
        """Build unsaved User stubs for every username in the like-set."""
        return [
            User(username=username)
            for username in redis.smembers(self._like_set_key())
        ]

    def get_like_count_from_redis(self):
        """Authoritative like count straight from redis."""
        return redis.scard(self._like_set_key())

    def get_next_story(self, requested_user=None):
        """Next public story (older) under the same question meta."""
        from apps.follow.models import compute_blocked_user_ids_for
        blocked_user_ids = compute_blocked_user_ids_for(requested_user) if requested_user \
            else []
        return Story.objects.filter(
            question_meta=self.question_meta,
            status=Story.PUBLISHED,
            visible_for=Story.VISIBLE_FOR_EVERYONE,
            created_at__lt=self.created_at)\
            .exclude(owner_id__in=blocked_user_ids)\
            .order_by('-created_at').first()

    def get_prev_story(self, requested_user=None):
        """Previous public story (newer) under the same question meta."""
        from apps.follow.models import compute_blocked_user_ids_for
        blocked_user_ids = compute_blocked_user_ids_for(requested_user) if requested_user \
            else []
        return Story.objects.filter(
            question_meta=self.question_meta,
            status=Story.PUBLISHED,
            visible_for=Story.VISIBLE_FOR_EVERYONE,
            created_at__gt=self.created_at)\
            .exclude(owner_id__in=blocked_user_ids)\
            .order_by('created_at').first()

    def update_like_count(self, save=False):
        """Update self.likes count from redis db, it does not save, must
        be saved manually (unless ``save=True``)."""
        self.like_count = self.get_like_count_from_redis()
        if save:
            self.save(update_fields=['like_count'])

    def update_slot_count(self, save=False):
        """Refresh the denormalized slot counter from the database."""
        self.slot_count = self.slot_set.count()
        if save:
            self.save(update_fields=['slot_count'])

    def update_comment_count(self, save=False):
        """Refresh the denormalized published-comment counter."""
        from apps.comment.models import Comment
        self.comment_count = Comment.objects.filter(status=Comment.PUBLISHED,
                                                    story=self).count()
        if save:
            self.save(update_fields=['comment_count'])

    def serialize_slots(self):
        """Serialize slots to the JSON structure the story editor expects."""
        data = []
        for slot in self.slot_set.all():  # TODO: Optimise that query.
            data.append({
                'pk': slot.pk,
                'order': slot.order,
                'cPk': slot.cPk,
                'cTp': 'image',
                'thumbnailUrl': slot.content.thumbnail_url,
                'fileCompleted': True
            })
        return dumps(data)

    def __unicode__(self):
        return self.title if self.title else u'Story by %s' % self.owner

    class Meta:
        ordering = ['-created_at']
        verbose_name_plural = 'Stories'
Ejemplo n.º 22
0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True, default=select_app_name,
                          validators=[validate_id_is_docker_compatible,
                                      validate_reserved_names])
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        # custom permission so users can be granted access to individual apps
        permissions = (('use_app', 'Can use app'),)

    @property
    def _scheduler(self):
        """Instantiate the scheduler client configured in settings."""
        mod = importlib.import_module(settings.SCHEDULER_MODULE)
        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    def __str__(self):
        """An app is identified by its slug id."""
        return self.id

    @property
    def url(self):
        """Hostname of the app under the platform domain."""
        return self.id + '.' + settings.DEIS_DOMAIN

    def _get_job_id(self, container_type):
        """Build the scheduler job id for a container type, e.g. ``myapp_v2.web``."""
        app = self.id
        release = self.release_set.latest()
        version = "v{}".format(release.version)
        job_id = "{app}_{version}.{container_type}".format(**locals())
        return job_id

    def _get_command(self, container_type):
        """Return the command the scheduler should run for *container_type*."""
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            release = self.release_set.latest()
            if release.build.dockerfile or not release.build.sha:
                return "bash -c '{}'".format(release.build.procfile[container_type])
            else:
                return 'start {}'.format(container_type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if container_type == 'cmd' else 'start {}'.format(container_type)

    def log(self, message, level=logging.INFO):
        """Logs a message in the context of this application.

        This prefixes log messages with an application "tag" that the customized deis-logspout will
        be on the lookout for.  When it's seen, the message-- usually an application event of some
        sort like releasing or scaling, will be considered as "belonging" to the application
        instead of the controller and will be handled accordingly.
        """
        logger.log(level, "[{}]: {}".format(self.id, message))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        try:
            # attempt to remove containers from the scheduler
            self._destroy_containers([c for c in self.container_set.exclude(type='run')])
        except RuntimeError:
            pass
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def restart(self, **kwargs):
        """Restart containers, optionally filtered by ``type`` and/or ``num``."""
        to_restart = self.container_set.all()
        if kwargs.get('type'):
            to_restart = to_restart.filter(type=kwargs.get('type'))
        if kwargs.get('num'):
            to_restart = to_restart.filter(num=kwargs.get('num'))
        self._restart_containers(to_restart)
        return to_restart

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        try:
            url = 'http://{}:{}/{}/'.format(settings.LOGGER_HOST, settings.LOGGER_PORT, self.id)
            requests.delete(url)
        except Exception as e:
            # Ignore errors deleting application logs.  An error here should not interfere with
            # the overall success of deleting an application, but we should log it.
            err = 'Error deleting existing application logs: {}'.format(e)
            log_event(self, err, logging.WARNING)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure:
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        scale_types = {}

        # iterate on a copy of the container_type keys
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            scale_types[container_type] = requested
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1

        if changed:
            # prefer the scheduler's native scale op when it exposes one
            if "scale" in dir(self._scheduler):
                self._scale_containers(scale_types, to_remove)
            else:
                if to_add:
                    self._start_containers(to_add)
                if to_remove:
                    self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.exclude(type='run').values(
            'type').annotate(Count('pk')).order_by()
        new_structure = structure.copy()
        new_structure.update({v['type']: v['pk__count'] for v in vals})
        self.structure = new_structure
        self.save()
        return changed

    def _scale_containers(self, scale_types, to_remove):
        """Scale each container type via the scheduler, then drop removed records."""
        release = self.release_set.latest()
        for scale_type in scale_types:
            image = release.image
            version = "v{}".format(release.version)
            kwargs = {'memory': release.config.memory,
                      'cpu': release.config.cpu,
                      'tags': release.config.tags,
                      'version': version,
                      'aname': self.id,
                      'num': scale_types[scale_type]}
            job_id = self._get_job_id(scale_type)
            command = self._get_command(scale_type)
            try:
                self._scheduler.scale(
                    name=job_id,
                    image=image,
                    command=command,
                    **kwargs)
            except Exception as e:
                err = '{} (scale): {}'.format(job_id, e)
                log_event(self, err, logging.ERROR)
                raise
        [c.delete() for c in to_remove]

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        if not to_add:
            return
        create_threads = [Thread(target=c.create) for c in to_add]
        start_threads = [Thread(target=c.start) for c in to_add]
        # create everything first, and only start if all creates succeeded
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if any(c.state != 'created' for c in to_add):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            self._destroy_containers(to_add)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set(['up']):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)
        # if the user specified a health check, try checking to see if it's running
        try:
            config = self.config_set.latest()
            if 'HEALTHCHECK_URL' in config.values.keys():
                self._healthcheck(to_add, config.values)
        except Config.DoesNotExist:
            pass

    def _healthcheck(self, containers, config):
        # if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY
        intervals = [1.0, 0.1, 0.5, 1.0]
        # HACK (bacongobbler): we need to wait until publisher has a chance to publish each
        # service to etcd, which can take up to 20 seconds.
        time.sleep(20)
        for i in xrange(len(intervals)):
            delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
            try:
                # sleep until the initial timeout is over
                if delay > 0:
                    time.sleep(delay * intervals[i])
                to_healthcheck = [c for c in containers if c.type in ['web', 'cmd']]
                self._do_healthcheck(to_healthcheck, config)
                break
            except exceptions.HealthcheckException as e:
                try:
                    next_delay = delay * intervals[i+1]
                    msg = "{}; trying again in {} seconds".format(e, next_delay)
                    log_event(self, msg, logging.WARNING)
                except IndexError:
                    log_event(self, e, logging.WARNING)
        # for/else: runs only when no attempt ever succeeded (no ``break``)
        else:
            self._destroy_containers(containers)
            msg = "aborting, app containers failed to respond to health check"
            log_event(self, msg, logging.ERROR)
            raise RuntimeError(msg)

    def _do_healthcheck(self, containers, config):
        """Probe each container's published URL once; raise HealthcheckException on failure."""
        path = config.get('HEALTHCHECK_URL', '/')
        timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
        if not _etcd_client:
            raise exceptions.HealthcheckException('no etcd client available')
        for container in containers:
            try:
                # the publisher writes each container's host:port under this etcd key
                key = "/deis/services/{self}/{container.job_id}".format(**locals())
                url = "http://{}{}".format(_etcd_client.get(key).value, path)
                response = requests.get(url, timeout=timeout)
                if response.status_code != requests.codes.OK:
                    raise exceptions.HealthcheckException(
                        "app failed health check (got '{}', expected: '200')".format(
                            response.status_code))
            except (requests.Timeout, requests.ConnectionError, KeyError) as e:
                raise exceptions.HealthcheckException(
                    'failed to connect to container ({})'.format(e))

    def _restart_containers(self, to_restart):
        """Restarts containers via the scheduler"""
        if not to_restart:
            return
        stop_threads = [Thread(target=c.stop) for c in to_restart]
        start_threads = [Thread(target=c.start) for c in to_restart]
        [t.start() for t in stop_threads]
        [t.join() for t in stop_threads]
        # NOTE(review): this checks for 'created' right after stopping --
        # confirm that Container.stop transitions state back to 'created'
        if any(c.state != 'created' for c in to_restart):
            err = 'warning, some containers failed to stop'
            log_event(self, err, logging.WARNING)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if any(c.state != 'up' for c in to_restart):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        if not to_destroy:
            return
        destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        # only drop DB records for containers that actually went away
        [c.delete() for c in to_destroy if c.state == 'destroyed']
        if any(c.state != 'destroyed' for c in to_destroy):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release):
        """Deploy a new release to this application"""
        existing = self.container_set.exclude(type='run')
        new = []
        scale_types = set()
        # clone each existing container onto the new release
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)
            scale_types.add(e.type)

        if new and "deploy" in dir(self._scheduler):
            self._deploy_app(scale_types, release, existing)
        else:
            self._start_containers(new)

            # destroy old containers
            if existing:
                self._destroy_containers(existing)

        # perform default scaling if necessary
        if self.structure == {} and release.build is not None:
            self._default_scale(user, release)

    def _deploy_app(self, scale_types, release, existing):
        """Roll each container type to *release* via the scheduler's deploy op."""
        for scale_type in scale_types:
            image = release.image
            version = "v{}".format(release.version)
            kwargs = {'memory': release.config.memory,
                      'cpu': release.config.cpu,
                      'tags': release.config.tags,
                      'aname': self.id,
                      'num': 0,
                      'version': version}
            job_id = self._get_job_id(scale_type)
            command = self._get_command(scale_type)
            try:
                self._scheduler.deploy(
                    name=job_id,
                    image=image,
                    command=command,
                    **kwargs)
            except Exception as e:
                err = '{} (deploy): {}'.format(job_id, e)
                log_event(self, err, logging.ERROR)
                raise
        [c.delete() for c in existing]

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)

    def logs(self, log_lines=str(settings.LOG_LINES)):
        """Return aggregated log data for this application."""
        try:
            url = "http://{}:{}/{}?log_lines={}".format(settings.LOGGER_HOST, settings.LOGGER_PORT,
                                                        self.id, log_lines)
            r = requests.get(url)
        # Handle HTTP request errors
        except requests.exceptions.RequestException as e:
            logger.error("Error accessing deis-logger using url '{}': {}".format(url, e))
            raise e
        # Handle logs empty or not found
        if r.status_code == 204 or r.status_code == 404:
            logger.info("GET {} returned a {} status code".format(url, r.status_code))
            raise EnvironmentError('Could not locate logs')
        # Handle unanticipated status codes
        if r.status_code != 200:
            logger.error("Error accessing deis-logger: GET {} returned a {} status code"
                         .format(url, r.status_code))
            raise EnvironmentError('Error accessing deis-logger')
        return r.content

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            repo, tag = dockerutils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)
Ejemplo n.º 23
0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    cluster = models.ForeignKey('Cluster')
    # default was the *string* ``'{}'``, but every consumer below treats
    # structure as a dict (``.copy()``, ``.pop()``, ``== {}``); use a dict
    # default to match the other App definitions in this file.
    structure = JSONField(default={}, blank=True)

    class Meta:
        # custom permission so users can be granted access to individual apps
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    def create(self, *args, **kwargs):
        """Seed a new application with an initial config, build and release."""
        config = Config.objects.create(owner=self.owner, app=self, values={})
        build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)

    def destroy(self, *args, **kwargs):
        """Destroy every container belonging to this application."""
        for c in self.container_set.all():
            c.destroy()

    def deploy(self, release):
        """Deploy *release*; scale ``web=1`` on the very first deploy."""
        tasks.deploy_release.delay(self, release).get()
        if self.structure == {}:
            # scale the web process by 1 initially
            self.structure = {'web': 1}
            self.save()
            self.scale()

    def scale(self, **kwargs):
        """Scale containers up or down to match requested."""
        requested_containers = self.structure.copy()
        release = self.release_set.latest()
        # increment new container nums off the most recent container
        all_containers = self.container_set.all().order_by('-created')
        container_num = 1 if not all_containers else all_containers[0].num + 1
        msg = 'Containers scaled ' + ' '.join(
            "{}={}".format(k, v) for k, v in requested_containers.items())
        # iterate and scale by container type (web, worker, etc);
        # snapshot the keys because the loop pops from the dict it iterates
        changed = False
        to_add, to_remove = [], []
        for container_type in list(requested_containers.keys()):
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            requested = requested_containers.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            subtasks = []
            if to_add:
                subtasks.append(tasks.start_containers.s(to_add))
            if to_remove:
                subtasks.append(tasks.stop_containers.s(to_remove))
            group(*subtasks).apply_async().join()
            log_event(self, msg)
        return changed

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        log_event(self, "deis run '{}'".format(command))
        c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='admin',
                                     num=c_num)
        rc, output = tasks.run_command.delay(c, command).get()
        return rc, output
Ejemplo n.º 24
0
    def mark_as_read(self):
        """Flip ``unread`` to False and persist, but only if it was unread."""
        if not self.unread:
            return
        self.unread = False
        self.save()


# Whether an optional free-form ``data`` JSONField has been attached to the
# Notification model; opt-in via the NOTIFY_USE_JSONFIELD setting.
EXTRA_DATA = False
if getattr(settings, 'NOTIFY_USE_JSONFIELD', False):
    try:
        from json_field.fields import JSONField
    except ImportError:
        raise ImproperlyConfigured(
            "You must have a suitable JSONField installed")

    # attach the field dynamically so the column only exists when enabled
    JSONField(blank=True, null=True).contribute_to_class(Notification, 'data')
    EXTRA_DATA = True


def notify_handler(verb, **kwargs):
    """
    Handler function to create Notification instance upon action signal call.
    """

    kwargs.pop('signal', None)
    recipient = kwargs.pop('recipient')
    actor = kwargs.pop('sender')
    newnotify = Notification(
        recipient=recipient,
        actor_content_type=ContentType.objects.get_for_model(actor),
        actor_object_id=actor.pk,
Ejemplo n.º 25
0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    cluster = models.ForeignKey('Cluster')
    # desired container count per process type, e.g. {'web': 2, 'worker': 1}
    structure = JSONField(default={},
                          blank=True,
                          validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'), )

    def __str__(self):
        return self.id

    @property
    def url(self):
        """Return the app's public hostname under its cluster's domain."""
        return self.id + '.' + self.cluster.domain

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'),
                  'a') as f:
            msg = "{} deis[api]: {}\n".format(
                time.strftime('%Y-%m-%d %H:%M:%S'), message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial release"""
        config = Config.objects.create(owner=self.owner, app=self)
        build = Build.objects.create(owner=self.owner,
                                     app=self,
                                     image=settings.DEFAULT_BUILD)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=build)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        for c in self.container_set.all():
            c.destroy()
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application"""
        containers = self.container_set.all()
        self._deploy_containers(containers, release)
        # update release in database
        for c in containers:
            c.release = release
            c.save()
        # NOTE(review): no ``release`` model field is visible on App; this
        # sets a transient attribute before save() -- confirm intentional
        self.release = release
        self.save()
        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}
        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}
        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}
        # default to heroku workflow
        else:
            structure = {'web': 1}
        self.scale(user, structure)

    def _deploy_containers(self, to_deploy, release, **kwargs):
        """Deploys containers via the scheduler, one thread per container"""
        threads = [threading.Thread(target=c.deploy, args=(release, ))
                   for c in to_deploy]
        for t in threads:
            t.start()
        # block until every container has been (re)deployed
        for t in threads:
            t.join()

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure.

        Raises EnvironmentError for process types missing from the
        release's procfile.  Returns True if any container changed.
        """
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(
                        container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        self.log(msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        # iterate over a snapshot of the keys: entries are popped below, and
        # mutating a dict while iterating its view raises RuntimeError on
        # Python 3
        for container_type in list(requested_structure):
            containers = list(
                self.container_set.filter(
                    type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(
                Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
                # remove the database record
                for c in to_remove:
                    c.delete()
        # save new structure to the database
        self.structure = structure
        self.save()
        return changed

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        create_threads = [threading.Thread(target=c.create) for c in to_add]
        start_threads = [threading.Thread(target=c.start) for c in to_add]
        # create every container before starting any of them
        for t in create_threads:
            t.start()
        for t in create_threads:
            t.join()
        for t in start_threads:
            t.start()
        for t in start_threads:
            t.join()

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        destroy_threads = [threading.Thread(target=c.destroy)
                           for c in to_destroy]
        for t in destroy_threads:
            t.start()
        for t in destroy_threads:
            t.join()

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(
            ['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        self.log(msg)
        c_num = max([c.num for c in self.container_set.filter(type='admin')]
                    or [0]) + 1
        # create the database record *before* entering try/finally: if the
        # creation itself fails there is nothing to clean up, and the old
        # placement made ``c.delete()`` in the finally clause raise
        # NameError, masking the original exception
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='admin',
                                     num=c_num)
        try:
            image = c.release.image + ':v' + str(c.release.version)

            # check for backwards compatibility
            def _has_hostname(image):
                repo, tag = utils.parse_repository_tag(image)
                return True if '/' in repo and '.' in repo.split(
                    '/')[0] else False

            if not _has_hostname(image):
                image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                          settings.REGISTRY_PORT, image)
            # SECURITY: shell-escape user input
            escaped_command = command.replace("'", "'\\''")
            return c.run(escaped_command)
        # always cleanup admin containers
        finally:
            c.delete()
# Ejemplo n.º 26
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    formation = models.ForeignKey('Formation')
    # count of liveness pings received for this app
    heartbeats = models.PositiveIntegerField(default=0)
    # donation credits accumulated by this app (see addCredits below)
    credits = models.FloatField(default=0)

    # per-type container layout, serialized as JSON
    containers = JSONField(default='{}', blank=True)

    class Meta:
        permissions = (('use_app', 'Can use app'), )

    def __str__(self):
        return self.id

    def github_uri(self):
        """
        TODO: Need to look at what happens to Github URIs that already have double hyphens in them.
        """
        # app ids encode 'user/repo': '-' stands for '/', '--' for a literal '-'
        path = self.id\
            .replace('--', '*placeholder*')\
            .replace('-', '/')\
            .replace('*placeholder*', '-')
        return 'https://github.com/{}.git'.format(path)

    def flat(self):
        # flattened dict representation consumed by configuration management
        return {
            'id': self.id,
            'formation': self.formation.id,
            'containers': dict(self.containers)
        }

    def build(self):
        # bootstrap the app with an initial config, build and release, then
        # republish the formation so CM picks up the new app
        config = Config.objects.create(version=1,
                                       owner=self.owner,
                                       app=self,
                                       values={})
        build = Build.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=build)
        self.formation.publish()

    def destroy(self):
        # purge from configuration management before deleting the DB row
        CM.purge_app(self.flat())
        self.delete()
        self.formation.publish()

    def publish(self):
        """Publish the application to configuration management"""
        data = self.calculate()
        CM.publish_app(self.flat(), data)
        return data

    def converge(self):
        # publish, then converge the owning formation's nodes
        databag = self.publish()
        self.formation.converge()
        return databag

    def calculate(self):
        """Return a representation for configuration management"""
        d = {}
        d['id'] = self.id
        d['release'] = {}
        releases = self.release_set.all().order_by('-created')
        if releases:
            # describe only the most recent release
            release = releases[0]
            d['release']['version'] = release.version
            d['release']['config'] = release.config.values
            d['release']['build'] = {'image': release.build.image}
            if release.build.url:
                d['release']['build']['url'] = release.build.url
                d['release']['build']['procfile'] = release.build.procfile
        d['containers'] = {}
        containers = self.container_set.all()
        if containers:
            for c in containers:
                d['containers'].setdefault(c.type, {})[str(c.num)] = c.status
        d['domains'] = []
        if self.formation.domain:
            d['domains'].append('{}.{}'.format(self.id, self.formation.domain))
        else:
            # no formation-wide domain: expose each proxy node's FQDN instead
            for n in self.formation.node_set.filter(layer__proxy=True):
                d['domains'].append(n.fqdn)
        # add proper sharing and access controls
        # NOTE(review): the owner's role string below looks redacted/garbled
        # -- confirm the intended value
        d['users'] = {self.owner.username: '******'}
        for u in (get_users_with_perms(self)):
            d['users'][u.username] = 'user'
        return d

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(
            ['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        # pick a random runtime node to host the one-off container
        nodes = self.formation.node_set.filter(
            layer__runtime=True).order_by('?')
        if not nodes:
            raise EnvironmentError('No nodes available to run command')
        app_id, node = self.id, nodes[0]
        release = self.release_set.order_by('-created')[0]
        # prepare ssh command
        version = release.version
        docker_args = ' '.join([
            '-a', 'stdout', '-a', 'stderr', '-rm', '-v',
            '/opt/deis/runtime/slugs/{app_id}-v{version}:/app'.format(
                **locals()), 'deis/slugrunner'
        ])
        env_args = ' '.join([
            "-e '{k}={v}'".format(**locals())
            for k, v in release.config.values.items()
        ])
        # NOTE(review): ``command`` is interpolated into a shell string with
        # no escaping -- confirm callers sanitize or trust this input
        command = "sudo docker run {env_args} {docker_args} {command}".format(
            **locals())
        return node.run(command)

    def scaleoff(self):
        """
        When an app runs out of credits scale all its containers down.
        """
        # TODO: spin down *all* container types
        Container.objects.scale(self, {'web': 0, 'worker': 0})
        self.converge()

    def addCredits(self, credits):
        """
        Add credits to app. Also take a cut for Danabox and spread the rest around the other apps.
        We hope that apps are receiving multiple simultaneous donations, so let's use transactions
        to make sure updates are atomic.
        """
        # split the donation: Danabox cut + shared pool cut + remainder here
        remaining = 1 - settings.DANABOX_CUT - settings.POOL_CUT
        danabox_credits = credits * settings.DANABOX_CUT
        pool_credits = credits * settings.POOL_CUT
        remaining_credits = credits * remaining

        # Give the majority to the chosen app
        app = App.objects.select_for_update().filter(
            id=self.id)[0]  # Use transaction
        app.credits += remaining_credits
        app.save()
        # Keep track of credits going to Danabox
        swanson = Formation.objects.select_for_update().filter(
            id='swanson')[0]  # Use transaction
        swanson.danabox_credits += danabox_credits
        swanson.save()

        # Give a slice to all the other apps
        # TODO; don't give to apps that aren't being used
        apps = App.objects.select_for_update().all()  # Use transaction
        share = pool_credits / len(apps)
        for app in apps:
            if app.id == self.id:
                continue  # Skip the app that actually received the main donation.
            app.credits += share
            app.save()
# Ejemplo n.º 27
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(
        max_length=64,
        unique=True,
        default=utils.generate_app_name,
        validators=[validate_id_is_docker_compatible, validate_reserved_names])
    # desired container count per process type, e.g. {'web': 2, 'worker': 1}
    structure = JSONField(default={},
                          blank=True,
                          validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'), )

    @property
    def _scheduler(self):
        """Instantiate the scheduler client configured in settings."""
        mod = importlib.import_module(settings.SCHEDULER_MODULE)
        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    def __str__(self):
        return self.id

    @property
    def url(self):
        """Return the app's hostname under the platform-wide domain."""
        return self.id + '.' + settings.DEIS_DOMAIN

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'),
                  'a') as f:
            msg = "{} deis[api]: {}\n".format(
                time.strftime(settings.DEIS_DATETIME_FORMAT), message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        # the first release carries no build; one is attached on first push
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=config,
                               build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        try:
            # attempt to remove containers from the scheduler
            self._destroy_containers(
                [c for c in self.container_set.exclude(type='run')])
        except RuntimeError:
            # best effort: proceed with deletion even if the scheduler fails
            pass
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        # a release must carry a build before containers can run
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(
                        container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        # NOTE(review): entries are popped from requested_structure while
        # iterating .keys(); on Python 3 this raises RuntimeError -- confirm
        # the target runtime
        for container_type in requested_structure.keys():
            containers = list(
                self.container_set.filter(
                    type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(
                Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
        # save new structure to the database
        # (recomputed from actual container rows rather than the request)
        vals = self.container_set.exclude(type='run').values('type').annotate(
            Count('pk')).order_by()
        self.structure = {v['type']: v['pk__count'] for v in vals}
        self.save()
        return changed

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        create_threads = []
        start_threads = []
        if not to_add:
            # do nothing if we didn't request any containers
            return
        for c in to_add:
            create_threads.append(threading.Thread(target=c.create))
            start_threads.append(threading.Thread(target=c.start))
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        # abort unless every container reached the 'created' state
        if set([c.state for c in to_add]) != set(['created']):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        # a failed start is logged but not fatal
        if set([c.state for c in to_add]) != set(['up']):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        destroy_threads = []
        if not to_destroy:
            # do nothing if we didn't request any containers
            return
        for c in to_destroy:
            destroy_threads.append(threading.Thread(target=c.destroy))
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        # delete DB rows only for containers the scheduler actually destroyed
        [c.delete() for c in to_destroy if c.state == 'destroyed']
        if set([c.state for c in to_destroy]) != set(['destroyed']):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application"""
        # clone every existing (non-run) container onto the new release,
        # start the clones, then tear down the old containers
        existing = self.container_set.exclude(type='run')
        new = []
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)

        self._start_containers(new)

        # destroy old containers
        if existing:
            self._destroy_containers(existing)

        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)

    def logs(self, log_lines):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        # NOTE(review): log_lines is passed straight to tail(1) -- callers
        # must supply a string, not an int
        data = subprocess.check_output(['tail', '-n', log_lines, path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError(
                'Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError(
                'No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        c_num = max([c.num for c in self.container_set.filter(type='run')]
                    or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            repo, tag = dockerutils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT, image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)
# Ejemplo n.º 28
# 0
class Formation(UuidAuditedModel):
    """
    Formation of nodes used to host applications
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    # optional wildcard domain shared by all apps in the formation
    domain = models.CharField(max_length=128, blank=True, null=True)
    # node layout, serialized as JSON
    nodes = JSONField(default='{}', blank=True)

    class Meta:
        unique_together = (('owner', 'id'), )

    def __str__(self):
        return self.id

    def flat(self):
        # flattened dict representation consumed by configuration management
        return {'id': self.id, 'domain': self.domain, 'nodes': self.nodes}

    def build(self):
        # provision the formation asynchronously and block until finished
        tasks.build_formation.delay(self).wait()

    def destroy(self, *args, **kwargs):
        # tear down the formation asynchronously and block until finished
        tasks.destroy_formation.delay(self).wait()

    def publish(self):
        """Publish this formation to configuration management"""
        data = self.calculate()
        CM.publish_formation(self.flat(), data)
        return data

    def converge(self, **kwargs):
        # publish first so nodes converge against the latest databag
        databag = self.publish()
        tasks.converge_formation.delay(self).wait()
        return databag

    def calculate(self):
        """Return a representation of this formation for config management"""
        d = {}
        d['id'] = self.id
        d['domain'] = self.domain
        d['nodes'] = {}
        proxies = []
        for n in self.node_set.all():
            d['nodes'][n.id] = {
                'fqdn': n.fqdn,
                'runtime': n.layer.runtime,
                'proxy': n.layer.proxy
            }
            if n.layer.proxy is True:
                proxies.append(n.fqdn)
        d['apps'] = {}
        for a in self.app_set.all():
            d['apps'][a.id] = a.calculate()
            # wire every proxy node in front of each app on port 80
            d['apps'][a.id]['proxy'] = {}
            d['apps'][a.id]['proxy']['nodes'] = proxies
            d['apps'][a.id]['proxy']['algorithm'] = 'round_robin'
            d['apps'][a.id]['proxy']['port'] = 80
            d['apps'][a.id]['proxy']['backends'] = []
            d['apps'][a.id]['containers'] = containers = {}
            for c in a.container_set.all().order_by('created'):
                containers.setdefault(c.type, {})
                containers[c.type].update(
                    {c.num: "{0}:{1}".format(c.node.id, c.port)})
                # web containers are also registered as proxy backends
                if c.type == 'web':
                    d['apps'][a.id]['proxy']['backends'].append(
                        "{0}:{1}".format(c.node.fqdn, c.port))
        return d
# Ejemplo n.º 29
# 0
# Archivo: models.py  Proyecto: taoy/deis
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    cluster = models.ForeignKey('Cluster')
    # desired container counts per process type, serialized as JSON
    structure = JSONField(default='{}', blank=True)

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    @property
    def url(self):
        """Return the app's public hostname under its cluster's domain."""
        return self.id + '.' + self.cluster.domain

    def create(self, *args, **kwargs):
        """Create the initial config, build and release for a new app"""
        config = Config.objects.create(owner=self.owner, app=self, values={})
        build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers and logs"""
        for c in self.container_set.all():
            c.destroy()
        # delete application logs stored by deis/logger
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)
        return super(App, self).delete(*args, **kwargs)

    def deploy(self, release, initial=False):
        """Deploy a release, applying a default structure on first deploy"""
        tasks.deploy_release.delay(self, release).get()
        if initial:
            # if there is no SHA, assume a docker image is being promoted
            if not release.build.sha:
                self.structure = {'cmd': 1}
            # if a dockerfile exists without a procfile, assume docker workflow
            elif release.build.dockerfile and not release.build.procfile:
                self.structure = {'cmd': 1}
            # if a procfile exists without a web entry, assume docker workflow
            elif release.build.procfile and 'web' not in release.build.procfile:
                self.structure = {'cmd': 1}
            # default to heroku workflow
            else:
                self.structure = {'web': 1}
            self.save()
            self.scale()

    def destroy(self, *args, **kwargs):
        """Alias for delete()"""
        return self.delete(*args, **kwargs)

    def scale(self, **kwargs):  # noqa
        """Scale containers up or down to match requested.

        Raises EnvironmentError for process types missing from the
        release's procfile.  Returns True if any container changed.
        """
        requested_containers = self.structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_containers.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = 'Containers scaled ' + ' '.join(
            "{}={}".format(k, v) for k, v in requested_containers.items())
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        # iterate over a snapshot of the keys: entries are popped below, and
        # mutating a dict while iterating its view raises RuntimeError on
        # Python 3
        for container_type in list(requested_containers):
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_containers.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            subtasks = []
            if to_add:
                subtasks.append(tasks.start_containers.s(to_add))
            if to_remove:
                subtasks.append(tasks.stop_containers.s(to_remove))
            # run starts and stops concurrently; block until both complete
            group(*subtasks).apply_async().join()
            log_event(self, msg)
        return changed

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, command):
        """Run a one-off command in an ephemeral app container."""
        # TODO: add support for interactive shell
        log_event(self, "deis run '{}'".format(command))
        c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='admin',
                                     num=c_num)
        rc, output = tasks.run_command.delay(c, command).get()
        return rc, output
# Ejemplo n.º 30
# 0
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    # user who created and owns this application
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # URL-safe slug; doubles as the app's primary human-facing identifier
    id = models.SlugField(max_length=64, unique=True)
    # formation (cluster of nodes) this app is deployed onto
    formation = models.ForeignKey('Formation')
    # mapping of container type -> count, stored as JSON text
    containers = JSONField(default='{}', blank=True)

    class Meta:
        # custom object-level permission used for sharing apps between users
        permissions = (('use_app', 'Can use app'), )

    def __str__(self):
        """Use the app's unique slug as its string representation."""
        return self.id

    def flat(self):
        """Return a plain-dict snapshot of this app for config management."""
        snapshot = {}
        snapshot['id'] = self.id
        snapshot['formation'] = self.formation.id
        # copy so callers can't mutate the model field in place
        snapshot['containers'] = dict(self.containers)
        return snapshot

    def build(self):
        """Seed the app with an initial config, build and v1 release,
        then publish the formation so the changes take effect.
        """
        initial_config = Config.objects.create(version=1,
                                               owner=self.owner,
                                               app=self,
                                               values={})
        initial_build = Build.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1,
                               owner=self.owner,
                               app=self,
                               config=initial_config,
                               build=initial_build)
        self.formation.publish()

    def destroy(self):
        """Remove this app from configuration management and delete it.

        The formation is re-published afterwards so the rest of the
        cluster stops routing traffic for the deleted app.
        """
        CM.purge_app(self.flat())
        self.delete()
        self.formation.publish()

    def publish(self):
        """Publish the application to configuration management"""
        databag = self.calculate()
        CM.publish_app(self.flat(), databag)
        return databag

    def converge(self):
        """Publish this app, then converge its formation's nodes.

        Returns the published databag for convenience.
        """
        data = self.publish()
        self.formation.converge()
        return data

    def calculate(self):
        """Return a representation for configuration management"""
        data = {}
        data['id'] = self.id
        data['release'] = {}
        releases = self.release_set.all().order_by('-created')
        if releases:
            current = releases[0]
            release_info = data['release']
            release_info['version'] = current.version
            release_info['config'] = current.config.values
            # tag the image with the release version
            release_info['build'] = {
                'image': current.build.image + ":v{}".format(current.version)
            }
            # slug-based builds carry a URL and a parsed Procfile
            if current.build.url:
                release_info['build']['url'] = current.build.url
                release_info['build']['procfile'] = current.build.procfile
        # container status keyed by type, then by container number
        data['containers'] = {}
        for container in self.container_set.all():
            by_type = data['containers'].setdefault(container.type, {})
            by_type[str(container.num)] = container.status
        # routable domains: either <app>.<formation domain> or the
        # FQDNs of the formation's proxy nodes
        data['domains'] = []
        if self.formation.domain:
            data['domains'].append('{}.{}'.format(self.id, self.formation.domain))
        else:
            for proxy_node in self.formation.node_set.filter(layer__proxy=True):
                data['domains'].append(proxy_node.fqdn)
        # add proper sharing and access controls
        data['users'] = {self.owner.username: '******'}
        for user in get_users_with_perms(self):
            data['users'][user.username] = 'user'
        return data

    def logs(self):
        """Return the tail of this application's aggregated log file."""
        log_path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(log_path):
            raise EnvironmentError('Could not locate logs')
        tail_cmd = ['tail', '-n', str(settings.LOG_LINES), log_path]
        return subprocess.check_output(tail_cmd)

    def run(self, command):
        """Run a one-off command in an ephemeral app container.

        Picks a random runtime node in the formation, builds a
        ``docker run`` invocation for the app's current release image,
        and executes it on that node.

        :param command: shell command string to run inside the container
        :returns: the result of the chosen node's ``run`` call
        :raises EnvironmentError: if the formation has no runtime nodes
        """
        # TODO: add support for interactive shell
        nodes = self.formation.node_set.filter(
            layer__runtime=True).order_by('?')
        if not nodes:
            raise EnvironmentError('No nodes available to run command')
        node = nodes[0]
        release = self.release_set.order_by('-created')[0]
        # tag the image with the release version so the node runs the
        # exact build for the current release
        image = release.build.image + ":v{}".format(release.version)
        docker_args = ' '.join(['-a', 'stdout', '-a', 'stderr', '-rm', image])
        env_args = ' '.join(["-e '{}={}'".format(k, v)
                             for k, v in release.config.values.items()])
        log_event(self, "deis run '{}'".format(command))
        # SECURITY: `command` and config values are interpolated into a
        # shell string executed remotely; values containing quotes can
        # break out of the intended docker invocation.
        command = "sudo docker run {} {} {}".format(
            env_args, docker_args, command)
        return node.run(command)