Example #1
File: models.py  Project: pkjhub/deis
class Layer(UuidAuditedModel):

    """
    Layer of nodes used by the formation

    All nodes in a layer share the same flavor and configuration
    """

    # on_delete is required on ForeignKey since Django 2.0; CASCADE is assumed
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    id = models.SlugField(max_length=64)

    formation = models.ForeignKey('Formation', on_delete=models.CASCADE)
    flavor = models.ForeignKey('Flavor', on_delete=models.CASCADE)
    level = models.PositiveIntegerField(default=0)

    # chef settings
    chef_version = models.CharField(max_length=32, default='11.4.4')
    run_list = models.CharField(max_length=512)
    initial_attributes = fields.JSONField(default='{}', blank=True)
    environment = models.CharField(max_length=64, default='_default')
    # ssh settings
    ssh_username = models.CharField(max_length=64, default='ubuntu')
    ssh_private_key = models.TextField()
    ssh_public_key = models.TextField()

    class Meta:
        unique_together = (('formation', 'id'),)

    def __str__(self):
        return self.id

    def build(self, *args, **kwargs):
        tasks = import_tasks(self.flavor.provider.type)
        name = "{0}-{1}".format(self.formation.id, self.id)
        # build a fresh argument tuple instead of shadowing the *args parameter
        task_args = (name, self.flavor.provider.creds.copy(),
                     self.flavor.params.copy())
        return tasks.build_layer.delay(*task_args).wait()

    def destroy(self, async_=False):  # 'async' is a reserved word in Python 3.7+
        tasks = import_tasks(self.flavor.provider.type)
        # create subtasks to terminate all nodes in parallel
        node_tasks = [node.destroy(async_=True) for node in self.node_set.all()]
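
For orientation, a minimal usage sketch (not part of the project source), assuming a running Celery broker and pre-existing `user`, `formation`, and `flavor` records; all names and values below are illustrative:

# hypothetical usage: every identifier except Layer is an assumption
layer = Layer.objects.create(owner=user, id='runtime',
                             formation=formation, flavor=flavor,
                             run_list='recipe[deis::runtime]',
                             ssh_private_key='...', ssh_public_key='...')
layer.build()                # blocks until the Celery build task completes
layer.destroy(async_=False)  # spawns subtasks to terminate the layer's nodes
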
Example #2
File: models.py  Project: pkjhub/deis
class Formation(UuidAuditedModel):

    """
    Formation of machine instances; the list of nodes is
    available as `formation.nodes`
    """
    objects = FormationManager()

    # on_delete required since Django 2.0; CASCADE is assumed
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    id = models.SlugField(max_length=64)
    layers = fields.JSONField(default='{}', blank=True)
    containers = fields.JSONField(default='{}', blank=True)

    class Meta:
        unique_together = (('owner', 'id'),)

    def scale_layers(self, **kwargs):
        """Scale layers up or down to match requested."""
        layers = self.layers.copy()
        funcs = []
        new_nodes = False
        for layer_id, requested in layers.items():
            layer = self.layer_set.get(id=layer_id)
            nodes = list(layer.node_set.all().order_by('created'))
            diff = requested - len(nodes)
            if diff == 0:
                continue
            while diff < 0:
                node = nodes.pop(0)
                funcs.append(node.terminate)
                diff = requested - len(nodes)
            while diff > 0:
                node = Node.objects.new(self, layer)
                nodes.append(node)
                funcs.append(node.launch)
                diff = requested - len(nodes)
                new_nodes = True
        # http://docs.celeryproject.org/en/latest/userguide/canvas.html#groups
        job = [func() for func in funcs]
        # launch/terminate nodes in parallel
        if job:
            group(*job).apply_async().join()
        # scale containers in case nodes have been destroyed
        runtime_layers = self.layer_set.filter(id='runtime')
        if runtime_layers.exists() and runtime_layers[0].node_set.count():
            self.scale_containers()
        # balance containers
        containers_balanced = self._balance_containers()
        # once nodes are in place, recalculate the formation and update the data bag
        databag = self.calculate()
        # force-converge nodes if there were new nodes or container rebalancing
        if new_nodes or containers_balanced:
            self.converge(databag)
        # save the formation with updated layers
        self.save()
        return databag

    def scale_containers(self, **kwargs):
        """Scale containers up or down to match requested."""
        requested_containers = self.containers.copy()
        runtime_layers = self.layer_set.filter(id='runtime')
        if len(runtime_layers) < 1:
            raise ScalingError('Must create a "runtime" layer to host containers')
        runtime_nodes = runtime_layers[0].node_set.all()
        if len(runtime_nodes) < 1:
            raise ScalingError('Must scale runtime nodes > 0 to host containers')
        # increment new container nums off the most recent container
        all_containers = self.container_set.all().order_by('-created')
        container_num = 1 if not all_containers else all_containers[0].num + 1
        # iterate and scale by container type (web, worker, etc)
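        # e.g. requested_containers == {'web': 4, 'worker': 2} (illustrative)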
        changed = False
        for container_type in requested_containers.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            requested = requested_containers.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                # get the next node with the most containers
                node = Formation.objects.next_container_node(self, container_type, reverse=True)
                # delete a container attached to that node
                for c in containers:
                    if node == c.node:
                        containers.remove(c)
                        c.delete()
                        diff += 1
                        break
            while diff > 0:
                # get the next node with the fewest containers
                node = Formation.objects.next_container_node(self, container_type)
                c = Container.objects.create(owner=self.owner,
                                             formation=self,
                                             type=container_type,
                                             num=container_num,
                                             node=node)
                containers.append(c)
                container_num += 1
                diff -= 1
        # once nodes are in place, recalculate the formation and update the data bag
        databag = self.calculate()
        if changed is True:
            self.converge(databag)
        # save the formation with updated containers
        self.save()
        return databag

    def balance(self, **kwargs):
        changed = self._balance_containers()
        databag = self.calculate()
        if changed:
            self.converge(databag)
        return databag

    def _balance_containers(self, **kwargs):
        runtime_nodes = self.node_set.filter(layer__id='runtime').order_by('created')
        if len(runtime_nodes) < 2:
            return False  # nothing to balance with fewer than two runtime nodes
        all_containers = Container.objects.filter(formation=self).order_by('-created')
        # get the next container number (e.g. web.19)
        container_num = 1 if not all_containers else all_containers[0].num + 1
        changed = False
        # iterate by unique container type
        for container_type in {c.type for c in all_containers}:
            # map node container counts => { 2: [b3, b4], 3: [ b1, b2 ] }
            n_map = {}
            for node in runtime_nodes:
                ct = len(node.container_set.filter(type=container_type))
                n_map.setdefault(ct, []).append(node)
            # loop until diff between min and max is 1 or 0
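            # e.g. n_map == {1: [n3], 3: [n1, n2]}: move one container off n1
            # onto n3, leaving {2: [n1, n3], 3: [n2]} (illustrative node names)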
            while max(n_map.keys()) - min(n_map.keys()) > 1:
                # get the most over-utilized node
                n_max = max(n_map.keys())
                n_over = n_map[n_max].pop(0)
                if len(n_map[n_max]) == 0:
                    del n_map[n_max]
                # get the most under-utilized node
                n_min = min(n_map.keys())
                n_under = n_map[n_min].pop(0)
                if len(n_map[n_min]) == 0:
                    del n_map[n_min]
                # create a container on the most under-utilized node
                Container.objects.create(owner=self.owner,
                                         formation=self,
                                         type=container_type,
                                         num=container_num,
                                         node=n_under)
                container_num += 1
                # delete the oldest container from the most over-utilized node
                c = n_over.container_set.filter(type=container_type).order_by('created')[0]
                c.delete()
                # update the n_map accordingly
                for n in (n_over, n_under):
                    ct = len(n.container_set.filter(type=container_type))
                    n_map.setdefault(ct, []).append(n)
                changed = True
        return changed

    def __str__(self):
        return self.id

    def calculate(self):
        """Return a Chef data bag item for this formation"""
        release = self.release_set.all().order_by('-created')[0]
        d = {}
        d['id'] = self.id
        d['release'] = {}
        d['release']['version'] = release.version
        d['release']['config'] = release.config.values
        d['release']['image'] = release.image
        d['release']['build'] = {}
        if release.build:
            d['release']['build']['url'] = release.build.url
            d['release']['build']['procfile'] = release.build.procfile
        # calculate proxy
        d['proxy'] = {}
        d['proxy']['algorithm'] = 'round_robin'
        d['proxy']['port'] = 80
        d['proxy']['backends'] = []
        # calculate container formation
        d['containers'] = {}
        for c in self.container_set.all().order_by('created'):
            # all container types get an exposed port starting at 5001
            port = 5000 + c.num
            d['containers'].setdefault(c.type, {})
            d['containers'][c.type].update(
                {c.num: "{0}:{1}".format(c.node.id, port)})
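            # e.g. d['containers']['web'] == {1: 'node-id:5001'} (illustrative)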
            # only proxy to 'web' containers
            if c.type == 'web':
                d['proxy']['backends'].append("{0}:{1}".format(c.node.fqdn, port))
        # add all the participating nodes
        d['nodes'] = {}
        for n in self.node_set.all():
            d['nodes'].setdefault(n.layer.id, {})[n.id] = n.fqdn
        # call a celery task to update the formation data bag
        if settings.CHEF_ENABLED:
            controller.update_formation.delay(self.id, d).wait()  # @UndefinedVariable
        return d

    def converge(self, databag):
        """Call a celery task to update the formation data bag."""
        if settings.CHEF_ENABLED:
            controller.update_formation.delay(self.id, databag).wait()  # @UndefinedVariable
        # TODO: batch node converging by layer.level
        nodes = [node for node in self.node_set.all()]
        job = group(*[n.converge() for n in nodes])
        job.apply_async().join()
        return databag

    def logs(self):
        """Return aggregated log data for this formation."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        # check_output returns bytes on Python 3, so decode for callers
        return data.decode('utf-8')

    def run(self, commands):
        """Run a one-off command in an ephemeral container."""
        runtime_nodes = self.node_set.filter(layer__id='runtime').order_by('?')
        if not runtime_nodes:
            raise EnvironmentError('No nodes available')
        return runtime_nodes[0].run(commands)

    def destroy(self):
        """Create subtasks to terminate all nodes in parallel."""
        all_layers = self.layer_set.all()
        tasks = [layer.destroy(async_=True) for layer in all_layers]
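
To close, a hedged sketch of driving the scaling entry points above (not from the project; `user` is an assumed existing record and the counts are illustrative):

formation = Formation.objects.create(owner=user, id='demo')
# a 'runtime' Layer tied to this formation must exist first (see Example #1)
formation.layers = {'runtime': 2}       # request two runtime nodes
databag = formation.scale_layers()      # launch/terminate nodes, then converge
formation.containers = {'web': 4}       # request four 'web' containers
databag = formation.scale_containers()  # place containers across those nodes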