Example #1
class DeployExecution(AbstractProjectResourceModel, AbstractExecutionModel):
    operation = models.CharField(max_length=128, blank=False, null=False)
    project = models.ForeignKey('ansible_api.Project',
                                on_delete=models.CASCADE)
    params = common_models.JsonDictTextField(default={})
    steps = common_models.JsonListTextField(default=[], null=True)

    STEP_STATUS_PENDING = 'pending'
    STEP_STATUS_RUNNING = 'running'
    STEP_STATUS_SUCCESS = 'success'
    STEP_STATUS_ERROR = 'error'

    @property
    def start(self):
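        # Dispatches on self.operation: runs the matching playbooks and keeps
        # the cluster status in sync with the outcome.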
        result = {"raw": {}, "summary": {}}
        pre_deploy_execution_start.send(self.__class__, execution=self)
        cluster = self.get_cluster()
        settings = Setting.get_settings()
        extra_vars = {
            "cluster_name": cluster.name,
            "cluster_domain": cluster.cluster_doamin_suffix
        }
        extra_vars.update(settings)
        extra_vars.update(cluster.configs)
        ignore_errors = False
        return_running = False
        try:
            if self.operation == "install":
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_INSTALLING)
                result = self.on_install(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'uninstall':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_DELETING)
                result = self.on_uninstall(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_READY)
                kubeops_api.cluster_monitor.delete_cluster_redis_data(
                    cluster.name)
            elif self.operation == 'bigip-config':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                ignore_errors = True
                result = self.on_f5_config(extra_vars)
            elif self.operation == 'upgrade':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_UPGRADING)
                package_name = self.params.get('package', None)
                package = Package.objects.get(name=package_name)
                extra_vars.update(package.meta.get('vars'))
                result = self.on_upgrade(extra_vars)
                if result.get('summary', {}).get('success', False):
                    cluster.upgrade_package(package_name)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'scale':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                ignore_errors = True
                return_running = True
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_scaling(extra_vars)
                cluster.exit_new_node()
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'add-worker':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                ignore_errors = True
                return_running = True
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_add_worker(extra_vars)
                cluster.exit_new_node()
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'remove-worker':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                ignore_errors = True
                return_running = True
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_remove_worker(extra_vars)
                if not result.get('summary', {}).get('success', False):
                    cluster.exit_new_node()
                else:
                    node_name = self.params.get('node', None)
                    cluster.change_to()
                    node = Node.objects.get(name=node_name)
                    node.delete()
                    cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'restore':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_RESTORING)
                cluster_backup_id = self.params.get('clusterBackupId', None)
                result = self.on_restore(extra_vars, cluster_backup_id)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'backup':
                logger.info(msg="cluster: {} exec: {} ".format(
                    cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_BACKUP)
                cluster_storage_id = self.params.get('backupStorageId', None)
                result = self.on_backup(extra_vars)
                self.on_upload_backup_file(cluster_storage_id)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            if not result.get('summary', {}).get('success', False):
                if not ignore_errors:
                    cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
                if return_running:
                    cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
                logger.error(msg=":cluster {} exec {} error".format(
                    cluster, self.operation),
                             exc_info=True)
        except Exception as e:
            logger.error(msg=":cluster {} exec {} error".format(
                cluster, self.operation),
                         exc_info=True)
            cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
        post_deploy_execution_start.send(self.__class__,
                                         execution=self,
                                         result=result,
                                         ignore_errors=ignore_errors)
        return result

    def on_install(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('install')
        self.set_step_default()
        self.update_current_step('create-resource',
                                 DeployExecution.STEP_STATUS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                cluster.create_resource()
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_ERROR)
                raise e
        else:
            delete = None
            for step in self.steps:
                if step['name'] == 'create-resource':
                    delete = step
            if delete is not None:
                self.steps.remove(delete)
        return self.run_playbooks(extra_vars)

    def on_scaling(self, extra_vars):
        cluster = self.get_cluster()
        cluster.change_to()
        if not Role.objects.filter(name='new_node'):
            Role.objects.create(name='new_node', project=cluster)
        self.steps = cluster.get_steps('scale')
        self.set_step_default()
        self.update_current_step('create-resource',
                                 DeployExecution.STEP_STATUS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                num = self.params.get('num', None)
                cluster.scale_up_to(int(num))
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_ERROR)
                raise e
        return self.run_playbooks(extra_vars)

    def on_add_worker(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('add-worker')
        self.set_step_default()
        host_name = self.params.get('host', None)
        host = Host.objects.get(name=host_name)
        cluster.add_worker(host)
        return self.run_playbooks(extra_vars)

    def on_remove_worker(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('remove-worker')
        self.set_step_default()
        node_name = self.params.get('node', None)
        cluster.change_to()
        node = Node.objects.get(name=node_name)
        node.set_groups(['new_node', 'worker'])
        return self.run_playbooks(extra_vars)

    def on_uninstall(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('uninstall')
        self.set_step_default()
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STATUS_RUNNING)
                cluster.destroy_resource()
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STATUS_ERROR)
                raise e
            return {"raw": {}, "summary": {"success": True}}
        else:
            return self.run_playbooks(extra_vars)

    def on_upgrade(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('upgrade')
        self.set_step_default()
        return self.run_playbooks(extra_vars)

    def on_f5_config(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('bigip-config')
        self.set_step_default()
        extra_vars.update(cluster.meta)
        return self.run_playbooks(extra_vars)

    def on_restore(self, extra_vars, cluster_backup_id):
        cluster_backup = ClusterBackup.objects.get(id=cluster_backup_id)
        backup_storage = BackupStorage.objects.get(
            id=cluster_backup.backup_storage_id)
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('cluster-restore')
        client = StorageClient(backup_storage)
        backup_file_path = cluster.name + '/' + cluster_backup.name
        if client.exists(backup_file_path):
            success = client.download_file(
                backup_file_path,
                "/etc/ansible/roles/cluster-backup/files/cluster-backup.zip")
            if success:
                return self.run_playbooks(extra_vars)
            else:
                raise Exception('download file failed!')
        else:
            raise Exception('File does not exist!')

    def on_backup(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('cluster-backup')
        return self.run_playbooks(extra_vars)

    def on_upload_backup_file(self, backup_storage_id):
        cluster = self.get_cluster()
        return kubeops_api.cluster_backup_utils.upload_backup_file(
            cluster.id, backup_storage_id)

    def run_playbooks(self, extra_vars):
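        # Execute each step's playbook in order; stop at the first failure and
        # return the merged summary.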
        result = {"raw": {}, "summary": {}}
        for step in self.steps:
            playbook_name = step.get('playbook', None)
            if playbook_name:
                playbook = self.project.playbook_set.get(name=playbook_name)
                self.update_current_step(step['name'],
                                         DeployExecution.STEP_STATUS_RUNNING)
                _result = playbook.execute(extra_vars=extra_vars)
                result["summary"].update(_result["summary"])
                if not _result.get('summary', {}).get('success', False):
                    self.update_current_step(step['name'],
                                             DeployExecution.STEP_STATUS_ERROR)
                    return result
                self.update_current_step(step['name'],
                                         DeployExecution.STEP_STATUS_SUCCESS)
        return result

    def set_step_default(self):
        for step in self.steps:
            step['status'] = DeployExecution.STEP_STATUS_PENDING

    def get_cluster(self):
        return Cluster.objects.get(name=self.project.name)

    def update_current_step(self, name, status):
        for step in self.steps:
            if step['name'] == name:
                step['status'] = status
                self.save()

    def to_json(self):
        data = {
            'steps': self.steps,
            'operation': self.operation,
            'state': self.state
        }
        return json.dumps(data)

    def mark_state(self, state):
        self.state = state
        self.date_end = timezone.now()
        self.timedelta = (timezone.now() - self.date_start).seconds
        self.save()

    class Meta:
        get_latest_by = 'date_created'
        ordering = ('-date_created', )
Example #2
class Playbook(AbstractProjectResourceModel):
    TYPE_JSON, TYPE_TEXT, TYPE_FILE, TYPE_GIT, TYPE_HTTP, TYPE_LOCAL = (
        'json',
        'text',
        'file',
        'git',
        'http',
        'local',
    )
    TYPE_CHOICES = (
        (TYPE_JSON, TYPE_JSON),
        (TYPE_TEXT, TYPE_TEXT),
        (TYPE_FILE, TYPE_FILE),
        (TYPE_GIT, TYPE_GIT),
        (TYPE_HTTP, TYPE_HTTP),
        (TYPE_LOCAL, TYPE_LOCAL),
    )
    UPDATE_POLICY_ALWAYS, UPDATE_POLICY_IF_NOT_PRESENT, UPDATE_POLICY_NEVER = (
        'always', 'if_not_present', 'never')
    UPDATE_POLICY_CHOICES = (
        (UPDATE_POLICY_ALWAYS, _('Always')),
        (UPDATE_POLICY_IF_NOT_PRESENT, _("If not present")),
        (UPDATE_POLICY_NEVER, _("Never")),
    )
    name = models.SlugField(max_length=128,
                            allow_unicode=True,
                            verbose_name=_('Name'))
    alias = models.CharField(max_length=128, blank=True, default='site.yml')
    type = models.CharField(choices=TYPE_CHOICES,
                            default=TYPE_JSON,
                            max_length=16)
    plays = models.ManyToManyField('Play', verbose_name='Plays')
    git = common_models.JsonDictCharField(max_length=4096,
                                          default={
                                              'repo': '',
                                              'branch': 'master'
                                          })
    url = models.URLField(verbose_name=_("http url"), blank=True)
    update_policy = models.CharField(choices=UPDATE_POLICY_CHOICES,
                                     max_length=16,
                                     default=UPDATE_POLICY_IF_NOT_PRESENT)
    extra_vars = common_models.JsonDictTextField(verbose_name=_('Vars'),
                                                 blank=True,
                                                 null=True,
                                                 default={})

    # Extra schedule content
    is_periodic = models.BooleanField(default=False, verbose_name=_("Enable"))
    interval = models.CharField(verbose_name=_("Interval"),
                                null=True,
                                blank=True,
                                max_length=128,
                                help_text=_("s/m/d"))
    crontab = models.CharField(verbose_name=_("Crontab"),
                               null=True,
                               blank=True,
                               max_length=128,
                               help_text=_("5 * * * *"))
    meta = common_models.JsonDictTextField(blank=True, verbose_name=_("Meta"))

    execute_times = models.IntegerField(default=0)
    comment = models.TextField(blank=True, verbose_name=_("Comment"))
    is_active = models.BooleanField(default=True, verbose_name=_("Active"))
    created_by = models.CharField(max_length=128, blank=True, null=True)
    date_created = models.DateTimeField(auto_now_add=True)

    class Meta:
        unique_together = ["name", "project"]

    def __str__(self):
        return '{}-{}'.format(self.project, self.name)

    def playbook_dir(self, auto_create=True):
        path = os.path.join(self.project.playbooks_dir, str(self.name))
        if not os.path.isdir(path) and auto_create:
            os.makedirs(path, exist_ok=True)
        return path

    @property
    def playbook_path(self):
        path = os.path.join(self.playbook_dir(), self.alias)
        return path

    @property
    def latest_execution(self):
        try:
            return self.executions.all().latest()
        except PlaybookExecution.DoesNotExist:
            return None

    def get_plays_data(self, fmt='py'):
        return Play.get_plays_data(self.plays.all(), fmt=fmt)

    def install_from_git(self):
        success, error = True, None
        if not self.git.get('repo'):
            success, error = False, 'No git repo configured'
            return success, error
        try:
            if os.path.isdir(os.path.join(self.playbook_dir(), '.git')):
                if self.update_policy == self.UPDATE_POLICY_ALWAYS:
                    print("Update playbook from: {}".format(
                        self.git.get('repo')))
                    repo = git.Repo(self.playbook_dir())
                    remote = repo.remote()
                    remote.pull()
            else:
                print("Install playbook from: {}".format(self.git.get('repo')))
                git.Repo.clone_from(
                    self.git['repo'],
                    self.playbook_dir(),
                    branch=self.git.get('branch'),
                    depth=1,
                )
        except Exception as e:
            success, error = False, e
        return success, error

    def install_from_http(self):
        if os.listdir(self.playbook_dir()):
            # os.removedirs only removes empty directories; clear the tree instead.
            shutil.rmtree(self.playbook_dir(), ignore_errors=True)
        r = requests.get(self.url)
        tmp_file_path = os.path.join(self.playbook_dir(), 'tmp')
        with open(tmp_file_path, 'wb') as f:
            f.write(r.content)
        # TODO: extract the downloaded archive

    def install_from_plays(self):
        for play in self.plays.all():
            success, error = play.check_role()
            if not success:
                return success, error
        with open(self.playbook_path, 'w') as f:
            f.write(self.get_plays_data(fmt='yaml'))
        return True, None

    def install_from_local(self):
        playbook_dir = self.playbook_dir(auto_create=False)
        if self.update_policy == self.UPDATE_POLICY_NEVER:
            return True, None
        if os.path.isfile(self.playbook_path) and \
                self.update_policy == self.UPDATE_POLICY_IF_NOT_PRESENT:
            return True, None
        shutil.rmtree(playbook_dir, ignore_errors=True)
        url = self.url
        if self.url.startswith('file://'):
            url = self.url.replace('file://', '')
        try:
            shutil.copytree(url, playbook_dir, symlinks=False)
        except Exception as e:
            return False, e
        return True, None

    def install(self):
        if self.type == self.TYPE_JSON:
            return self.install_from_plays()
        elif self.type == self.TYPE_GIT:
            return self.install_from_git()
        elif self.type == self.TYPE_LOCAL:
            return self.install_from_local()
        else:
            return False, 'Not support {}'.format(self.type)

    def execute(self, extra_vars=None):
        pk = current_task.request.id if current_task else None
        execution = PlaybookExecution(playbook=self,
                                      pk=pk,
                                      extra_vars=extra_vars)
        execution.save()
        result = execution.start()
        return result

    def create_period_task(self):
        from ..tasks import execute_playbook
        tasks = {
            self.__str__(): {
                "task": execute_playbook.name,
                "interval": self.interval or None,
                "crontab": self.crontab or None,
                "args": (str(self.id), ),
                "kwargs": {
                    "name": self.__str__()
                },
                "enabled": True,
            }
        }
        create_or_update_periodic_task(tasks)

    def disable_period_task(self):
        disable_celery_periodic_task(self.__str__())

    def remove_period_task(self):
        if self.is_periodic:
            delete_celery_periodic_task(self.__str__())

    @property
    def period_task(self):
        try:
            return PeriodicTask.objects.get(name=self.__str__())
        except PeriodicTask.DoesNotExist:
            return None

    def cleanup(self):
        self.remove_period_task()
        shutil.rmtree(self.playbook_dir(), ignore_errors=True)
Example #3
class Cluster(Project):
    CLUSTER_STATUS_READY = 'READY'
    CLUSTER_STATUS_RUNNING = 'RUNNING'
    CLUSTER_STATUS_ERROR = 'ERROR'
    CLUSTER_STATUS_WARNING = 'WARNING'
    CLUSTER_STATUS_INSTALLING = 'INSTALLING'
    CLUSTER_STATUS_DELETING = 'DELETING'
    CLUSTER_STATUS_UPGRADING = 'UPGRADING'
    CLUSTER_DEPLOY_TYPE_MANUAL = 'MANUAL'
    CLUSTER_DEPLOY_TYPE_AUTOMATIC = 'AUTOMATIC'
    CLUSTER_DEPLOY_TYPE_SCALING = 'SCALING'

    CLUSTER_STATUS_CHOICES = (
        (CLUSTER_STATUS_RUNNING, 'running'),
        (CLUSTER_STATUS_INSTALLING, 'installing'),
        (CLUSTER_STATUS_DELETING, 'deleting'),
        (CLUSTER_STATUS_READY, 'ready'),
        (CLUSTER_STATUS_ERROR, 'error'),
        (CLUSTER_STATUS_WARNING, 'warning'),
        (CLUSTER_STATUS_UPGRADING, 'upgrading'),
        (CLUSTER_DEPLOY_TYPE_SCALING, 'scaling')
    )

    CLUSTER_DEPLOY_TYPE_CHOICES = (
        (CLUSTER_DEPLOY_TYPE_MANUAL, 'manual'),
        (CLUSTER_DEPLOY_TYPE_AUTOMATIC, 'automatic'),
    )

    package = models.ForeignKey("Package", null=True, on_delete=models.SET_NULL)
    persistent_storage = models.CharField(max_length=128, null=True, blank=True)
    network_plugin = models.CharField(max_length=128, null=True, blank=True)
    auth_template = models.ForeignKey('kubeops_api.AuthTemplate', null=True, on_delete=models.SET_NULL)
    template = models.CharField(max_length=64, blank=True, default='')
    plan = models.ForeignKey('cloud_provider.Plan', on_delete=models.SET_NULL, null=True)
    worker_size = models.IntegerField(default=3)
    status = models.CharField(max_length=128, choices=CLUSTER_STATUS_CHOICES, default=CLUSTER_STATUS_READY)
    deploy_type = models.CharField(max_length=128, choices=CLUSTER_DEPLOY_TYPE_CHOICES,
                                   default=CLUSTER_DEPLOY_TYPE_MANUAL)
    configs = common_models.JsonDictTextField(default={})

    @property
    def region(self):
        if self.plan:
            return self.plan.region.name

    @property
    def zone(self):
        if self.plan:
            return self.plan.zone.name

    @property
    def zones(self):
        if self.plan and self.plan.zones.exists():
            zones = []
            for zone in self.plan.zones.all():
                zones.append(zone.name)
            return zones

    @property
    def cloud_provider(self):
        if self.plan:
            return self.plan.region.vars['provider']

    @property
    def current_execution(self):
        current = kubeops_api.models.deploy.DeployExecution.objects.filter(project=self).first()
        return current

    @property
    def resource(self):
        return self.package.meta['resource']

    @property
    def apps(self):
        return get_component_urls(self)

    @property
    def resource_version(self):
        return self.package.meta['version']

    @property
    def nodes(self):
        self.change_to()
        nodes = Node.objects.all().filter(~Q(name__in=['::1', '127.0.0.1', 'localhost']))
        n = []
        for node in nodes:
            n.append(node.name)
        return n

    @property
    def node_size(self):
        self.change_to()
        nodes = Node.objects.all().filter(~Q(name__in=['::1', '127.0.0.1', 'localhost']))
        return len(nodes)

    @property
    def current_workers(self):
        self.change_to()
        return Node.objects.filter(groups__name__in=['worker'])

    def scale_up_to(self, num):
        scale_up(self, num)

    def add_to_new_node(self, node):
        self.change_to()
        node.add_to_groups(['new_node'])

    def exit_new_node(self):
        self.change_to()
        role = Role.objects.get(name='new_node')
        hosts = role.hosts
        for host in hosts:
            role.hosts.remove(host)

    def change_status(self, status):
        self.status = status
        self.save()

    def get_steps(self, opt):
        config_file = self.load_config_file()
        for op in config_file.get('operations', []):
            if op['name'] == opt:
                return op['steps']

    def create_network_plugin(self):
        cluster_configs = self.load_config_file()
        if self.network_plugin:
            networks = cluster_configs.get('networks', [])
            vars = {}
            for net in networks:
                if net["name"] == self.network_plugin:
                    vars = net.get('vars', {})
            self.set_config_unlock(vars)

    def create_storage(self):
        cluster_configs = self.load_config_file()
        if self.persistent_storage:
            storages = cluster_configs.get('storages', [])
            vars = {}
            for storage in storages:
                if storage['name'] == self.persistent_storage:
                    vars = storage.get('vars', {})
            self.set_config_unlock(vars)

    def set_package_configs(self):
        self.configs.update(self.package.meta['vars'])
        self.save()

    def get_template_meta(self):
        for template in self.package.meta.get('templates', []):
            if template['name'] == self.template:
                return template['name']

    def create_playbooks(self):
        config_file = self.load_config_file()
        for playbook in config_file.get('playbooks', []):
            url = 'file:///{}'.format(os.path.join(KUBEEASZ_DIR))
            Playbook.objects.create(
                name=playbook['name'], alias=playbook['alias'],
                type=Playbook.TYPE_LOCAL, url=url, project=self
            )

    def upgrade_package(self, name):
        package = Package.objects.get(name=name)
        self.package = package
        self.save()

    @staticmethod
    def load_config_file():
        with open(os.path.join(CLUSTER_CONFIG_DIR, "config.yml")) as f:
            return yaml.safe_load(f)

    def create_roles(self):
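        # Merge the base roles from config.yml with the roles of the selected
        # template, create or update them, then wire up their children.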
        config_file = self.load_config_file()
        _roles = {}
        for role in config_file.get('roles', []):
            _roles[role['name']] = role
        template = None
        for tmp in config_file.get('templates', []):
            if tmp['name'] == self.template:
                template = tmp
                break

        for role in template.get('roles', []):
            _roles[role['name']] = role
        roles_data = [role for role in _roles.values()]
        children_data = {}
        for data in roles_data:
            children_data[data['name']] = data.pop('children', [])
            Role.objects.update_or_create(defaults=data, name=data['name'])
        for name, children_name in children_data.items():
            try:
                role = Role.objects.get(name=name)
                children = Role.objects.filter(name__in=children_name)
                role.children.set(children)
            except Role.DoesNotExist:
                pass
        config_role = Role.objects.get(name='config')
        private_var = template['private_vars']
        role_vars = config_role.vars
        role_vars.update(private_var)
        config_role.vars = role_vars
        config_role.save()

    def set_config(self, k, v):
        cluster = Cluster.objects.select_for_update().get(name=self.name)
        _vars = cluster.configs
        if isinstance(v, str):
            v = v.strip()
        _vars[k] = v
        cluster.configs = _vars
        cluster.save()

    def get_config(self, k):
        v = self.configs.get(k)
        return {'key': k, 'value': v}

    def get_configs(self):
        configs = [{'key': k, 'value': v} for k, v in self.configs.items()]
        return configs

    def del_config(self, k):
        configs = self.configs
        configs.pop(k, None)
        self.configs = configs
        self.save()

    def set_config_unlock(self, vars):
        configs = self.configs
        configs.update(vars)
        self.configs = configs
        self.save()

    def create_node_localhost(self):
        local_nodes = ['localhost', '127.0.0.1', '::1']
        for name in local_nodes:
            node = Node.objects.create(
                name=name, vars={"ansible_connection": "local"},
                project=self, meta={"hidden": True},
            )
            node.set_groups(group_names=['config'])

    def create_node(self, role, host):
        node = Node.objects.create(
            name=host.name,
            host=host,
            project=self
        )
        node.set_groups(group_names=[role])
        return node

    def create_resource(self):
        create_hosts(self)

    def destroy_resource(self):
        delete_hosts(self)

    def fetch_config(self):
        path = None
        if self.status == Cluster.CLUSTER_STATUS_RUNNING:
            self.change_to()
            master = self.group_set.get(name='master').hosts.first()
            dest = fetch_cluster_config(master, os.path.join(ANSIBLE_PROJECTS_DIR, self.name))
            path = dest
        return path

    def get_cluster_token(self):
        token = None
        if self.status == Cluster.CLUSTER_STATUS_RUNNING:
            self.change_to()
            master = self.group_set.get(name='master').hosts.first()
            token = get_cluster_token(master)
        return token

    def delete_data(self):
        path = os.path.join(ANSIBLE_PROJECTS_DIR, self.name)
        if os.path.exists(path):
            shutil.rmtree(path)

    def set_plan_configs(self):
        if self.plan and self.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            self.set_config_unlock(self.plan.mixed_vars)

    def get_current_worker_hosts(self):
        self.change_to()
        hosts = []
        for node in Node.objects.filter(groups__name__in=['worker']):
            hosts.append(node.host)
        return hosts

    def on_cluster_create(self):
        self.change_to()
        self.create_roles()
        self.create_playbooks()
        self.create_node_localhost()
        self.create_network_plugin()
        self.set_package_configs()
        self.create_storage()
        self.set_plan_configs()

    def on_cluster_delete(self):
        self.delete_data()
Example #4
class Play(AbstractProjectResourceModel):
    name = models.CharField(max_length=128, verbose_name=_('Name'), blank=True, null=True)
    pattern = models.CharField(max_length=1024, default='all', verbose_name=_('Pattern'))
    gather_facts = models.BooleanField(default=False)
    vars = common_models.JsonDictTextField(verbose_name=_('Vars'), blank=True, null=True)
    tasks = common_models.JsonListTextField(verbose_name=_('Tasks'), blank=True, null=True)
    roles = common_models.JsonListTextField(verbose_name=_('Roles'), blank=True, null=True)

    @staticmethod
    def format_data(data, fmt='py'):
        if fmt == 'yaml':
            return yaml.safe_dump(data, default_flow_style=False)
        elif fmt == 'json':
            return json.dumps(data, indent=4)
        else:
            return data

    def get_play_data(self, fmt='py'):
        data = {
            'hosts': self.pattern,
            'gather_facts': self.gather_facts,
            'vars': self.vars or [],
            'tasks': self.tasks or [],
            'roles': self.roles or [],
        }
        return self.format_data(data, fmt=fmt)

    @classmethod
    def get_plays_data(cls, plays, fmt='py'):
        data = []
        for play in plays:
            data.append(play.get_play_data())
        return cls.format_data(data, fmt=fmt)

    def get_play_roles_names(self):
        names = []
        for role in self.roles or []:
            name = role['role'] if isinstance(role, dict) else role
            names.append(name)
        return names

    def check_role(self):
        for role_name in self.get_play_roles_names():
            try:
                role = self.project.role_set.get(name=role_name)
            except Role.DoesNotExist:
                error = "- Role not exist in project: {}".format(role_name)
                logger.error(error)
                return False, error
            if role.state != Role.STATE_INSTALLED:
                success, error = role.install()
                if not success:
                    msg = "- Install role failed {}: {}".format(role_name, error)
                    logger.error(msg)
                    return False, error
        return True, None

    @classmethod
    def get_plays_roles_names(cls, plays):
        names = []
        for play in plays:
            names.extend(play.get_play_roles_names())
        return names

    @staticmethod
    def test_tasks():
        return [
            {
                "name": "Test ping",
                "ping": ""
            },
            {
                "name": "Ifconfig",
                "command": "ifconfig"
            }
        ]

    @staticmethod
    def test_roles():
        return [
            {
                "role": "bennojoy.memcached",
                "memcached_port": 11244,
                "memcached_cache_size": 512
            }
        ]
Example #5
class Cluster(Project):
    CLUSTER_STATUS_READY = 'READY'
    CLUSTER_STATUS_RUNNING = 'RUNNING'
    CLUSTER_STATUS_ERROR = 'ERROR'
    CLUSTER_STATUS_WARNING = 'WARNING'
    CLUSTER_STATUS_INSTALLING = 'INSTALLING'
    CLUSTER_STATUS_DELETING = 'DELETING'
    CLUSTER_STATUS_UPGRADING = 'UPGRADING'
    CLUSTER_STATUS_RESTORING = 'RESTORING'
    CLUSTER_STATUS_BACKUP = 'BACKUP'
    CLUSTER_DEPLOY_TYPE_MANUAL = 'MANUAL'
    CLUSTER_DEPLOY_TYPE_AUTOMATIC = 'AUTOMATIC'
    CLUSTER_DEPLOY_TYPE_SCALING = 'SCALING'

    CLUSTER_STATUS_CHOICES = (
        (CLUSTER_STATUS_RUNNING, 'running'),
        (CLUSTER_STATUS_INSTALLING, 'installing'),
        (CLUSTER_STATUS_DELETING, 'deleting'),
        (CLUSTER_STATUS_READY, 'ready'),
        (CLUSTER_STATUS_ERROR, 'error'),
        (CLUSTER_STATUS_WARNING, 'warning'),
        (CLUSTER_STATUS_UPGRADING, 'upgrading'),
        (CLUSTER_DEPLOY_TYPE_SCALING, 'scaling'),
        (CLUSTER_STATUS_RESTORING, 'restoring'),
        (CLUSTER_STATUS_BACKUP, 'backup'),
    )

    CLUSTER_DEPLOY_TYPE_CHOICES = (
        (CLUSTER_DEPLOY_TYPE_MANUAL, 'manual'),
        (CLUSTER_DEPLOY_TYPE_AUTOMATIC, 'automatic'),
    )

    package = models.ForeignKey("Package",
                                null=True,
                                on_delete=models.SET_NULL)
    persistent_storage = models.CharField(max_length=128,
                                          null=True,
                                          blank=True)
    network_plugin = models.CharField(max_length=128, null=True, blank=True)
    template = models.CharField(max_length=64, blank=True, default='')
    plan = models.ForeignKey('cloud_provider.Plan',
                             on_delete=models.SET_NULL,
                             null=True)
    worker_size = models.IntegerField(default=3)
    status = models.CharField(max_length=128,
                              choices=CLUSTER_STATUS_CHOICES,
                              default=CLUSTER_STATUS_READY)
    deploy_type = models.CharField(max_length=128,
                                   choices=CLUSTER_DEPLOY_TYPE_CHOICES,
                                   default=CLUSTER_DEPLOY_TYPE_MANUAL)
    configs = common_models.JsonDictTextField(default={})
    cluster_doamin_suffix = models.CharField(max_length=256, null=True)

    @property
    def region(self):
        if self.plan:
            return self.plan.region.name

    @property
    def zone(self):
        if self.plan:
            return self.plan.zone.name

    @property
    def zones(self):
        if self.plan and self.plan.zones.exists():
            zones = []
            for zone in self.plan.zones.all():
                zones.append(zone.name)
            return zones

    @property
    def cloud_provider(self):
        if self.plan:
            return self.plan.region.vars['provider']

    @property
    def current_execution(self):
        current = kubeops_api.models.deploy.DeployExecution.objects.filter(
            project=self).first()
        return current

    @property
    def resource(self):
        return self.package.meta['resource']

    @property
    def apps(self):
        return get_component_urls(self)

    @property
    def resource_version(self):
        return self.package.meta['version']

    @property
    def nodes(self):
        self.change_to()
        nodes = Node.objects.all().filter(~Q(
            name__in=['::1', '127.0.0.1', 'localhost']))
        n = []
        for node in nodes:
            n.append(node.name)
        return n

    @property
    def node_size(self):
        self.change_to()
        nodes = Node.objects.all().filter(~Q(
            name__in=['::1', '127.0.0.1', 'localhost']))
        return len(nodes)

    @property
    def expect_worker_size(self):
        if self.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            if self.template == 'MULTIPLE':
                return self.worker_size + 3
            if self.template == 'SINGLE':
                return self.worker_size + 1

    @property
    def current_workers(self):
        self.change_to()
        return Node.objects.filter(groups__name__in=['worker'])

    @property
    def item_name(self):
        self.change_to()
        item_resource = ItemResource.objects.filter(
            resource_id=self.id).first()
        if item_resource:
            return Item.objects.get(id=item_resource.item_id).name
        else:
            return None

    @property
    def item_id(self):
        self.change_to()
        item_resource = ItemResource.objects.filter(
            resource_id=self.id).first()
        if item_resource:
            return Item.objects.get(id=item_resource.item_id).id
        else:
            return None

    def scale_up_to(self, num):
        scale_compute_resource(self, num)

    def set_worker_size(self, num):
        self.worker_size = num
        self.save()

    def add_to_new_node(self, node):
        self.change_to()
        node.add_to_groups(['new_node'])

    def exit_new_node(self):
        self.change_to()
        role = Role.objects.get(name='new_node')
        hosts = role.hosts.all()
        for host in hosts:
            role.hosts.remove(host)

    def change_status(self, status):
        self.refresh_from_db()
        self.status = status
        self.save()

    def get_steps(self, opt):
        config_file = self.load_config_file()
        for op in config_file.get('operations', []):
            if op['name'] == opt:
                return op['steps']

    def create_network_plugin(self):
        cluster_configs = self.load_config_file()
        if self.network_plugin:
            networks = cluster_configs.get('networks', [])
            vars = {}
            for net in networks:
                if net["name"] == self.network_plugin:
                    vars = net.get('vars', {})
            self.set_config_unlock(vars)

    def create_storage(self):
        cluster_configs = self.load_config_file()
        if self.persistent_storage:
            storages = cluster_configs.get('storages', [])
            vars = {}
            for storage in storages:
                if storage['name'] == self.persistent_storage:
                    vars = storage.get('vars', {})
            if self.persistent_storage == 'nfs':
                nfs = NfsStorage.objects.get(name=self.configs['nfs'])
                if 'repo_port' in nfs.vars:
                    nfs.vars.pop('repo_port', None)
                vars.update(nfs.vars)
            if self.persistent_storage == 'external-ceph':
                ceph = CephStorage.objects.get(
                    name=self.configs['external-ceph'])
                vars.update(ceph.vars)
            self.set_config_unlock(vars)

    def set_package_configs(self):
        pkg_vars = self.package.meta['vars']
        pkg_vars.update(self.configs)
        self.configs = pkg_vars
        self.save()

    def get_template_meta(self):
        for template in self.package.meta.get('templates', []):
            if template['name'] == self.template:
                return template['name']

    def create_playbooks(self):
        config_file = self.load_config_file()
        for playbook in config_file.get('playbooks', []):
            url = 'file:///{}'.format(os.path.join(KUBEEASZ_DIR))
            Playbook.objects.create(name=playbook['name'],
                                    alias=playbook['alias'],
                                    type=Playbook.TYPE_LOCAL,
                                    url=url,
                                    project=self)

    def upgrade_package(self, name):
        package = Package.objects.get(name=name)
        self.package = package
        self.configs.update(package.meta['vars'])
        self.save()

    @staticmethod
    def load_config_file():
        with open(os.path.join(CLUSTER_CONFIG_DIR, "config.yml")) as f:
            return yaml.safe_load(f)

    def create_roles(self):
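        # Merge the base roles from config.yml with the roles of the selected
        # template, create or update them, then wire up their children.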
        config_file = self.load_config_file()
        _roles = {}
        for role in config_file.get('roles', []):
            _roles[role['name']] = role
        template = None
        for tmp in config_file.get('templates', []):
            if tmp['name'] == self.template:
                template = tmp
                break

        for role in template.get('roles', []):
            _roles[role['name']] = role
        roles_data = [role for role in _roles.values()]
        children_data = {}
        for data in roles_data:
            children_data[data['name']] = data.pop('children', [])
            Role.objects.update_or_create(defaults=data, name=data['name'])
        for name, children_name in children_data.items():
            try:
                role = Role.objects.get(name=name)
                children = Role.objects.filter(name__in=children_name)
                role.children.set(children)
            except Role.DoesNotExist:
                pass
        config_role = Role.objects.get(name='config')
        private_var = template['private_vars']
        role_vars = config_role.vars
        role_vars.update(private_var)
        config_role.vars = role_vars
        config_role.save()

    def set_config(self, k, v):
        cluster = Cluster.objects.select_for_update().get(name=self.name)
        _vars = cluster.configs
        if isinstance(v, str):
            v = v.strip()
        _vars[k] = v
        cluster.configs = _vars
        cluster.save()

    def get_config(self, k):
        v = self.configs.get(k)
        return {'key': k, 'value': v}

    def get_configs(self):
        configs = [{'key': k, 'value': v} for k, v in self.configs.items()]
        return configs

    def del_config(self, k):
        configs = self.configs
        configs.pop(k, None)
        self.configs = configs
        self.save()

    def set_config_unlock(self, vars):
        configs = self.configs
        configs.update(vars)
        self.configs = configs
        self.save()

    def create_node_localhost(self):
        local_nodes = ['localhost', '127.0.0.1', '::1']
        for name in local_nodes:
            node = Node.objects.create(
                name=name,
                vars={"ansible_connection": "local"},
                project=self,
                meta={"hidden": True},
            )
            node.set_groups(group_names=['config'])

    def create_node(self, role, host):
        node = Node.objects.create(name=host.name, host=host, project=self)
        node.set_groups(group_names=[role])
        return node

    def add_worker(self, hosts):
        num = len(self.current_workers)
        nodes = []
        for host in hosts:
            num += 1
            node = Node.objects.create(name="worker{}.{}.{}".format(
                num, self.name, self.cluster_doamin_suffix),
                                       host=host,
                                       project=self)
            node.set_groups(group_names=['worker', 'new_node'])
            nodes.append(node)
        return nodes

    def create_resource(self):
        create_compute_resource(self)

    def destroy_resource(self):
        delete_hosts(self)

    def fetch_config(self):
        path = None
        if self.status == Cluster.CLUSTER_STATUS_RUNNING:
            self.change_to()
            master = self.group_set.get(name='master').hosts.first()
            dest = fetch_cluster_config(
                master, os.path.join(ANSIBLE_PROJECTS_DIR, self.name))
            path = dest
        return path

    def get_first_master(self):
        self.change_to()
        return self.group_set.get(name='master').hosts.first()

    def get_cluster_token(self):
        if self.status == Cluster.CLUSTER_STATUS_RUNNING:
            cache_key = "token-{}".format(self.id)
            token = cache.get(cache_key)
            if not token:
                self.change_to()
                master = self.group_set.get(name='master').hosts.first()
                token = get_cluster_token(master)
                cache.set(cache_key, token)
            return token

    def delete_data(self):
        path = os.path.join(ANSIBLE_PROJECTS_DIR, self.name)
        if os.path.exists(path):
            shutil.rmtree(path)

    def set_plan_configs(self):
        if self.plan and self.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            self.set_config_unlock(self.plan.mixed_vars)

    def get_current_worker_hosts(self):
        def get_name(ele):
            return ele.name

        self.change_to()
        hosts = []
        for node in Node.objects.filter(groups__name__in=['worker']):
            hosts.append(node.host)
        hosts.sort(key=get_name)
        return hosts

    def set_app_domain(self):
        self.set_config_unlock({
            'APP_DOMAIN':
            "apps.{}.{}".format(self.name, self.cluster_doamin_suffix)
        })

    def get_kube_config_base64(self):
        file_name = self.fetch_config()
        with open(file_name) as f:
            text = f.read()
            return base64.encodebytes(bytes(text, 'utf-8')).decode().replace(
                '\n', '')

    def get_webkubectl_token(self):
        data = {"name": self.name, "kubeConfig": self.get_kube_config_base64()}
        result = requests.post(WEBKUBECTL_URL, json=data)
        if result.ok:
            return result.json()['token']

    def set_cluster_storage(self):
        if self.persistent_storage and self.persistent_storage == 'external-ceph':
            ceph = CephStorage.objects.get(name=self.configs['external-ceph'])
            cluster = Cluster.objects.get(name=self.name)
            cluster_ceph = ClusterCephStorage(cluster_id=cluster.id,
                                              ceph_storage_id=ceph.id)
            cluster_ceph.save()

    def node_health_check(self):
        from kubeops_api.models.health.node_health import NodeHealthCheck
        check = NodeHealthCheck(self)
        check.run()

    def on_cluster_create(self):
        self.change_to()
        self.create_roles()
        self.create_playbooks()
        self.create_node_localhost()
        self.create_network_plugin()
        self.set_package_configs()
        self.create_storage()
        self.set_plan_configs()
        self.set_app_domain()
        self.set_cluster_storage()

    def on_cluster_delete(self):
        self.delete_data()

    class Meta:
        ordering = ('date_created', )
Example #6
class NfsStorage(Project):
    NFS_STATUS_CREATING = 'CREATING'
    NFS_STATUS_RUNNING = 'RUNNING'
    NFS_STATUS_ERROR = 'ERROR'

    NFS_STATUS_CHOICES = (
        (NFS_STATUS_CREATING, 'CREATING'),
        (NFS_STATUS_RUNNING, 'RUNNING'),
        (NFS_STATUS_ERROR, 'ERROR'),
    )

    NFS_OPTION_NEW = 'NEW'
    NFS_OPTION_EXISTS = 'EXISTS'

    NFS_OPTION_CHOICES = (
        (NFS_OPTION_NEW, 'NEW'),
        (NFS_OPTION_EXISTS, 'EXISTS'),
    )
    status = models.CharField(max_length=128,
                              choices=NFS_STATUS_CHOICES,
                              default=NFS_STATUS_RUNNING,
                              null=True)
    vars = common_models.JsonDictTextField(
        default={
            "allow_ip": "0.0.0.0/0",
            "storage_nfs_server_path": "/exports"
        })

    def create_group_node(self):
        host = Host.objects.get(name=self.vars['host'])
        node = Node.objects.create(name=host.name, host=host, project=self)
        node.set_groups(['nfs'])
        self.vars['storage_nfs_server'] = node.host.ip
        self.save()

    def create_playbooks(self):
        self.change_to()
        url = 'file:///{}'.format(os.path.join(KUBEEASZ_DIR))
        Playbook.objects.create(name='nfs',
                                alias="nfs.yml",
                                type=Playbook.TYPE_LOCAL,
                                url=url,
                                project=self)

    def deploy_nfs(self):
        playbook = self.playbook_set.get(name="nfs")
        Logger.info('Starting NFS service deployment')
        thread = threading.Thread(target=self.execute_playbook,
                                  args=(playbook, self.vars))
        thread.start()

    def execute_playbook(self, playbook, extra_vars):
        self.change_to()
        self.status = NfsStorage.NFS_STATUS_CREATING
        _result = playbook.execute(extra_vars=extra_vars)
        if _result.get('summary', {}).get("success", False):
            self.status = NfsStorage.NFS_STATUS_RUNNING
        else:
            self.status = NfsStorage.NFS_STATUS_ERROR
        self.save()

    def on_nfs_save(self):
        if self.vars['option'].upper() == NfsStorage.NFS_OPTION_NEW:
            self.change_to()
            self.create_group_node()
            self.create_playbooks()
            self.deploy_nfs()
Example #7
class Zone(models.Model):
    ZONE_STATUS_READY = "READY"
    ZONE_STATUS_INITIALIZING = "INITIALIZING"
    ZONE_STATUS_ERROR = "ERROR"
    ZONE_STATUS_CHOICES = (
        (ZONE_STATUS_READY, 'READY'),
        (ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
        (ZONE_STATUS_ERROR, 'ERROR'),
    )
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
    date_created = models.DateTimeField(auto_now_add=True,
                                        verbose_name=_('Date created'))
    vars = common_models.JsonDictTextField(default={})
    region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
    cloud_zone = models.CharField(max_length=128, null=True, default=None)
    ip_used = common_models.JsonListTextField(null=True, default=[])
    status = models.CharField(max_length=64,
                              choices=ZONE_STATUS_CHOICES,
                              null=True)

    @property
    def host_size(self):
        hosts = Host.objects.filter(zone=self)
        return len(hosts)

    def change_status(self, status):
        self.status = status
        self.save()

    def create_image(self):
        try:
            logger.info('upload os image')
            self.change_status(Zone.ZONE_STATUS_INITIALIZING)
            client = get_cloud_client(self.region.vars)
            client.create_image(zone=self)
            self.change_status(Zone.ZONE_STATUS_READY)
        except Exception as e:
            logger.error(msg='upload os image error!', exc_info=True)
            self.change_status(Zone.ZONE_STATUS_ERROR)

    def on_zone_create(self):
        thread = threading.Thread(target=self.create_image)
        thread.start()

    def allocate_ip(self):
        ip = self.ip_pools().pop()
        self.ip_used.append(ip)
        self.save()
        return ip

    def recover_ip(self, ip):
        self.ip_used.remove(ip)
        self.save()

    def to_dict(self):
        dic = {
            "key": "z" + str(self.id).split("-")[3],
            "name": self.cloud_zone,
            "zone_name": self.name,
            "ip_pool": self.ip_pools()
        }
        dic.update(self.vars)
        return dic

    def ip_pools(self):
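        # Collect the assignable IPs between ip_start and ip_end, then drop
        # any address already recorded in ip_used.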
        ip_pool = []
        ip_start = ip_address(self.vars['ip_start'])
        ip_end = ip_address(self.vars['ip_end'])

        if self.region.template.name == 'openstack':
            while ip_start <= ip_end:
                ip_pool.append(str(ip_start))
                ip_start += 1
            for ip in self.ip_used:
                if ip in ip_pool:
                    ip_pool.remove(ip)
            return ip_pool

        net_mask = self.vars['net_mask']
        interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
        network = interface.network
        for host in network.hosts():
            if ip_start <= host <= ip_end:
                ip_pool.append(str(host))
        for ip in self.ip_used:
            if ip in ip_pool:
                ip_pool.remove(ip)
        return ip_pool

    def ip_available_size(self):
        return len(self.ip_pools())

    def has_plan(self):
        for plan in Plan.objects.all():
            for zone in plan.get_zones():
                if zone.name == self.name:
                    return True
        return False

    @property
    def provider(self):
        return self.region.template.name
Example #8
class UserReceiver(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE, to_field='id')
    vars = common_models.JsonDictTextField(default={})
Example #9
class Zone(models.Model):
    ZONE_STATUS_READY = "READY"
    ZONE_STATUS_INITIALIZING = "INITIALIZING"
    ZONE_STATUS_ERROR = "ERROR"
    ZONE_STATUS_CHOICES = (
        (ZONE_STATUS_READY, 'READY'),
        (ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
        (ZONE_STATUS_ERROR, 'ERROR'),
    )
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
    date_created = models.DateTimeField(auto_now_add=True,
                                        verbose_name=_('Date created'))
    vars = common_models.JsonDictTextField(default={})
    region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
    cloud_zone = models.CharField(max_length=128, null=True, default=None)
    status = models.CharField(max_length=64,
                              choices=ZONE_STATUS_CHOICES,
                              null=True)

    @property
    def host_size(self):
        hosts = Host.objects.filter(zone=self)
        return len(hosts)

    def change_status(self, status):
        self.status = status
        self.save()

    def create_image(self):
        try:
            self.change_status(Zone.ZONE_STATUS_INITIALIZING)
            client = get_cloud_client(self.region.vars)
            client.create_image(zone=self)
            self.change_status(Zone.ZONE_STATUS_READY)
        except Exception as e:
            print(e.args)
            self.change_status(Zone.ZONE_STATUS_ERROR)

    def on_zone_create(self):
        thread = threading.Thread(target=self.create_image)
        thread.start()

    def to_dict(self):
        dic = {"name": self.cloud_zone, "zone_name": self.name}
        dic.update(self.vars)
        return dic

    def ip_pools(self):
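        # Usable IPs between vc_ip_start and vc_ip_end within the net_mask
        # network, minus addresses already taken by existing hosts.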
        ip_start = ip_address(self.vars['vc_ip_start'])
        ip_end = ip_address(self.vars['vc_ip_end'])
        net_mask = self.vars['net_mask']
        interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
        network = interface.network
        ip_pool = []
        for host in network.hosts():
            if ip_start <= host <= ip_end:
                ip_pool.append(str(host))
        hosts = Host.objects.filter(ip__in=ip_pool)
        for host in hosts:
            ip_pool.remove(host.ip)
        return ip_pool

    def ip_available_size(self):
        return len(self.ip_pools())
Example #10
class BaseHost(models.Model):
    name = models.CharField(max_length=1024, validators=[name_validator])
    ip = models.GenericIPAddressField(null=True)
    port = models.IntegerField(default=22)
    username = models.CharField(max_length=1024, default='root')
    password = common_models.EncryptCharField(max_length=4096,
                                              blank=True,
                                              null=True)
    private_key = common_models.EncryptCharField(max_length=8192,
                                                 blank=True,
                                                 null=True)
    vars = common_models.JsonDictTextField(default={})
    meta = common_models.JsonDictTextField(default={})
    comment = models.TextField(blank=True)

    class Meta:
        abstract = True

    @property
    def ansible_vars(self):
        host_vars = {k: v for k, v in self.vars.items()}
        host_vars['ansible_ssh_host'] = self.ip
        host_vars['ansible_ssh_port'] = self.port
        host_vars['ansible_ssh_user'] = self.username
        host_vars['ansible_ssh_pass'] = self.password
        host_vars['ansible_ssh_private_key_file'] = self.private_key_path
        return host_vars

    @property
    def private_key_obj(self):
        return ssh_key_string_to_obj(self.private_key, self.password)

    @property
    def private_key_path(self):
        # Cache the key in a temp file named after its md5 digest so it can
        # be passed to Ansible as ansible_ssh_private_key_file.
        if not self.private_key_obj:
            return None
        tmp_dir = os.path.join(settings.BASE_DIR, 'data', 'tmp')
        if not os.path.isdir(tmp_dir):
            os.makedirs(tmp_dir)
        key_name = '.' + md5(self.private_key.encode('utf-8')).hexdigest()
        key_path = os.path.join(tmp_dir, key_name)
        if not os.path.exists(key_path):
            self.private_key_obj.write_private_key_file(key_path)
            os.chmod(key_path, 0o400)
        return key_path

    def add_to_groups(self, group_names, auto_create=True):
        with transaction.atomic():
            for name in group_names:
                group = self.groups.model.get_group(name=name,
                                                    auto_create=auto_create)
                group.hosts.add(self)

    def set_groups(self, group_names, auto_create=True):
        with transaction.atomic():
            groups = []
            for name in group_names:
                group = self.groups.model.get_group(name=name,
                                                    auto_create=auto_create)
                groups.append(group)
            self.groups.set(groups)
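
For reference, ansible_vars above simply maps the host record onto Ansible's standard SSH connection variables (plus anything in self.vars). A hedged illustration of the resulting structure for a hypothetical host (all values invented):

# Hypothetical output of BaseHost.ansible_vars for one host record;
# the keys are the standard Ansible connection variables the property sets.
example_host_vars = {
    "ansible_ssh_host": "192.168.10.20",
    "ansible_ssh_port": 22,
    "ansible_ssh_user": "root",
    "ansible_ssh_pass": "********",
    "ansible_ssh_private_key_file": "/opt/data/tmp/.<md5-of-key>",
}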
Example #11
class Node(Ansible_Host):
    host = models.ForeignKey('kubeops_api.Host',
                             related_name='host',
                             default=None,
                             null=True,
                             on_delete=models.CASCADE)
    conditions = models.ManyToManyField("Condition")
    info = common_models.JsonDictTextField(default={})

    @property
    def roles(self):
        return self.groups

    @property
    def host_memory(self):
        return self.host.memory

    @property
    def host_cpu_core(self):
        return self.host.cpu_core

    @property
    def host_os(self):
        return self.host.os

    @property
    def host_os_version(self):
        return self.host.os_version

    @roles.setter
    def roles(self, value):
        self.groups.set(value)

    @property
    def status(self):
        if self.host:
            return self.host.status

    def on_node_save(self):
        self.ip = self.host.ip
        self.username = self.host.username
        self.password = self.host.password
        self.private_key = self.host.private_key
        self.port = self.host.port
        self.host.node_id = self.id
        self.host.save()
        self.save()

    def add_vars(self, _vars):
        __vars = {k: v for k, v in self.vars.items()}
        __vars.update(_vars)
        if self.vars != __vars:
            self.vars = __vars
            self.save()

    def remove_var(self, key):
        __vars = self.vars
        if key in __vars:
            del __vars[key]
            self.vars = __vars
            self.save()

    def get_var(self, key, default):
        return self.vars.get(key, default)

    class Meta:
        ordering = ['name']
Example #12
class Role(AbstractProjectResourceModel):
    STATE_NOT_INSTALL = 'uninstalled'
    STATE_INSTALLED = 'installed'
    STATE_INSTALLING = 'installing'
    STATE_FAILED = 'failed'
    STATE_CHOICES = (
        (STATE_NOT_INSTALL, 'UnInstalled'),
        (STATE_INSTALLED, 'Installed'),
        (STATE_INSTALLING, 'Installing'),
        (STATE_FAILED, 'Failed'),
    )
    TYPE_GIT = 'git'
    TYPE_HTTP = 'http'
    TYPE_GALAXY = 'galaxy'
    TYPE_FILE = 'file'
    TYPE_CHOICES = (
        (TYPE_GALAXY, 'galaxy'),
        (TYPE_GIT, 'git'),
        (TYPE_HTTP, 'http'),
        (TYPE_FILE, 'file'),
    )

    name = models.CharField(max_length=128, validators=[name_validator])
    type = models.CharField(max_length=16,
                            choices=TYPE_CHOICES,
                            default=TYPE_GALAXY)
    comment = models.CharField(max_length=1024,
                               blank=True,
                               verbose_name=_("Comment"))
    galaxy_name = models.CharField(max_length=128, blank=True, null=True)
    git = common_models.JsonDictCharField(max_length=4096,
                                          default={
                                              'repo': '',
                                              'branch': 'master'
                                          })
    url = models.CharField(max_length=1024, verbose_name=_("Url"), blank=True)
    logo = models.ImageField(verbose_name='Logo', upload_to="logo", null=True)
    categories = models.CharField(max_length=256,
                                  verbose_name=_("Tags"),
                                  blank=True)
    version = models.CharField(max_length=1024, blank=True, default='master')
    state = models.CharField(default=STATE_NOT_INSTALL,
                             choices=STATE_CHOICES,
                             max_length=16)
    meta = common_models.JsonDictTextField(verbose_name=_("Meta"), blank=True)
    meta_ext = common_models.JsonDictTextField(verbose_name=_("Meta Ext"),
                                               blank=True)
    created_by = models.CharField(max_length=128,
                                  blank=True,
                                  null=True,
                                  default='')
    date_created = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)

    class Meta:
        unique_together = ('name', 'project')

    def __str__(self):
        return self.name

    def delete(self, using=None, keep_parents=False):
        role = MyGalaxyRole(self.name, path=self.project.roles_dir)
        role.remove()
        return super().delete(using=using, keep_parents=keep_parents)

    @property
    def _role(self):
        role = MyGalaxyRole(self.name, path=self.project.roles_dir)
        return role

    @property
    def variables(self):
        return self._role.default_variables

    @property
    def role_dir(self):
        return os.path.join(self.project.roles_dir, self.name)

    @property
    def meta_all(self):
        meta = OrderedDict([
            ('name', self.name),
            ('version', self.version),
            ('comment', self.comment),
            ('state', self.get_state_display()),
            ('url', self.url),
            ('type', self.type),
            ('categories', self.categories),
        ])
        if isinstance(self.meta, dict):
            meta.update(self.meta)
        if isinstance(self.meta_ext, dict):
            meta.update(self.meta_ext)
        meta.pop('readme', None)
        meta.pop('readme_html', None)
        galaxy_info = meta.pop('galaxy_info', {})
        for k, v in galaxy_info.items():
            if k == 'platforms':
                v = ' '.join([i['name'] + str(i['versions']) for i in v])
            meta[k] = v
        return meta

    def install_from_galaxy(self):
        api = MyGalaxyAPI()
        role = MyGalaxyRole(self.galaxy_name, path=self.project.roles_dir)
        success, error = role.install()
        if success:
            self.comment = api.lookup_role_by_name(
                self.galaxy_name)['description']
            self.url = api.role_git_url(self.galaxy_name)
            self.version = role.version
            self.meta = role.metadata
            categories = ''
            if self.meta and self.meta['galaxy_info'].get('categories'):
                categories = self.meta['galaxy_info']['categories']
            elif self.meta and self.meta['galaxy_info'].get('galaxy_tags'):
                categories = self.meta['galaxy_info']['galaxy_tags']
            self.categories = ','.join(categories) if isinstance(
                categories, list) else str(categories)
            os.rename(os.path.join(self.project.roles_dir, self.galaxy_name),
                      self.role_dir)
        return success, error

    def install_from_git(self):
        success, error = True, None
        if not self.git.get('repo'):
            success = False
            error = 'No git repo specified'
            return success, error
        print("Install playbook from: {}".format(self.git.get('repo')))
        try:
            if os.path.isdir(os.path.join(self.role_dir, '.git')):
                repo = git.Repo(self.role_dir)
                remote = repo.remote()
                remote.pull()
            else:
                git.Repo.clone_from(
                    self.git['repo'],
                    self.role_dir,
                    branch=self.git.get('branch'),
                    depth=1,
                )
        except Exception as e:
            success = False
            error = e
        return success, error

    def install(self):
        self.state = self.STATE_INSTALLING
        if self.type == self.TYPE_GALAXY:
            success, err = self.install_from_galaxy()
        elif self.type == self.TYPE_GIT:
            success, err = self.install_from_git()
        else:
            success = False
            err = Exception("Unsupported install type: {}".format(self.type))
        if success:
            self.state = self.STATE_INSTALLED
        else:
            self.state = self.STATE_FAILED
        self.save()
        return success, err

    def uninstall(self):
        role = MyGalaxyRole(self.name, path=self.project.roles_dir)
        role.remove()

    @property
    def logo_url(self):
        default = settings.STATIC_URL + "ansible/img/role_logo_default.png"
        if self.logo:
            return self.logo.url
        return default
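
install_from_git() above follows the common clone-or-pull pattern with GitPython. A minimal standalone sketch of the same pattern (repo URL and target directory are placeholders):

import os
import git

def sync_role(repo_url, role_dir, branch="master"):
    # Clone on first install, otherwise fast-forward the existing checkout.
    if os.path.isdir(os.path.join(role_dir, ".git")):
        git.Repo(role_dir).remote().pull()
    else:
        git.Repo.clone_from(repo_url, role_dir, branch=branch, depth=1)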
Example #13
class DeployExecution(AbstractProjectResourceModel, AbstractExecutionModel):
    operation = models.CharField(max_length=128, blank=False, null=False)
    project = models.ForeignKey('ansible_api.Project',
                                on_delete=models.CASCADE)
    params = common_models.JsonDictTextField(default={})
    steps = common_models.JsonListTextField(default=[], null=True)

    STEP_STATUS_PENDING = 'pending'
    STEP_STATUS_RUNNING = 'running'
    STEP_STATUS_SUCCESS = 'success'
    STEP_STATUS_ERROR = 'error'

    @property
    def start(self):
        result = {"raw": {}, "summary": {}}
        pre_deploy_execution_start.send(self.__class__, execution=self)
        cluster = Cluster.objects.get(id=self.project.id)
        hostname = Setting.objects.get(key='local_hostname')
        domain_suffix = Setting.objects.get(key="domain_suffix")
        extra_vars = {
            "cluster_name": cluster.name,
            "local_hostname": hostname.value,
            "domain_suffix": domain_suffix.value,
            "APP_DOMAIN": "apps.{}.{}".format(cluster.name,
                                              domain_suffix.value)
        }

        extra_vars.update(cluster.configs)
        ignore_errors = False
        try:
            if self.operation == "install":
                cluster.change_status(Cluster.CLUSTER_STATUS_INSTALLING)
                result = self.on_install(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'uninstall':
                cluster.change_status(Cluster.CLUSTER_STATUS_DELETING)
                result = self.on_uninstall(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_READY)
            elif self.operation == 'bigip-config':
                ignore_errors = True
                result = self.on_f5_config(extra_vars)
            elif self.operation == 'upgrade':
                cluster.change_status(Cluster.CLUSTER_STATUS_UPGRADING)
                package_name = self.params.get('package', None)
                package = Package.objects.get(name=package_name)
                extra_vars.update(package.meta.get('vars'))
                result = self.on_upgrade(extra_vars)
                cluster.upgrade_package(package_name)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'scale':
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_scaling(extra_vars)
                cluster.exit_new_node()
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)

        except Exception as e:
            print('Unexpected error occurred: {}'.format(e))
            if not ignore_errors:
                cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
            logger.error(e, exc_info=True)
            result['summary'] = {'error': 'Unexpected error occurred: {}'.format(e)}
        post_deploy_execution_start.send(self.__class__,
                                         execution=self,
                                         result=result,
                                         ignore_errors=ignore_errors)
        return result

    def on_install(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('install')
        self.set_step_default()
        self.update_current_step('create-resource',
                                 DeployExecution.STEP_STATUS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            if not cluster.node_size > 0:
                try:
                    cluster.create_resource()
                    self.update_current_step(
                        'create-resource', DeployExecution.STEP_STATUS_SUCCESS)
                except RuntimeError as e:
                    self.update_current_step('create-resource',
                                             DeployExecution.STEP_STATUS_ERROR)
                    raise e
            else:
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_SUCCESS)
        return self.run_playbooks(extra_vars)

    def on_scaling(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('scale')
        self.set_step_default()
        self.update_current_step('create-resource',
                                 DeployExecution.STEP_STATUS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                num = self.params.get('num', None)
                cluster.scale_up_to(int(num))
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STATUS_ERROR)
                raise e
        return self.run_playbooks(extra_vars)

    def on_uninstall(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('uninstall')
        self.set_step_default()
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STATUS_RUNNING)
                cluster.destroy_resource()
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STATUS_ERROR)
                raise e
            return {"raw": {}, "summary": {"success": True}}
        else:
            return self.run_playbooks(extra_vars)

    def on_upgrade(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('upgrade')
        self.set_step_default()
        return self.run_playbooks(extra_vars)

    def on_f5_config(self, extra_vars):
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('bigip-config')
        self.set_step_default()
        extra_vars.update(cluster.meta)
        return self.run_playbooks(extra_vars)

    def run_playbooks(self, extra_vars):
        result = {"raw": {}, "summary": {}}
        for step in self.steps:
            playbook_name = step.get('playbook', None)
            if playbook_name:
                playbook = self.project.playbook_set.get(name=playbook_name)
                self.update_current_step(step['name'],
                                         DeployExecution.STEP_STATUS_RUNNING)
                _result = playbook.execute(extra_vars=extra_vars)
                result["summary"].update(_result["summary"])
                self.update_current_step(step['name'],
                                         DeployExecution.STEP_STATUS_SUCCESS)
                if not _result.get('summary', {}).get('success', False):
                    self.update_current_step(step['name'],
                                             DeployExecution.STEP_STATUS_ERROR)
                    raise RuntimeError("playbook: {} error!".format(
                        step['playbook']))
        return result

    def set_step_default(self):
        for step in self.steps:
            step['status'] = DeployExecution.STEP_STATUS_PENDING

    def get_cluster(self):
        return Cluster.objects.get(name=self.project.name)

    def update_current_step(self, name, status):
        for step in self.steps:
            if step['name'] == name:
                step['status'] = status
                self.save()

    def to_json(self):
        data = {
            'steps': self.steps,
            'operation': self.operation,
            'state': self.state
        }
        return json.dumps(data)

    class Meta:
        get_latest_by = 'date_created'
        ordering = ('-date_created', )
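
The step tracking in this DeployExecution variant is plain list-of-dicts bookkeeping. A minimal sketch of the lifecycle outside the model (step names and playbook file are hypothetical):

# Each step carries a name, an optional playbook and a status that moves
# from pending -> running -> success/error, mirroring the methods above.
steps = [
    {"name": "create-resource", "playbook": None},
    {"name": "install", "playbook": "install.yml"},
]

def set_step_default(steps):
    for step in steps:
        step["status"] = "pending"

def update_current_step(steps, name, status):
    for step in steps:
        if step["name"] == name:
            step["status"] = status

set_step_default(steps)
update_current_step(steps, "create-resource", "running")
update_current_step(steps, "create-resource", "success")
print(steps)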
Example #14
class Project(models.Model):
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    name = models.SlugField(max_length=128,
                            allow_unicode=True,
                            unique=True,
                            verbose_name=_('Name'))
    meta = common_models.JsonDictTextField(blank=True, null=True)
    # Options merged into full_options at run time, e.g. forks
    options = common_models.JsonCharField(max_length=1024,
                                          blank=True,
                                          null=True,
                                          verbose_name=_('Run options'))
    comment = models.CharField(max_length=128,
                               blank=True,
                               null=True,
                               verbose_name=_("Comment"))
    created_by = models.CharField(max_length=128,
                                  blank=True,
                                  null=True,
                                  default='')
    date_created = models.DateTimeField(auto_now_add=True)

    __root_id = '00000000-0000-0000-0000-000000000000'
    __public_id = '00000000-0000-0000-0000-000000000001'

    def __str__(self):
        return self.name

    @property
    def inventory(self):
        return Inventory(self.host_set.all(), self.group_set.all())

    @property
    def inventory_file_path(self):
        return os.path.join(self.project_dir, 'hosts.yaml')

    def refresh_inventory_file(self):
        with open(self.inventory_file_path, 'w') as f:
            f.write(self.inventory.get_data(fmt='yaml'))

    @property
    def roles_dir(self):
        roles_dir = os.path.join(self.project_dir, 'roles')
        if not os.path.isdir(roles_dir):
            os.makedirs(roles_dir, exist_ok=True)
        return roles_dir

    @property
    def project_dir(self):
        project_dir = os.path.join(settings.ANSIBLE_PROJECTS_DIR, self.name)
        if not os.path.isdir(project_dir):
            os.makedirs(project_dir, exist_ok=True)
        return project_dir

    @property
    def playbooks_dir(self):
        playbooks_dir = os.path.join(self.project_dir, 'playbooks')
        if not os.path.isdir(playbooks_dir):
            os.makedirs(playbooks_dir, exist_ok=True)
        return playbooks_dir

    @property
    def adhoc_dir(self):
        adhoc_dir = os.path.join(self.project_dir, 'adhoc')
        if not os.path.isdir(adhoc_dir):
            os.makedirs(adhoc_dir, exist_ok=True)
        return adhoc_dir

    @classmethod
    def root_project(cls):
        return cls(id=cls.__root_id, name='ROOT')

    @classmethod
    def public_project(cls):
        return cls(id=cls.__public_id, name='Public')

    def is_real(self):
        return self.id not in [self.__root_id, self.__public_id]

    @property
    def inventory_obj(self):
        return self.inventory.as_object()

    def get_inventory_data(self):
        return self.inventory.get_data(fmt='py')

    def change_to(self):
        set_current_project(self)

    def clear_inventory(self):
        self.group_set.all().delete()
        self.host_set.all().delete()

    @property
    def cleaned_options(self):
        options = self.options or {}
        options['roles_path'] = [self.roles_dir]
        return options

    @staticmethod
    def test_inventory():
        data = {
            "hosts": [{
                "hostname": "192.168.244.128",
                "vars": {
                    "ansible_ssh_user": "******",
                    "ansible_ssh_pass": "******"
                }
            }, {
                "hostname": "gaga",
                "vars": {
                    "ansible_ssh_host": "192.168.1.1"
                }
            }],
            "groups": [
                {
                    "name": "apache",
                    "hosts": ["gaga"]
                },
                {
                    "name": "web",
                    "hosts": ["192.168.244.128"],
                    "vars": {
                        "hello": "world"
                    },
                    "children": ["apache"]
                },
            ]
        }
        return data

    @classmethod
    def get_test_inventory(cls):
        return BaseInventory(cls.test_inventory())
Example #15
class CephStorage(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=255, null=False, unique=True, blank=False)
    vars = common_models.JsonDictTextField()
    date_created = models.DateTimeField(auto_now_add=True)