class DeployExecution(AbstractProjectResourceModel, AbstractExecutionModel):
    """One deployment operation (install/uninstall/upgrade/...) run against a cluster.

    NOTE(review): this module defines `DeployExecution` more than once; a later
    definition shadows this one at import time — looks like a merge/concat
    artifact, confirm which version is intended.
    """
    # Which operation this execution performs, e.g. "install", "scale", "backup".
    operation = models.CharField(max_length=128, blank=False, null=False)
    project = models.ForeignKey('ansible_api.Project', on_delete=models.CASCADE)
    # Operation parameters read below: 'package', 'num', 'host',
    # 'clusterBackupId', 'backupStorageId'.
    params = common_models.JsonDictTextField(default={})
    # List of step dicts (at least 'name', optional 'playbook', mutated 'status').
    steps = common_models.JsonListTextField(default=[], null=True)

    # Per-step status values. "STAUTS" is a long-standing typo; kept because
    # external code may reference these class attributes by name.
    STEP_STAUTS_PENDING = 'pending'
    STEP_STAUTS_RUNNING = 'running'
    STEP_STAUTS_SUCCESS = 'success'
    STEP_STAUTS_ERROR = 'error'

    @property
    def start(self):
        # NOTE(review): despite being a @property, merely reading `.start`
        # executes the whole deployment (signals, status changes, playbooks).
        result = {"raw": {}, "summary": {}}
        pre_deploy_execution_start.send(self.__class__, execution=self)
        cluster = self.get_cluster()
        hostname = Setting.objects.get(key='local_hostname')
        extra_vars = {
            "cluster_name": cluster.name,
            "local_hostname": hostname.value,
            # 'cluster_doamin_suffix' spelling comes from the Cluster model.
            "domain_suffix": cluster.cluster_doamin_suffix
        }
        # Cluster-level configs override/extend the base vars.
        extra_vars.update(cluster.configs)
        ignore_errors = False      # when True, an exception does NOT mark the cluster ERROR
        return_running = False     # when True, the cluster is restored to RUNNING on failure
        try:
            if self.operation == "install":
                cluster.change_status(Cluster.CLUSTER_STATUS_INSTALLING)
                result = self.on_install(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'uninstall':
                cluster.change_status(Cluster.CLUSTER_STATUS_DELETING)
                result = self.on_uninstall(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_READY)
            elif self.operation == 'bigip-config':
                # F5 configuration is best-effort: failures never mark the cluster ERROR.
                ignore_errors = True
                result = self.on_f5_config(extra_vars)
            elif self.operation == 'upgrade':
                cluster.change_status(Cluster.CLUSTER_STATUS_UPGRADING)
                package_name = self.params.get('package', None)
                package = Package.objects.get(name=package_name)
                extra_vars.update(package.meta.get('vars'))
                result = self.on_upgrade(extra_vars)
                cluster.upgrade_package(package_name)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'scale':
                ignore_errors = True
                return_running = True
                # NOTE(review): a DEPLOY_TYPE constant is passed where a STATUS
                # is expected — confirm CLUSTER_DEPLOY_TYPE_SCALING is intended.
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_scaling(extra_vars)
                cluster.exit_new_node()
                # NOTE(review): unlike 'add-worker', this branch does not reset
                # the status to RUNNING on success — confirm intended.
            elif self.operation == 'add-worker':
                ignore_errors = True
                return_running = True
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_ADDING)
                result = self.on_add_worker(extra_vars)
                cluster.exit_new_node()
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'restore':
                cluster.change_status(Cluster.CLUSTER_STATUS_RESTORING)
                cluster_backup_id = self.params.get('clusterBackupId', None)
                result = self.on_restore(extra_vars, cluster_backup_id)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'backup':
                cluster.change_status(Cluster.CLUSTER_STATUS_BACKUP)
                cluster_storage_id = self.params.get('backupStorageId', None)
                result = self.on_backup(extra_vars)
                self.on_upload_backup_file(cluster_storage_id)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
        except Exception as e:
            # Any failure is reported via result['summary'] rather than re-raised.
            print('Unexpect error occur: {}'.format(e))
            if not ignore_errors:
                cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
            if return_running:
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            logger.error(e, exc_info=True)
            result['summary'] = {'error': 'Unexpect error occur: {}'.format(e)}
        post_deploy_execution_start.send(self.__class__, execution=self,
                                         result=result,
                                         ignore_errors=ignore_errors)
        return result

    def on_install(self, extra_vars):
        """Provision resources (if automatic) then run the 'install' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('install')
        self.set_step_default()
        self.update_current_step('create-resource',
                                 DeployExecution.STEP_STAUTS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            if not cluster.node_size > 0:
                # No nodes yet: create them via the cloud provider.
                try:
                    cluster.create_resource()
                    self.update_current_step(
                        'create-resource', DeployExecution.STEP_STAUTS_SUCCESS)
                except RuntimeError as e:
                    self.update_current_step('create-resource',
                                             DeployExecution.STEP_STAUTS_ERROR)
                    raise e
            else:
                # Nodes already exist; resource creation is considered done.
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STAUTS_SUCCESS)
        else:
            # Manual deploy: the 'create-resource' step does not apply; drop it.
            delete = None
            for step in self.steps:
                if step['name'] == 'create-resource':
                    delete = step
            self.steps.remove(delete)
        return self.run_playbooks(extra_vars)

    def on_scaling(self, extra_vars):
        """Scale the cluster to params['num'] nodes, then run 'scale' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('scale')
        self.set_step_default()
        self.update_current_step('create-resource',
                                 DeployExecution.STEP_STAUTS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                num = self.params.get('num', None)
                cluster.scale_up_to(int(num))
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STAUTS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('create-resource',
                                         DeployExecution.STEP_STAUTS_ERROR)
                raise e
        return self.run_playbooks(extra_vars)

    def on_add_worker(self, extra_vars):
        """Attach the host named in params['host'] as a worker, then run playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('add-worker')
        self.set_step_default()
        host_name = self.params.get('host', None)
        host = Host.objects.get(name=host_name)
        cluster.add_worker(host)
        return self.run_playbooks(extra_vars)

    def on_uninstall(self, extra_vars):
        """Tear the cluster down: destroy cloud resources or run 'uninstall' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('uninstall')
        self.set_step_default()
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STAUTS_RUNNING)
                cluster.destroy_resource()
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STAUTS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('uninstall',
                                         DeployExecution.STEP_STAUTS_ERROR)
                raise e
            # Automatic teardown skips playbooks entirely.
            return {"raw": {}, "summary": {"success": True}}
        else:
            return self.run_playbooks(extra_vars)

    def on_upgrade(self, extra_vars):
        """Run the 'upgrade' playbooks (package vars were merged by the caller)."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('upgrade')
        self.set_step_default()
        return self.run_playbooks(extra_vars)

    def on_f5_config(self, extra_vars):
        """Run the F5 BIG-IP configuration playbooks with cluster.meta merged in."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('bigip-config')
        self.set_step_default()
        extra_vars.update(cluster.meta)
        return self.run_playbooks(extra_vars)

    def on_restore(self, extra_vars, cluster_backup_id):
        """Download the backup archive from storage, then run restore playbooks.

        Raises:
            Exception: if the backup file is missing or the download fails.
        """
        cluster_backup = ClusterBackup.objects.get(id=cluster_backup_id)
        backup_storage = BackupStorage.objects.get(
            id=cluster_backup.backup_storage_id)
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('cluster-restore')
        client = StorageClient(backup_storage)
        backup_file_path = cluster.name + '/' + cluster_backup.name
        if client.exists(backup_file_path):
            # Fixed path expected by the 'cluster-backup' Ansible role.
            success = client.download_file(
                backup_file_path,
                "/etc/ansible/roles/cluster-backup/files/cluster-backup.zip")
            if success:
                return self.run_playbooks(extra_vars)
            else:
                raise Exception('download file failed!')
        else:
            raise Exception('File is not exist!')

    def on_backup(self, extra_vars):
        """Run the 'cluster-backup' playbooks (upload is handled by the caller)."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('cluster-backup')
        return self.run_playbooks(extra_vars)

    def on_upload_backup_file(self, backup_storage_id):
        """Push the freshly created backup archive to the chosen backup storage."""
        cluster = self.get_cluster()
        return kubeops_api.cluster_backup_utils.upload_backup_file(
            cluster.id, backup_storage_id)

    def run_playbooks(self, extra_vars):
        """Execute every step that names a playbook, tracking per-step status.

        Raises RuntimeError on the first playbook whose summary lacks success.
        """
        result = {"raw": {}, "summary": {}}
        for step in self.steps:
            playbook_name = step.get('playbook', None)
            if playbook_name:
                playbook = self.project.playbook_set.get(name=playbook_name)
                self.update_current_step(step['name'],
                                         DeployExecution.STEP_STAUTS_RUNNING)
                _result = playbook.execute(extra_vars=extra_vars)
                result["summary"].update(_result["summary"])
                # NOTE(review): the step is marked SUCCESS before the success
                # flag is checked; on failure it is immediately overwritten
                # with ERROR below — confirm the intermediate write is harmless.
                self.update_current_step(step['name'],
                                         DeployExecution.STEP_STAUTS_SUCCESS)
                if not _result.get('summary', {}).get('success', False):
                    self.update_current_step(step['name'],
                                             DeployExecution.STEP_STAUTS_ERROR)
                    raise RuntimeError("playbook: {} error!".format(
                        step['playbook']))
        return result

    def set_step_default(self):
        """Reset every step's status to PENDING before a run."""
        for step in self.steps:
            step['status'] = DeployExecution.STEP_STAUTS_PENDING

    def get_cluster(self):
        # Cluster and project share the same name; that is the join key here.
        return Cluster.objects.get(name=self.project.name)

    def update_current_step(self, name, status):
        """Set the status of the step called *name* and persist the execution."""
        for step in self.steps:
            if step['name'] == name:
                step['status'] = status
                self.save()

    def to_json(self):
        # NOTE(review): local name shadows the builtin `dict`; harmless here
        # but worth renaming in a behavior-changing pass.
        dict = {
            'steps': self.steps,
            'operation': self.operation,
            'state': self.state
        }
        return json.dumps(dict)

    class Meta:
        get_latest_by = 'date_created'
        ordering = ('-date_created', )
class Play(AbstractProjectResourceModel):
    """A single Ansible play: host pattern plus its vars, tasks and roles.

    `get_play_data`/`get_plays_data` render the model into the dict/yaml/json
    shapes the Ansible runner consumes.
    """
    name = models.CharField(max_length=128, verbose_name=_('Name'), blank=True, null=True)
    pattern = models.CharField(max_length=1024, default='all', verbose_name=_('Pattern'))
    gather_facts = models.BooleanField(default=False)
    vars = common_models.JsonDictTextField(verbose_name=_('Vars'), blank=True, null=True)
    tasks = common_models.JsonListTextField(verbose_name=_('Tasks'), blank=True, null=True)
    roles = common_models.JsonListTextField(verbose_name=_('Roles'), blank=True, null=True)

    @staticmethod
    def format_data(data, fmt='py'):
        """Render *data* as YAML ('yaml'), pretty JSON ('json'), or return it
        unchanged for the default 'py' format."""
        if fmt == 'yaml':
            return yaml.safe_dump(data, default_flow_style=False)
        elif fmt == 'json':
            return json.dumps(data, indent=4)
        else:
            return data

    def get_play_data(self, fmt='py'):
        """Build the Ansible play structure for this model and format it."""
        data = {
            'hosts': self.pattern,
            'gather_facts': self.gather_facts,
            # BUG FIX: `vars` is a JsonDictTextField (a mapping) and an Ansible
            # play's `vars` section must be a mapping; the old fallback `[]`
            # produced an invalid list when vars were empty/None.
            'vars': self.vars or {},
            'tasks': self.tasks or [],
            'roles': self.roles or [],
        }
        return self.format_data(data, fmt=fmt)

    @classmethod
    def get_plays_data(cls, plays, fmt='py'):
        """Collect raw play dicts for *plays*, then format the whole list once."""
        data = []
        for play in plays:
            # Inner calls stay in 'py' form; only the final list is formatted,
            # otherwise yaml/json would be applied twice.
            data.append(play.get_play_data())
        return cls.format_data(data, fmt=fmt)

    def get_play_roles_names(self):
        """Return role names, accepting both `"name"` and `{"role": "name"}` entries."""
        names = []
        for role in self.roles or []:
            name = role['role'] if isinstance(role, dict) else role
            names.append(name)
        return names

    def check_role(self):
        """Verify every referenced role exists in the project and is installed.

        Returns:
            (True, None) on success, or (False, error_message) on the first
            missing or uninstallable role.
        """
        for role_name in self.get_play_roles_names():
            try:
                role = self.project.role_set.get(name=role_name)
            except Role.DoesNotExist:
                error = "- Role not exist in project: {}".format(role_name)
                logger.error(error)
                return False, error
            if role.state != Role.STATE_INSTALLED:
                success, error = role.install()
                if not success:
                    msg = "- Install role failed {}: {}".format(
                        role_name, error)
                    logger.error(msg)
                    return False, error
        return True, None

    @classmethod
    def get_plays_roles_names(cls, plays):
        """Flatten the role names of all *plays* into one list."""
        names = []
        for play in plays:
            names.extend(play.get_play_roles_names())
        return names

    @staticmethod
    def test_tasks():
        """Sample tasks used to smoke-test connectivity/execution."""
        return [{
            "name": "Test ping",
            "ping": ""
        }, {
            "name": "Ifconfig",
            "command": "ifconfig"
        }]

    @staticmethod
    def test_roles():
        """Sample role entry used to smoke-test role execution."""
        return [{
            "role": "bennojoy.memcached",
            "memcached_port": 11244,
            "memcached_cache_size": 512
        }]
class Zone(models.Model):
    """An availability zone inside a cloud region, with its own IP pool.

    `vars` holds provider-specific settings including 'ip_start'/'ip_end'
    (and 'net_mask' for non-openstack providers).
    """
    ZONE_STATUS_READY = "READY"
    ZONE_STATUS_INITIALIZING = "INITIALIZING"
    ZONE_STATUS_ERROR = "ERROR"

    ZONE_STATUS_CHOICES = (
        (ZONE_STATUS_READY, 'READY'),
        (ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
        (ZONE_STATUS_ERROR, 'ERROR'),
    )

    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
    date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
    vars = common_models.JsonDictTextField(default={})
    region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
    cloud_zone = models.CharField(max_length=128, null=True, default=None)
    # IPs already handed out; subtracted from ip_pools().
    ip_used = common_models.JsonListTextField(null=True, default=[])
    status = models.CharField(max_length=64, choices=ZONE_STATUS_CHOICES, null=True)

    @property
    def host_size(self):
        """Number of hosts currently assigned to this zone."""
        hosts = Host.objects.filter(zone=self)
        return len(hosts)

    def change_status(self, status):
        """Persist a new zone status."""
        self.status = status
        self.save()

    def create_image(self):
        """Build the base image for this zone via the cloud provider.

        Best-effort: on failure the zone is marked ERROR and the exception is
        logged (not re-raised), since this runs on a background thread.
        """
        try:
            self.change_status(Zone.ZONE_STATUS_INITIALIZING)
            client = get_cloud_client(self.region.vars)
            client.create_image(zone=self)
            self.change_status(Zone.ZONE_STATUS_READY)
        except Exception as e:
            # BUG FIX: the exception was silently discarded, leaving no trace
            # of WHY the zone went to ERROR. Keep the best-effort semantics
            # but record the failure.
            import logging
            logging.getLogger(__name__).error(
                "create image for zone %s failed: %s", self.name, e, exc_info=True)
            self.change_status(Zone.ZONE_STATUS_ERROR)

    def on_zone_create(self):
        """Kick off image creation asynchronously so the request returns fast."""
        thread = threading.Thread(target=self.create_image)
        thread.start()

    def allocate_ip(self):
        """Take one free IP from the pool, record it as used, and return it.

        Raises:
            IndexError: if the pool is exhausted (pop from an empty list).
        """
        ip = self.ip_pools().pop()
        self.ip_used.append(ip)
        self.save()
        return ip

    def recover_ip(self, ip):
        """Return *ip* to the pool by removing it from the used list."""
        self.ip_used.remove(ip)
        self.save()

    def to_dict(self):
        """Flatten the zone (plus its vars) into the dict cloud clients expect."""
        dic = {
            # Short stable key derived from the 4th UUID group.
            "key": "z" + str(self.id).split("-")[3],
            "name": self.cloud_zone,
            "zone_name": self.name,
            "ip_pool": self.ip_pools()
        }
        dic.update(self.vars)
        return dic

    def ip_pools(self):
        """Compute the list of free IPs between vars['ip_start'] and vars['ip_end'].

        openstack regions iterate the raw range; other providers additionally
        restrict the range to valid host addresses of the vars['net_mask']
        subnet. Already-used IPs are excluded in both cases.
        """
        ip_pool = []
        ip_start = ip_address(self.vars['ip_start'])
        ip_end = ip_address(self.vars['ip_end'])
        if self.region.template.name == 'openstack':
            while ip_start <= ip_end:
                ip_pool.append(str(ip_start))
                ip_start += 1
            for ip in self.ip_used:
                if ip in ip_pool:
                    ip_pool.remove(ip)
            return ip_pool
        net_mask = self.vars['net_mask']
        interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
        network = interface.network
        for host in network.hosts():
            # network.hosts() already skips network/broadcast addresses.
            if ip_start <= host <= ip_end:
                ip_pool.append(str(host))
        for ip in self.ip_used:
            if ip in ip_pool:
                ip_pool.remove(ip)
        return ip_pool

    def ip_available_size(self):
        """Number of IPs still free in this zone."""
        return len(self.ip_pools())

    @property
    def provider(self):
        """Name of the cloud template backing this zone's region."""
        return self.region.template.name
class DeployExecution(AbstractProjectResourceModel, AbstractExecutionModel):
    """One deployment operation against a cluster, with user messaging.

    NOTE(review): this module defines `DeployExecution` more than once; this
    definition is itself shadowed by a later one at import time — looks like a
    merge/concat artifact, confirm which version is intended.
    """
    # Which operation this execution performs, e.g. "install", "remove-worker".
    operation = models.CharField(max_length=128, blank=False, null=False)
    project = models.ForeignKey('ansible_api.Project', on_delete=models.CASCADE)
    # Operation parameters read below: 'package', 'num', 'hosts', 'nodes',
    # 'node', 'clusterBackupId', 'backupStorageId'.
    params = common_models.JsonDictTextField(default={})
    # List of step dicts (at least 'name', optional 'playbook', mutated 'status').
    steps = common_models.JsonListTextField(default=[], null=True)
    STEP_STATUS_PENDING = 'pending'
    STEP_STATUS_RUNNING = 'running'
    STEP_STATUS_SUCCESS = 'success'
    STEP_STATUS_ERROR = 'error'

    @property
    def start(self):
        # NOTE(review): despite being a @property, merely reading `.start`
        # executes the whole deployment (signals, status changes, playbooks).
        result = {"raw": {}, "summary": {}}
        pre_deploy_execution_start.send(self.__class__, execution=self)
        cluster = self.get_cluster()
        settings = Setting.get_db_settings()
        extra_vars = {
            "cluster_name": cluster.name,
            # 'cluster_doamin_suffix' spelling comes from the Cluster model.
            "cluster_domain": cluster.cluster_doamin_suffix
        }
        extra_vars.update(settings)
        extra_vars.update(cluster.configs)
        ignore_errors = False      # when True, failure does NOT mark the cluster ERROR
        return_running = False     # when True, status is restored to RUNNING on failure
        message_client = MessageClient()
        # User-facing notification; content/level filled in after the run.
        message = {
            "item_id": cluster.item_id,
            "title": self.get_operation_name(),
            "content": "",
            "level": "INFO",
            "type": "SYSTEM"
        }
        try:
            if self.operation == "install":
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_INSTALLING)
                result = self.on_install(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'uninstall':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_DELETING)
                result = self.on_uninstall(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_READY)
                # Drop cached monitoring data for the removed cluster.
                kubeops_api.cluster_monitor.delete_cluster_redis_data(cluster.name)
            elif self.operation == 'bigip-config':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                # F5 configuration is best-effort.
                ignore_errors = True
                result = self.on_f5_config(extra_vars)
            elif self.operation == 'upgrade':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_UPGRADING)
                package_name = self.params.get('package', None)
                package = Package.objects.get(name=package_name)
                extra_vars.update(package.meta.get('vars'))
                result = self.on_upgrade(extra_vars)
                # Only record the new package / RUNNING status if the upgrade
                # actually succeeded (nesting inferred from collapsed source —
                # NOTE(review): confirm both lines belong under the check).
                if result.get('summary', {}).get('success', False):
                    cluster.upgrade_package(package_name)
                    cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'scale':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                ignore_errors = True
                return_running = True
                # NOTE(review): a DEPLOY_TYPE constant is passed where a STATUS
                # is expected — confirm CLUSTER_DEPLOY_TYPE_SCALING is intended.
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_scaling(extra_vars)
                cluster.exit_new_node()
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'add-worker':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                ignore_errors = True
                return_running = True
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_add_worker(extra_vars)
                cluster.exit_new_node()
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'remove-worker':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                ignore_errors = True
                return_running = True
                cluster.change_status(Cluster.CLUSTER_DEPLOY_TYPE_SCALING)
                result = self.on_remove_worker(extra_vars)
                if not result.get('summary', {}).get('success', False):
                    # Removal failed: put the node back out of the 'new' group.
                    cluster.exit_new_node()
                else:
                    # Removal succeeded: delete the node record.
                    node_name = self.params.get('node', None)
                    cluster.change_to()
                    node = Node.objects.get(name=node_name)
                    node.delete()
                    cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'restore':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_RESTORING)
                cluster_backup_id = self.params.get('clusterBackupId', None)
                result = self.on_restore(extra_vars, cluster_backup_id)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'backup':
                logger.info(msg="cluster: {} exec: {} ".format(cluster, self.operation))
                cluster.change_status(Cluster.CLUSTER_STATUS_BACKUP)
                cluster_storage_id = self.params.get('backupStorageId', None)
                result = self.on_backup(extra_vars)
                self.on_upload_backup_file(cluster_storage_id)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            # Non-exception failure path: playbooks ran but reported failure.
            if not result.get('summary', {}).get('success', False):
                message['content'] = self.get_content(False)
                message['level'] = 'WARNING'
                if not ignore_errors:
                    cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
                if return_running:
                    cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
                logger.error(msg=":cluster {} exec {} error".format(cluster, self.operation), exc_info=True)
            else:
                message['content'] = self.get_content(True)
            message_client.insert_message(message)
        except Exception as e:
            # Exception path always marks the cluster ERROR (ignore_errors is
            # only honored on the summary-failure path above).
            logger.error(msg=":cluster {} exec {} error".format(cluster, self.operation), exc_info=True)
            cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
            message['content'] = self.get_content(False)
            message['level'] = 'WARNING'
            message_client.insert_message(message)
        post_deploy_execution_start.send(self.__class__, execution=self,
                                         result=result,
                                         ignore_errors=ignore_errors)
        return result

    def on_install(self, extra_vars):
        """Provision resources (if automatic) then run the 'install' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('install')
        self.set_step_default()
        self.update_current_step('create-resource', DeployExecution.STEP_STATUS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                cluster.create_resource()
                # Provisioning may rewrite cluster.configs; reload and re-merge.
                cluster.refresh_from_db()
                extra_vars.update(cluster.configs)
                self.update_current_step('create-resource', DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('create-resource', DeployExecution.STEP_STATUS_ERROR)
                raise e
        else:
            # Manual deploy: the 'create-resource' step does not apply; drop it.
            delete = None
            for step in self.steps:
                if step['name'] == 'create-resource':
                    delete = step
            self.steps.remove(delete)
        return self.run_playbooks(extra_vars)

    def on_scaling(self, extra_vars):
        """Scale the cluster to params['num'] nodes, then run 'scale' playbooks."""
        cluster = self.get_cluster()
        cluster.change_to()
        # Ensure the 'new_node' role exists before scaling adds nodes to it.
        if not Role.objects.filter(name='new_node'):
            Role.objects.create(name='new_node', project=cluster)
        self.steps = cluster.get_steps('scale')
        self.set_step_default()
        self.update_current_step('create-resource', DeployExecution.STEP_STATUS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                num = self.params.get('num', None)
                cluster.scale_up_to(int(num))
                self.update_current_step('create-resource', DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('create-resource', DeployExecution.STEP_STATUS_ERROR)
                raise e
        return self.run_playbooks(extra_vars)

    def on_add_worker(self, extra_vars):
        """Attach the hosts named in params['hosts'] as workers, then run playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('add-worker')
        self.set_step_default()
        host_names = self.params.get('hosts', None)
        hosts = Host.objects.filter(name__in=host_names)
        cluster.add_worker(hosts)
        return self.run_playbooks(extra_vars)

    def on_remove_worker(self, extra_vars):
        """Move params['nodes'] into the removal groups, then run playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('remove-worker')
        self.set_step_default()
        node_names = self.params.get('nodes', None)
        cluster.change_to()
        nodes = Node.objects.filter(name__in=node_names)
        for node in nodes:
            node.set_groups(['new_node', 'worker'])
        return self.run_playbooks(extra_vars)

    def on_uninstall(self, extra_vars):
        """Tear the cluster down: destroy cloud resources or run 'uninstall' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('uninstall')
        self.set_step_default()
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                self.update_current_step('uninstall', DeployExecution.STEP_STATUS_RUNNING)
                cluster.destroy_resource()
                self.update_current_step('uninstall', DeployExecution.STEP_STATUS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('uninstall', DeployExecution.STEP_STATUS_ERROR)
                raise e
            # Automatic teardown skips playbooks entirely.
            return {"raw": {}, "summary": {"success": True}}
        else:
            return self.run_playbooks(extra_vars)

    def on_upgrade(self, extra_vars):
        """Run the 'upgrade' playbooks (package vars were merged by the caller)."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('upgrade')
        self.set_step_default()
        return self.run_playbooks(extra_vars)

    def on_f5_config(self, extra_vars):
        """Run the F5 BIG-IP configuration playbooks with cluster.meta merged in."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('bigip-config')
        self.set_step_default()
        extra_vars.update(cluster.meta)
        return self.run_playbooks(extra_vars)

    def on_restore(self, extra_vars, cluster_backup_id):
        """Download the backup archive from storage, then run restore playbooks.

        Raises:
            Exception: if the backup file is missing or the download fails.
        """
        cluster_backup = ClusterBackup.objects.get(id=cluster_backup_id)
        backup_storage = BackupStorage.objects.get(id=cluster_backup.backup_storage_id)
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('cluster-restore')
        client = StorageClient(backup_storage)
        backup_file_path = cluster.name + '/' + cluster_backup.name
        if client.exists(backup_file_path):
            # Fixed path expected by the 'cluster-backup' Ansible role.
            success = client.download_file(backup_file_path, "/etc/ansible/roles/cluster-backup/files/cluster-backup.zip")
            if success:
                return self.run_playbooks(extra_vars)
            else:
                raise Exception('download file failed!')
        else:
            raise Exception('File is not exist!')

    def on_backup(self, extra_vars):
        """Run the 'cluster-backup' playbooks (upload is handled by the caller)."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('cluster-backup')
        return self.run_playbooks(extra_vars)

    def on_upload_backup_file(self, backup_storage_id):
        """Push the freshly created backup archive to the chosen backup storage."""
        cluster = self.get_cluster()
        return kubeops_api.cluster_backup_utils.upload_backup_file(cluster.id, backup_storage_id)

    def run_playbooks(self, extra_vars):
        """Execute every step that names a playbook, tracking per-step status.

        Unlike older versions this does NOT raise on failure; it stops at the
        first failed playbook and returns the accumulated result.
        """
        result = {"raw": {}, "summary": {}}
        for step in self.steps:
            playbook_name = step.get('playbook', None)
            if playbook_name:
                playbook = self.project.playbook_set.get(name=playbook_name)
                self.update_current_step(step['name'], DeployExecution.STEP_STATUS_RUNNING)
                _result = playbook.execute(extra_vars=extra_vars)
                result["summary"].update(_result["summary"])
                # NOTE(review): the step is marked SUCCESS before the success
                # flag is checked; on failure it is immediately overwritten
                # with ERROR below — confirm the intermediate write is harmless.
                self.update_current_step(step['name'], DeployExecution.STEP_STATUS_SUCCESS)
                if not _result.get('summary', {}).get('success', False):
                    self.update_current_step(step['name'], DeployExecution.STEP_STATUS_ERROR)
                    return result
        return result

    def set_step_default(self):
        """Reset every step's status to PENDING before a run."""
        for step in self.steps:
            step['status'] = DeployExecution.STEP_STATUS_PENDING

    def get_cluster(self):
        # Cluster and project share the same name; that is the join key here.
        return Cluster.objects.get(name=self.project.name)

    def update_current_step(self, name, status):
        """Set the status of the step called *name* and persist the execution."""
        for step in self.steps:
            if step['name'] == name:
                step['status'] = status
                self.save()

    def to_json(self):
        # NOTE(review): local name shadows the builtin `dict`; harmless here
        # but worth renaming in a behavior-changing pass.
        dict = {
            'steps': self.steps,
            'operation': self.operation,
            'state': self.state}
        return json.dumps(dict)

    def mark_state(self, state):
        """Finalize the execution: record state, end time and elapsed seconds."""
        self.state = state
        self.date_end = timezone.now()
        self.timedelta = (timezone.now() - self.date_start).seconds
        self.save()

    class Meta:
        get_latest_by = 'date_created'
        ordering = ('-date_created',)

    def get_operation_name(self):
        """Map the operation code to its user-facing (Chinese) display name.

        NOTE(review): 'remove-worker' maps to "集群安装" (cluster install) —
        looks like a copy-paste slip; confirm the intended label.
        Raises KeyError for unknown operations.
        """
        operation_name = {
            "install": "集群安装",
            "uninstall": "集群卸载",
            "upgrade": "集群升级",
            "scale": "集群伸缩",
            "add-worker": "集群伸缩",
            "remove-worker": "集群安装",
            "restore": "集群恢复",
            "backup": "集群备份",
        }
        return operation_name[self.operation]

    def get_content(self, success):
        """Build the notification payload describing this run's outcome."""
        cluster = self.get_cluster()
        content = {
            "item_name": cluster.item_name,
            "resource": "集群",
            "resource_name": cluster.name,
            "resource_type": 'CLUSTER',
            "detail": self.get_msg_detail(success),
            "status": cluster.status
        }
        return content

    def get_msg_detail(self, success):
        """Return a JSON string like {"message": "<operation><成功|失败>"}."""
        operation = self.get_operation_name()
        if success:
            result = "成功"
        else:
            result = "失败"
        return json.dumps({"message": operation + result})
class DeployExecution(AbstractProjectResourceModel, AbstractExecutionModel):
    """One deployment operation (install/uninstall/bigip-config) against a cluster.

    NOTE(review): this module defines `DeployExecution` more than once; being
    last, THIS definition is the one bound to the name at import time — looks
    like a merge/concat artifact, confirm which version is intended.
    """
    # Which operation this execution performs: "install", "uninstall", "bigip-config".
    operation = models.CharField(max_length=128, blank=False, null=False)
    project = models.ForeignKey('ansible_api.Project', on_delete=models.CASCADE)
    # List of step dicts (at least 'name', optional 'playbook', mutated 'status').
    steps = common_models.JsonListTextField(default=[], null=True)

    # Per-step status values. "STAUTS" is a long-standing typo; kept because
    # external code may reference these class attributes by name.
    STEP_STAUTS_PENDING = 'pending'
    STEP_STAUTS_RUNNING = 'running'
    STEP_STAUTS_SUCCESS = 'success'
    STEP_STAUTS_ERROR = 'error'

    @property
    def start(self):
        # NOTE(review): despite being a @property, merely reading `.start`
        # executes the whole deployment (signals, status changes, playbooks).
        result = {"raw": {}, "summary": {}}
        pre_deploy_execution_start.send(self.__class__, execution=self)
        # NOTE(review): unlike get_cluster() below (which joins by name), this
        # looks the cluster up by the project's id — confirm both resolve to
        # the same row.
        cluster = Cluster.objects.get(id=self.project.id)
        hostname = Setting.objects.get(key='local_hostname')
        domain_suffix = Setting.objects.get(key="domain_suffix")
        extra_vars = {
            "cluster_name": cluster.name,
            "local_hostname": hostname.value,
            "domain_suffix": domain_suffix.value
        }
        ignore_errors = False      # when True, an exception does NOT mark the cluster ERROR
        try:
            if self.operation == "install":
                cluster.change_status(Cluster.CLUSTER_STATUS_INSTALLING)
                result = self.on_install(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_RUNNING)
            elif self.operation == 'uninstall':
                cluster.change_status(Cluster.CLUSTER_STATUS_DELETING)
                result = self.on_uninstall(extra_vars)
                cluster.change_status(Cluster.CLUSTER_STATUS_READY)
            elif self.operation == 'bigip-config':
                # F5 configuration is best-effort.
                ignore_errors = True
                result = self.on_f5_config(extra_vars)
        except Exception as e:
            # Any failure is reported via result['summary'] rather than re-raised.
            print('Unexpect error occur: {}'.format(e))
            if not ignore_errors:
                cluster.change_status(Cluster.CLUSTER_STATUS_ERROR)
            logger.error(e, exc_info=True)
            result['summary'] = {'error': 'Unexpect error occur: {}'.format(e)}
        post_deploy_execution_start.send(self.__class__, execution=self,
                                         result=result,
                                         ignore_errors=ignore_errors)
        return result

    def on_install(self, extra_vars):
        """Provision resources (if automatic and empty) then run 'install' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('install')
        self.set_step_default()
        self.update_current_step('create-resource', DeployExecution.STEP_STAUTS_RUNNING)
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            if not cluster.node_size > 0:
                try:
                    cluster.create_resource()
                    self.update_current_step('create-resource', DeployExecution.STEP_STAUTS_SUCCESS)
                except RuntimeError as e:
                    self.update_current_step('create-resource', DeployExecution.STEP_STAUTS_ERROR)
                    raise e
        return self.run_playbooks(extra_vars)

    def on_uninstall(self, extra_vars):
        """Tear the cluster down: destroy cloud resources or run 'uninstall' playbooks."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('uninstall')
        self.set_step_default()
        if cluster.deploy_type == Cluster.CLUSTER_DEPLOY_TYPE_AUTOMATIC:
            try:
                self.update_current_step('uninstall', DeployExecution.STEP_STAUTS_RUNNING)
                cluster.destroy_resource()
                self.update_current_step('uninstall', DeployExecution.STEP_STAUTS_SUCCESS)
            except RuntimeError as e:
                self.update_current_step('uninstall', DeployExecution.STEP_STAUTS_ERROR)
                raise e
            # Automatic teardown skips playbooks entirely.
            return {"raw": {}, "summary": {"success": True}}
        else:
            return self.run_playbooks(extra_vars)

    def on_f5_config(self, extra_vars):
        """Run the F5 BIG-IP configuration playbooks with cluster.meta merged in."""
        cluster = self.get_cluster()
        self.steps = cluster.get_steps('bigip-config')
        self.set_step_default()
        extra_vars.update(cluster.meta)
        return self.run_playbooks(extra_vars)

    def run_playbooks(self, extra_vars):
        """Execute every step that names a playbook, tracking per-step status.

        Raises RuntimeError on the first playbook whose summary lacks success.
        """
        result = {"raw": {}, "summary": {}}
        for step in self.steps:
            playbook_name = step.get('playbook', None)
            if playbook_name:
                playbook = self.project.playbook_set.get(name=playbook_name)
                self.update_current_step(step['name'], DeployExecution.STEP_STAUTS_RUNNING)
                # NOTE(review): fixed 10s delay before every playbook — reads
                # like a workaround for a race; confirm it is still needed.
                time.sleep(10)
                _result = playbook.execute(extra_vars=extra_vars)
                result["summary"].update(_result["summary"])
                # NOTE(review): the step is marked SUCCESS before the success
                # flag is checked; on failure it is immediately overwritten
                # with ERROR below — confirm the intermediate write is harmless.
                self.update_current_step(step['name'], DeployExecution.STEP_STAUTS_SUCCESS)
                if not _result.get('summary', {}).get('success', False):
                    self.update_current_step(step['name'], DeployExecution.STEP_STAUTS_ERROR)
                    raise RuntimeError("playbook: {} error!".format(step['playbook']))
        return result

    def set_step_default(self):
        """Reset every step's status to PENDING before a run."""
        for step in self.steps:
            step['status'] = DeployExecution.STEP_STAUTS_PENDING

    def get_cluster(self):
        # Cluster and project share the same name; that is the join key here.
        return Cluster.objects.get(name=self.project.name)

    def update_current_step(self, name, status):
        """Set the status of the step called *name* and persist the execution."""
        for step in self.steps:
            if step['name'] == name:
                step['status'] = status
                self.save()

    def to_json(self):
        # NOTE(review): local name shadows the builtin `dict`; harmless here
        # but worth renaming in a behavior-changing pass.
        dict = {
            'steps': self.steps,
            'operation': self.operation,
            'state': self.state}
        return json.dumps(dict)

    class Meta:
        get_latest_by = 'date_created'
        ordering = ('-date_created',)