def load_bundle(bundle_file):
    """Load an uploaded bundle file and persist it as a Bundle object.

    Two phases: (1) unpack and validate into the stage tables; (2) copy
    from stage into the real tables.  On any failure the stage is cleared;
    a phase-1 failure also removes the unpacked directory.
    Returns the created Bundle.
    """
    log.info('loading bundle file "%s" ...', bundle_file)
    # process_file presumably unpacks the archive and returns its content
    # hash plus the unpack directory — TODO confirm against its definition
    (bundle_hash, path) = process_file(bundle_file)
    try:
        check_stage()
        process_bundle(path, bundle_hash)
        bundle_proto = get_stage_bundle(bundle_file)
        second_pass()
    except:  # noqa: E722 — bare except on purpose: clean up, then re-raise everything
        clear_stage()
        shutil.rmtree(path)
        raise
    try:
        bundle = copy_stage(bundle_hash, bundle_proto)
        order_versions()
        clear_stage()
        ProductCategory.re_collect()
        # re-read so post-save signals/derived fields are visible
        bundle.refresh_from_db()
        prepare_action_roles(bundle)
        cm.status_api.post_event('create', 'bundle', bundle.id)
        return bundle
    except:  # noqa: E722 — clear stage on any failure, then re-raise
        clear_stage()
        raise
def run(self, tmp=None, task_vars=None):
    """Ansible action: apply host-component operations in cluster/service context."""
    super().run(tmp, task_vars)
    cluster_id = get_object_id_from_context(
        task_vars,
        'cluster_id',
        'cluster',
        'service',
        err_msg='You can modify hc only in cluster or service context',
    )
    job_id = task_vars['job']['id']
    ops = self._task.args['operations']
    log.info('ansible module adcm_hc: cluster #%s, ops: %s', cluster_id, ops)
    if not isinstance(ops, list):
        raise AnsibleError('Operations should be an array: %s' % ops)
    # validate every operation before touching anything
    for op in ops:
        if not isinstance(op, dict):
            raise AnsibleError('Operation items should be a dictionary: %s' % op)
        if frozenset(op.keys()).difference(self._VALID_SUB_ARGS):
            raise AnsibleError('Invalid operation arguments: %s' % op)
    try:
        cm.api.change_hc(job_id, cluster_id, ops)
    except AdcmEx as e:
        raise AnsibleError(e.code + ": " + e.msg) from e
    return {"failed": False, "changed": True}
def switch_object(obj: Union[Host, ClusterObject], new_prototype: Prototype) -> None:
    """Point an upgraded object at its new prototype and migrate its config."""
    prev_proto = obj.prototype
    log.info('upgrade switch from %s to %s', proto_ref(prev_proto), proto_ref(new_prototype))
    obj.prototype = new_prototype
    obj.save()
    switch_config(obj, new_prototype, prev_proto)
def set_action_state(action, task, obj, state):
    """Push the action's resulting state onto the target object; no-op without one."""
    if not obj:
        log.warning('empty object for action %s of task #%s', action.name, task.id)
        return
    log.info(
        'action "%s" of task #%s will set %s state to "%s"',
        action.name,
        task.id,
        obj_ref(obj),
        state,
    )
    api.push_obj(obj, state)
def do_upgrade(obj: Union[Cluster, HostProvider], upgrade: Upgrade) -> dict:
    """Upgrade a cluster or host provider to the prototype from `upgrade.bundle`.

    Licence and upgrade pre-checks run first; the prototype switch, config
    migration and child-object switching all happen in one transaction.
    Returns {'id': ..., 'upgradable': ...}.  NOTE(review): err() presumably
    raises, so the `return err(...)` branches never actually return — confirm.
    """
    old_proto = obj.prototype
    check_license(obj.prototype.bundle)
    check_license(upgrade.bundle)
    ok, msg = check_upgrade(obj, upgrade)
    if not ok:
        return err('UPGRADE_ERROR', msg)
    log.info('upgrade %s version %s (upgrade #%s)', obj_ref(obj), old_proto.version, upgrade.id)
    # pick the matching top-level prototype from the target bundle
    if obj.prototype.type == 'cluster':
        new_proto = Prototype.objects.get(bundle=upgrade.bundle, type='cluster')
    elif obj.prototype.type == 'provider':
        new_proto = Prototype.objects.get(bundle=upgrade.bundle, type='provider')
    else:
        return err('UPGRADE_ERROR', 'can upgrade only cluster or host provider')
    with transaction.atomic():
        obj.prototype = new_proto
        # remember the pre-upgrade state so it can be restored/reported later
        obj.before_upgrade['state'] = obj.state
        if upgrade.state_on_success:
            obj.state = upgrade.state_on_success
        obj.save()
        switch_config(obj, new_proto, old_proto)
        # switch children: services for a cluster, hosts for a provider
        if obj.prototype.type == 'cluster':
            switch_services(upgrade, obj)
        elif obj.prototype.type == 'provider':
            switch_hosts(upgrade, obj)
        cm.issue.update_hierarchy_issues(obj)
    log.info('upgrade %s OK to version %s', obj_ref(obj), obj.prototype.version)
    cm.status_api.post_event(
        'upgrade', obj.prototype.type, obj.id, 'version', str(obj.prototype.version)
    )
    return {'id': obj.id, 'upgradable': bool(get_upgrade(obj))}
def add_service_to_cluster(cluster, proto):
    """Create a service (ClusterObject) for `proto` inside `cluster`, with config and components."""
    check_proto_type(proto, 'service')
    check_license(proto.bundle)
    # a non-shared service must come from the cluster's own bundle
    if not proto.shared and cluster.prototype.bundle != proto.bundle:
        msg = '{} does not belong to bundle "{}" {}'
        err(
            'SERVICE_CONFLICT',
            msg.format(proto_ref(proto), cluster.prototype.bundle.name, cluster.prototype.version),
        )
    with transaction.atomic():
        service = ClusterObject.objects.create(cluster=cluster, prototype=proto)
        service.config = init_object_config(proto, service)
        service.save()
        add_components_to_service(cluster, service)
        cm.issue.update_hierarchy_issues(service)
        rbac.models.re_apply_object_policy(cluster)
    cm.status_api.post_event('add', 'service', service.id, 'cluster', str(cluster.id))
    load_service_map()
    log.info(
        f'service #{service.id} {service.prototype.name} is added to cluster #{cluster.id} {cluster.name}'
    )
    return service
def read_definition(conf_file, conf_type):
    """Read an ADCM config definition from `conf_file`; {} when the file is absent."""
    if not os.path.isfile(conf_file):
        log.warning('Can not open config file: "%s"', conf_file)
        return {}
    conf = check_adcm_config(conf_file)
    log.info('Read config file: "%s"', conf_file)
    return conf
def read_definition(conf_file, conf_type):
    """Parse a bundle definition file as toml/yaml/json.

    Returns the parsed structure, or {} when the file does not exist.
    Decode failures are reported via err('STACK_LOAD_ERROR', ...).
    NOTE(review): json.JSONDecodeError is not handled here (matches the
    original behavior) — a broken json file propagates; confirm intended.
    """
    parsers = {
        'toml': toml.load,
        'yaml': yaml.safe_load,
        'json': json.load,
    }
    fn = parsers[conf_type]
    if not os.path.isfile(conf_file):
        log.warning('Can not open config file: "%s"', conf_file)
        return {}
    with open(conf_file) as fd:
        try:
            conf = fn(fd)
        except (toml.TomlDecodeError, IndexError) as e:
            err('STACK_LOAD_ERROR', 'TOML decode "{}" error: {}'.format(conf_file, e))
        # yaml.YAMLError is the base of Parser/Composer/Constructor/Scanner
        # errors the original listed one by one, and also covers reader and
        # resolver errors that previously escaped unreported.
        except yaml.YAMLError as e:
            err('STACK_LOAD_ERROR', 'YAML decode "{}" error: {}'.format(conf_file, e))
    log.info('Read config file: "%s"', conf_file)
    return conf
def run(self, tmp=None, task_vars=None):
    """Ansible action: delete a service by name (cluster context) or by id (service context)."""
    super().run(tmp, task_vars)
    service = self._task.args.get('service', None)
    if service:
        cluster_id = get_object_id_from_context(
            task_vars,
            'cluster_id',
            'cluster',
            err_msg='You can delete service by name only in cluster context',
        )
        log.info('ansible module adcm_delete_service: service "%s"', service)
        try:
            cm.api.delete_service_by_name(service, cluster_id)
        except AdcmEx as e:
            raise AnsibleError(e.code + ":" + e.msg) from e
    else:
        service_id = get_object_id_from_context(
            task_vars,
            'service_id',
            'service',
            err_msg='You can delete service only in service context',
        )
        log.info('ansible module adcm_delete_service: service #%s', service_id)
        try:
            cm.api.delete_service_by_id(service_id)
        except AdcmEx as e:
            raise AnsibleError(e.code + ":" + e.msg) from e
    return {"failed": False, "changed": True}
def set_object_config(obj, keys, value):
    """Update a single config value of `obj` addressed as "key" or "key/subkey".

    Validates the key against the object's prototype config spec and the
    value against the declared type, then persists it (plus the file body
    for file-typed parameters).  Returns the value.
    Raises via err() when the key is unknown or addresses a group.
    """
    proto = obj.prototype
    # parse "key" or "key/subkey"; extra path segments are ignored,
    # matching the original behavior
    spl = keys.split('/')
    key = spl[0]
    subkey = spl[1] if len(spl) > 1 else ''
    # keep the try minimal: only the ORM lookup can raise DoesNotExist
    try:
        pconf = PrototypeConfig.objects.get(prototype=proto, action=None, name=key, subname=subkey)
    except PrototypeConfig.DoesNotExist:
        msg = '{} does not has config key "{}/{}"'
        err('CONFIG_NOT_FOUND', msg.format(proto_ref(proto), key, subkey))
    if pconf.type == 'group':
        msg = 'You can not update config group "{}" for {}'
        err('CONFIG_VALUE_ERROR', msg.format(key, obj_ref(obj)))
    check_config_type(proto, key, subkey, obj_to_dict(pconf, ('type', 'limits', 'option')), value)
    # NOTE(review): the original carried dead commented-out read-only
    # enforcement here; removed — restore via config_is_ro if it is needed.
    replace_object_config(obj, key, subkey, value)
    if pconf.type == 'file':
        save_file_type(obj, key, subkey, value)
    log.info('update %s config %s/%s to "%s"', obj_ref(obj), key, subkey, value)
    return value
def set_action_state(
    action: Action,
    task: TaskLog,
    obj: ADCMEntity,
    state: str = None,
    multi_state_set: List[str] = None,
    multi_state_unset: List[str] = None,
):
    """Apply the action's resulting state and multi-state changes to `obj`."""
    if not obj:
        log.warning('empty object for action %s of task #%s', action.name, task.pk)
        return
    log.info(
        'action "%s" of task #%s will set %s state to "%s" '
        'add to multi_states "%s" and remove from multi_states "%s"',
        action.name,
        task.pk,
        obj,
        state,
        multi_state_set,
        multi_state_unset,
    )
    if state:
        obj.set_state(state, ctx.event)
    for added in multi_state_set or []:
        obj.set_multi_state(added, ctx.event)
    for removed in multi_state_unset or []:
        obj.unset_multi_state(removed, ctx.event)
def init_adcm(bundle):
    """Create the ADCM object for `bundle` with its initial config."""
    proto = Prototype.objects.get(type='adcm', bundle=bundle)
    spec, _, conf, attr = get_prototype_config(proto)
    with transaction.atomic():
        adcm = ADCM(prototype=proto, name='ADCM', config=init_object_config(spec, conf, attr))
        adcm.save()
    log.info('init adcm object version %s OK', proto.version)
    return adcm
def init_adcm(bundle):
    """Create the ADCM object for `bundle` and attach its initial config."""
    proto = Prototype.objects.get(type='adcm', bundle=bundle)
    with transaction.atomic():
        adcm = ADCM.objects.create(prototype=proto, name='ADCM')
        adcm.config = init_object_config(proto, adcm)
        adcm.save()
    log.info('init adcm object version %s OK', proto.version)
    return adcm
def backup_sqlite(dbfile):
    """Back up the sqlite database at `dbfile` to a timestamped file under data/var.

    Uses sqlite3's online backup API; both connections are closed even when
    the backup fails (the original leaked them on error).
    """
    dt = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    backupfile = os.path.join(config.BASE_DIR, 'data', 'var', f'{dt}.db')
    old = sqlite3.connect(dbfile)
    try:
        new = sqlite3.connect(backupfile)
        try:
            # the `with new:` block commits the backup transaction
            with new:
                old.backup(new)
        finally:
            new.close()
    finally:
        old.close()
    log.info('Backup sqlite db to %s', backupfile)
def run_logrotate(path):
    """Force a logrotate run with config `path` and log its stdout/stderr."""
    proc = Popen(['logrotate', '-f', path], stdout=PIPE, stderr=PIPE)
    out, err_out = proc.communicate()
    log.info(
        'RUN: logrotate -f %s, output: %s, error: %s',
        path,
        out.decode(errors='ignore'),
        err_out.decode(errors='ignore'),
    )
def env_configuration(job_config):
    """Build the process environment for a job (PYTHONPATH plus, if needed, ansible config)."""
    job_id = job_config['job']['id']
    stack_dir = job_config['env']['stack_dir']
    env = set_pythonpath(os.environ.copy(), stack_dir)
    # Compatibility: older bundles may ship their own ansible.cfg in the
    # stack dir — only point ansible at the job's config when they do not.
    if not os.path.exists(os.path.join(stack_dir, 'ansible.cfg')):
        env = set_ansible_config(env, job_id)
        log.info('set ansible config for job:%s', job_id)
    return env
def run_task(task, event, args=''):
    """Spawn task_runner.py for `task`, record its pid, and mark the task RUNNING.

    The error-log file is opened in a context manager so the parent's file
    descriptor is closed after spawning (the original leaked it); the child
    process keeps its own inherited handle.
    """
    with open(os.path.join(config.LOG_DIR, 'task_runner.err'), 'a+') as err_file:
        proc = subprocess.Popen(
            [os.path.join(config.CODE_DIR, 'task_runner.py'), str(task.id), args],
            stderr=err_file,
        )
    log.info("run task #%s, python process %s", task.id, proc.pid)
    task.pid = proc.pid
    set_task_status(task, config.Job.RUNNING, event)
def delete_host_provider(provider, cancel_tasks=True):
    """Delete a host provider; refuse while it still owns any host."""
    hosts = Host.objects.filter(provider=provider)
    if hosts:
        msg = 'There is host #{} "{}" of host {}'
        err('PROVIDER_CONFLICT', msg.format(hosts[0].id, hosts[0].fqdn, obj_ref(provider)))
    if cancel_tasks:
        _cancel_locking_tasks(provider)
    # remember the pk before the row disappears
    provider_id = provider.id
    provider.delete()
    cm.status_api.post_event('delete', 'provider', provider_id)
    log.info(f'host provider #{provider_id} is deleted')
def delete_host(host, cancel_tasks=True):
    """Delete a host; refuse while it is still attached to a cluster."""
    cluster = host.cluster
    if cluster:
        msg = 'Host #{} "{}" belong to {}'
        err('HOST_CONFLICT', msg.format(host.id, host.fqdn, obj_ref(cluster)))
    if cancel_tasks:
        _cancel_locking_tasks(host)
    # remember the pk before the row disappears
    host_id = host.id
    host.delete()
    cm.status_api.post_event('delete', 'host', host_id)
    load_service_map()
    log.info(f'host #{host_id} is deleted')
def add_group_role(group, role):
    """Attach `role` to `group` and grant all of the role's permissions."""
    if Role.objects.filter(id=role.id, group=group):
        err('ROLE_ERROR', f'Group "{group.name}" already has role "{role.name}"')
    with transaction.atomic():
        role.group.add(group)
        role.save()
        for perm in role.permissions.all():
            group.permissions.add(perm)
    log.info('Add role "%s" to group "%s"', role.name, group.name)
    role.role_id = role.id
    return role
def add_user_role(user, role):
    """Attach `role` to `user` and grant all of the role's permissions."""
    if Role.objects.filter(id=role.id, user=user):
        err('ROLE_ERROR', f'User "{user.username}" already has role "{role.name}"')
    with transaction.atomic():
        role.user.add(user)
        role.save()
        for perm in role.permissions.all():
            user.user_permissions.add(perm)
    log.info('Add role "%s" to user "%s"', role.name, user.username)
    role.role_id = role.id
    return role
def do_upgrade(obj, upgrade):
    """Upgrade a cluster or host provider to the prototype from `upgrade.bundle`.

    Switches the object prototype, migrates config, then switches children
    (services/components and host-component map for a cluster, hosts for a
    provider) inside one transaction.  Returns {'id': ..., 'upgradable': ...}.
    NOTE(review): err() presumably raises, so `return err(...)` never really
    returns — confirm.
    """
    old_proto = obj.prototype
    check_license(obj.prototype.bundle)
    check_license(upgrade.bundle)
    ok, msg = check_upgrade(obj, upgrade)
    if not ok:
        return err('UPGRADE_ERROR', msg)
    log.info('upgrade %s version %s (upgrade #%s)', obj_ref(obj), old_proto.version, upgrade.id)
    # pick the matching top-level prototype from the target bundle
    if obj.prototype.type == 'cluster':
        new_proto = Prototype.objects.get(bundle=upgrade.bundle, type='cluster')
    elif obj.prototype.type == 'provider':
        new_proto = Prototype.objects.get(bundle=upgrade.bundle, type='provider')
    else:
        return err('UPGRADE_ERROR', 'can upgrade only cluster or host provider')
    with transaction.atomic():
        obj.prototype = new_proto
        if upgrade.state_on_success:
            obj.state = upgrade.state_on_success
        obj.save()
        switch_config(obj, new_proto, old_proto)
        if obj.prototype.type == 'cluster':
            # switch every service (and its components) that exists both in
            # the cluster and in the new bundle; services absent from the
            # cluster are skipped
            for p in Prototype.objects.filter(bundle=upgrade.bundle, type='service'):
                try:
                    co = ClusterObject.objects.get(cluster=obj, prototype__name=p.name)
                    switch_service(co, p)
                    switch_components(obj, co, p)
                except ClusterObject.DoesNotExist:
                    # co.delete() ?!
                    pass
            switch_hc(obj, upgrade)
        elif obj.prototype.type == 'provider':
            # re-point every host at the same-named host prototype of the new bundle
            for p in Prototype.objects.filter(bundle=upgrade.bundle, type='host'):
                for host in Host.objects.filter(provider=obj, prototype__name=p.name):
                    switch_service(host, p)
        cm.issue.save_issue(obj)
    log.info('upgrade %s OK to version %s', obj_ref(obj), obj.prototype.version)
    cm.status_api.post_event('upgrade', obj.prototype.type, obj.id, 'version', str(obj.prototype.version))
    return {'id': obj.id, 'upgradable': bool(get_upgrade(obj))}
def remove_group_role(group, role):
    """Detach `role` from `group`, revoking permissions no remaining group role still grants."""
    group_roles = Role.objects.filter(group=group)
    if role not in group_roles:
        err('ROLE_ERROR', f'Group "{group.name}" does not has role "{role.name}"')
    # permissions still provided by the group's other roles must survive
    keep = cook_perm_list(role, group_roles)
    with transaction.atomic():
        role.group.remove(group)
        role.save()
        for perm in role.permissions.all():
            if perm.codename not in keep:
                group.permissions.remove(perm)
    log.info('Remove role "%s" from group "%s"', role.name, group.name)
def remove_user_role(user, role):
    """Detach `role` from `user`, revoking permissions no remaining user role still grants."""
    user_roles = Role.objects.filter(user=user)
    if role not in user_roles:
        err('ROLE_ERROR', f'User "{user.username}" does not has role "{role.name}"')
    # permissions still provided by the user's other roles must survive
    keep = cook_perm_list(role, user_roles)
    with transaction.atomic():
        role.user.remove(user)
        role.save()
        for perm in role.permissions.all():
            if perm.codename not in keep:
                user.user_permissions.remove(perm)
    log.info('Remove role "%s" from user "%s"', role.name, user.username)
def upgrade_adcm(adcm, bundle):
    """Switch the ADCM object to the (strictly newer) prototype from `bundle`."""
    old_proto = adcm.prototype
    new_proto = Prototype.objects.get(type='adcm', bundle=bundle)
    # only strictly newer versions are allowed
    if rpm.compare_versions(old_proto.version, new_proto.version) >= 0:
        msg = 'Current adcm version {} is more than or equal to upgrade version {}'
        err('UPGRADE_ERROR', msg.format(old_proto.version, new_proto.version))
    with transaction.atomic():
        adcm.prototype = new_proto
        adcm.save()
        switch_config(adcm, new_proto, old_proto)
    log.info('upgrade adcm OK from version %s to %s', old_proto.version, adcm.prototype.version)
    return adcm
def run(self, tmp=None, task_vars=None):
    """Ansible action: delete the current host (host context only)."""
    super().run(tmp, task_vars)
    host_id = get_object_id_from_context(
        task_vars,
        'host_id',
        'host',
        err_msg='You can delete host only in host context',
    )
    log.info('ansible module adcm_delete_host: host #%s', host_id)
    try:
        cm.api.delete_host_by_id(host_id)
    except AdcmEx as e:
        raise AnsibleError(e.code + ":" + e.msg) from e
    return {"failed": False, "changed": True}
def add_cluster(proto, name, desc=''):
    """Create a cluster from a cluster prototype, with its initial config."""
    check_proto_type(proto, 'cluster')
    check_license(proto.bundle)
    with transaction.atomic():
        cluster = Cluster.objects.create(prototype=proto, name=name, description=desc)
        cluster.config = init_object_config(proto, cluster)
        cluster.save()
        cm.issue.update_hierarchy_issues(cluster)
    cm.status_api.post_event('create', 'cluster', cluster.id)
    load_service_map()
    log.info(f'cluster #{cluster.id} {cluster.name} is added')
    return cluster
def save_object_definition(path, fname, conf, obj_list, bundle_hash, adcm=False):
    """Validate one object definition from a bundle config and save it to stage."""
    def_type = conf['type']
    # 'adcm' definitions are accepted only when explicitly allowed
    if def_type == 'adcm' and not adcm:
        return err(
            'INVALID_OBJECT_DEFINITION',
            'Invalid type "{}" in object definition: {}'.format(def_type, fname),
        )
    check_object_definition(fname, conf, def_type, obj_list)
    obj = save_prototype(path, conf, def_type, bundle_hash)
    log.info('Save definition of %s "%s" %s to stage', def_type, conf['name'], conf['version'])
    # remember which file this object came from, for duplicate detection
    obj_list[cook_obj_id(conf)] = fname
    return obj
def add_host_provider(proto, name, desc=''):
    """Create a host provider from a provider prototype, with its initial config."""
    check_proto_type(proto, 'provider')
    check_license(proto.bundle)
    with transaction.atomic():
        provider = HostProvider.objects.create(prototype=proto, name=name, description=desc)
        provider.config = init_object_config(proto, provider)
        provider.save()
        provider.add_to_concerns(ctx.lock)
        cm.issue.update_hierarchy_issues(provider)
    ctx.event.send_state()
    cm.status_api.post_event('create', 'provider', provider.id)
    log.info(f'host provider #{provider.id} {provider.name} is added')
    return provider
def run_job(task_id, job_id, err_file):
    """Run one job of a task via the venv wrapper script.

    Returns the job process's exit code, or 1 if it could not be spawned.
    The original bare `except:` also swallowed KeyboardInterrupt/SystemExit
    and hid the traceback; narrowed to Exception and logged with traceback.
    """
    log.debug("task run job #%s of task #%s", job_id, task_id)
    cmd = [
        '/adcm/python/job_venv_wrapper.sh',
        TaskLog.objects.get(id=task_id).action.venv,
        os.path.join(config.CODE_DIR, 'job_runner.py'),
        str(job_id),
    ]
    log.info("task run job cmd: %s", ' '.join(cmd))
    try:
        proc = subprocess.Popen(cmd, stderr=err_file)
        return proc.wait()
    except Exception:  # pylint: disable=broad-except — any spawn failure means job failed
        log.exception("exception running job %s", job_id)
        return 1