def manage_failover(params):
    """Enable or disable eventual failover on the cluster.

    Compares the desired ``failover`` flag with the current cluster state
    and calls the matching cartridge admin function only when they differ.

    Returns a ModuleRes: changed=False when already in the desired state,
    success=False with a message when the admin call fails.
    """
    desired = params['failover']
    console = get_control_console(params['control_sock'])

    current = console.eval('''
        return require('cartridge').admin_get_failover()
    ''')
    if current == desired:
        # Nothing to do - cluster already matches the requested state.
        return ModuleRes(success=True, changed=False)

    admin_func = 'admin_enable_failover' if desired else 'admin_disable_failover'
    res = console.eval('''
        local failover, err = require('cartridge').{}()
        return {{
            ok = failover ~= nil,
            err = err and err.err or box.NULL
        }}
    '''.format(admin_func))

    if not res['ok']:
        return ModuleRes(
            success=False,
            msg='Failed {}: {}'.format(admin_func, res['err'])
        )
    return ModuleRes(success=True, changed=True)
def check_instance_started(params):
    """Check that the instance is alive and reached one of the expected states.

    For regular instances, membership status must be 'alive' and the
    confapplier state must be in ``waiting_for_state``; membership is not
    consulted for stateboard instances.
    """
    expected_states = params['waiting_for_state']
    is_stateboard = params['stateboard']
    try:
        console = get_control_console(params['control_sock'])

        alive = True
        if not is_stateboard:
            alive = console.eval('''
                return require('membership').myself().status == 'alive'
            ''')

        if alive is True:
            response = console.eval('''
                local state, err = require('cartridge.confapplier').get_state()
                return {
                    state = state,
                    err = err or box.NULL
                }
            ''')
            if response['state'] in expected_states:
                return ModuleRes(success=alive)
            return ModuleRes(
                success=False,
                msg=('Instance state is {}. Error: {}').format(
                    response['state'], response['err'])
            )
    except CartridgeException as e:
        return ModuleRes(success=False, msg=str(e))

    # Reached only when membership reported the instance as not alive.
    if not is_stateboard:
        return ModuleRes(success=False, msg='Instance is not running')
    return ModuleRes(success=True, msg='This is a stateboard instance')
def manage_instance(params):
    """Increase memtx_memory at runtime if a larger value is configured.

    No-op (changed=False) when memtx_memory is not configured, the instance
    is not started yet, box.cfg has not been called, or the configured value
    does not exceed the current one.
    """
    config = params['config']
    defaults = params['cartridge_defaults']
    control_sock = params['control_sock']

    # memtx_memory can come from the instance config or from app defaults.
    if 'memtx_memory' in config:
        target_memory = config['memtx_memory']
    elif 'memtx_memory' in defaults:
        target_memory = defaults['memtx_memory']
    else:
        return ModuleRes(success=True, changed=False)

    # Instance not started yet - nothing to manage.
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=False)

    try:
        console = get_control_console(control_sock)
    except CartridgeException as e:
        # These error codes mean the instance is simply not up yet.
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET,
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=False)
        raise e

    current_memory = console.eval('''
        return type(box.cfg) ~= 'function' and box.cfg.memtx_memory or box.NULL
    ''')
    if current_memory is None:
        # box.cfg wasn't called
        return ModuleRes(success=True, changed=False)

    if target_memory <= current_memory:
        return ModuleRes(success=True, changed=False)

    # memtx_memory can only be increased in runtime; a "cannot decrease"
    # error is tolerated, any other error is raised from Lua.
    increased = console.eval('''
        local ok, err = pcall(function()
            box.cfg {{ memtx_memory = {} }}
        end)
        if not ok then
            if tostring(err):find("cannot decrease memory size at runtime") == nil then
                error('failed to set memtx_memory: ' .. tostring(err))
            end
        end
        return ok
    '''.format(target_memory))

    return ModuleRes(success=True, changed=increased)
def check_instance_started(params):
    """Report success when the instance's membership status is 'alive'."""
    try:
        console = get_control_console(params['control_sock'])
        alive = console.eval('''
            return require('membership').myself().status == 'alive'
        ''')
    except CartridgeException as e:
        return ModuleRes(success=False, msg=str(e))
    return ModuleRes(success=alive)
def manage_replicaset(params):
    """Create the replicaset if it does not exist yet, otherwise modify it."""
    console = get_control_console(params['control_sock'])
    alias = params['replicaset']['alias']

    # Check if replicaset is already created
    existing = get_cluster_replicaset(console, alias)
    if existing is None:
        return create_replicaset(console, params)
    return change_replicaset(console, params, existing)
def manage_failover(params):
    """Dispatch failover management to the new or old cartridge API.

    A bare boolean in ``failover_params`` is normalized to the dict form
    ('eventual' when True, 'disabled' when False). Stateful failover is
    rejected when the cartridge version predates the new failover API.
    """
    failover_params = params.get('failover_params')

    # Legacy short form: a plain bool instead of a params dict.
    if isinstance(failover_params, bool):
        mode = 'eventual' if failover_params is True else 'disabled'
        failover_params = {'mode': mode}

    console = get_control_console(params['control_sock'])
    version = get_tarantool_version(console)

    if version is not None and version >= NEW_FAILOVER_API_CARTRIDGE_VERSION:
        return manage_failover_new(console, failover_params)

    if failover_params['mode'] == 'stateful':
        return ModuleRes(
            success=False,
            msg='Stateful failover is supported since cartridge {}'.format(
                NEW_FAILOVER_API_CARTRIDGE_VERSION)
        )
    return manage_failover_old(console, failover_params)
def manage_instance(params):
    """Apply runtime-tunable box.cfg parameters to a running instance.

    Skips silently (changed=False) when the instance is not started yet or
    box.cfg has not been called. Otherwise adjusts memory-size parameters
    and other dynamic parameters, reporting whether anything changed.
    """
    config = params['config']
    cartridge_defaults = params['cartridge_defaults']
    control_sock = params['control_sock']

    # Check if instance isn't started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=False)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        # These error codes mean the instance is simply not up yet.
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET,
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=False)
        raise e

    if not box_cfg_was_called(control_console):
        return ModuleRes(success=True, changed=False)

    # Change memory size.
    # BUGFIX: the original `changed = changed or change_memory_size(...)`
    # short-circuited, so once one memory parameter changed, the remaining
    # ones were never applied. Call the function first, then accumulate.
    memory_size_changed = False
    for param_name in memory_size_box_cfg_params:
        if param_name in config or param_name in cartridge_defaults:
            param_changed = change_memory_size(
                param_name, cartridge_defaults, config, control_console)
            memory_size_changed = memory_size_changed or param_changed

    # Change dynamic params
    dynamic_params_changed = change_dynamic_params(cartridge_defaults,
                                                   config,
                                                   control_console)

    changed = memory_size_changed or dynamic_params_changed
    return ModuleRes(success=True, changed=changed)
def bootstrap_vshard(params):
    """Bootstrap vshard when the cluster is ready for it.

    Returns changed=False when vshard cannot be bootstrapped yet,
    success=False with a message when the bootstrap call fails.
    """
    console = get_control_console(params['control_sock'])

    ready = console.eval('''
        return require('cartridge.vshard-utils').can_bootstrap()
    ''')
    if not ready:
        return ModuleRes(success=True, changed=False)

    res = console.eval('''
        local ok, err = require('cartridge.admin').bootstrap_vshard()
        return {
            ok = ok or box.NULL,
            err = err and err.err or box.NULL
        }
    ''')
    if not res['ok']:
        return ModuleRes(
            success=False,
            msg='Vshard bootstrap failed: {}'.format(res['err'])
        )
    return ModuleRes(success=True, changed=True)
def get_control_instance(params):
    """Find the alias of the first joined instance (one with a UUID) in membership.

    Fails when a joined instance has no alias, or when the cluster is not
    bootstrapped yet and ``allow_empty`` is not set.
    """
    console = get_control_console(params['sock'])

    members = console.eval('''
        return require('membership').members()
    ''')

    alias = ''
    for member in members.values():
        if 'payload' not in member:
            continue
        payload = member['payload']
        if payload.get('uuid') is None:
            continue
        if payload.get('alias') is None:
            return ModuleRes(
                success=False,
                msg='Unable to get instance alias for "{}"'.format(
                    payload['uuid'])
            )
        # First joined member wins.
        alias = payload['alias']
        break

    if not alias and not params['allow_empty']:
        return ModuleRes(success=False, msg="Cluster isn't bootstrapped yet")

    return ModuleRes(success=True, meta={'host': alias})
def probe_server(params):
    """Probe the advertise URI of every configured, non-expelled instance.

    A probe failure is fatal only for instances that belong to the
    current play.
    """
    console = get_control_console(params['control_sock'])
    play_hosts = params['play_hosts']

    for instance_name, instance_vars in params['hostvars'].items():
        if 'config' not in instance_vars:
            continue
        if instance_expelled(instance_vars):
            continue

        res = console.eval('''
            local ok, err = require('cartridge').admin_probe_server('{}')
            return {{
                ok = ok and true or false,
                err = err and err.err or box.NULL
            }}
        '''.format(instance_vars['config']['advertise_uri']))

        # Failures matter only for hosts in the current play.
        if not res['ok'] and instance_name in play_hosts:
            return ModuleRes(success=False, msg=res['err'])

    return ModuleRes(success=True)
def needs_restart(params):
    """Decide whether the instance must be restarted to apply its configuration.

    Restart is required when forced via ``restarted``, the instance is not
    started yet, the application code or non-dynamic config was changed, or
    a dynamic box.cfg parameter could not be applied at runtime.
    """
    restarted = params['restarted']
    if restarted is True:
        return ModuleRes(success=True, changed=True)
    if restarted is False:
        return ModuleRes(success=True, changed=False)

    stateboard = params['stateboard']
    control_sock = params['control_sock']
    appname = params['appname']
    new_default_conf = params['cartridge_defaults']
    new_instance_conf = params['config']
    cluster_cookie = params['cluster_cookie']
    instance_conf_file = params['instance_conf_file']
    conf_section_name = params['conf_section_name']
    default_conf_path = params['app_conf_file']
    app_code_path = params['bin_dir']

    # check if instance was not started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=True)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET,
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=True)
        # BUGFIX: re-raise unexpected errors; previously control fell
        # through this handler with `control_console` unbound, causing a
        # NameError further below instead of a meaningful failure.
        raise e

    last_restart_time = os.path.getmtime(control_sock)

    # check if application code was updated
    package_update_time = os.path.getmtime(app_code_path)
    if last_restart_time < package_update_time:
        return ModuleRes(success=True, changed=True)

    # check if instance config was changed (except dynamic params)
    current_instance_conf = read_yaml_file_section(
        instance_conf_file,
        control_console,
        conf_section_name
    )
    if check_conf_updated(new_instance_conf, current_instance_conf,
                          dynamic_box_cfg_params):
        return ModuleRes(success=True, changed=True)

    if not stateboard:
        # check if default config was changed (except dynamic params)
        current_default_conf = read_yaml_file_section(
            default_conf_path,
            control_console,
            appname
        )
        new_default_conf.update({'cluster_cookie': cluster_cookie})
        if check_conf_updated(new_default_conf, current_default_conf,
                              dynamic_box_cfg_params):
            return ModuleRes(success=True, changed=True)

    # if box.cfg wasn't called, a restart is required to apply it
    if not box_cfg_was_called(control_console):
        return ModuleRes(success=True, changed=True)

    current_cfg = get_current_cfg(control_console)
    if current_cfg is None:
        return ModuleRes(success=True, changed=True)

    for param_name in dynamic_box_cfg_params:
        new_value = None
        if param_name in new_instance_conf:
            new_value = new_instance_conf[param_name]
        elif not stateboard and param_name in new_default_conf:
            new_value = new_default_conf[param_name]

        # This code runs after an attempt to change the parameter in
        # runtime. If the current parameter wasn't changed to the new
        # value, the instance must be restarted to apply the change.
        if new_value is not None:
            if current_cfg.get(param_name) != new_value:
                return ModuleRes(success=True, changed=True)

    return ModuleRes(success=True, changed=False)
def manage_auth(params):
    """Manage cluster auth settings and users.

    Applies the auth parameters (enabled flag, cookie ages) and then adds,
    edits, or deletes cluster users according to ``auth['users']``. Users
    marked as deleted are removed only when they currently exist in the
    cluster. Returns changed=True when either the params or the users
    actually changed.
    """
    auth_params = params['auth']
    control_console = get_control_console(params['control_sock'])

    if not auth_params:
        return ModuleRes(success=True, changed=False)

    # Check if auth backend implements all functions for users management
    if auth_params.get('users') is not None:
        if not check_cluster_auth_implements_all(control_console):
            errmsg = 'Cluster auth backend must implement all user management functions'
            return ModuleRes(success=False, msg=errmsg)

    # Manage auth params
    ok, cluster_auth_params = get_cluster_auth_params(control_console)
    if not ok:
        return ModuleRes(success=False, msg=cluster_auth_params)

    ok, new_cluster_auth_params = edit_cluster_auth_params(
        control_console,
        enabled=auth_params.get('enabled'),
        cookie_max_age=auth_params.get('cookie_max_age'),
        cookie_renew_age=auth_params.get('cookie_renew_age'),
    )
    if not ok:
        return ModuleRes(success=False, msg=new_cluster_auth_params)

    params_changed = new_cluster_auth_params != cluster_auth_params

    # Manage users
    if auth_params.get('users') is None:
        return ModuleRes(success=True, changed=params_changed)

    users = auth_params['users']
    ok, cluster_users = get_cluster_users(control_console)
    if not ok:
        return ModuleRes(success=False, msg=cluster_users)

    # Precompute name sets once instead of filtering the user list with
    # nested `filter(lambda ...)` passes.
    cluster_usernames = set(u['username'] for u in cluster_users)
    new_usernames = set(u['username'] for u in users) - cluster_usernames

    # users not known to the cluster yet
    users_to_add = [
        u for u in users
        if u['username'] in new_usernames and not user_is_deleted(u)
    ]
    # existing users whose params may need updating
    users_to_edit = [
        u for u in users
        if u['username'] not in new_usernames and not user_is_deleted(u)
    ]
    # users marked deleted that still exist in the cluster
    users_to_delete = [
        u for u in users
        if user_is_deleted(u) and u['username'] in cluster_usernames
    ]

    users_changed = False

    for user in users_to_add:
        ok, err = add_cluster_user(control_console, user)
        if not ok:
            return ModuleRes(success=False, msg=err)
        users_changed = True

    for user in users_to_edit:
        # Guaranteed to exist: users_to_edit usernames are not "new".
        cluster_user = next(
            u for u in cluster_users if u['username'] == user['username']
        )
        ok, edited_user = edit_cluster_user(control_console, user)
        if not ok:
            return ModuleRes(success=False, msg=edited_user)
        users_changed = users_changed or not users_are_equal(cluster_user, edited_user)

    for user in users_to_delete:
        ok, err = delete_cluster_user(control_console, user)
        if not ok:
            return ModuleRes(success=False, msg=err)
        users_changed = True

    return ModuleRes(success=True, changed=params_changed or users_changed)
def needs_restart(params):
    """Decide whether the instance must be restarted to apply its configuration.

    Restart is required when forced, the instance is not started yet, the
    application code or non-memtx config was changed, or memtx_memory could
    not be increased at runtime.
    """
    restart_forced = params['restart_forced']
    if restart_forced:
        return ModuleRes(success=True, changed=True)

    stateboard = params['stateboard']
    control_sock = params['control_sock']
    appname = params['appname']
    new_default_conf = params['cartridge_defaults']
    new_instance_conf = params['config']
    cluster_cookie = params['cluster_cookie']
    instance_conf_file = params['instance_conf_file']
    conf_section_name = params['conf_section_name']

    default_conf_path = '/etc/tarantool/conf.d/{}.yml'.format(appname)
    app_code_path = '/usr/share/tarantool/{}'.format(appname)

    # check if instance was not started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=True)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET,
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=True)
        # BUGFIX: re-raise unexpected errors; previously control fell
        # through this handler with `control_console` unbound, causing a
        # NameError further below instead of a meaningful failure.
        raise e

    last_restart_time = os.path.getmtime(control_sock)

    # check if application code was updated
    package_update_time = os.path.getmtime(app_code_path)
    if last_restart_time < package_update_time:
        return ModuleRes(success=True, changed=True)

    # check if instance config was changed (except memtx_memory)
    current_instance_conf = read_yaml_file_section(instance_conf_file,
                                                   control_console,
                                                   conf_section_name)
    if check_conf_updated(new_instance_conf, current_instance_conf,
                          ['memtx_memory']):
        return ModuleRes(success=True, changed=True)

    if not stateboard:
        # check if default config was changed (except memtx_memory)
        current_default_conf = read_yaml_file_section(default_conf_path,
                                                      control_console,
                                                      appname)
        new_default_conf.update({'cluster_cookie': cluster_cookie})
        if check_conf_updated(new_default_conf, current_default_conf,
                              ['memtx_memory']):
            return ModuleRes(success=True, changed=True)

    new_memtx_memory = None
    if 'memtx_memory' in new_instance_conf:
        new_memtx_memory = new_instance_conf['memtx_memory']
    elif not stateboard and 'memtx_memory' in new_default_conf:
        new_memtx_memory = new_default_conf['memtx_memory']

    # This code runs after an attempt to change memtx_memory in runtime.
    # If the current memtx_memory wasn't changed to the new value, the
    # instance must be restarted to apply the change.
    if new_memtx_memory is not None:
        current_memtx_memory = get_memtx_memory(control_console)
        if current_memtx_memory != new_memtx_memory:
            return ModuleRes(success=True, changed=True)

    return ModuleRes(success=True, changed=False)
def manage_get_uuids(params):
    """Collect UUIDs for the requested instances and replicasets."""
    console = get_control_console(params['control_sock'])
    return get_uuids(
        console,
        params.get('instances'),
        params.get('replicasets'),
        params['hostvars'],
    )
def config_app(params):
    """Patch the clusterwide config with non-system sections from app_config.

    System sections (topology, vshard, auth, ...) may not be patched.
    Sections marked as deleted are removed; others are updated only when
    their body differs from the current config.
    """
    console = get_control_console(params['control_sock'])
    config = params['app_config']

    system_sections = {
        'topology': True,
        'vshard': True,
        'vshard_groups': True,
        'auth': True,
        'users_acl': True,
    }

    # Get current config
    res = console.eval('''
        local cartridge = require('cartridge')
        local config = cartridge.config_get_readonly()
        return {
            ok = config ~= nil,
            config = config ~= nil and config or "Cluster isn't bootstrapped yet",
        }
    ''')
    if not res['ok']:
        return ModuleRes(
            success=False,
            msg='Config patch failed: {}'.format(res['config'])
        )
    current_config = res['config']

    # Build the patch, tracking whether anything actually differs.
    patch = {}
    changed = False
    for section_name, section in config.items():
        if section_name in system_sections:
            return ModuleRes(
                success=False,
                msg='Unable to patch config system section: "{}"'.format(
                    section_name)
            )

        if section_is_deleted(section):
            if section_name in current_config:
                patch[section_name] = None
                changed = True
        elif (section_name not in current_config
                or current_config[section_name] != section['body']):
            patch[section_name] = section['body']
            changed = True

    if not changed:
        return ModuleRes(success=True, changed=False)

    res = console.eval('''
        local cartridge = require('cartridge')
        local patch = require('json').decode('{}')
        local ok, err = cartridge.config_patch_clusterwide(patch)
        return {{
            ok = ok == true,
            err = err and err.err or box.NULL
        }}
    '''.format(json.dumps(patch)))

    if not res['ok']:
        return ModuleRes(
            success=False,
            msg='Config patch failed: {}'.format(res['err'])
        )
    return ModuleRes(success=True, changed=True)