def manage_failover(params):
    """Enable or disable eventual failover via the old cartridge admin API.

    Compares the desired boolean flag with `admin_get_failover()` and calls
    `admin_enable_failover` / `admin_disable_failover` only when they differ.

    Returns ModuleRes: changed=False when the cluster already matches,
    changed=True after a successful switch, success=False on API error.
    """
    desired = params['failover']
    console = get_control_console(params['control_sock'])

    # Current cluster-wide failover state (boolean).
    enabled_now = console.eval('''
        return require('cartridge').admin_get_failover()
    ''')
    if enabled_now == desired:
        return ModuleRes(success=True, changed=False)

    if desired:
        function_name = 'admin_enable_failover'
    else:
        function_name = 'admin_disable_failover'

    res = console.eval('''
        local failover, err = require('cartridge').{}()
        return {{
            ok = failover ~= nil,
            err = err and err.err or box.NULL
        }}
    '''.format(function_name))

    if res['ok']:
        return ModuleRes(success=True, changed=True)

    return ModuleRes(success=False,
                     msg='Failed {}: {}'.format(function_name, res['err']))
def check_instance_started(params):
    """Check that the instance is up and reached an expected cartridge state.

    For a regular instance: membership status must be 'alive' and the
    confapplier state must be one of `waiting_for_state`.
    For a stateboard instance only connectability of the control socket is
    verified (there is no cartridge state to inspect).

    Returns ModuleRes describing success/failure with a diagnostic message.
    """
    waiting_for_state = params['waiting_for_state']
    try:
        control_console = get_control_console(params['control_sock'])
        ok = True
        if not params['stateboard']:
            ok = control_console.eval('''
                return require('membership').myself().status == 'alive'
            ''')
            if ok is True:
                response = control_console.eval('''
                    local state, err = require('cartridge.confapplier').get_state()
                    return {
                        state = state,
                        err = err or box.NULL
                    }
                ''')
                state = response['state']
                err = response['err']

                if state in waiting_for_state:
                    return ModuleRes(success=ok)
                else:
                    return ModuleRes(
                        success=False,
                        msg=('Instance state is {}. Error: {}').format(
                            state, err))
    except CartridgeException as e:
        # Connection/console errors are reported as a plain failure.
        return ModuleRes(success=False, msg=str(e))

    # Reached either when membership reported the instance as not alive
    # (regular instance) or when this is a stateboard instance, for which
    # a successful console connection is enough.
    if not params['stateboard']:
        return ModuleRes(success=False, msg='Instance is not running')
    else:
        return ModuleRes(success=True, msg='This is a stateboard instance')
def validate_config(params):
    """Validate hostvars of every host before any cluster management runs.

    Per-host checks: variable types, required params, instance config,
    params that must be identical on all hosts, cartridge_defaults
    restrictions, mutually exclusive state flags, replicaset definitions.
    After the loop, cluster-wide checks: auth params and clusterwide app
    config (both accumulated into `found_common_params` during the loop).

    Returns ModuleRes with success=False and a message on the first failed
    check; otherwise success=True, changed=False.
    """
    found_replicasets = {}
    found_common_params = {}

    for host in params['hosts']:
        host_vars = params['hostvars'][host]

        # Validate types
        errmsg = validate_types(host_vars)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # All required params should be specified
        errmsg = check_required_params(host_vars, host)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # Instance config
        errmsg = check_instance_config(host_vars['config'], host)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # Params common for all instances
        errmsg = check_params_the_same_for_all_hosts(host_vars, found_common_params)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # Cartridge defaults: the cookie has a dedicated variable and must
        # not leak in through cartridge_defaults.
        if 'cartridge_defaults' in host_vars:
            if 'cluster_cookie' in host_vars['cartridge_defaults']:
                errmsg = 'Cluster cookie must be specified in "cartridge_cluster_cookie", not in "cartridge_defaults"'
                return ModuleRes(success=False, msg=errmsg)

        # Instance state: expelling and restarting are mutually exclusive.
        if host_vars.get('expelled') is True and host_vars.get(
                'restarted') is True:
            errmsg = 'Flags "expelled" and "restarted" can not be set at the same time'
            return ModuleRes(success=False, msg=errmsg)

        # Replicasets
        errmsg = check_replicaset(host_vars, found_replicasets)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

    # Authorization params
    errmsg = check_auth(found_common_params)
    if errmsg is not None:
        return ModuleRes(success=False, msg=errmsg)

    # Clusterwide config
    errmsg = check_app_config(found_common_params)
    if errmsg is not None:
        return ModuleRes(success=False, msg=errmsg)

    return ModuleRes(success=True, changed=False)
def check_instance_started(params):
    """Report success iff membership sees this instance as 'alive'.

    Console/connection errors are converted into a failed ModuleRes with
    the exception text as the message.
    """
    try:
        console = get_control_console(params['control_sock'])
        alive = console.eval('''
            return require('membership').myself().status == 'alive'
        ''')
    except CartridgeException as e:
        return ModuleRes(success=False, msg=str(e))

    return ModuleRes(success=alive)
def manage_instance(params):
    """Apply runtime-changeable box.cfg options to a running instance.

    Silently reports "no change" when the instance is not started yet
    (no control socket, connection failed with a startup-related code,
    or box.cfg has not been called).

    Returns ModuleRes with changed=True if any memory-size or dynamic
    parameter was actually changed in runtime.
    """
    config = params['config']
    cartridge_defaults = params['cartridge_defaults']
    control_sock = params['control_sock']

    # Check if instance isn't started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=False)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        # These codes mean the instance is still starting up — not an error.
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=False)
        raise e

    # Runtime reconfiguration is only possible after box.cfg was called.
    if not box_cfg_was_called(control_console):
        return ModuleRes(success=True, changed=False)

    # Change memory size.
    # BUG FIX: the original wrote
    #     memory_size_changed = memory_size_changed or change_memory_size(...)
    # which short-circuits once any parameter changed, silently skipping
    # every remaining memory parameter. Call change_memory_size() first so
    # each configured parameter is always applied.
    memory_size_changed = False
    for param_name in memory_size_box_cfg_params:
        if param_name in config or param_name in cartridge_defaults:
            memory_size_changed = change_memory_size(
                param_name, cartridge_defaults, config, control_console
            ) or memory_size_changed

    # Change dynamic params
    dynamic_params_changed = change_dynamic_params(cartridge_defaults,
                                                   config,
                                                   control_console)

    changed = memory_size_changed or dynamic_params_changed
    return ModuleRes(success=True, changed=changed)
def bootstrap_vshard(params):
    """Bootstrap vshard when the cluster is ready for it; no-op otherwise.

    Returns ModuleRes: changed=False when bootstrap is not possible yet,
    changed=True after a successful bootstrap, success=False on error.
    """
    console = get_control_console(params['control_sock'])

    ready = console.eval('''
        return require('cartridge.vshard-utils').can_bootstrap()
    ''')
    if not ready:
        # Nothing to do (already bootstrapped or cluster not configured yet).
        return ModuleRes(success=True, changed=False)

    res = console.eval('''
        local ok, err = require('cartridge.admin').bootstrap_vshard()
        return {
            ok = ok or box.NULL,
            err = err and err.err or box.NULL
        }
    ''')
    if res['ok']:
        return ModuleRes(success=True, changed=True)

    return ModuleRes(success=False,
                     msg='Vshard bootstrap failed: {}'.format(res['err']))
def get_replicasets(params):
    """Group instances from the current play into replicaset descriptions.

    Expelled instances and instances without `replicaset_alias` are skipped.
    Replicaset-level options (roles, failover_priority, all_rw, weight,
    vshard_group) are taken from the first instance seen for each alias.
    Missing failover_priority defaults to the first collected instance.

    Returns ModuleRes meta with the replicaset list and a `join_host`
    (the configured control host, or the first replicaset's leader).
    """
    hostvars = params['hostvars']
    play_hosts = params['play_hosts']

    replicasets = {}
    for instance_name, instance_vars in hostvars.items():
        if instance_name not in play_hosts:
            continue
        if instance_expelled(instance_vars):
            continue
        if 'replicaset_alias' not in instance_vars:
            continue

        alias = instance_vars['replicaset_alias']
        # First instance of an alias defines the replicaset-level options.
        replicaset = replicasets.setdefault(alias, {
            'instances': [],
            'roles': instance_vars.get('roles', None),
            'failover_priority': instance_vars.get('failover_priority', None),
            'all_rw': instance_vars.get('all_rw', None),
            'weight': instance_vars.get('weight', None),
            'vshard_group': instance_vars.get('vshard_group', None),
            'alias': alias,
        })
        replicaset['instances'].append(instance_name)

    replicasets_list = list(replicasets.values())

    for replicaset in replicasets_list:
        if replicaset['failover_priority'] is None:
            replicaset['failover_priority'] = [replicaset['instances'][0]]

    join_host = params['control_host']
    if replicasets_list and not join_host:
        join_host = replicasets_list[0]['failover_priority'][0]

    return ModuleRes(success=True, changed=False, meta={
        'replicasets': replicasets_list,
        'join_host': join_host,
    })
def get_control_instance(params):
    """Pick an alias of any joined (uuid-bearing) member as control instance.

    Fails when a joined member has no alias, or when no joined member exists
    and `allow_empty` is not set.
    """
    console = get_control_console(params['sock'])

    members = console.eval('''
        return require('membership').members()
    ''')

    control_instance = ''
    for member in members.values():
        if 'payload' not in member:
            continue
        payload = member['payload']
        if payload.get('uuid') is None:
            continue
        # A joined member without an alias can't be addressed by name.
        if payload.get('alias') is None:
            return ModuleRes(
                success=False,
                msg='Unable to get instance alias for "{}"'.format(payload['uuid']))
        control_instance = payload['alias']
        break

    if not control_instance and not params['allow_empty']:
        return ModuleRes(success=False, msg="Cluster isn't bootstrapped yet")

    return ModuleRes(success=True, meta={'host': control_instance})
def manage_instance(params):
    """Increase the instance's memtx_memory in runtime when configured.

    Takes the target value from instance `config` (preferred) or
    `cartridge_defaults`. Does nothing when no value is configured, when
    the instance is not started yet, when box.cfg wasn't called, or when
    the configured value does not exceed the current one (memtx_memory can
    only be increased in runtime).

    Returns ModuleRes with changed=True only if the value was actually
    increased.
    """
    config = params['config']
    cartridge_defaults = params['cartridge_defaults']
    control_sock = params['control_sock']

    # Check if memtx_memory parameter is specified
    if 'memtx_memory' not in config and 'memtx_memory' not in cartridge_defaults:
        return ModuleRes(success=True, changed=False)

    # Instance-level config takes precedence over app-level defaults.
    new_memtx_memory = None
    if 'memtx_memory' in config:
        new_memtx_memory = config['memtx_memory']
    else:
        new_memtx_memory = cartridge_defaults['memtx_memory']

    # Check if instance isn't started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=False)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        # These codes mean the instance is still starting — not an error.
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=False)
        raise e

    # Get current memtx memory (box.NULL when box.cfg wasn't called yet).
    current_memtx_memory = control_console.eval('''
        return type(box.cfg) ~= 'function' and box.cfg.memtx_memory or box.NULL
    ''')

    if current_memtx_memory is None:
        # box.cfg wasn't called
        return ModuleRes(success=True, changed=False)

    # memtx_memory can only grow in runtime; a smaller/equal value is a no-op.
    if new_memtx_memory <= current_memtx_memory:
        return ModuleRes(success=True, changed=False)

    # try to increase memtx_memory; "cannot decrease memory size at runtime"
    # is tolerated (reported as not changed), any other error is raised.
    increased = control_console.eval('''
        local ok, err = pcall(function()
            box.cfg {{ memtx_memory = {} }}
        end)
        if not ok then
            if tostring(err):find("cannot decrease memory size at runtime") == nil then
                error('failed to set memtx_memory: ' .. tostring(err))
            end
        end
        return ok
    '''.format(new_memtx_memory))

    return ModuleRes(success=True, changed=increased)
def probe_server(params):
    """Probe advertise URIs of every configured, non-expelled instance.

    All instances from hostvars are probed, but a failed probe is treated
    as an error only when the instance belongs to the current play.
    """
    console = get_control_console(params['control_sock'])
    hostvars = params['hostvars']
    play_hosts = params['play_hosts']

    probe_template = '''
        local ok, err = require('cartridge').admin_probe_server('{}')
        return {{
            ok = ok and true or false,
            err = err and err.err or box.NULL
        }}
    '''

    for instance_name, instance_vars in hostvars.items():
        if 'config' not in instance_vars:
            continue
        if instance_expelled(instance_vars):
            continue

        res = console.eval(
            probe_template.format(instance_vars['config']['advertise_uri']))

        if not res['ok'] and instance_name in play_hosts:
            return ModuleRes(success=False, msg=res['err'])

    return ModuleRes(success=True)
def manage_failover(params):
    """Dispatch failover management to the new or old cartridge API.

    A bare boolean `failover_params` is upgraded to the dict form:
    True -> {'mode': 'eventual'}, False -> {'mode': 'disabled'}.
    Stateful failover is rejected on cartridge versions older than
    NEW_FAILOVER_API_CARTRIDGE_VERSION.
    """
    failover_params = params.get('failover_params')

    # Legacy boolean form of the variable.
    if isinstance(failover_params, bool):
        mode = 'eventual' if failover_params is True else 'disabled'
        failover_params = {'mode': mode}

    control_console = get_control_console(params['control_sock'])
    version = get_tarantool_version(control_console)

    use_new_api = (
        version is not None and version >= NEW_FAILOVER_API_CARTRIDGE_VERSION
    )
    if use_new_api:
        return manage_failover_new(control_console, failover_params)

    if failover_params['mode'] == 'stateful':
        return ModuleRes(
            success=False,
            msg='Stateful failover is supported since cartridge {}'.format(
                NEW_FAILOVER_API_CARTRIDGE_VERSION))

    return manage_failover_old(control_console, failover_params)
def get_uuids(control_console, instances_to_find, replicasets, hostvars):
    """Collect one {replicaset_uuid, instance_uuid} pair per replicaset.

    Iterates `instances_to_find` in sorted order (for determinism), skipping
    expelled and stateboard instances. Each remaining instance is looked up
    via membership by its advertise URI; only members that are alive and in
    the 'RolesConfigured' state are considered. `found_replicasets`
    de-duplicates by replicaset uuid, so the meta list contains at most one
    entry per replicaset (the first suitable instance found).

    NOTE(review): the `replicasets` argument is unused in this body —
    confirm with callers whether it can be dropped.
    """
    res = []
    found_replicasets = []

    for instance_name in sorted(instances_to_find):
        if is_expelled(hostvars[instance_name]) or is_stateboard(
                hostvars[instance_name]):
            continue

        response = control_console.eval('''
            local membership = require('membership')
            local member = membership.get_member('{}')
            if member == nil
                or member.status ~= 'alive'
                or member.payload.state ~= 'RolesConfigured'
            then
                return box.NULL
            end

            local replicasets = require('cartridge').admin_get_replicasets()
            for _, r in ipairs(replicasets) do
                for _, s in ipairs(r.servers) do
                    if s.alias == '{}' then
                        return {{
                            replicaset_uuid = r.uuid,
                            instance_uuid = s.uuid
                        }}
                    end
                end
            end

            return box.NULL
        '''.format(hostvars[instance_name]['config']['advertise_uri'],
                   instance_name))

        if response is None:
            continue

        # Keep only the first instance found for each replicaset.
        if response['replicaset_uuid'] not in found_replicasets:
            found_replicasets.append(response['replicaset_uuid'])
            res.append(response)

    return ModuleRes(success=True, changed=False, meta=res)
def manage_failover_new(control_console, failover_params):
    """Set failover parameters via `cartridge.failover_set_params` (new API).

    Builds a Lua table literal from `failover_params` and evaluates it on
    the control console. For stateful mode, state-provider specific tables
    (tarantool_params / etcd2_params) are rendered as well. `changed` is
    computed by re-reading the params after the call and comparing with the
    snapshot taken before.

    NOTE(review): string values (uri, password, prefix, ...) are
    interpolated into the Lua snippet without escaping — a value containing
    a double quote would break the eval. Presumably inputs are restricted
    upstream; confirm.
    """
    # Snapshot current params to detect changes afterwards.
    current_failover_params = control_console.eval('''
        return require('cartridge').failover_get_params()
    ''')

    mode = failover_params['mode']
    lua_params = ['mode = "{}"'.format(mode)]

    if mode == 'stateful':
        state_provider = failover_params.get('state_provider')
        if state_provider == 'stateboard':
            # cartridge names the stateboard provider 'tarantool'.
            lua_params.append('state_provider = "tarantool"')
            stateboard_params = failover_params.get('stateboard_params')

            lua_stateboard_params = []
            if stateboard_params is not None:
                for string_param in ['uri', 'password']:
                    if stateboard_params.get(string_param) is not None:
                        lua_stateboard_params.append('{} = "{}"'.format(
                            string_param, stateboard_params[string_param]))

            if lua_stateboard_params:
                lua_params.append('tarantool_params = {{ {} }}'.format(
                    ', '.join(lua_stateboard_params)))

        elif state_provider == 'etcd2':
            lua_params.append('state_provider = "etcd2"')
            etcd2_params = failover_params.get('etcd2_params')

            lua_etcd2_params = []
            if etcd2_params is not None:
                for string_param in ['prefix', 'username', 'password']:
                    if etcd2_params.get(string_param) is not None:
                        lua_etcd2_params.append('{} = "{}"'.format(
                            string_param, etcd2_params[string_param]))

                if etcd2_params.get('lock_delay') is not None:
                    lua_etcd2_params.append('lock_delay = {}'.format(
                        etcd2_params['lock_delay']))

                if etcd2_params.get('endpoints') is not None:
                    lua_etcd2_params.append('endpoints = {{ {} }}'.format(
                        ", ".join('"{}"'.format(endpoint)
                                  for endpoint in etcd2_params['endpoints'])))

            # Unlike tarantool_params, etcd2_params is always emitted
            # (possibly empty).
            lua_params.append('etcd2_params = {{ {} }}'.format(
                ', '.join(lua_etcd2_params)))

    res = control_console.eval('''
        local ok, err = require('cartridge').failover_set_params({{ {} }})
        return {{
            ok = ok ~= nil and ok or box.NULL,
            err = err and err.err or box.NULL,
        }}
    '''.format(', '.join(lua_params)))

    if not res['ok']:
        errmsg = 'Failed to set failover params: {}'.format(res['err'])
        return ModuleRes(success=False, msg=errmsg)

    # Re-read to detect whether anything effectively changed.
    new_failover_params = control_console.eval('''
        return require('cartridge').failover_get_params()
    ''')

    changed = new_failover_params != current_failover_params
    return ModuleRes(success=True, changed=changed)
def change_replicaset(control_console, params, cluster_replicaset):
    """Bring an existing replicaset to the state described in params.

    Two-phase edit: first join any missing servers (and wait until the
    replicaset is healthy again), then apply roles / failover priority /
    all_rw / weight / vshard_group in a second edit. `changed` is computed
    by comparing the replicaset before and after.

    Fails early when the replicaset is not healthy, or when any instance
    referenced in `instances` / `failover_priority` is not in the cluster.
    """
    replicaset_alias = params['replicaset']['alias']
    replicaset_roles = params['replicaset']['roles']
    replicaset_failover_priority = params['replicaset']['failover_priority']
    replicaset_instances = params['replicaset']['instances']
    replicaset_all_rw = params['replicaset']['all_rw'] if 'all_rw' in params[
        'replicaset'] else None
    replicaset_weight = params['replicaset']['weight'] if 'weight' in params[
        'replicaset'] else None
    replicaset_vshard_group = params['replicaset'].get('vshard_group', None)
    healthy_timeout = params['healthy_timeout']

    # Refuse to edit an unhealthy replicaset.
    if cluster_replicaset['status'] != 'healthy':
        errmsg = 'Replicaset "{}" is not healthy'.format(replicaset_alias)
        return ModuleRes(success=False, msg=errmsg)

    cluster_instances = get_all_cluster_instances(control_console)
    cluster_instances = {i['alias']: i for i in cluster_instances}  # make it dict

    # Servers listed in config but not yet members of the replicaset.
    servers_to_join = list(
        set(replicaset_instances) - set(
            [s['alias'] for s in cluster_replicaset['servers']]))

    for s in servers_to_join:
        if s not in cluster_instances:
            errmsg = 'Instance "{}" specified in replicaset "{}" is not in cluster. '.format(s, replicaset_alias) + \
                     'Make sure that it was started.'
            return ModuleRes(success=False, msg=errmsg)

    for s in replicaset_failover_priority:
        if s not in cluster_instances:
            errmsg = 'Instance "{}" specified in replicaset "{}" is not in cluster. '.format(s, replicaset_alias) + \
                     'Make sure that it was started.'
            return ModuleRes(success=False, msg=errmsg)

    # Phase 1: join missing servers.
    if servers_to_join:
        res, err = edit_replicaset(control_console, cluster_instances,
                                   uuid=cluster_replicaset['uuid'],
                                   join_servers=servers_to_join)
        if not res:
            errmsg = 'Failed to edit replicaset "{}": {}'.format(
                replicaset_alias, err)
            return ModuleRes(success=False, msg=errmsg)

        # Wait for replicaset is healthy
        if not wait_for_replicaset_is_healthy(
                control_console, replicaset_alias, healthy_timeout):
            errmsg = 'Replicaset "{}" is not healthy'.format(replicaset_alias)
            return ModuleRes(success=False, msg=errmsg)

        # Refresh cached instance info with the servers just joined.
        for i in res['servers']:
            cluster_instances[i['alias']] = i

    # Phase 2: apply replicaset-level options.
    res, err = edit_replicaset(control_console, cluster_instances,
                               uuid=cluster_replicaset['uuid'],
                               roles=replicaset_roles,
                               failover_priority=replicaset_failover_priority,
                               all_rw=replicaset_all_rw,
                               weight=replicaset_weight,
                               vshard_group=replicaset_vshard_group)
    if not res:
        errmsg = 'Failed to edit replicaset "{}": {}'.format(
            replicaset_alias, err)
        return ModuleRes(success=False, msg=errmsg)

    # Wait for replicaset is healthy
    if not wait_for_replicaset_is_healthy(control_console, replicaset_alias,
                                          healthy_timeout):
        errmsg = 'Replicaset "{}" is not healthy'.format(replicaset_alias)
        return ModuleRes(success=False, msg=errmsg)

    # Compare before/after to report `changed` honestly.
    edited_cluster_replicaset = get_cluster_replicaset(control_console,
                                                       replicaset_alias)
    changed = not cluster_replicasets_are_equal(cluster_replicaset,
                                                edited_cluster_replicaset)

    return ModuleRes(success=True, changed=changed)
def create_replicaset(control_console, params):
    """Create a replicaset: join the leader, then the remaining instances.

    Steps: join the leader (first entry of failover_priority) with the
    replicaset-level options, wait until healthy, then join the remaining
    instances one by one (waiting for health after each), and finally set
    the failover priority when it lists more than one instance.

    Returns ModuleRes; any failed step aborts with a descriptive message.
    """
    replicaset_alias = params['replicaset']['alias']
    replicaset_failover_priority = params['replicaset']['failover_priority']
    replicaset_roles = params['replicaset']['roles']
    replicaset_instances = params['replicaset']['instances']
    replicaset_all_rw = params['replicaset']['all_rw'] if 'all_rw' in params[
        'replicaset'] else None
    replicaset_weight = params['replicaset']['weight'] if 'weight' in params[
        'replicaset'] else None
    replicaset_vshard_group = params['replicaset'].get('vshard_group', None)
    healthy_timeout = params['healthy_timeout']

    cluster_instances = get_all_cluster_instances(control_console)
    cluster_instances = {i['alias']: i for i in cluster_instances}  # make it dict

    replicaset_leader = replicaset_failover_priority[0]
    if replicaset_leader not in cluster_instances:
        # BUG FIX: error message said "not found is cluster".
        errmsg = 'Leader "{}" (replicaset "{}") not found in cluster. Make sure it was started'.format(
            replicaset_leader, replicaset_alias)
        return ModuleRes(success=False, msg=errmsg)

    # Create replicaset (join leader)
    res, err = edit_replicaset(control_console, cluster_instances,
                               alias=replicaset_alias,
                               join_servers=[replicaset_leader],
                               roles=replicaset_roles,
                               all_rw=replicaset_all_rw,
                               weight=replicaset_weight,
                               vshard_group=replicaset_vshard_group)
    if not res:
        errmsg = 'Failed to create "{}" replicaset: {}'.format(
            replicaset_alias, err)
        return ModuleRes(success=False, msg=errmsg)

    for i in res['servers']:
        cluster_instances[i['alias']] = i

    # Wait for replicaset is healthy
    if not wait_for_replicaset_is_healthy(control_console, replicaset_alias,
                                          healthy_timeout):
        errmsg = 'Replicaset "{}" is not healthy'.format(replicaset_alias)
        return ModuleRes(success=False, msg=errmsg)

    # Get replicaset UUID
    cluster_replicaset = get_cluster_replicaset(control_console,
                                                replicaset_alias)
    replicaset_uuid = cluster_replicaset['uuid']

    # Join other instances.
    # BUG FIX: the original called replicaset_instances.remove(leader),
    # mutating the caller's params['replicaset']['instances'] list as a
    # side effect. Filter the leader out without mutating the input.
    instances_to_join = [
        i for i in replicaset_instances if i != replicaset_leader
    ]
    for replicaset_instance in instances_to_join:
        if replicaset_instance not in cluster_instances:
            # BUG FIX: error message said "not found is cluster".
            errmsg = 'Instance "{}" (replicaset "{}") not found in cluster. Make sure it was started'.format(
                replicaset_instance, replicaset_alias)
            return ModuleRes(success=False, msg=errmsg)

        res, err = edit_replicaset(control_console, cluster_instances,
                                   uuid=replicaset_uuid,
                                   join_servers=[replicaset_instance])
        if not res:
            errmsg = 'Failed to join "{}" to replicaset "{}": {}'.format(
                replicaset_instance, replicaset_alias, err)
            return ModuleRes(success=False, msg=errmsg)

        for i in res['servers']:
            cluster_instances[i['alias']] = i

        # Wait for replicaset is healthy
        if not wait_for_replicaset_is_healthy(
                control_console, replicaset_alias, healthy_timeout):
            errmsg = 'Replicaset "{}" is not healthy'.format(replicaset_alias)
            return ModuleRes(success=False, msg=errmsg)

    if len(replicaset_failover_priority) > 1:
        # Set failover priority
        res, err = edit_replicaset(
            control_console, cluster_instances,
            uuid=replicaset_uuid,
            failover_priority=replicaset_failover_priority)
        if not res:
            errmsg = 'Failed to set failover priority in "{}" replicaset: {}'.format(
                replicaset_alias, err)
            return ModuleRes(success=False, msg=errmsg)

    return ModuleRes(success=True, changed=True)
def needs_restart(params):
    """Decide whether an instance must be restarted to apply its config.

    Restart is required when: `restarted` is explicitly True; the instance
    was never started; the application code is newer than the last restart;
    the non-dynamic part of the instance or app config changed; box.cfg was
    never called; or a dynamic box.cfg parameter could not be changed in
    runtime (still differs from the configured value).

    Returns ModuleRes with changed=True meaning "restart needed".
    """
    restarted = params['restarted']
    if restarted is True:
        return ModuleRes(success=True, changed=True)

    if restarted is False:
        return ModuleRes(success=True, changed=False)

    stateboard = params['stateboard']

    control_sock = params['control_sock']
    appname = params['appname']
    new_default_conf = params['cartridge_defaults']
    new_instance_conf = params['config']
    cluster_cookie = params['cluster_cookie']
    instance_conf_file = params['instance_conf_file']
    conf_section_name = params['conf_section_name']

    default_conf_path = params['app_conf_file']
    app_code_path = params['bin_dir']

    # check if instance was not started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=True)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=True)
        # BUG FIX: the original fell through here without re-raising, so
        # `control_console` was unbound and the code below crashed with a
        # NameError. Re-raise unexpected errors (consistent with
        # manage_instance()).
        raise e

    last_restart_time = os.path.getmtime(control_sock)

    # check if application code was updated
    package_update_time = os.path.getmtime(app_code_path)
    if last_restart_time < package_update_time:
        return ModuleRes(success=True, changed=True)

    # check if instance config was changed (except dynamic params)
    current_instance_conf = read_yaml_file_section(
        instance_conf_file,
        control_console,
        conf_section_name
    )
    if check_conf_updated(new_instance_conf, current_instance_conf,
                          dynamic_box_cfg_params):
        return ModuleRes(success=True, changed=True)

    if not stateboard:
        # check if default config was changed (except dynamic params)
        current_default_conf = read_yaml_file_section(
            default_conf_path,
            control_console,
            appname
        )
        new_default_conf.update({'cluster_cookie': cluster_cookie})
        if check_conf_updated(new_default_conf, current_default_conf,
                              dynamic_box_cfg_params):
            return ModuleRes(success=True, changed=True)

    # if box.cfg wasn't called, the instance needs a restart anyway
    if not box_cfg_was_called(control_console):
        return ModuleRes(success=True, changed=True)

    current_cfg = get_current_cfg(control_console)
    if current_cfg is None:
        return ModuleRes(success=True, changed=True)

    for param_name in dynamic_box_cfg_params:
        new_value = None
        if param_name in new_instance_conf:
            new_value = new_instance_conf[param_name]
        elif not stateboard and param_name in new_default_conf:
            new_value = new_default_conf[param_name]

        # This code runs after an attempt to change the parameter in
        # runtime. If the current parameter wasn't changed to the new
        # value, it means the instance should be restarted to apply it.
        if new_value is not None:
            if current_cfg.get(param_name) != new_value:
                return ModuleRes(success=True, changed=True)

    return ModuleRes(success=True, changed=False)
def manage_auth(params):
    """Apply cluster auth parameters and manage cluster users.

    Edits the auth params (enabled, cookie ages), then diffs the desired
    users list against the cluster's users: new users are added, existing
    ones edited, and users marked as deleted are removed (only if they
    still exist in the cluster). Managing users requires the auth backend
    to implement all user management functions.

    Returns ModuleRes with changed=True if params or any user changed.
    """
    auth_params = params['auth']
    control_console = get_control_console(params['control_sock'])

    if not auth_params:
        return ModuleRes(success=True, changed=False)

    # Check if auth backend implements all functions for users management
    if auth_params.get('users') is not None:
        if not check_cluster_auth_implements_all(control_console):
            errmsg = 'Cluster auth backend must implement all user management functions'
            return ModuleRes(success=False, msg=errmsg)

    # Manage auth params
    ok, cluster_auth_params = get_cluster_auth_params(control_console)
    if not ok:
        # On failure the second tuple element carries the error message.
        return ModuleRes(success=False, msg=cluster_auth_params)

    ok, new_cluster_auth_params = edit_cluster_auth_params(
        control_console,
        enabled=auth_params.get('enabled'),
        cookie_max_age=auth_params.get('cookie_max_age'),
        cookie_renew_age=auth_params.get('cookie_renew_age'),
    )
    if not ok:
        return ModuleRes(success=False, msg=new_cluster_auth_params)

    params_changed = new_cluster_auth_params != cluster_auth_params

    # Manage users
    if auth_params.get('users') is None:
        return ModuleRes(success=True, changed=params_changed)

    users = auth_params['users']
    ok, cluster_users = get_cluster_users(control_console)
    if not ok:
        return ModuleRes(success=False, msg=cluster_users)

    # find new users
    new_usernames = set(u['username'] for u in users).difference(
        set(u['username'] for u in cluster_users)
    )
    users_to_add = list(filter(
        lambda u: u['username'] in new_usernames and not user_is_deleted(u),
        users
    ))

    # find users to edit
    users_to_edit = list(filter(
        lambda u: u['username'] not in new_usernames and not user_is_deleted(u),
        users
    ))

    # find users to delete (marked deleted AND still present in the cluster)
    users_to_delete = list(filter(
        lambda u: user_is_deleted(u) and len(list(filter(
            lambda c: c['username'] == u['username'],
            cluster_users)
        )) > 0,
        users
    ))

    users_changed = False

    for user in users_to_add:
        ok, err = add_cluster_user(control_console, user)
        if not ok:
            return ModuleRes(success=False, msg=err)
        users_changed = True

    for user in users_to_edit:
        cluster_user = [
            u for u in cluster_users if u['username'] == user['username']
        ][0]

        ok, edited_user = edit_cluster_user(control_console, user)
        if not ok:
            return ModuleRes(success=False, msg=edited_user)

        # Only count edits that effectively changed the user.
        users_changed = users_changed or not users_are_equal(cluster_user,
                                                             edited_user)

    for user in users_to_delete:
        ok, err = delete_cluster_user(control_console, user)
        if not ok:
            return ModuleRes(success=False, msg=err)
        users_changed = True

    return ModuleRes(success=True, changed=params_changed or users_changed)
def needs_restart(params):
    """Decide whether the instance must be restarted (memtx_memory-era version).

    Restart is required when: `restart_forced` is set; the instance was
    never started; application code under /usr/share/tarantool is newer
    than the last restart; the instance or app config changed (ignoring
    memtx_memory, which is handled in runtime); or the configured
    memtx_memory could not be applied in runtime.

    Returns ModuleRes with changed=True meaning "restart needed".
    """
    restart_forced = params['restart_forced']
    if restart_forced:
        return ModuleRes(success=True, changed=True)

    stateboard = params['stateboard']

    control_sock = params['control_sock']
    appname = params['appname']
    new_default_conf = params['cartridge_defaults']
    new_instance_conf = params['config']
    cluster_cookie = params['cluster_cookie']
    instance_conf_file = params['instance_conf_file']
    conf_section_name = params['conf_section_name']

    default_conf_path = '/etc/tarantool/conf.d/{}.yml'.format(appname)
    app_code_path = '/usr/share/tarantool/{}'.format(appname)

    # check if instance was not started yet
    if not os.path.exists(control_sock):
        return ModuleRes(success=True, changed=True)

    try:
        control_console = get_control_console(control_sock)
    except CartridgeException as e:
        allowed_errcodes = [
            cartridge_errcodes.SOCKET_NOT_FOUND,
            cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
            cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET
        ]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=True)
        # BUG FIX: the original fell through here without re-raising, so
        # `control_console` stayed unbound and the code below crashed with
        # a NameError. Re-raise unexpected errors instead.
        raise e

    last_restart_time = os.path.getmtime(control_sock)

    # check if application code was updated
    package_update_time = os.path.getmtime(app_code_path)
    if last_restart_time < package_update_time:
        return ModuleRes(success=True, changed=True)

    # check if instance config was changed (except memtx_memory)
    current_instance_conf = read_yaml_file_section(instance_conf_file,
                                                   control_console,
                                                   conf_section_name)
    if check_conf_updated(new_instance_conf, current_instance_conf,
                          ['memtx_memory']):
        return ModuleRes(success=True, changed=True)

    if not stateboard:
        # check if default config was changed (except memtx_memory)
        current_default_conf = read_yaml_file_section(default_conf_path,
                                                      control_console,
                                                      appname)
        new_default_conf.update({'cluster_cookie': cluster_cookie})
        if check_conf_updated(new_default_conf, current_default_conf,
                              ['memtx_memory']):
            return ModuleRes(success=True, changed=True)

    new_memtx_memory = None
    if 'memtx_memory' in new_instance_conf:
        new_memtx_memory = new_instance_conf['memtx_memory']
    elif not stateboard and 'memtx_memory' in new_default_conf:
        new_memtx_memory = new_default_conf['memtx_memory']

    # This code runs after an attempt to change memtx_memory in runtime.
    # If the current memtx_memory wasn't changed to the new value, it means
    # the instance should be restarted to apply the change.
    if new_memtx_memory is not None:
        current_memtx_memory = get_memtx_memory(control_console)
        if current_memtx_memory != new_memtx_memory:
            return ModuleRes(success=True, changed=True)

    return ModuleRes(success=True, changed=False)
def validate_config(params):
    """Validate hostvars before cluster management (stateboard-aware version).

    Like the basic validation, but: at most one host may be marked as a
    stateboard (validated separately after the loop), failover params are
    checked, and use of the deprecated `cartridge_failover` variable is
    reported as a warning.

    Returns ModuleRes with success=False and a message on the first failed
    check; otherwise success=True with collected warnings.
    """
    found_replicasets = {}
    found_common_params = {}
    found_stateboard_vars = None

    warnings = []

    for host in params['hosts']:
        host_vars = params['hostvars'][host]

        # Validate types
        errmsg = validate_types(host_vars)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        if host_vars.get('stateboard') is True:
            if found_stateboard_vars is not None:
                return ModuleRes(
                    success=False,
                    msg='Only one instance can be marked as a "stateboard"')
            found_stateboard_vars = host_vars
            # Stateboard hosts skip the regular instance checks; they are
            # validated separately after the loop.
            continue

        # All required params should be specified
        errmsg = check_required_params(host_vars, host)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # Instance config
        errmsg = check_instance_config(host_vars['config'], host)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # Params common for all instances
        errmsg = check_params_the_same_for_all_hosts(host_vars,
                                                     found_common_params)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

        # Cartridge defaults: the cookie has a dedicated variable.
        if 'cartridge_defaults' in host_vars:
            if 'cluster_cookie' in host_vars['cartridge_defaults']:
                errmsg = 'Cluster cookie must be specified in "cartridge_cluster_cookie", not in "cartridge_defaults"'
                return ModuleRes(success=False, msg=errmsg)

        # Instance state: expelling and restarting are mutually exclusive.
        if host_vars.get('expelled') is True and host_vars.get(
                'restarted') is True:
            errmsg = 'Flags "expelled" and "restarted" cannot be set at the same time'
            return ModuleRes(success=False, msg=errmsg)

        # Replicasets
        errmsg = check_replicaset(host_vars, found_replicasets)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

    # Authorization params
    errmsg = check_auth(found_common_params)
    if errmsg is not None:
        return ModuleRes(success=False, msg=errmsg)

    # Clusterwide config
    errmsg = check_app_config(found_common_params)
    if errmsg is not None:
        return ModuleRes(success=False, msg=errmsg)

    # Failover
    errmsg = check_failover(found_common_params)
    if errmsg is not None:
        return ModuleRes(success=False, msg=errmsg)

    # Stateboard
    if found_stateboard_vars is not None:
        errmsg = check_stateboard(found_stateboard_vars)
        if errmsg is not None:
            return ModuleRes(success=False, msg=errmsg)

    # Deprecated variable — warn but don't fail.
    if found_common_params.get('cartridge_failover') is not None:
        warnings.append(
            'Variable `cartridge_failover` is deprecated since 1.3.0 and will be removed in 2.0.0. '
            'Use `cartridge_failover_params` instead.')

    return ModuleRes(success=True, changed=False, warnings=warnings)
def config_app(params):
    """Patch the clusterwide config with user sections from `app_config`.

    Computes a minimal patch against the current config: sections marked
    as deleted become nil (only when they currently exist), changed or new
    sections get their new body. System sections managed by cartridge
    itself are rejected. Applies the patch via
    `cartridge.config_patch_clusterwide` only when something changed.
    """
    control_console = get_control_console(params['control_sock'])
    config = params['app_config']

    # Sections managed by cartridge itself; users must not patch them.
    system_sections = {
        'topology': True,
        'vshard': True,
        'vshard_groups': True,
        'auth': True,
        'users_acl': True,
    }

    # Get current config
    res = control_console.eval('''
        local cartridge = require('cartridge')
        local config = cartridge.config_get_readonly()
        return {
            ok = config ~= nil,
            config = config ~= nil and config or "Cluster isn't bootstrapped yet",
        }
    ''')
    if not res['ok']:
        # `config` carries the error text in the failure case.
        errmsg = 'Config patch failed: {}'.format(res['config'])
        return ModuleRes(success=False, msg=errmsg)

    current_config = res['config']

    # Patch it
    patch = {}
    changed = False
    for section_name, section in config.items():
        if section_name in system_sections:
            errmsg = 'Unable to patch config system section: "{}"'.format(
                section_name)
            return ModuleRes(success=False, msg=errmsg)

        if section_is_deleted(section):
            # Deleting a section that doesn't exist is a no-op.
            if section_name in current_config:
                patch[section_name] = None
                changed = True
        else:
            if section_name not in current_config or current_config[
                    section_name] != section['body']:
                patch[section_name] = section['body']
                changed = True

    if not changed:
        return ModuleRes(success=True, changed=False)

    # NOTE(review): the JSON patch is embedded into a single-quoted Lua
    # string literal; json.dumps output containing a single quote or a
    # backslash would break this eval — confirm inputs are restricted
    # upstream.
    res = control_console.eval('''
        local cartridge = require('cartridge')
        local patch = require('json').decode('{}')
        local ok, err = cartridge.config_patch_clusterwide(patch)
        return {{
            ok = ok == true,
            err = err and err.err or box.NULL
        }}
    '''.format(json.dumps(patch)))
    if not res['ok']:
        errmsg = 'Config patch failed: {}'.format(res['err'])
        return ModuleRes(success=False, msg=errmsg)

    return ModuleRes(success=True, changed=True)