def _get_qdevice_model_net_options_validators(
    node_ids, allow_empty_values=False, force_options=False
):
    """
    Get validators for qdevice model net options

    list node_ids -- existing node ids, usable as "tie_breaker" values
    bool allow_empty_values -- if True, an empty value means "remove the
        option" and is accepted without running the option's own validator
    bool force_options -- turn forceable errors into warnings
    """
    allow_extra_values = validate.allow_extra_values(
        report_codes.FORCE_OPTIONS, force_options
    )
    validators = {
        "connect_timeout": validate.value_integer_in_range(
            "connect_timeout", 1000, 2 * 60 * 1000, **allow_extra_values
        ),
        "force_ip_version": validate.value_in(
            "force_ip_version", ("0", "4", "6"), **allow_extra_values
        ),
        "port": validate.value_port_number("port", **allow_extra_values),
        "tie_breaker": validate.value_in(
            "tie_breaker",
            ["lowest", "highest"] + node_ids,
            **allow_extra_values
        ),
    }
    # These two validators apply in both branches; previously this list was
    # duplicated in each return statement.
    common_validators = [
        validate.value_not_empty("host", "a qdevice host address"),
        _validate_qdevice_net_algorithm(**allow_extra_values),
    ]
    if not allow_empty_values:
        # explicitly convert to a list for python 3
        return common_validators + list(validators.values())
    # An empty value means "remove the option" and is therefore valid even
    # when the option's regular validator would reject it.
    return common_validators + [
        validate.value_empty_or_valid(option_name, validator)
        for option_name, validator in validators.items()
    ]
def __validate_quorum_device_add_heuristics(
    self, heuristics_options, force_options=False
):
    """
    Validate heuristics options when adding a quorum device

    dict heuristics_options -- heuristics options to be validated
    bool force_options -- turn forceable errors into warnings
    """
    nonexec_names, exec_names = self.__split_heuristics_exec_options(
        heuristics_options
    )
    exec_name_reports, usable_exec_names = (
        self.__validate_heuristics_exec_option_names(exec_names)
    )
    validator_list = self.__get_heuristics_options_validators(
        force_options=force_options
    )
    # every exec_* option with a valid name must carry a command
    validator_list.extend(
        validate.value_not_empty(name, "a command to be run")
        for name in usable_exec_names
    )
    return (
        validate.run_collection_of_option_validators(
            heuristics_options, validator_list
        )
        + self.__validate_heuristics_noexec_option_names(
            nonexec_names, force_options=force_options
        )
        + exec_name_reports
    )
def _qdevice_add_heuristics_options(options, force_options=False):
    """
    Validate quorum device heuristics options when adding a quorum device

    dict options -- heuristics options
    bool force_options -- turn forceable errors into warnings
    """
    nonexec_options, exec_options = _split_heuristics_exec_options(options)
    exec_name_reports, usable_exec_names = (
        _validate_heuristics_exec_option_names(exec_options)
    )
    validator_list = _get_qdevice_heuristics_options_validators(
        force_options=force_options
    )
    # every exec_* option with a valid name must carry a command
    validator_list.extend(
        validate.value_not_empty(name, "a command to be run")
        for name in usable_exec_names
    )
    noexec_name_reports = _validate_heuristics_noexec_option_names(
        nonexec_options, force_options=force_options
    )
    return (
        validate.run_collection_of_option_validators(options, validator_list)
        + noexec_name_reports
        + exec_name_reports
    )
def _validate_container_docker_options_update(
    docker_el, options, force_options
):
    """
    Validate updating docker container options

    etree docker_el -- the docker element currently in CIB (not read here;
        kept for interface parity with sibling *_update validators)
    dict options -- docker options to apply
    bool force_options -- turn forceable errors into warnings
    """
    # image is a mandatory attribute and cannot be removed
    option_validators = [validate.value_not_empty("image", "image name")]
    # An empty value means "remove the option", hence value_empty_or_valid.
    numeric_specs = [
        ("masters", validate.value_nonnegative_integer),
        ("replicas", validate.value_positive_integer),
        ("replicas-per-host", validate.value_positive_integer),
    ]
    option_validators.extend(
        validate.value_empty_or_valid(name, factory(name))
        for name, factory in numeric_specs
    )
    name_reports = validate.names_in(
        # allow to remove options even if they are not allowed
        _docker_options | _options_to_remove(options),
        options.keys(),
        "container",
        report_codes.FORCE_OPTIONS,
        force_options,
    )
    return (
        validate.run_collection_of_option_validators(
            options, option_validators
        )
        + name_reports
    )
def _validate_container(container_type, container_options, force_options=False):
    """
    Validate a bundle's container type and container options

    string container_type -- the type of the container
    dict container_options -- container options to be validated
    bool force_options -- turn forceable errors into warnings
    """
    # idiomatic membership test (was "not container_type in ...")
    if container_type not in GENERIC_CONTAINER_TYPES:
        return [
            reports.invalid_option_value(
                "container type",
                container_type,
                GENERIC_CONTAINER_TYPES,
            )
        ]
    validators = [
        validate.is_required("image", "container"),
        validate.value_not_empty("image", "image name"),
        validate.value_nonnegative_integer("masters"),
        validate.value_nonnegative_integer("promoted-max"),
        # CIB allows only one of masters / promoted-max to be set
        validate.mutually_exclusive(["masters", "promoted-max"], "container"),
        validate.value_positive_integer("replicas"),
        validate.value_positive_integer("replicas-per-host"),
    ]
    deprecation_reports = []
    if "masters" in container_options:
        # "masters" is obsoleted by "promoted-max"; warn but keep it working
        deprecation_reports.append(
            reports.deprecated_option(
                "masters",
                ["promoted-max"],
                "container",
                severity=ReportItemSeverity.WARNING,
            )
        )
    return (
        validate.run_collection_of_option_validators(
            container_options, validators
        )
        + deprecation_reports
        + validate.names_in(
            GENERIC_CONTAINER_OPTIONS,
            container_options.keys(),
            "container",
            report_codes.FORCE_OPTIONS,
            force_options,
        )
    )
def _get_node_name_validators(node_index):
    """
    Get validators checking one node's name

    node_index -- identifies the node in reports when its name is missing
        or empty
    """
    node_label = f"node {node_index}"
    return [
        validate.is_required("name", node_label),
        validate.value_not_empty(
            "name",
            "a non-empty string",
            option_name_for_report=f"{node_label} name",
        ),
    ]
def _validate_generic_container_options_update(
    docker_el, options, force_options
):
    """
    Validate updating generic container options

    etree docker_el -- the container element currently in CIB; its existing
        "masters" / "promoted-max" attributes are consulted below
    dict options -- container options to apply; an empty value means
        "remove the option"
    bool force_options -- turn forceable errors into warnings
    """
    validators = [
        # image is a mandatory attribute and cannot be removed
        validate.value_not_empty("image", "image name"),
        validate.value_empty_or_valid(
            "masters",
            validate.value_nonnegative_integer("masters")),
        validate.value_empty_or_valid(
            "promoted-max",
            validate.value_nonnegative_integer("promoted-max")),
        validate.value_empty_or_valid(
            "replicas",
            validate.value_positive_integer("replicas")),
        validate.value_empty_or_valid(
            "replicas-per-host",
            validate.value_positive_integer("replicas-per-host")),
    ]
    # CIB does not allow both to be set. Deleting both is not a problem,
    # though. Deleting one while setting another also works and is further
    # checked below.
    if not (
        options.get("masters", "") == ""
        or
        options.get("promoted-max", "") == ""
    ):
        validators.append(
            validate.mutually_exclusive(
                ["masters", "promoted-max"], "container"))
    deprecation_reports = []
    if options.get("masters"):
        # If the user wants to delete the masters option, do not report it is
        # deprecated. They may be removing it because they just found out it
        # is deprecated.
        deprecation_reports.append(
            reports.deprecated_option(
                "masters", ["promoted-max"], "container",
                severity=ReportItemSeverity.WARNING))
    # Do not allow to set masters if promoted-max is set unless promoted-max
    # is going to be removed now. Do the same check also the other way
    # around. CIB only allows one of them to be set.
    if (
        options.get("masters")
        and
        docker_el.get("promoted-max")
        and
        options.get("promoted-max") != ""
    ):
        deprecation_reports.append(
            reports.prerequisite_option_must_not_be_set(
                "masters", "promoted-max", "container", "container"))
    if (
        options.get("promoted-max")
        and
        docker_el.get("masters")
        and
        options.get("masters") != ""
    ):
        deprecation_reports.append(
            reports.prerequisite_option_must_not_be_set(
                "promoted-max", "masters", "container", "container"))
    return (
        validate.run_collection_of_option_validators(options, validators)
        +
        deprecation_reports
        +
        validate.names_in(
            # allow to remove options even if they are not allowed
            _generic_container_options | _options_to_remove(options),
            options.keys(),
            "container",
            report_codes.FORCE_OPTIONS,
            force_options))
def test_report_on_empty_string(self):
    # An empty string value must be reported as an invalid option value.
    validator = validate.value_not_empty("key", "description")
    assert_report_item_list_equal(
        validator({"key": ""}),
        [
            (
                severities.ERROR,
                report_codes.INVALID_OPTION_VALUE,
                {
                    "option_name": "key",
                    "option_value": "",
                    "allowed_values": "description",
                },
                None,
            ),
        ],
    )
def _validate_container_docker_options_new(options, force_options):
    """
    Validate docker container options of a newly created bundle

    dict options -- docker options
    bool force_options -- turn forceable errors into warnings
    """
    value_checks = [
        validate.is_required("image", "container"),
        validate.value_not_empty("image", "image name"),
        validate.value_nonnegative_integer("masters"),
        validate.value_positive_integer("replicas"),
        validate.value_positive_integer("replicas-per-host"),
    ]
    name_checks = validate.names_in(
        _docker_options,
        options.keys(),
        "container",
        report_codes.FORCE_OPTIONS,
        force_options,
    )
    return (
        validate.run_collection_of_option_validators(options, value_checks)
        + name_checks
    )
def test_report_on_empty_string(self):
    # An empty string must yield exactly one INVALID_OPTION_VALUE error.
    report_list = validate.value_not_empty("key", "description")({"key": ""})
    expected_item = (
        severities.ERROR,
        report_codes.INVALID_OPTION_VALUE,
        {
            "option_name": "key",
            "option_value": "",
            "allowed_values": "description",
        },
        None,
    )
    assert_report_item_list_equal(report_list, [expected_item])
def _validate_generic_container_options_new(options, force_options):
    """
    Validate generic container options of a newly created bundle

    dict options -- container options
    bool force_options -- turn forceable errors into warnings
    """
    value_checks = [
        validate.is_required("image", "container"),
        validate.value_not_empty("image", "image name"),
        validate.value_nonnegative_integer("masters"),
        validate.value_nonnegative_integer("promoted-max"),
        # CIB allows at most one of masters / promoted-max to be set
        validate.mutually_exclusive(["masters", "promoted-max"], "container"),
        validate.value_positive_integer("replicas"),
        validate.value_positive_integer("replicas-per-host"),
    ]
    # "masters" is obsoleted by "promoted-max"; warn whenever it appears
    deprecation_reports = (
        [
            reports.deprecated_option(
                "masters",
                ["promoted-max"],
                "container",
                severity=ReportItemSeverity.WARNING,
            )
        ]
        if "masters" in options
        else []
    )
    name_checks = validate.names_in(
        _generic_container_options,
        options.keys(),
        "container",
        report_codes.FORCE_OPTIONS,
        force_options,
    )
    return (
        validate.run_collection_of_option_validators(options, value_checks)
        + deprecation_reports
        + name_checks
    )
def test_empty_report_on_zero_int_value(self):
    # Integer 0 is a real value, not an empty string - no report expected.
    report_list = validate.value_not_empty("key", "description")({"key": 0})
    assert_report_item_list_equal(report_list, [])
def create_transport_knet(generic_options, compression_options, crypto_options):
    """
    Validate creating knet transport options

    dict generic_options -- generic transport options
    dict compression_options -- compression options
    dict crypto_options -- crypto options
    """
    # No need to support force:
    # * values are either an enum or numbers with no range set - nothing to
    #   force
    # * names are strictly set as we cannot risk the user overwrites some
    #   setting they should not to
    # * changes to names and values in corosync are very rare
    generic_allowed_names = [
        "ip_version",  # It tells knet which IP to prefer.
        "knet_pmtud_interval",
        "link_mode",
    ]
    generic_checks = [
        validate.value_in("ip_version", ("ipv4", "ipv6")),
        validate.value_nonnegative_integer("knet_pmtud_interval"),
        validate.value_in("link_mode", ("active", "passive", "rr")),
    ]
    compression_allowed_names = ["level", "model", "threshold"]
    compression_checks = [
        validate.value_nonnegative_integer("level"),
        validate.value_not_empty(
            "model", "a compression model e.g. zlib, lz4 or bzip2"
        ),
        validate.value_nonnegative_integer("threshold"),
    ]
    crypto_allowed_names = ["cipher", "hash", "model"]
    crypto_checks = [
        validate.value_in(
            "cipher", ("none", "aes256", "aes192", "aes128", "3des")
        ),
        validate.value_in(
            "hash", ("none", "md5", "sha1", "sha256", "sha384", "sha512")
        ),
        validate.value_in("model", ("nss", "openssl")),
    ]
    # Run value validators and name checks for each option group in turn;
    # report ordering matches the groups' order here.
    report_items = []
    for option_dict, checks, allowed_names, option_type in [
        (
            generic_options, generic_checks, generic_allowed_names,
            "knet transport",
        ),
        (
            compression_options, compression_checks,
            compression_allowed_names, "compression",
        ),
        (crypto_options, crypto_checks, crypto_allowed_names, "crypto"),
    ]:
        report_items.extend(
            validate.run_collection_of_option_validators(option_dict, checks)
        )
        report_items.extend(
            validate.names_in(allowed_names, option_dict.keys(), option_type)
        )
    if (
        # default values taken from `man corosync.conf`
        crypto_options.get("cipher", "aes256") != "none"
        and crypto_options.get("hash", "sha1") == "none"
    ):
        report_items.append(
            reports.prerequisite_option_must_be_enabled_as_well(
                "cipher",
                "hash",
                option_type="crypto",
                prerequisite_type="crypto",
            )
        )
    return report_items
def create(cluster_name, node_list, transport, force_unresolvable=False):
    """
    Validate creating a new minimalistic corosync.conf

    string cluster_name -- the name of the new cluster
    list node_list -- nodes of the new cluster; dict: name, addrs
    string transport -- corosync transport used in the new cluster
    bool force_unresolvable -- if True, report unresolvable addresses as
        warnings instead of errors
    """
    # cluster name and transport validation
    validators = [
        validate.value_not_empty("name", "a non-empty string", "cluster name"),
        validate.value_in("transport", constants.TRANSPORTS_ALL)
    ]
    report_items = validate.run_collection_of_option_validators(
        {
            "name": cluster_name,
            "transport": transport
        },
        validators
    )

    # nodelist validation
    get_addr_type = _addr_type_analyzer()
    all_names_usable = True  # can names be used to identifying nodes?
    all_names_count = defaultdict(int)  # name -> number of occurrences
    all_addrs_count = defaultdict(int)  # address -> number of occurrences
    addr_types_per_node = []  # one list of address types per node
    unresolvable_addresses = set()
    # First, validate each node on its own. Also extract some info which will
    # be needed when validating the nodelist and inter-node dependencies.
    for i, node in enumerate(node_list, 1):
        report_items.extend(
            validate.run_collection_of_option_validators(
                node, _get_node_name_validators(i)
            )
            +
            validate.names_in(["addrs", "name"], node.keys(), "node")
        )
        if "name" in node and node["name"]:
            # Count occurrences of each node name. Do not bother counting
            # missing or empty names. They must be fixed anyway.
            all_names_count[node["name"]] += 1
        else:
            all_names_usable = False
        # Cannot use node.get("addrs", []) - if node["addrs"] == None then
        # the get returns None and len(None) raises an exception.
        addr_count = len(node.get("addrs") or [])
        if transport in (
            constants.TRANSPORTS_KNET + constants.TRANSPORTS_UDP
        ):
            # knet and udp transports each allow a different range of
            # addresses (links) per node
            if transport in constants.TRANSPORTS_KNET:
                min_addr_count = constants.LINKS_KNET_MIN
                max_addr_count = constants.LINKS_KNET_MAX
            else:
                min_addr_count = constants.LINKS_UDP_MIN
                max_addr_count = constants.LINKS_UDP_MAX
            if (
                addr_count < min_addr_count
                or
                addr_count > max_addr_count
            ):
                report_items.append(
                    reports.corosync_bad_node_addresses_count(
                        addr_count,
                        min_addr_count,
                        max_addr_count,
                        node_name=node.get("name"),
                        node_index=i
                    )
                )
        addr_types = []
        # Cannot use node.get("addrs", []) - if node["addrs"] == None then
        # the get returns None and len(None) raises an exception.
        for addr in (node.get("addrs") or []):
            all_addrs_count[addr] += 1
            addr_types.append(get_addr_type(addr))
            if get_addr_type(addr) == ADDR_UNRESOLVABLE:
                unresolvable_addresses.add(addr)
        addr_types_per_node.append(addr_types)
    # Report all unresolvable addresses at once instead on each own.
    if unresolvable_addresses:
        severity = ReportItemSeverity.ERROR
        forceable = report_codes.FORCE_NODE_ADDRESSES_UNRESOLVABLE
        if force_unresolvable:
            severity = ReportItemSeverity.WARNING
            forceable = None
        report_items.append(
            reports.node_addresses_unresolvable(
                unresolvable_addresses,
                severity,
                forceable
            )
        )

    # Reporting single-node errors finished.
    # Now report nodelist and inter-node errors.
    if len(node_list) < 1:
        report_items.append(reports.corosync_nodes_missing())
    non_unique_names = set([
        name for name, count in all_names_count.items() if count > 1
    ])
    if non_unique_names:
        all_names_usable = False
        report_items.append(
            reports.node_names_duplication(non_unique_names)
        )
    non_unique_addrs = set([
        addr for addr, count in all_addrs_count.items() if count > 1
    ])
    if non_unique_addrs:
        report_items.append(
            reports.node_addresses_duplication(non_unique_addrs)
        )
    if all_names_usable:
        # Check for errors using node names in their reports. If node names
        # are ambiguous then such issues cannot be comprehensibly reported so
        # the checks are skipped.
        node_addr_count = {}
        for node in node_list:
            # Cannot use node.get("addrs", []) - if node["addrs"] == None then
            # the get returns None and len(None) raises an exception.
            node_addr_count[node["name"]] = len(node.get("addrs") or [])
        # Check if all nodes have the same number of addresses. No need to
        # check that if udp or udpu transport is used as they can only use one
        # address and that has already been checked above.
        if (
            transport not in constants.TRANSPORTS_UDP
            and
            len(Counter(node_addr_count.values()).keys()) > 1
        ):
            report_items.append(
                reports.corosync_node_address_count_mismatch(node_addr_count)
            )
    # Check mixing IPv4 and IPv6 in one link, node names are not relevant
    links_ip_mismatch = []
    for link, addr_types in enumerate(zip_longest(*addr_types_per_node)):
        if ADDR_IPV4 in addr_types and ADDR_IPV6 in addr_types:
            links_ip_mismatch.append(link)
    if links_ip_mismatch:
        report_items.append(
            reports.corosync_ip_version_mismatch_in_links(links_ip_mismatch)
        )

    return report_items
def test_empty_report_on_zero_int_value(self):
    # Zero is a real value, not an empty string - no report expected.
    validator = validate.value_not_empty("key", "description")
    assert_report_item_list_equal(validator({"key": 0}), [])