def validate_different_intervals(operation_list):
    """
    Check that the same operations (e.g. monitor) have different interval.

    list operation_list contains dictionaries with attributes of operation
    return see resource operation in pcs/lib/exchange_formats.md
    """
    # Group the raw interval strings first by operation name, then by their
    # normalized value in seconds; two raw strings that normalize to the same
    # number of seconds (e.g. "60" and "1min") are duplicates.
    seconds_by_name = defaultdict(lambda: defaultdict(list))
    for op in operation_list:
        raw_interval = op.get(
            "interval", get_default_operation_interval(op["name"])
        )
        seconds = timeout_to_seconds(raw_interval)
        seconds_by_name[op["name"]][seconds].append(raw_interval)

    # Collect, per operation name, every group of raw intervals that collided
    # on the same normalized value.
    conflicts = defaultdict(list)
    for op_name, interval_groups in seconds_by_name.items():
        for raw_group in sorted(interval_groups.values()):
            if len(raw_group) > 1:
                conflicts[op_name].append(raw_group)

    if not conflicts:
        return []
    return [
        ReportItem.error(
            reports.messages.ResourceOperationIntervalDuplication(
                dict(conflicts)
            )
        )
    ]
def test_valid(self):
    """Valid pacemaker-style timeouts are converted to seconds."""
    # (expected seconds, input) pairs; checked in order so the first
    # failing conversion stops the test, same as a flat assert list.
    cases = [
        (10, 10),
        (10, "10"),
        (10, "10s"),
        (10, "10sec"),
        (600, "10m"),
        (600, "10min"),
        (36000, "10h"),
        (36000, "10hr"),
    ]
    for expected, timeout in cases:
        self.assertEqual(expected, tools.timeout_to_seconds(timeout))
def wait_to_timeout(wait: Union[bool, str, None]) -> int:
    """
    Convert a command-line --wait value to a timeout in seconds.

    wait -- False means "do not wait" (sentinel -1), None means "wait with
        no explicit timeout" (0), any other value is parsed as a pacemaker
        style timeout

    Raises CmdLineInputError when the value cannot be parsed.
    """
    if wait is False:
        return -1
    if wait is None:
        return 0
    seconds = timeout_to_seconds(wait)
    if seconds is None:
        raise CmdLineInputError(f"'{wait}' is not a valid interval value")
    return seconds
def timeout_to_seconds_legacy(
    timeout: Union[int, str]
) -> Union[int, str, None]:
    """
    Transform pacemaker style timeout to number of seconds. If timeout is not
    valid then `timeout` is returned.

    timeout -- timeout string
    """
    seconds = timeout_to_seconds(timeout)
    # Legacy behavior: echo the original value back instead of signalling
    # invalid input with None.
    return timeout if seconds is None else seconds
def test_invalid(self):
    """Unparsable timeouts are converted to None."""
    # Checked in order so the first failing conversion stops the test,
    # same as a flat assert list.
    for timeout in (-10, "1a1s", "10mm", "10mim", "aaa", ""):
        self.assertEqual(None, tools.timeout_to_seconds(timeout))
def _pcmk_time_to_ms(value: str) -> Optional[str]:
    """
    Convert a pacemaker style time value to a string of milliseconds.

    Return None when the value cannot be converted to a whole number of
    seconds (timeout_to_seconds signals invalid input by returning None or
    a string).
    """
    seconds = timeout_to_seconds(value)
    if seconds is None or isinstance(seconds, str):
        return None
    return str(seconds * 1000)


def _get_monitor_attrs(
    resource_el: _Element,
) -> List[Dict[str, Optional[str]]]:
    """
    Get list of interval/timeout attributes of all monitor oparations of
    the resource which is being updated. Only interval and timeout attributes
    are needed for digests calculations. Interval attribute is mandatory
    attribute and timeout attribute is optional and it must be converted to
    milliseconds when passing to crm_resource utility. Operation with missing
    interval attribute or with attributes unable to convert to milliseconds
    will be skipped. Misconfigured operations do not have to necessarily
    prevent restartless update because pacemaker can ignore such misconfigured
    operations. If there is some mismatch between op elements from the
    resource definition and lrm_rsc_op elements from the cluster status, it
    will be found later.
    """
    monitor_attrs_list: List[Dict[str, Optional[str]]] = []
    for operation_el in operations.get_resource_operations(
        resource_el, names=["monitor"]
    ):
        # interval is mandatory: skip the operation when it is missing or
        # not convertible to milliseconds
        interval = _pcmk_time_to_ms(operation_el.get("interval", ""))
        if interval is None:
            continue
        timeout = operation_el.get("timeout")
        if timeout is not None:
            # timeout is optional, but when present it must be convertible;
            # otherwise skip the whole operation
            timeout = _pcmk_time_to_ms(timeout)
            if timeout is None:
                continue
        monitor_attrs_list.append(dict(interval=interval, timeout=timeout))
    return monitor_attrs_list
def get_valid_timeout_seconds(
    timeout_candidate: Union[str, int, None],
) -> Optional[int]:
    """
    Transform pacemaker style timeout to number of seconds, raise LibraryError
    on invalid timeout

    timeout_candidate timeout string or None
    """
    if timeout_candidate is None:
        return None
    seconds = timeout_to_seconds(timeout_candidate)
    if seconds is not None:
        return seconds
    raise LibraryError(
        ReportItem.error(
            reports.messages.InvalidTimeoutValue(str(timeout_candidate))
        )
    )
def get_uniq_interval(name, initial_interval):
    """
    Return unique interval for name based on initial_interval if
    initial_interval is valid or return initial_interval otherwise.

    string name is the operation name for searching interval
    initial_interval is starting point for finding free value
    """
    taken = used_intervals_map[name]
    candidate = timeout_to_seconds(initial_interval)
    # Unparsable intervals are passed through untouched; validation of the
    # value is not this function's job.
    if candidate is None:
        return initial_interval
    if candidate not in taken:
        taken.add(candidate)
        return initial_interval
    # Collision: probe upwards one second at a time until a free slot is
    # found, then return it as a string.
    while candidate in taken:
        candidate += 1
    taken.add(candidate)
    return str(candidate)
def _is_valid(self, value: TypeOptionValue) -> bool:
    """Return True when value parses as a pacemaker style timeout."""
    seconds = timeout_to_seconds(value)
    return seconds is not None
def assert_command_success(
    self,
    devices_before=DEVICES_1,
    devices_updated=DEVICES_2,
    devices_add=(),
    devices_remove=(),
    unfence=None,
    resource_ops=DEFAULT_OPS,
    lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS,
    lrm_start_ops=DEFAULT_LRM_START_OPS,
    lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED,
    lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED,
):
    """
    Configure the full sequence of expected runner/http calls for a
    successful SCSI devices update, run the library command and check that
    no reports were produced.

    The mock calls are registered in the exact order the command is expected
    to make them; do not reorder the config.* calls below.
    """
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-locals
    devices_value = ",".join(sorted(devices_updated))
    # CIB before the update: old device list plus the matching LRM status.
    self.config.runner.cib.load(
        resources=fixture_scsi(
            devices=devices_before, resource_ops=resource_ops
        ),
        status=_fixture_status_lrm_ops(
            SCSI_STONITH_ID,
            lrm_start_ops=lrm_start_ops,
            lrm_monitor_ops=lrm_monitor_ops,
        ),
    )
    self.config.runner.pcmk.is_resource_digests_supported()
    self.config.runner.pcmk.load_state(
        resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
    )
    devices_opt = "devices={}".format(devices_value)
    # Digest for the start operation is always requested first.
    self.config.runner.pcmk.resource_digests(
        SCSI_STONITH_ID,
        SCSI_NODE,
        name="start.op.digests",
        stdout=fixture_digests_xml(
            SCSI_STONITH_ID, SCSI_NODE, devices=devices_value
        ),
        args=[devices_opt],
    )
    # One digest call per monitor operation; interval/timeout are passed to
    # crm_resource in milliseconds (hence the * 1000).
    for num, op in enumerate(resource_ops, 1):
        name, interval, timeout, _ = op
        if name != "monitor":
            continue
        args = [devices_opt]
        args.append(
            "CRM_meta_interval={}".format(
                1000 * timeout_to_seconds(interval)
            )
        )
        if timeout:
            args.append(
                "CRM_meta_timeout={}".format(
                    1000 * timeout_to_seconds(timeout)
                )
            )
        self.config.runner.pcmk.resource_digests(
            SCSI_STONITH_ID,
            SCSI_NODE,
            name=f"{name}-{num}.op.digests",
            stdout=fixture_digests_xml(
                SCSI_STONITH_ID,
                SCSI_NODE,
                devices=devices_value,
            ),
            args=args,
        )
    # When unfencing is expected, the command reads corosync config and
    # calls the unfence endpoint on all online nodes.
    if unfence:
        self.config.corosync_conf.load_content(
            corosync_conf_fixture(
                self.existing_corosync_nodes,
                get_two_node(len(self.existing_corosync_nodes)),
            )
        )
        self.config.http.corosync.get_corosync_online_targets(
            node_labels=self.existing_nodes
        )
        self.config.http.scsi.unfence_node(
            original_devices=devices_before,
            updated_devices=devices_updated,
            node_labels=self.existing_nodes,
        )
    # CIB after the update: new device list and updated LRM digests.
    self.config.env.push_cib(
        resources=fixture_scsi(
            devices=devices_updated, resource_ops=resource_ops
        ),
        status=_fixture_status_lrm_ops(
            SCSI_STONITH_ID,
            lrm_start_ops=lrm_start_ops_updated,
            lrm_monitor_ops=lrm_monitor_ops_updated,
        ),
    )
    # Exercise either the add/remove or the set-devices variant of the
    # command, depending on which arguments the caller supplied.
    if devices_add or devices_remove:
        stonith.update_scsi_devices_add_remove(
            self.env_assist.get_env(),
            SCSI_STONITH_ID,
            devices_add,
            devices_remove,
        )
    else:
        stonith.update_scsi_devices(
            self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated
        )
    self.env_assist.assert_reports([])
def config_cib(
    self,
    devices_before=DEVICES_1,
    devices_updated=DEVICES_2,
    resource_ops=DEFAULT_OPS,
    lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS,
    lrm_start_ops=DEFAULT_LRM_START_OPS,
    host_map=DEFAULT_PCMK_HOST_MAP,
    nodes_running_on=1,
    start_digests=True,
    monitor_digests=True,
):
    """
    Register the expected CIB-load, cluster-state and digest runner calls
    for a SCSI devices update of self.stonith_id / self.stonith_type.

    Calls are registered in the exact order the command is expected to make
    them; do not reorder the config.* calls below. start_digests and
    monitor_digests toggle whether the respective digest calls are expected.
    """
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-locals
    devices_value = ",".join(sorted(devices_updated))
    # CIB before the update: old device list plus the matching LRM status.
    self.config.runner.cib.load(
        resources=fixture_scsi(
            stonith_id=self.stonith_id,
            stonith_type=self.stonith_type,
            devices=devices_before,
            resource_ops=resource_ops,
            host_map=host_map,
        ),
        status=_fixture_status_lrm_ops(
            self.stonith_id,
            self.stonith_type,
            lrm_start_ops=lrm_start_ops,
            lrm_monitor_ops=lrm_monitor_ops,
        ),
    )
    self.config.runner.pcmk.is_resource_digests_supported()
    self.config.runner.pcmk.load_state(
        resources=fixture_crm_mon_res_running(
            self.stonith_id,
            f"stonith:{self.stonith_type}",
            nodes_running_on=nodes_running_on,
        ),
        nodes=FIXTURE_CRM_MON_NODES,
    )
    devices_opt = "devices={}".format(devices_value)
    if start_digests:
        self.config.runner.pcmk.resource_digests(
            self.stonith_id,
            SCSI_NODE,
            name="start.op.digests",
            stdout=fixture_digests_xml(
                self.stonith_id, SCSI_NODE, devices=devices_value
            ),
            args=[devices_opt],
        )
    # One digest call per monitor operation; interval/timeout are passed to
    # crm_resource in milliseconds (hence the * 1000).
    if monitor_digests:
        for num, op in enumerate(resource_ops, 1):
            name, interval, timeout, _ = op
            if name != "monitor":
                continue
            args = [devices_opt]
            args.append(
                "CRM_meta_interval={}".format(
                    1000 * timeout_to_seconds(interval)
                )
            )
            if timeout:
                args.append(
                    "CRM_meta_timeout={}".format(
                        1000 * timeout_to_seconds(timeout)
                    )
                )
            self.config.runner.pcmk.resource_digests(
                self.stonith_id,
                SCSI_NODE,
                name=f"{name}-{num}.op.digests",
                stdout=fixture_digests_xml(
                    self.stonith_id,
                    SCSI_NODE,
                    devices=devices_value,
                ),
                args=args,
            )