class LoadSettingsProcessor(object):
    """
    Rewrites the thread groups of a JMX document so that they match the Taurus
    load settings (concurrency, duration, iterations, steps, throughput).
    """
    TG = ThreadGroup.__name__
    CTG = ConcurrencyThreadGroup.__name__

    def __init__(self, executor):
        self.log = executor.log.getChild(self.__class__.__name__)
        self.load = executor.get_specific_load()
        self.tg = self._detect_thread_group(executor)
        self.tg_handler = ThreadGroupHandler(self.log)

    def _detect_thread_group(self, executor):
        """
        Detect preferred thread group

        :param executor:
        :return:
        """
        chosen = self.TG
        if not executor.settings.get('force-ctg', True):
            return chosen

        msg = 'Thread group detection: %s, regular ThreadGroup will be used'
        if not self.load.duration:
            self.log.debug(msg, 'duration not found')
        elif self.load.iterations:
            self.log.debug(msg, 'iterations are found')
        elif not executor.tool:
            msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
            raise TaurusInternalException(msg % executor.tool_name)
        elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrentThreadGroup not found')
        else:
            chosen = self.CTG

        return chosen

    def modify(self, jmx):
        """Convert every thread group of `jmx` and attach an RPS shaper when throughput is set."""
        if not (self.load.iterations or self.load.concurrency or self.load.duration):
            self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
            return

        # IMPORTANT: fix groups order as changing of element type changes order of getting of groups
        groups = list(self.tg_handler.groups(jmx))

        if isinstance(self.load.concurrency, string_types):
            # user concurrency is a jmeter variable, write it to each group as is
            pairs = [(group, self.load.concurrency) for group in groups]
        else:
            # concurrency is numeric or empty
            keep_raw = self.load.concurrency is None  # keep existing concurrency if omitted in settings
            values = []
            for group in groups:
                value = group.get_concurrency(raw=keep_raw)
                values.append(1 if value is None else value)
            if not keep_raw:
                # split the numeric target concurrency proportionally between groups
                self._divide_concurrency(values)
            pairs = zip(groups, values)

        for group, concurrency in pairs:
            self.tg_handler.convert(source=group, target_gtype=self.tg, load=self.load, concurrency=concurrency)

        if self.load.throughput:
            self._add_shaper(jmx)

        if self.tg == self.TG and self.load.steps:
            self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")

    def _divide_concurrency(self, concurrency_list):
        """
        Calculate target concurrency for every thread group, in place,
        proportionally to the groups' current concurrency values.
        """
        old_total = sum(concurrency_list)
        for idx, value in enumerate(concurrency_list):
            if old_total:
                share = 1.0 * self.load.concurrency * value / old_total
                concurrency_list[idx] = int(round(share))
            else:
                concurrency_list[idx] = 0

        # rounding may overshoot or undershoot the requested total
        leftover = self.load.concurrency - sum(concurrency_list)
        if leftover < 0:
            self.log.warning("Had to add %s more threads to maintain thread group proportion", -leftover)
        elif leftover > 0:
            self.log.warning("%s threads left undistributed due to thread group proportion", leftover)

    def _add_shaper(self, jmx):
        """
        Add shaper

        :param jmx: JMX
        :return:
        """
        if not self.load.duration:
            self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
            return

        shaper = jmx.get_rps_shaper()
        if self.load.ramp_up:
            if isinstance(self.load.throughput, numeric_types) and self.load.duration:
                initial_rps = self.load.throughput / float(self.load.duration)
                initial_rps = max(initial_rps, 0.001)  # avoid zeroing
                initial_rps = min(initial_rps, 1.0)  # avoid starting too fast
            else:
                initial_rps = 1

            if self.load.steps:
                # staircase ramp-up: equal-height RPS steps of equal width
                step_height = self.load.throughput / self.load.steps
                step_width = float(self.load.ramp_up) / self.load.steps
                elapsed = 0
                for step in range(1, self.load.steps + 1):
                    duration = step_width * step - elapsed
                    jmx.add_rps_shaper_schedule(shaper, step_height * step, step_height * step, duration)
                    elapsed += cond_int(duration)
            else:
                jmx.add_rps_shaper_schedule(shaper, initial_rps, self.load.throughput, self.load.ramp_up)

        if self.load.hold:
            jmx.add_rps_shaper_schedule(shaper, self.load.throughput, self.load.throughput, self.load.hold)

        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, shaper)
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class LoadSettingsProcessor(object):
    """
    Adjusts the thread groups of a JMX document to reflect the Taurus load
    settings (concurrency, duration, iterations, throughput).
    """
    TG = ThreadGroup.__name__
    CTG = ConcurrencyThreadGroup.__name__

    def __init__(self, executor):
        self.log = executor.log.getChild(self.__class__.__name__)
        self.load = executor.get_specific_load()
        self.tg = self._detect_thread_group(executor)
        self.tg_handler = ThreadGroupHandler(self.log)

    def _detect_thread_group(self, executor):
        """
        Detect preferred thread group

        :param executor:
        :return:
        """
        preferred = self.TG
        if not executor.settings.get('force-ctg', True):
            return preferred

        msg = 'Thread group detection: %s, regular ThreadGroup will be used'
        if not self.load.duration:
            self.log.debug(msg, 'duration not found')
        elif self.load.iterations:
            self.log.debug(msg, 'iterations are found')
        elif not executor.tool:
            msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
            raise TaurusInternalException(msg % executor.tool_name)
        elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrentThreadGroup not found')
        else:
            preferred = self.CTG

        return preferred

    def modify(self, jmx):
        """Convert every thread group of `jmx` and attach an RPS shaper when throughput is set."""
        if not (self.load.iterations or self.load.concurrency or self.load.duration):
            self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
            return

        # IMPORTANT: fix groups order as changing of element type changes order of getting of groups
        groups = list(self.tg_handler.groups(jmx))

        is_property = self.load.concurrency and not isinstance(self.load.concurrency, numeric_types)
        if is_property:
            # concurrency is a jmeter property/variable: write it to every group verbatim
            for group in groups:
                self.tg_handler.convert(group=group, target=self.tg, load=self.load,
                                        concurrency=self.load.concurrency)
        else:
            for group, concurrency in zip(groups, self._get_concurrencies(groups)):
                self.tg_handler.convert(group=group, target=self.tg, load=self.load, concurrency=concurrency)

        if self.load.throughput:
            self._add_shaper(jmx)

        if self.load.steps and self.tg == self.TG:
            self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")

    def _get_concurrencies(self, groups):
        """
        Collect concurrency values and calculate target concurrency for every thread group
        """
        values = [group.get_concurrency() for group in groups]

        if values and self.load.concurrency:
            old_total = sum(values)  # t_o_c != 0 because of logic of group.get_concurrency()
            for idx, value in enumerate(values):
                share = 1.0 * self.load.concurrency * value / old_total
                # every group keeps at least one thread
                values[idx] = 1 if share < 1 else int(round(share))

            # rounding may overshoot or undershoot the requested total
            leftover = self.load.concurrency - sum(values)
            if leftover < 0:
                self.log.warning("Had to add %s more threads to maintain thread group proportion", -leftover)
            elif leftover > 0:
                self.log.warning("%s threads left undistributed due to thread group proportion", leftover)

        return values

    def _add_shaper(self, jmx):
        """
        Add shaper

        :param jmx: JMX
        :return:
        """
        if not self.load.duration:
            self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
            return

        shaper = jmx.get_rps_shaper()
        if self.load.ramp_up:
            jmx.add_rps_shaper_schedule(shaper, 1, self.load.throughput, self.load.ramp_up)
        if self.load.hold:
            jmx.add_rps_shaper_schedule(shaper, self.load.throughput, self.load.throughput, self.load.hold)

        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, shaper)
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class LoadSettingsProcessor(object):
    """
    Rewrites the thread groups of a JMX document according to the Taurus load
    settings, optionally preserving iterations of pre-existing regular groups.
    """
    TG = ThreadGroup.__name__
    CTG = ConcurrencyThreadGroup.__name__

    def __init__(self, executor):
        self.log = executor.log.getChild(self.__class__.__name__)
        self.load = executor.get_specific_load()
        self.raw_load = executor.get_raw_load()
        self.log.debug("Load: %s", self.load)
        self.force_ctg = executor.settings.get("force-ctg", True)
        self.tg = self._detect_thread_group(executor)
        self.tg_handler = ThreadGroupHandler(self.log)

    def _detect_thread_group(self, executor):
        """
        Detect preferred thread group

        :param executor:
        :return:
        """
        chosen = self.TG
        if not self.force_ctg:
            return chosen

        msg = 'Thread group detection: %s, regular ThreadGroup will be used'
        if not self.load.duration:
            self.log.debug(msg, 'duration not found')
        elif self.load.iterations:
            self.log.debug(msg, 'iterations are found')
        elif not executor.tool:
            msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
            raise TaurusInternalException(msg % executor.tool_name)
        elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrentThreadGroup not found')
        else:
            chosen = self.CTG

        return chosen

    def modify(self, jmx, is_jmx_generated=False):
        """
        Convert thread groups of `jmx` to the detected type.

        :param jmx: JMX document to modify in place
        :param is_jmx_generated: True when the JMX was generated by Taurus
            (then no original iterations need preserving)
        """
        if not (self.raw_load.iterations or self.raw_load.concurrency or self.load.duration):
            self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
            return

        # IMPORTANT: fix groups order as changing of element type changes order of getting of groups
        groups = list(self.tg_handler.groups(jmx))

        if isinstance(self.load.concurrency, str):
            # user concurrency is a jmeter variable, write it to each group as is
            pairs = [(group, self.load.concurrency) for group in groups]
        else:
            # concurrency is numeric or empty
            keep_raw = self.load.concurrency is None  # keep existing concurrency if omitted in settings
            values = []
            for group in groups:
                value = group.get_concurrency(raw=keep_raw)
                values.append(1 if value is None else value)
            if not keep_raw:
                # split the numeric target concurrency proportionally between groups
                self._divide_concurrency(values)
            pairs = zip(groups, values)

        for group, concurrency in pairs:
            iterations = None
            # a regular ThreadGroup that came from the user's own JMX keeps its loop count
            existed_tg = (not is_jmx_generated) and (group.gtype == self.TG)
            if not self.force_ctg and existed_tg:
                iterations = group.get_iterations()
            self.tg_handler.convert(source=group, target_gtype=self.tg, load=self.load,
                                    concurrency=concurrency, iterations=iterations)

        if self.load.throughput:
            self._add_shaper(jmx)

        if self.tg == self.TG and self.load.steps:
            self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")

    def _divide_concurrency(self, concurrency_list):
        """
        Calculate target concurrency for every thread group, in place;
        groups with non-zero weight get at least one thread.
        """
        old_total = sum(concurrency_list)
        for idx, value in enumerate(concurrency_list):
            if old_total and concurrency_list[idx] != 0:
                share = 1.0 * self.load.concurrency * value / old_total
                concurrency_list[idx] = int(round(share)) or 1  # never round an active group down to zero
            else:
                concurrency_list[idx] = 0

        # rounding may overshoot or undershoot the requested total
        leftover = self.load.concurrency - sum(concurrency_list)
        if leftover < 0:
            self.log.warning("Had to add %s more threads to maintain thread group proportion", -leftover)
        elif leftover > 0:
            self.log.warning("%s threads left undistributed due to thread group proportion", leftover)

    def _add_shaper(self, jmx):
        """
        Add shaper

        :param jmx: JMX
        :return:
        """
        if not self.load.duration:
            self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
            return

        shaper = jmx.get_rps_shaper()
        if self.load.ramp_up:
            if isinstance(self.load.throughput, numeric_types) and self.load.duration:
                initial_rps = self.load.throughput / float(self.load.duration)
                initial_rps = max(initial_rps, 0.001)  # avoid zeroing
                initial_rps = min(initial_rps, 1.0)  # avoid starting too fast
            else:
                initial_rps = 1

            if self.load.steps:
                # staircase ramp-up: equal-height RPS steps of equal width
                step_height = self.load.throughput / self.load.steps
                step_width = float(self.load.ramp_up) / self.load.steps
                elapsed = 0
                for step in range(1, self.load.steps + 1):
                    duration = step_width * step - elapsed
                    jmx.add_rps_shaper_schedule(shaper, step_height * step, step_height * step, duration)
                    elapsed += cond_int(duration)
            else:
                jmx.add_rps_shaper_schedule(shaper, initial_rps, self.load.throughput, self.load.ramp_up)

        if self.load.hold:
            jmx.add_rps_shaper_schedule(shaper, self.load.throughput, self.load.throughput, self.load.hold)

        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, shaper)
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class LoadSettingsProcessor(object):
    """
    Adjusts the thread groups of a JMX document to reflect the Taurus load
    settings (concurrency, duration, iterations, throughput).
    """
    TG = ThreadGroup.__name__
    CTG = ConcurrencyThreadGroup.__name__

    def __init__(self, executor):
        self.log = executor.log.getChild(self.__class__.__name__)
        self.load = executor.get_specific_load()
        self.tg = self._detect_thread_group(executor)
        self.tg_handler = ThreadGroupHandler(self.log)

    def _detect_thread_group(self, executor):
        """
        Detect preferred thread group

        :param executor:
        :return:
        """
        tg = self.TG
        if not executor.settings.get('force-ctg', True):
            return tg

        msg = 'Thread group detection: %s, regular ThreadGroup will be used'
        if not self.load.duration:
            self.log.debug(msg, 'duration not found')
        elif self.load.iterations:
            self.log.debug(msg, 'iterations are found')
        elif not executor.tool:
            msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
            raise TaurusInternalException(msg % executor.tool_name)
        elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrentThreadGroup not found')
        else:
            tg = self.CTG

        return tg

    def modify(self, jmx):
        """Convert every thread group of `jmx` and attach an RPS shaper when throughput is set."""
        if not (self.load.iterations or self.load.concurrency or self.load.duration):
            self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
            return

        # IMPORTANT: fix groups order as changing of element type changes order of getting of groups
        groups = list(self.tg_handler.groups(jmx))

        if self.load.concurrency and not isinstance(self.load.concurrency, numeric_types):  # property found
            for group in groups:
                self.tg_handler.convert(group=group, target=self.tg, load=self.load,
                                        concurrency=self.load.concurrency)
        else:
            target_list = zip(groups, self._get_concurrencies(groups))
            for group, concurrency in target_list:
                self.tg_handler.convert(group=group, target=self.tg, load=self.load, concurrency=concurrency)

        if self.load.throughput:
            self._add_shaper(jmx)

        if self.load.steps and self.tg == self.TG:
            self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")

    def _get_concurrencies(self, groups):
        """
        Collect concurrency values and calculate target concurrency for every thread group
        """
        concurrency_list = []
        for group in groups:
            concurrency_list.append(group.get_concurrency())

        if concurrency_list and self.load.concurrency:
            total_old_concurrency = sum(concurrency_list)  # t_o_c != 0 because of logic of group.get_concurrency()
            for idx, concurrency in enumerate(concurrency_list):
                part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency
                if part_of_load < 1:
                    concurrency_list[idx] = 1  # every group keeps at least one thread
                else:
                    concurrency_list[idx] = int(round(part_of_load))

            # rounding may overshoot or undershoot the requested total
            total_new_concurrency = sum(concurrency_list)
            leftover = self.load.concurrency - total_new_concurrency
            if leftover < 0:
                msg = "Had to add %s more threads to maintain thread group proportion"
                self.log.warning(msg, -leftover)
            elif leftover > 0:
                msg = "%s threads left undistributed due to thread group proportion"
                self.log.warning(msg, leftover)

        return concurrency_list

    def _add_shaper(self, jmx):
        """
        Add shaper

        :param jmx: JMX
        :return:
        """
        if not self.load.duration:
            self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
            return

        etree_shaper = jmx.get_rps_shaper()
        if self.load.ramp_up:
            if isinstance(self.load.throughput, numeric_types) and self.load.duration:
                start_rps = self.load.throughput / float(self.load.duration)
                # FIX: clamp the starting rate like the other variants of this class do;
                # without it a small throughput over a long duration starts the schedule
                # at effectively zero RPS, and a large throughput over a short duration
                # starts it far above a sane warm-up rate
                start_rps = max(start_rps, 0.001)  # avoid zeroing
                start_rps = min(start_rps, 1.0)  # avoid starting too fast
            else:
                start_rps = 1
            jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)

        if self.load.hold:
            jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)

        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))