def build(self, value):
    """Wrap *value* as a JSONP document: ``<callback>(<json>)``.

    Uses ``self.callback`` when set, otherwise the default name
    ``simpleapiCallback``. Returns the padded document UTF-8 encoded
    (``bytes``).
    """
    func = self.callback or 'simpleapiCallback'
    # The callback may arrive as bytes; decode it so the formatting below
    # always receives text. The original code called ``.decode("utf-8")``
    # unconditionally, which raises AttributeError on ``str`` under
    # Python 3 (str has no .decode).
    if isinstance(func, bytes):
        func = func.decode("utf-8")
    result = u'%(func)s(%(data)s)' % {
        'func': func,
        'data': json.dumps(value),
    }
    return result.encode("utf-8")
# Tail of a redis workload pod builder: rewrites redis.conf (listen port and
# bind address) via an init container, mounts a shared volume so the main
# container can read the prepared config, pins redis-server to `cpu_list`
# with taskset, then prints the assembled pod spec as JSON.
# NOTE(review): this line is the TAIL of a statement whose opening (the
# cmdline_config list / sh -c call that the leading "sed -i ..." strings and
# the closing `))]` belong to) lies outside this chunk — left byte-identical;
# reconstructing the opening would be guesswork.
# NOTE(review): names such as communication_port, application_host_ip,
# image_name, securityContext, initContainers, volumeMounts, command, pod
# come from earlier in the file / a sibling `common` module not visible here.
"sed -i \"s/port 6379/port {communication_port}/\" \ redis.conf && " "sed -i \"s/bind 127.0.0.1/bind {application_host_ip}/\" \ redis.conf ".format(communication_port=communication_port, application_host_ip=application_host_ip ))] volume_prep_config = { "name": "shared-data", "mountPath": "/prep_config" } initContainer = { "name": "prep-config", "image": image_name, "securityContext": securityContext, "command": cmdline_config, "volumeMounts": [ volume_prep_config ] } initContainers.append(initContainer) volumeMounts.append(volume_prep_config) command.append("taskset -c {} redis-server /prep_config/redis.conf".format(cpu_list)) json_format = json.dumps(pod) print(json_format)
# Builds an endless cassandra-stress mixed-workload loop, wraps it in the
# project's pex wrapper (which runs the command, scrapes its output and
# publishes metrics/SLO data to Kafka), appends the wrapped command to the
# pod's command list, and prints the pod spec as JSON.
# NOTE(review): number_of_rows, application_host_ip, communication_port,
# threads, wrapper_*, slo, command and pod are defined elsewhere (sibling
# `common` module) — not visible in this chunk.
# NOTE(review): this chunk is whitespace-collapsed; the `# @TODO peak_load
# should match cassandra_stress parameters` comment originally sat on its own
# line inside the .format(...) call — on a single line it would comment out
# the remaining arguments. Left byte-identical.
cassandra_stress_cmd = ('"while true; do cassandra-stress mixed duration=90s ' '-pop seq=1..{} -node {} -port native={} -rate ' 'threads={}; done"'.format(number_of_rows, application_host_ip, communication_port, threads)) cmd = """/usr/bin/cassandra_stress_wrapper.pex \ --command '{cassandra_stress_cmd}' \ --metric_name_prefix 'cassandra_' \ --stderr 0 --kafka_brokers '{kafka_brokers}' --kafka_topic {kafka_topic} \ --log_level {log_level} \ --peak_load {peak_load} --load_metric_name {load_metric_name} \ --slo {slo} --sli_metric_name {sli_metric_name} \ --subprocess_shell \ --labels '{labels}'""".format( cassandra_stress_cmd=cassandra_stress_cmd, kafka_brokers=wrapper_kafka_brokers, log_level=wrapper_log_level, kafka_topic=wrapper_kafka_topic, labels=json.dumps(wrapper_labels), slo=slo, sli_metric_name="cassandra_p99", # @TODO peak_load should match cassandra_stress parameters peak_load=10000, load_metric_name="cassandra_qps") command.append(cmd) json_format = json.dumps(pod) print(json_format)
# Fires an HTTP request via MethodRequest().request_value and optionally
# "registers" values extracted from the response into the Variables table so
# that later test cases can reference them as ${name} placeholders.
#
# Params (from the visible code): name (unused here), method/url/data/headers
# forwarded to the request, regist_variable — comma-separated variable names
# to register, regular — comma-separated extraction rules, is_commit (unused
# in this view), testcase_name — session key for positional '$[i]' rules.
# Returns (response_body, status_text) where status_text is either the list
# of extracted values or a Chinese error message (runtime strings — do not
# translate).
#
# Extraction rules, translated from the original Chinese comments:
#   '$.a.b[0]'  — JSONPath-ish walk over the JSON response (dict.get per key,
#                 '[i]' indexes into lists); non-JSON responses register the
#                 literal error text "不是合法的字典响应信息".
#   '$[i]'      — i-th value stashed in session[testcase_name].
#   otherwise   — the rule is treated as a regex over the response body
#                 (re.compile(...).findall); compile errors register str(e).
# A single-element dict/list result is unwrapped; ints are str()-ed; other
# dict/list results are json.dumps-ed; falsy values become ''. The extracted
# value is then upserted into Variables keyed on (name, user_id).
# NOTE(review): the count of rules may be <= the count of names ("小于或等于");
# surplus names simply never get a value assigned in this loop.
# NOTE(review): responses containing 'html' are wrapped in <xmp> before any
# extraction, so regex rules match the wrapped body — confirm intended.
# NOTE(review): this chunk is whitespace-collapsed (L4 ends mid-assignment,
# continued on the next line; inline '#' comments originally sat on their own
# lines). Code left byte-identical; only this header was added.
def to_regist_variables(name, method, url, data, headers, regist_variable='', regular='', is_commit=True, testcase_name="__testcase_name"): response_body = MethodRequest().request_value(method, url, data, headers) user_id = session.get('user_id') if 'html' in response_body: response_body = '<xmp> %s </xmp>' % response_body print('response_body:', response_body) try: if regist_variable: # 判断是否有注册变量和正则方法,有的话进行获取 if regular: # 判断是否有正则匹配规则 regular_list = regular.split(',') regist_variable_list = regist_variable.split(',') print('regular_list:', regular_list, len(regular_list), len(regist_variable_list), regist_variable_list) if len(regular_list) <= len(regist_variable_list): # 判断正则和注册变量数目是否相符 小于或等于 regist_variable_value_list = [] for index in range(len(regular_list)): # 循环取正则的规则 if '$.' in regular_list[index]: if not is_json(response_body): regist_variable_value = "不是合法的字典响应信息" else: keys = regular_list[index][2:].split('.') if '' in keys: keys.remove('') print('keys:', keys) regist_variable_value = json.loads( response_body) for key in keys: if key: try: if ']' in key and '[' in key: key, _index = key.split('[') regist_variable_value = regist_variable_value.get( key)[int(_index[:-1])] else: regist_variable_value = regist_variable_value.get( key) except AttributeError as e: print(e) regist_variable_value = '' print('regist_variable_value:', regist_variable_value, type(regist_variable_value), regist_variable_list[index]) elif '$[' in regular_list[index]: # try: print('session params', testcase_name, session[testcase_name]) p_index = int(regular_list[index][2:-1]) if testcase_name != '__testcase_name': regist_variable_value = session[testcase_name][ p_index] else: regist_variable_value = '__testcase_name' else: try: regist_variable_value = re.compile( regular_list[index]).findall(response_body) except Exception as e: regist_variable_value = str(e) if isinstance(regist_variable_value, (dict, list)): if len(regist_variable_value) == 1: regist_variable_value = 
regist_variable_value[ 0] if regist_variable_value == 0 or isinstance( regist_variable_value, str): pass elif isinstance(regist_variable_value, int): regist_variable_value = str(regist_variable_value) print('regist_variable_value int', regist_variable_value) else: if isinstance(regist_variable_value, (dict, list)): if len(regist_variable_value) == 1: regist_variable_value = regist_variable_value[ 0] regist_variable_value = json.dumps( regist_variable_value) print('json.dumps后:', regist_variable_value) elif not regist_variable_value: regist_variable_value = '' else: regist_variable_value = '未知的值' print('regist_variable_value last', regist_variable_value) regist_variable_value_list.append( regist_variable_value) regist_variable_list[index] = regist_variable_list[ index].strip() # 注册变量时清除空格 if Variables.query.filter( Variables.name == regist_variable_list[index], Variables.user_id == user_id).count() > 0: print( '%s 请求结束,存在此变量时:' % url, Variables.query.filter( Variables.name == regist_variable_list[index], Variables.user_id == user_id).first()) Variables.query.filter(Variables.name == regist_variable_list[index], Variables.user_id == user_id).first().value = \ regist_variable_value db.session.commit() else: private_variable_value = regist_variable_value private_variable = Variables( regist_variable_list[index], private_variable_value, is_private=1, user_id=user_id) db.session.add(private_variable) db.session.commit() return response_body, str(regist_variable_value_list) return response_body, '正则匹配规则数量过多' return response_body, '未存在正则匹配' return response_body, '存在未注册变量' except Exception as e: print('注册变量解析失败', e) return response_body, '注册变量解析失败'
# Builds the three SPECjbb distributed-mode commands: the controller (wrapped
# in the metrics wrapper which publishes to Kafka; specjbb logs to stderr,
# hence --stderr 0), the transaction injector, and the backend JVM pinned to
# `cpu_list` via taskset.
# NOTE(review): controller_cmd passes `--log_level DEBUG` twice and hardcodes
# DEBUG while also formatting with log=wrapper_log_level — a kwarg that the
# template never references ({log} does not appear). Likely a leftover;
# confirm against the wrapper's CLI before cleaning up.
# NOTE(review): this chunk is truncated — the trailing volume_prep_config
# dict is unclosed here (its closing brace is outside this view), so the tail
# is left byte-identical rather than guessed at.
# NOTE(review): threads_count, specjbb_jar, config_path, wrapper_*, qps, slo,
# cpu_list come from elsewhere in the file / a sibling `common` module.
controller_cmd = """java -Dspecjbb.forkjoin.workers={} \ -Xms4g -Xmx4g -jar {} -m distcontroller -p {}""".format( threads_count, specjbb_jar, config_path) # Add wrapper to controller_cmd; specjbb prints data to stderr. controller_cmd = """{wrapper} --command '{command}' --stderr 0 \ --kafka_brokers {brokers} --log_level DEBUG \ --kafka_topic {kafka_topic} --log_level DEBUG \ --metric_name_prefix 'specjbb_' \ --labels '{labels}' \ --peak_load \"{peak_load}\" --load_metric_name \ \"const\" --slo {slo} --sli_metric_name \ specjbb_p99_total_purchase""".format( wrapper=specjbb_wrapper, command=controller_cmd, brokers=wrapper_kafka_brokers, log=wrapper_log_level, kafka_topic=wrapper_kafka_topic, labels=json.dumps(wrapper_labels), peak_load=qps, slo=slo) # @TODO we should set max RAM assigned to JVM, but if set the job fails to run. injector_cmd = """java -jar {jar} -m txinjector -p {config} -G GRP1 -J JVM_B"""\ .format(jar=specjbb_jar, config=config_path) backend_cmd = """ /usr/bin/taskset -c {cpu_list} \ java -Xms4g -Xmx4g -Xmn2g -XX:-UseBiasedLocking -XX:+UseParallelOldGC \ -jar {jar} -m backend -p {config} -G GRP1 -J JVM_A"""\ .format(cpu_list=cpu_list, jar=specjbb_jar, config=config_path) volume_prep_config = { "name": "shared-data", "mountPath": "/prep_config"
# Workload script: pins the TensorFlow CNN benchmark (resnet56 on cifar10,
# CPU-only, NHWC) to `cpu_list` via taskset, wraps it in the training wrapper
# pex that publishes the training-speed SLI to Kafka
# (--inverse_sli_metric_value because lower step time = better), appends the
# command to the pod's command list and prints the pod spec as JSON.
# All free names (command, json, pod, wrapper_*, slo, cpu_list) are imported
# from the sibling `common` module below.
# NOTE(review): this chunk is whitespace-collapsed — the adjacent string
# literals rely on implicit concatenation across what were separate source
# lines. Code left byte-identical; only this header was added.
# limitations under the License. from common import command, json, pod, \ wrapper_kafka_topic, wrapper_kafka_brokers, wrapper_log_level, \ wrapper_labels, slo, cpu_list command.append( "taskset -c {cpu_list} /tensorflow_benchmark_training_wrapper.pex --command '/usr/bin/python3.5" " -u /root/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py " "--datasets_use_prefetch=True --batch_group_size=1 --device=cpu " "--data_format=NHWC --data_name=cifar10 --batch_size=64 --display_every=1 " "--model=resnet56 --train_dir=/saved_model/ --num_epochs=100 " "--num_intra_threads=10 --num_inter_threads=10' " "--metric_name_prefix 'tensorflow_benchmark_' " "--stderr 0 --kafka_brokers '{kafka_brokers}' --kafka_topic {kafka_topic} " "--log_level {log_level} " "--slo {slo} --sli_metric_name tensorflow_benchmark_training_speed " "--inverse_sli_metric_value " "--peak_load 1 --load_metric_name const " "--labels '{labels}'".format( cpu_list=cpu_list, kafka_brokers=wrapper_kafka_brokers, log_level=wrapper_log_level, kafka_topic=wrapper_kafka_topic, labels=json.dumps(wrapper_labels), slo=slo)) json_format = json.dumps(pod) print(json_format)
def build(self, value):
    """Serialize *value* to a JSON string using the project's
    SimpleAPIEncoder for non-standard types.
    """
    payload = json.dumps(value, cls=SimpleAPIEncoder)
    return payload
def kwargs(self, value):
    """Dispatch *value* to JSON serialization or to ``self.parse``
    depending on ``action``; returns None for any other action.

    NOTE(review): ``action`` is not defined in this scope — it is neither
    a parameter nor ``self.action`` — so as written this raises NameError
    unless a module-level ``action`` exists. Verify against the original
    module (sibling methods suggest ``self.action`` or an ``action``
    parameter was intended).
    """
    if action == 'build':
        return json.dumps(value, cls=SimpleAPIEncoder)
    elif action == 'parse':
        return self.parse(value)
def build(self, value):
    """Render *value* as a JSONP document, ``<callback>(<json>)``,
    and return it UTF-8 encoded.
    """
    callback_name = self.callback or 'simpleapiCallback'
    payload = json.dumps(value)
    document = u'%(func)s(%(data)s)' % {
        'func': callback_name.decode("utf-8"),
        'data': payload,
    }
    return document.encode("utf-8")
# Repeatedly resolves ${name} placeholders in *params* until none remain:
# each round re.findall's r'\${([^\${}]+)}' over the string, then for every
# captured name either generates a random value (names containing '随机' /
# "random" go through RangName(...).rand_str) or substitutes the value stored
# in the `variables` DB table. Two special names get extra handling,
# per the original Chinese comments:
#   'auto_vdb_parameter' — merges the stored list with
#     'auto_vdb_cannot_parameter', re-dumps it, and additionally swaps
#     target_env_tag for tag_name in the whole string (VDB-restore handling).
#   'auto_v2p_parameter' — merges with 'auto_v2p_cannot_parameter' likewise.
# Returns the fully resolved string; empty/None input returns "" immediately.
# NOTE(review): the loop exits early via `return params` when a round matches
# no known variables (in_variables_num == 0) — unresolved ${...} tokens for
# unknown names are left in place rather than raising; confirm intended.
# NOTE(review): the tag_name lookup passes the bare string 'tag_name' as the
# query args where every sibling call passes a 1-tuple ('tag_name',) — most
# DB-API drivers treat a string as a sequence of per-character params; verify
# against cdb().query_db's signature.
# NOTE(review): this chunk is whitespace-collapsed (the statement sequence
# continues onto the next line; inline '#' comments originally sat on their
# own lines). Code left byte-identical; only this header was added.
def analysis_params(self, params, is_change=None, testcase_name='__testcase_name'): if params in ("", None): params = "" return params while 1: print('analysis_before:', type(params), params) res = r'\${([^\${}]+)}' words = re.findall(re.compile(res), params) print('需要解析的变量:%s, 筛选出的变量: %s' % (params, words)) if len(words) == 0: print('最后的解析结果:len(words)', params) return params in_variables_num = 0 for word in words: if '随机' in word: print('随机开始', word) params = RangName(params).rand_str(testcase_name=testcase_name, analysis_word=word) else: if (word,) in self.variables: in_variables_num += 1 variable_value_query_sql = 'select value from variables where name=%s' variable_value = cdb().query_db(variable_value_query_sql, (word,), True)[0] print('variable_value: ${%s}' % word, variable_value) if is_change == "headers": params = params.replace('${%s}' % word, '%s' % variable_value) if word == 'auto_vdb_parameter': # 恢复VDB时代码处理VDB的参数 try: target_env_tag = cdb().query_db(variable_value_query_sql, ('target_env_tag',), True)[0] # print('auto_vdb_parameter:', target_env_tag) auto_vdb_cannot_parameter = cdb().query_db(variable_value_query_sql, ('auto_vdb_cannot_parameter',), True)[0] print('auto_vdb_parameter tag:', target_env_tag, params, auto_vdb_cannot_parameter, type(auto_vdb_cannot_parameter)) variable_value = json.loads(variable_value) variable_value.extend(json.loads(auto_vdb_cannot_parameter)) variable_value = json.dumps(variable_value) tag_name = cdb().query_db(variable_value_query_sql, 'tag_name', True)[0] params = params.replace( '${%s}' % word, variable_value).replace( target_env_tag, tag_name) print('auto_vdb_parameter:', tag_name, target_env_tag, params) except TypeError as e: print('auto_vdb_parameter error', e) continue elif word == 'auto_v2p_parameter': try: auto_v2p_cannot_parameter = cdb().query_db(variable_value_query_sql, ('auto_v2p_cannot_parameter',), True)[0] variable_value = json.loads(variable_value) 
variable_value.extend(json.loads(auto_v2p_cannot_parameter)) variable_value = json.dumps(variable_value) params = params.replace( '${%s}' % word, variable_value) print('auto_v2p_parameter:', params) except TypeError as e: print('auto_v2p_parameter error', e) continue params = params.replace('${%s}' % word, variable_value) if in_variables_num == 0: print('最后的解析结果:', params) return params