Example #1
import os
from common import command, json, pod, cpu_list

# ----------------------------------------------------------------------------------------------------
###
# Params which can be modified by exporting environment variables.
###

# Port that the stressed application listens on.
communication_port = int(os.environ.get('communication_port') or 11211)

# Number of twemcache worker threads.
worker_threads = int(os.environ.get('worker_threads') or 1)
# Maximum memory for twemcache slabs, in MB.
max_memory = int(os.environ.get('max_memory') or 1024)

# ----------------------------------------------------------------------------------------------------

cmd = """taskset -c {cpu_list} twemcache --prealloc --hash-power=20 --max-memory={max_memory} \
--port={communication_port} --eviction-strategy=2 --verbosity=4 \
--threads={worker_threads} --backlog=128 -u twemcache \
--maximize-core-limit --slab-size=1048576 """.format(
    cpu_list=cpu_list,
    communication_port=communication_port,
    worker_threads=worker_threads,
    max_memory=max_memory)
command.append(cmd)

json_format = json.dumps(pod)
print(json_format)
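All seven examples import their shared scaffolding from a `common` module that is not shown on this page. A minimal stand-in, assuming only the names the snippets actually use, could look like the sketch below; the real module in the source project builds a richer pod template, so every name and default here is a guess.

# Hypothetical minimal stand-in for the shared `common` module.
import json  # re-exported so the examples can call json.dumps(pod)
import os

# CPUs handed to `taskset -c` in every workload command (guessed default).
cpu_list = os.environ.get('cpu_list', '0-3')

# Each example appends its fully rendered workload command to this list.
command = ['sh', '-c']

# Skeleton pod manifest that the examples mutate and finally print.
pod = {
    'apiVersion': 'v1',
    'kind': 'Pod',
    'metadata': {'name': os.environ.get('pod_name', 'workload')},
    'spec': {'containers': [{'name': 'workload', 'command': command}]},
}

# Telemetry settings imported by the wrapper-based examples (placeholders).
wrapper_kafka_brokers = os.environ.get('kafka_brokers', 'localhost:9092')
wrapper_kafka_topic = os.environ.get('kafka_topic', 'workload_metrics')
wrapper_log_level = os.environ.get('log_level', 'info')
wrapper_labels = {}
slo = int(os.environ.get('slo') or 150)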
Example #2
                   "sed -i \"s/port 6379/port {communication_port}/\" \
                   redis.conf && "
                   "sed -i \"s/bind 127.0.0.1/bind {application_host_ip}/\" \
                   redis.conf ".format(communication_port=communication_port,
                                       application_host_ip=application_host_ip
                                       ))]

volume_prep_config = {
    "name": "shared-data",
    "mountPath": "/prep_config"
}

initContainer = {
    "name": "prep-config",
    "image": image_name,
    "securityContext": securityContext,
    "command": cmdline_config,
    "volumeMounts": [
        volume_prep_config
    ]
}
initContainers.append(initContainer)

volumeMounts.append(volume_prep_config)

command.append("taskset -c {} redis-server /prep_config/redis.conf".format(cpu_list))

json_format = json.dumps(pod)
print(json_format)
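This example assumes `initContainers`, `volumeMounts`, and `securityContext` already exist in scope, presumably created in `common`. The sketch below shows one way the pieces could be wired into the pod dict using the stock Kubernetes pod schema; the actual wiring in the source is not visible here.

# Hypothetical wiring for the init-container pattern above: a shared
# emptyDir volume lets the prep container write redis.conf where the main
# container reads it from /prep_config.
pod['spec']['initContainers'] = initContainers
pod['spec']['containers'][0]['volumeMounts'] = volumeMounts
pod['spec']['volumes'] = [{'name': 'shared-data', 'emptyDir': {}}]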
Example #3

# Params which can be modified by exporting environment variables.
timeout = int(os.environ.get('timeout') or 30)
stressor = os.environ.get('stressor') or 'stream'
number_workers = int(os.environ.get('number_workers') or 1)
# ----------------------------------------------------------------------------------------------------

stress_ng_cmd = ('"while true; do taskset -c {} stress-ng --{}={} --timeout={}'
                 ' --metrics --metrics-brief -Y /dev/stdout;done"').format(
                    cpu_list, stressor, number_workers, timeout)

stress_ng_run_cmd = """/usr/bin/stress_ng_wrapper.pex --command '{stress_ng_cmd}' \
--metric_name_prefix {metric_name_prefix} \
--stderr 1 --kafka_brokers '{kafka_brokers}' --kafka_topic {kafka_topic} \
--log_level {log_level} \
--subprocess_shell \
--labels '{labels}' \
--slo {slo} --sli_metric_name '{sli_metric_name}'""".format(
    stress_ng_cmd=stress_ng_cmd,
    metric_name_prefix=application + "_",
    kafka_brokers=wrapper_kafka_brokers,
    kafka_topic=wrapper_kafka_topic,
    log_level=wrapper_log_level,
    labels=json.dumps(wrapper_labels),
    slo=slo,
    sli_metric_name='stress_ng_bogo_ops_per_second_usr_sys_time')

command.append(stress_ng_run_cmd)

json_format = json.dumps(pod)
print(json_format)
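To see what the inner loop expands to, here is a quick check of the command template with stand-in values (`cpu_list` normally comes from `common`):

# Sanity check of the stress-ng command template. The rendered string keeps
# its surrounding double quotes because the wrapper runs it through a shell
# (note the --subprocess_shell flag above).
template = ('"while true; do taskset -c {} stress-ng --{}={} --timeout={}'
            ' --metrics --metrics-brief -Y /dev/stdout;done"')
rendered = template.format('0-3', 'stream', 1, 30)
assert rendered == ('"while true; do taskset -c 0-3 stress-ng --stream=1'
                    ' --timeout=30 --metrics --metrics-brief'
                    ' -Y /dev/stdout;done"')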
Example #4

    java -Xms4g -Xmx4g -Xmn2g -XX:-UseBiasedLocking -XX:+UseParallelOldGC \
    -jar {jar} -m backend -p {config} -G GRP1 -J JVM_A"""\
    .format(cpu_list=cpu_list, jar=specjbb_jar, config=config_path)

volume_prep_config = {
    "name": "shared-data",
    "mountPath": "/prep_config"
}

prepare_config_container = {
    "name": "prep-config",
    "image": image_name + ":" + image_tag,
    "securityContext": securityContext,
    "command": config_create_cmd,
    "volumeMounts": [
        volume_prep_config
    ]
}
initContainers.append(prepare_config_container)
volumeMounts.append(volume_prep_config)

if component_type == 'controller':
    command.append(controller_cmd)
elif component_type == 'injector':
    command.append(injector_cmd)
elif component_type == 'backend':
    command.append(backend_cmd)

json_format = json.dumps(pod)
print(json_format)
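The if/elif chain dispatches on `component_type`, matching SPECjbb's three roles. An equivalent dict lookup, shown below as a style alternative rather than code from the source, also fails loudly on an unknown type instead of silently appending nothing:

# Equivalent dispatch via dict lookup; raises KeyError on an unknown
# component_type where the if/elif chain above would do nothing.
specjbb_cmds = {
    'controller': controller_cmd,
    'injector': injector_cmd,
    'backend': backend_cmd,
}
command.append(specjbb_cmds[component_type])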
Example #5


from common import command, json, pod, \
    wrapper_kafka_topic, wrapper_kafka_brokers, wrapper_log_level, \
    wrapper_labels, slo, cpu_list

command.append(
    "taskset -c {cpu_list} /tensorflow_benchmark_training_wrapper.pex --command '/usr/bin/python3.5"
    " -u /root/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py "
    "--datasets_use_prefetch=True --batch_group_size=1 --device=cpu "
    "--data_format=NHWC --data_name=cifar10 --batch_size=64 --display_every=1 "
    "--model=resnet56 --train_dir=/saved_model/ --num_epochs=100 "
    "--num_intra_threads=10 --num_inter_threads=10' "
    "--metric_name_prefix 'tensorflow_benchmark_' "
    "--stderr 0 --kafka_brokers '{kafka_brokers}' --kafka_topic {kafka_topic} "
    "--log_level {log_level} "
    "--slo {slo} --sli_metric_name tensorflow_benchmark_training_speed "
    "--inverse_sli_metric_value "
    "--peak_load 1 --load_metric_name const "
    "--labels '{labels}'".format(
                cpu_list=cpu_list,
                kafka_brokers=wrapper_kafka_brokers,
                log_level=wrapper_log_level,
                kafka_topic=wrapper_kafka_topic,
                labels=json.dumps(wrapper_labels), slo=slo))


json_format = json.dumps(pod)
print(json_format)
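Every example ends by printing the pod manifest as JSON, so the generators compose with standard tooling. One plausible way to consume the output (the script name below is a stand-in, not from the source):

# Render the manifest with a generator script and hand it to kubectl,
# which reads the JSON from stdin via `-f -`.
import subprocess

manifest = subprocess.check_output(['python3', 'tensorflow_training.py'])
subprocess.run(['kubectl', 'apply', '-f', '-'], input=manifest, check=True)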
Example #6
        application_host_ip, communication_port, qps, qps, time, threads,
        connections)
else:
    mutilate_cmd = """ \"while true; do /mutilate/mutilate -s {}:{} \
    -Q {} --time={} --update=0.01 --threads={} -c {}; done\" """.format(
        application_host_ip, communication_port, qps, time, threads,
        connections)

mutilate_run_cmd = """/usr/bin/mutilate_wrapper.pex --command '{mutilate_cmd}' \
--metric_name_prefix {metric_name_prefix} \
--stderr 0 --kafka_brokers '{kafka_brokers}' --kafka_topic {kafka_topic} \
--log_level {log_level} \
--slo {slo} --sli_metric_name {application}_read_p{sli_percentile} \
--peak_load {peak_load} --load_metric_name {application}_qps \
--subprocess_shell \
--labels '{labels}'""".format(mutilate_cmd=mutilate_cmd,
                              application=application,
                              metric_name_prefix=application + "_",
                              kafka_brokers=wrapper_kafka_brokers,
                              kafka_topic=wrapper_kafka_topic,
                              log_level=wrapper_log_level,
                              labels=json.dumps(wrapper_labels),
                              slo=str(slo),
                              peak_load=qps,
                              sli_percentile=sli_percentile)

command.append(mutilate_run_cmd)

json_format = json.dumps(pod)
print(json_format)
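This fragment starts mid-file; judging by the other examples, its knobs are read from environment variables. A reconstruction in that style, with guessed defaults, since the real ones sit in the truncated part of the source:

# Guessed env-driven parameters for the mutilate load generator, mirroring
# the twemcache and stress-ng examples; defaults here are illustrative only.
import os

qps = int(os.environ.get('qps') or 10000)               # target requests/s
time = int(os.environ.get('time') or 90)                # seconds per run
threads = int(os.environ.get('threads') or 1)           # client threads
connections = int(os.environ.get('connections') or 1)   # conns per thread
sli_percentile = os.environ.get('sli_percentile') or '99'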
Example #7
                        -p workload.period={ycsb_period} \
                        -p workload.amplitude={ycsb_amplitude} \
                        -p workload.phase=0 \
                        -p operationcount={ycsb_operation_count}" \
                --metric_name_prefix 'cassandra_' \
                --stderr 1 --kafka_brokers "{kafka_brokers}" \
                --kafka_topic {kafka_topic} \
                --log_level {log_level} \
                --labels '{labels}' \
                --peak_load {peak_load} \
                --load_metric_name "cassandra_ops_per_sec" \
                --slo {slo} --sli_metric_name "cassandra_read_p9999"
          """.format(application_host_ip=application_host_ip,
                     communication_port=communication_port,
                     ycsb_target=ycsb_target,
                     ycsb_thread_count=ycsb_thread_count,
                     ycsb_period=ycsb_period,
                     ycsb_amplitude=ycsb_amplitude,
                     ycsb_operation_count=ycsb_operation_count,
                     kafka_brokers=wrapper_kafka_brokers,
                     kafka_topic=wrapper_kafka_topic,
                     log_level=wrapper_log_level,
                     labels=json.dumps(wrapper_labels),
                     peak_load=str(int(ycsb_target) + int(ycsb_amplitude)),
                     slo=slo)

command.append(ycsb_cassandra_run_cmd)

json_format = json.dumps(pod)
print(json_format)
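A note on the `peak_load` arithmetic above: the `workload.period`, `workload.amplitude`, and `workload.phase` parameters suggest a sinusoidally varying request rate centered on `ycsb_target`, so the highest rate the SLO wrapper should expect is target plus amplitude. A quick check of the expression:

# peak_load = target + amplitude: a sine-modulated rate oscillates within
# [target - amplitude, target + amplitude]. Values are illustrative only.
ycsb_target, ycsb_amplitude = '10000', '2000'
peak_load = str(int(ycsb_target) + int(ycsb_amplitude))
assert peak_load == '12000'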