Example 1

cluster.nodetool("flush")

confirm = input("Has compaction finished? Input 'yes':")
while confirm != 'yes':
    confirm = input("Has compaction finished? Input 'yes':")
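
# An aside, not part of the original script: the manual prompt above could be
# replaced by polling `nodetool compactionstats` until it reports
# "pending tasks: 0". The helper `run_nodetool_capture` is hypothetical: it
# stands for "run nodetool on node `node_index` and return its stdout", which
# the cluster wrapper used here is not known to expose.
def wait_for_compactions(run_nodetool_capture, node_index, poll_seconds=30):
    import time
    while True:
        out = run_nodetool_capture(node_index, "compactionstats")
        if "pending tasks: 0" in out:
            return
        time.sleep(poll_seconds)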

print("Run started at:", datetime.now().strftime("%H:%M:%S"))

# Background load
background_load = cs.loop_stress(
    f'mixed ratio\\(write=1,read=1\\) duration=5m cl=QUORUM -pop dist=UNIFORM\\(1..{ROW_COUNT}\\) -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=700 throttle={BACKGROUND_LOAD_OPS // loadgenerator_count}/s" -node {cluster_string}'
)

iteration = Iteration(f'{profile_name}/replace-node', ignore_git=True)

# Turn off the node that is going to be replaced and erase its data.
cluster.stop(load_index=(start_count - 1), erase_data=True)

replace_node_start = datetime.now()

# Start a replacement node
if props['cluster_type'] == 'scylla':
    cluster = Scylla(new_node_public_ips, new_node_private_ips,
                     all_private_ips[0], props)
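    # replace_address_first_boot tells the new node, on its first boot only,
    # which dead node (identified by its private IP) it replaces, so it takes
    # over that node's tokens and streams its data.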
    cluster.append_configuration(
        f"replace_address_first_boot: {cluster_private_ips[-1]}")
    cluster.start()
else:
    cluster = Cassandra(new_node_public_ips, new_node_private_ips,
                        all_private_ips[0], props)
    cluster.start()
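
# The original fragment breaks off around here. Mirroring the repair and
# compaction examples, the run would presumably end by recording the elapsed
# time and appending it to the iteration's result.txt. The lines below are a
# sketch along those lines, not part of the original:
replace_node_end = datetime.now()

with open(f'{iteration.dir}/result.txt', 'a') as writer:
    writer.write(
        f'Replacing a node took (s): {(replace_node_end - replace_node_start).total_seconds()}\n'
    )
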
Example 2
ops = props["ops"]
profile = props["profile"]

# Load information about the created machines.
env = common.load_yaml('environment.yml')
cluster_private_ips = env['cluster_private_ips']
cluster_string = ",".join(cluster_private_ips)

# Start with a cleared Prometheus.
prometheus.clear(env, props)

# Restart the cluster to make sure Scylla starts fresh,
# e.g. with the memtable flushed.
scylla.restart_cluster(env['cluster_public_ips'], props['cluster_user'],
                       props['ssh_options'])

iteration = Iteration(props["benchmark_name"])
# Set up cassandra-stress
cs = CassandraStress(env['loadgenerator_public_ips'], props)
cs.install()
cs.prepare()
cs.upload(profile)
cs.stress(
    f'user profile=./{profile} "{ops}" duration={duration} -pop seq=1..{items} -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 maxPending=1024 -rate {rate} -node {cluster_string}'
)
cs.collect_results(iteration.dir, warmup_seconds=warmup_seconds)

prometheus.download_and_clear(env, props, iteration)

print("Call 'unprovision_terraform.py' to destroy the created infrastructure!")
Example 3
#!/bin/python3

import sys
from scyllaso import common
from scyllaso.common import Iteration
from datetime import datetime
from scyllaso import prometheus

print("Test started at:", datetime.now().strftime("%H:%M:%S"))

if len(sys.argv) < 2:
    raise Exception("Usage: ./download_prometheus.py [PROFILE_NAME]")

profile_name = sys.argv[1]

# Load properties
props = common.load_yaml(f'{profile_name}.yml')
env = common.load_yaml(f'environment_{profile_name}.yml')

iteration = Iteration(f'{profile_name}/prometheus-dump', ignore_git=True)

prometheus.download(env, props, iteration)
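
# Example invocation (the profile name "my-benchmark" is only illustrative):
#
#   ./download_prometheus.py my-benchmark
#
# This expects my-benchmark.yml and environment_my-benchmark.yml in the
# working directory.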
Example 4
cs.install()
cs.prepare()

print("Loading started at:", datetime.now().strftime("%H:%M:%S"))

cs.stress_seq_range(
    ROW_COUNT, 'write cl=QUORUM',
    f'-schema "replication(strategy=SimpleStrategy,replication_factor={REPLICATION_FACTOR})" "compaction(strategy={COMPACTION_STRATEGY})" -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=700 throttle=33000/s" -node {cluster_string}'
)

print("Sleeping 2h")
time.sleep(60 * 60 * 2)

print("Run started at:", datetime.now().strftime("%H:%M:%S"))

iteration = Iteration(f'{profile_name}/repair_no_errors', ignore_git=True)

repair_start = datetime.now()

print("Repairing node 0 started at:", datetime.now().strftime("%H:%M:%S"))
cluster.nodetool("repair -full", 0)
print("Repairing node 0 ended at:", datetime.now().strftime("%H:%M:%S"))

repair_end = datetime.now()

with open(f'{iteration.dir}/result.txt', 'a') as writer:
    writer.write(
        f'Repairing node 0 took (s): {(repair_end - repair_start).total_seconds()}\n'
    )

print("Run ended at:", datetime.now().strftime("%H:%M:%S"))
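
# An aside, not part of the original fragment: to repair every node rather than
# only node 0, the per-node loop from the compaction example can be reused.
# A sketch (cluster and datetime are assumed to be available as above):
def repair_all_nodes(cluster, node_count):
    # Run a full repair on each node in turn, printing start/end timestamps.
    for i in range(node_count):
        print("Repairing node", i, "started at:", datetime.now().strftime("%H:%M:%S"))
        cluster.nodetool("repair -full", i)
        print("Repairing node", i, "ended at:", datetime.now().strftime("%H:%M:%S"))
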
Example 5
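# nodetool flush forces the memtables to be written out to SSTables on disk,
# so the major compaction measured below covers all data loaded so far.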
cluster.nodetool("flush")

confirm = input("Has compaction finished? Input 'yes':")
while confirm != 'yes':
    confirm = input("Has compaction finished? Input 'yes':")

print("Run started at:", datetime.now().strftime("%H:%M:%S"))

# Background load
BACKGROUND_LOAD_OPS = props["background_total_load_ops"] // loadgenerator_count
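# The total background load is divided evenly over the load generators, so the
# per-generator fixed rate used below adds up to background_total_load_ops/s.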
background_load = cs.loop_stress(
    f'mixed ratio\\(write=1,read=1\\) duration=5m cl=QUORUM -pop dist=UNIFORM\\(1..{ROW_COUNT}\\) -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=700 fixed={BACKGROUND_LOAD_OPS}/s" -node {cluster_string}'
)

iteration = Iteration(f'{profile_name}/compact', ignore_git=True)

compact_start = datetime.now()

for i in range(len(cluster_public_ips)):
    print("Compacting node", i, "started at:",
          datetime.now().strftime("%H:%M:%S"))
    cluster.nodetool("compact", i)
    print("Compacting node", i, "ended at:",
          datetime.now().strftime("%H:%M:%S"))

compact_end = datetime.now()

with open(f'{iteration.dir}/result.txt', 'a') as writer:
    writer.write(
        f'Major compaction on all nodes took (s): {(compact_end - compact_start).total_seconds()}\n'
    )
Example 6
confirm = input("Has compaction finished? Input 'yes':")
while confirm != 'yes':
    confirm = input("Has compaction finished? Input 'yes':")

print("Run started at:", datetime.now().strftime("%H:%M:%S"))

# Background load
BACKGROUND_LOAD_OPS = props["background_total_load_ops"] // loadgenerator_count
background_load = cs.loop_stress(
    f'mixed ratio\\(write=1,read=1\\) duration=5m cl=QUORUM -pop dist=UNIFORM\\(1..{ROW_COUNT}\\) -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=700 fixed={BACKGROUND_LOAD_OPS}/s" -node {cluster_string}'
)

add_nodes_start = datetime.now()

iteration = Iteration(f'{profile_name}/add-node', ignore_git=True)

# Start Scylla/Cassandra nodes
if props['cluster_type'] == 'scylla':
    s = Scylla(new_node_public_ips, new_node_private_ips, all_private_ips[0],
               props)
    s.start()
else:
    cassandra = Cassandra(new_node_public_ips, new_node_private_ips,
                          all_private_ips[0], props)
    cassandra.start()

add_nodes_end = datetime.now()

print("Run ended at:", datetime.now().strftime("%H:%M:%S"))
print("Adding nodes took:", (add_nodes_end - add_nodes_start).total_seconds(),
      "seconds")
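
# An aside, not in the original fragment: the other examples also persist the
# measured duration in the iteration directory; a sketch of doing the same here:
with open(f'{iteration.dir}/result.txt', 'a') as writer:
    writer.write(
        f'Adding nodes took (s): {(add_nodes_end - add_nodes_start).total_seconds()}\n'
    )
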
Example 7
cs.stress_seq_range(ROW_COUNT, 'write cl=QUORUM', f'-schema "replication(strategy=SimpleStrategy,replication_factor={REPLICATION_FACTOR})" "compaction(strategy={COMPACTION_STRATEGY})" -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=700 throttle={THROTTLE}/s" -node {cluster_string}')

cluster.nodetool("flush")

# Warm-up cache: 35k read/s for 180 minutes
cs.stress(f'mixed ratio\\(write=0,read=1\\) duration=180m cl=QUORUM -pop dist=GAUSSIAN\\(1..{ROW_COUNT},{GAUSS_CENTER},{GAUSS_SIGMA}\\) -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=500 throttle=35000/s" -node {cluster_string}')

print("Run started at:", datetime.now().strftime("%H:%M:%S"))

rate = START_RATE

while True:
    print("Run iteration started at:", datetime.now().strftime("%H:%M:%S"))

    iteration = Iteration(f'{profile_name}/cassandra-stress-{rate}', ignore_git=True)

    cs.stress(f'mixed ratio\\(write={WRITE_COUNT},read={READ_COUNT}\\) duration={DURATION_MINUTES}m cl=QUORUM -pop dist=GAUSSIAN\\(1..{ROW_COUNT},{GAUSS_CENTER},{GAUSS_SIGMA}\\) -log hdrfile=profile.hdr -graph file=report.html title=benchmark revision=benchmark-0 -mode native cql3 -rate "threads=500 fixed={rate // loadgenerator_count}/s" -node {cluster_string}')

    cs.collect_results(iteration.dir)

    if WRITE_COUNT > 0:
        write_profile_summary = parse_profile_summary_file(f'{iteration.dir}/profile-summary.txt', 'WRITE')
        print('WRITE_PROFILE', write_profile_summary)

    if READ_COUNT > 0:
        read_profile_summary = parse_profile_summary_file(f'{iteration.dir}/profile-summary.txt', 'READ')
        print('READ_PROFILE', read_profile_summary)

    with open(f'{iteration.dir}/parsed_profile_summary_file.txt', 'a') as writer:
        if WRITE_COUNT > 0: