Example #1
    def stress_seq_range(self, row_count, command_part1, command_part2):
        load_ip_count = len(self.load_ips)
        row_count_per_ip = row_count // load_ip_count
        range_points = [1]
        for i in range(load_ip_count):
            range_points.append(range_points[-1] + row_count_per_ip)
        range_points[-1] = row_count

        population_commands = []
        # FIXME - cleanup
        for i in range(len(range_points) - 1):
            population_commands.append(
                f' n={range_points[i + 1] - range_points[i] + 1} -pop seq={range_points[i]}..{range_points[i + 1]} '
            )

        log(population_commands)

        log_important("Cassandra-Stress: started")
        run_parallel(
            self.__stress,
            [(ip, 10 if i > 0 else 0,
              command_part1 + pop_command + command_part2)
             for i, (ip, pop_command
                     ) in enumerate(zip(self.load_ips, population_commands))])
        log_important("Cassandra-Stress: done")
Example #2
 def run(self, command):
     log_important(
         f'Disk Explorer run: started [{datetime.now().strftime("%H:%M:%S")}]'
     )
     log(f"python3 diskplorer.py {command}")
     run_parallel(self.__run, [(ip, command) for ip in self.ips])
     log_important(
         f'Disk Explorer run: done [{datetime.now().strftime("%H:%M:%S")}]')
Example #3
 def stress(self, command, load_index=None):
     if load_index is None:
         log_important("Cassandra-Stress: started")
         run_parallel(self.__stress,
                      [(ip, 10 if i > 0 else 0, command)
                       for i, ip in enumerate(self.load_ips)])
         log_important("Cassandra-Stress: done")
     else:
         self.__stress(self.load_ips[load_index], 0, command)
Example #4
 def stress(self, command, load_index=None):
     if load_index is None:
         log_important("scylla-bench: started")
         run_parallel(self.__stress,
                      [(ip, command) for ip in self.load_ips])
         log_important("scylla-bench: done")
     else:
         log("using load_index " + str(load_index))
         self.__stress(self.load_ips[load_index], command)
Example #5
 def install(self):
     log_important("Installing Cassandra: started")
     if self.setup_raid:
         log_important("Installing Cassandra: setting up RAID")
         raid = RAID(self.cluster_public_ips, self.ssh_user, '/dev/nvme*n1',
                     'cassandra-raid', 0, self.properties)
         raid.install()
         log_important("Installing Cassandra: finished setting up RAID")
     run_parallel(self.__install,
                  [(ip, ) for ip in self.cluster_public_ips])
     log_important("Installing Cassandra: done")
Example #6
 def nodetool(self, command, load_index=None):
     if load_index is None:
         run_parallel(self.nodetool,
                      [(command, i)
                       for i in range(len(self.cluster_private_ips))])
     else:
         path_prefix = 'cassandra-raid/' if self.setup_raid else './'
         ssh = self.__new_ssh(self.cluster_public_ips[load_index])
         ssh.exec(
             f"{path_prefix}apache-cassandra-{self.cassandra_version}/bin/nodetool {command}"
         )
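The nodetool helper in Example #6 fans out by re-submitting itself to run_parallel once per node when no load_index is given (Example #10 below follows the same pattern without the RAID path prefix). A hypothetical call site, where `cluster` is assumed to be an instance of this class:

cluster.nodetool("flush")                 # run `nodetool flush` on every node in parallel
cluster.nodetool("flush", load_index=0)   # run it on the first node only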
Example #7
    def stop(self, load_index=None, erase_data=False):
        if load_index is None:
            log_important("Stop Cassandra: started")
            run_parallel(self.__stop,
                         [(ip, ) for ip in self.cluster_public_ips])
            log_important("Stop Cassandra: done")
        else:
            self.__stop(self.cluster_public_ips[load_index])

            if erase_data:
                ssh = self.__new_ssh(self.cluster_public_ips[load_index])
                path_prefix = 'cassandra-raid/' if self.setup_raid else './'
                ssh.exec(
                    f"rm -rf {path_prefix}apache-cassandra-{self.cassandra_version}/data"
                )
Example #8
    def collect_results(self, dir, warmup_seconds=None, cooldown_seconds=None):
        """
        Parameters
        ----------
        dir: str
            The download directory.
        """

        log_important(f"Collecting results: started")
        run_parallel(self.__collect, [(ip, dir) for ip in self.load_ips])

        p = HdrLogProcessor(self.properties,
                            warmup_seconds=warmup_seconds,
                            cooldown_seconds=cooldown_seconds)
        p.process(dir)

        log_important(f"Collecting results: done")
        log(f"Results can be found in [{dir}]")
Example #9
    def collect_results(self, dir, warmup_seconds=None, cooldown_seconds=None):
        """
        Parameters
        ----------
        dir: str
            The download directory.
        warmup_seconds : str
            The warmup period in seconds. If the value is set, additional files will 
            be created where the warmup period is trimmed.
        cooldown_seconds : str
            The cooldown period in seconds. If the value is set, additional files will 
            be created where the cooldown period is trimmed.            
        """

        log_important(f"Collecting results: started")
        run_parallel(self.__collect, [(ip, dir) for ip in self.load_ips])
        p = HdrLogProcessor(self.properties,
                            warmup_seconds=warmup_seconds,
                            cooldown_seconds=cooldown_seconds)
        p.process(dir)
        log_important(f"Collecting results: done")
        log(f"Results can be found in [{dir}]")
Example #10
 def nodetool(self, command, load_index=None):
     if load_index is None:
         run_parallel(self.nodetool, [(command, i) for i in range(len(self.cluster_private_ips))])
     else:
         ssh = self.__new_ssh(self.cluster_public_ips[load_index])
         ssh.exec(f"nodetool {command}")
Example #11
 def install(self):
     log_important("Installing Cassandra-Stress: started")
     run_parallel(self.__install, [(ip, ) for ip in self.load_ips])
     log_important("Installing Cassandra-Stress: done")
Example #12
 def prepare(self, kill_java=True):
     log_important(f"Preparing load generator: started")
     run_parallel(self.__prepare, [(ip, kill_java) for ip in self.load_ips])
     log_important(f"Preparing load generator: done")
Example #13
 def ssh(self, command):
     run_parallel(self.__ssh, [(ip, command) for ip in self.load_ips])
Example #14
 def upload(self, file):
     log_important(f"Upload: started")
     run_parallel(self.__upload, [(ip, file) for ip in self.load_ips])
     log_important(f"Upload: done")
Example #15
 def install(self):
     log_important("Installing Scylla: started")
     run_parallel(self.__install, [(ip,) for ip in self.cluster_public_ips])
     log_important("Installing Scylla: done")
Example #16
def collect_ec2_metadata(ips, ssh_user, ssh_options, dir):
    run_parallel(__collect_ec2_metadata,
                 [(ip, ssh_user, ssh_options, dir) for ip in ips])
Example #17
 def install(self):
     log_important("Installing scylla_bench: started")
     run_parallel(self.__install, [(ip, ) for ip in self.load_ips])
     log_important("Installing scylla_bench: done")
Example #18
 def install(self):
     log_important(f"fio Installation: started")
     run_parallel(self.__install, [(ip,) for ip in self.ips])
     log_important(f"fio Installation: done")
Example #19
def download(self, dir):
    log_important("Disk Explorer Download: started")
    run_parallel(self.__download, [(ip, dir) for ip in self.ips])
    log_important("Disk Explorer Download: done")
    log(f"Results can be found in [{dir}]")
Example #20
 def run(self, options):
     log_important(f"fio run: started")
     log(f"sudo fio {options}")
     run_parallel(self.__run, [(ip, options) for ip in self.ips])
     log_important(f"fio run: done")
Example #21
 def download(self, dir):
     log_important(f"fio download: started")
     run_parallel(self.__download, [(ip, dir) for ip in self.ips])
     log_important(f"fio download: done")