import os
import shutil

# MyLogger, _run_test and meet_sla_req are assumed to be imported from the
# surrounding cloudscale.distributed_jmeter package (see the script in Example #3).

def run_test(argv):
    infrastructure = argv[1]
    config_path = argv[2]
    scenario_path = argv[3]
    logger = MyLogger("distributed_jmeter.log")

    results_path = _run_test.run_test(
        infrastructure, config_path, scenario_path,
        "%s/results" % os.path.abspath(os.path.dirname(__file__)), logger)

    # Check the response times against the SLO and record any violations.
    with open("%s/SLO_violations" % results_path, "w") as fp:
        output = meet_sla_req.check("%s/response-times-over-time.csv" % results_path)
        fp.write(output)

    # Keep a copy of the inputs next to the results for reproducibility.
    shutil.copyfile(config_path, '%s/config.ini' % results_path)
    shutil.copyfile(scenario_path, '%s/scenario.jmx' % results_path)

    return results_path
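
This function mirrors the command-line script in Example #3, so it can be driven directly with an argv-style list. A minimal usage sketch (the file names are placeholders):

    results = run_test(["run.py", "aws", "config.ini", "scenario.jmx"])
    print("See results in %s" % results)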
Example #3
#!/usr/bin/python

import sys
import os
from cloudscale.distributed_jmeter import run_test
from cloudscale.distributed_jmeter.logger import Logger
from cloudscale.distributed_jmeter.scripts import meet_sla_req

if __name__ == "__main__":

    # Three arguments are required: infrastructure, config path, scenario path.
    if len(sys.argv) >= 4:
        infrastructure = sys.argv[1]
        config_path = sys.argv[2]
        scenario_path = sys.argv[3]
        logger = Logger("distributed_jmeter.log")

        results_path = run_test.run_test(
            infrastructure, config_path, scenario_path,
            "%s/results" % os.path.abspath(os.path.dirname(__file__)), logger)

        with open("%s/SLO_violations" % results_path, "w") as fp:
            output = meet_sla_req.check("%s/response-times-over-time.csv" % results_path)
            fp.write(output)

        print("See results in %s" % results_path)
    else:
        print("""Usage: python run.py <aws|openstack> <path_to_config> <path_to_scenario>""")
    def run_masters(self, ips):
        start_time = datetime.datetime.utcnow()
        import uuid
        # Collect per-instance results under a unique temporary directory.
        tmp_userpath = "/tmp/{0}".format(uuid.uuid4())

        if not os.path.exists(tmp_userpath):
            os.makedirs(tmp_userpath, 0o777)

        self.logger.log(self.output_directory)

        for ip in ips:
            self.logger.log("Running JMeter on instance %s" % ip)
            ssh = self.ssh_to_instance(ip)
            cmd = "(~/jmeter/bin/jmeter -n -t ~/scenario.jmx -l scenario.jtl -j scenario.log -Jall_threads=%s -Jstartup_threads=%s -Jrest_threads=%s -Jhost=%s;touch finish)" % (
                int(self.startup_threads)+int(self.rest_threads),
                self.startup_threads,
                self.rest_threads,
                self.host
            )
            self.logger.log(cmd)
            self.logger.log("Executing your JMeter scenario. This can take a while. Please wait ...")
            ssh.exec_command(cmd)
            ssh.close()

        i = 1
        threads = []
        for ip in ips:
            self.logger.log("Starting thread for %s" % ip)
            t = Thread(target=self.check_instance, args=(i, tmp_userpath, self.output_directory, ip))
            t.start()
            threads.append(t)
            i += 1

        for t in threads:
            t.join()

        self.terminate_instances(ips)
        end_time = datetime.datetime.utcnow()

        resultspath = self.output_directory

        cmd = "cp -r {0}/./ {1}/".format(tmp_userpath, resultspath)
        self.logger.log(cmd)
        p = subprocess.check_output(cmd.split())

        shutil.rmtree(tmp_userpath, ignore_errors=True)

        filenames = ["{0}/scenario{1}.log".format(resultspath, j) for j in xrange(1, i)]
        self.logger.log(filenames)
        with open("{0}/scenario.log".format(resultspath), 'w') as outfile:
            for fname in filenames:
                with open(fname) as infile:
                    for line in infile:
                        outfile.write(line)

        cmd = "rm -rf %s" % " ".join(filenames)
        subprocess.call(cmd.split())
        cmd = "rm -rf %s/*.jtl" % resultspath
        subprocess.call(cmd.split())

        filenames = ["{0}/response-times-over-time{1}.csv".format(resultspath, j) for j in xrange(1, i)]
        self.logger.log(filenames)
        with open("{0}/response-times-over-time.csv".format(resultspath), 'w') as outfile:
            for fname in filenames:
                with open(fname) as infile:
                    for line in infile:
                        outfile.write(line)

        cmd = "rm -rf %s" % " ".join(filenames)
        subprocess.call(cmd.split())

        instances = self.get_instances_by_tag('Name', self.frontend_instances_identifier)
        instance_ids = [instance.id for instance in instances]
        rds_instance_ids = self.rds_identifiers
        ec2_data = self.get_cloudwatch_ec2_data(start_time, end_time, instance_ids)
        rds_data = self.get_cloudwatch_rds_data(start_time, end_time, rds_instance_ids)

        filename = "{0}/ec2-cpu.csv".format(resultspath)
        with open(filename, 'w') as fp:
            fp.write("instance_id,timestamp,average\n")
            for row in ec2_data:
                for data in row.get('data'):
                    fp.write("%s,%s,%s\n" % (row.get('instance_id'), self.unix_time(data['Timestamp']), data['Average']))

        filename = "{0}/rds-cpu.csv".format(resultspath)
        with open(filename, 'w') as fp:
            fp.write("instance_id,timestamp,average\n")
            for row in rds_data:
                for data in row.get('data'):
                    fp.write("%s,%s,%s\n" % (row.get('instance_id'), self.unix_time(data['Timestamp']), data['Average']))

        #if self.is_autoscalable:
            #activities = self.get_autoscalability_data(start_time, end_time)
            #self.write_autoscalability_data(resultspath, activities)
        #else:
            #self.write_autoscalability_data(resultspath, [])

        slo_output = check("{0}/response-times-over-time.csv".format(resultspath))
        self.logger.log("<br>".join(slo_output).split('\n'))
        self.logger.log("Visualizing....")
        v = Visualize(self.num_threads, self.scenario_duration, self.r_path,
                      "{0}/response-times-over-time.csv".format(resultspath),
                      "{0}/autoscalability.log".format(resultspath))
        v.save()

        self.logger.log("finished!", fin=True)
        with open("{0}/finish".format(resultspath), "w") as fp:
            fp.write("finish")
Example #5
    def run_masters(self, ips):
        start_time = datetime.datetime.utcnow()

        tmp_userpath = "/tmp/{0}".format(
            os.path.basename(self.scenario_path)[:-4])

        if not os.path.exists(tmp_userpath):
            os.makedirs(tmp_userpath, 0777)

        self.logger.log(self.output_directory)

        for ip in ips:
            self.logger.log("Running JMeter on instance %s" % ip)
            ssh = self.ssh_to_instance(ip)
            cmd = "(~/jmeter/bin/jmeter -n -t ~/scenario.jmx -l scenario.jtl -j scenario.log -Jall_threads=%s -Jstartup_threads=%s -Jrest_threads=%s -Jhost=%s;touch finish)" % (
                int(self.startup_threads) + int(self.rest_threads),
                self.startup_threads, self.rest_threads, self.host)
            self.logger.log(cmd)
            self.logger.log(
                "Executing your JMeter scenario. This can take a while. Please wait ..."
            )
            ssh.exec_command(cmd)
            ssh.close()

        i = 1
        threads = []
        for ip in ips:
            self.logger.log("Starting thread for %s" % ip)
            t = Thread(target=self.check_instance,
                       args=(i, tmp_userpath, self.output_directory, ip))
            t.start()
            threads.append(t)
            i += 1

        for t in threads:
            t.join()

        self.terminate_instances(ips)
        end_time = datetime.datetime.utcnow()

        instances = self.get_instances_by_tag(
            'Name', self.frontend_instances_identifier)
        instance_ids = [instance.id for instance in instances]
        rds_instance_ids = self.rds_identifiers
        ec2_data = self.get_cloudwatch_ec2_data(start_time, end_time,
                                                instance_ids)
        rds_data = self.get_cloudwatch_rds_data(start_time, end_time,
                                                rds_instance_ids)

        resultspath = self.output_directory

        cmd = "cp -r {0}/./ {1}/".format(tmp_userpath, resultspath)
        self.logger.log(cmd)
        p = subprocess.check_output(cmd.split())

        shutil.rmtree(tmp_userpath, ignore_errors=True)

        filenames = [
            "{0}/scenario{1}.log".format(resultspath, j) for j in range(1, i)
        ]
        self.logger.log(filenames)
        with open("{0}/scenario.log".format(resultspath), 'w') as outfile:
            for fname in filenames:
                with open(fname) as infile:
                    for line in infile:
                        outfile.write(line)

        cmd = "rm -rf %s" % " ".join(filenames)
        subprocess.call(cmd.split())
        cmd = "rm -rf %s/*.jtl" % resultspath
        subprocess.call(cmd.split())

        filenames = [
            "{0}/response-times-over-time{1}.csv".format(resultspath, j)
            for j in range(1, i)
        ]
        self.logger.log(filenames)
        with open("{0}/response-times-over-time.csv".format(resultspath),
                  'w') as outfile:
            for fname in filenames:
                with open(fname) as infile:
                    for line in infile:
                        outfile.write(line)

        cmd = "rm -rf %s" % " ".join(filenames)
        subprocess.call(cmd.split())

        filename = "{0}/ec2-cpu.csv".format(resultspath)
        with open(filename, 'w') as fp:
            fp.write("instance_id,timestamp,average\n")
            for row in ec2_data:
                for data in row.get('data'):
                    fp.write(
                        "%s,%s,%s\n" %
                        (row.get('instance_id'),
                         self.unix_time(data['Timestamp']), data['Average']))

        filename = "{0}/rds-cpu.csv".format(resultspath)
        with open(filename, 'w') as fp:
            fp.write("instance_id,timestamp,average\n")
            for row in rds_data:
                for data in row.get('data'):
                    fp.write(
                        "%s,%s,%s\n" %
                        (row.get('instance_id'),
                         self.unix_time(data['Timestamp']), data['Average']))

        if self.is_autoscalable:
            activities = self.get_autoscalability_data(start_time, end_time)
            self.write_autoscalability_data(resultspath, activities)
        else:
            self.write_autoscalability_data(resultspath, [])

        slo_output = check(
            "{0}/response-times-over-time.csv".format(resultspath))
        self.logger.log("<br>".join(slo_output).split('\n'))
        self.logger.log("Visualizing....")
        v = Visualize(self.num_threads, int(self.scenario_duration),
                      self.r_path,
                      "{0}/response-times-over-time.csv".format(resultspath),
                      "{0}/autoscalability.log".format(resultspath))
        v.save()

        self.logger.log("finished!", fin=True)
        with open("{0}/finish".format(resultspath), "w") as fp:
            fp.write("finish")