Example #1
    def launch(self):
        for concurrency in self.concurrencies:
            for url in self.urls:
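                # Build the ApacheBench (ab) command line: a fixed request count when
                # self.requests is set, otherwise a time-limited run capped at 500000 requests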
                if self.requests:
                    requests = max(self.requests, concurrency)
                    cmdline = "ab -k -c %d -n %d %s" % (concurrency, requests,
                                                        url)
                else:
                    cmdline = "ab -k -c %d -t %d -n 500000 %s" % (
                        concurrency, self.time, url)

                test = ptTest(url,
                              category="concurrency=%d" % concurrency,
                              group="Throughput",
                              metrics="req/sec",
                              errors=0,
                              loops=0,
                              cmdline=cmdline)

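                # Run ab for the configured number of iterations, aborting on failure and
                # feeding each successful run's output to the stdout parser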
                for i in range(0, self.iterations):
                    status, stdout, stderr = test.execute()
                    if status:
                        print(stderr, file=sys.stderr)
                        sys.exit(EXIT_AB_ERROR)
                    self.parse_ab_stdout(concurrency, test.cmdline, stdout,
                                         test)

                self.suite.addTest(test)
                self.suite.upload()
Example #2
    def _pt_update_results(self, page):
        if self.opts.pt_project is None or page is None:
            return

        basename = page.get_full_name()
        if basename is None:
            return

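        # Record two metrics per page: rendering duration (msec) and browser memory footprint (KB)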
        for s, m, f, g in (("Rendering", "msec", "dur", "Page rendering"),
                           ("Footprint", "KB", "ram_usage_kb",
                            "Browser memory footprint")):
            name = "%s [%s, %s]" % (basename, s, m)
            group = "%s (%s)" % (g, m)
            test = self.pt_suite.getTest(name, group=group)
            if test is None:
                test = ptTest(name,
                              less_better=True,
                              group=group,
                              metrics=m,
                              description=page.url)
                self.pt_suite.addTest(test)

            test.add_score(page.__dict__[f])
            test.duration_sec += int(math.ceil(page.dur))

        self._pt_upload()
Example #3
def main(suite):
    suite.addTest(
        ptTest("Simple benchmark",
               description="A simple benchmark output",
               metrics="loops/sec",
               scores=[random.randint(10, 20) / 10.0],
               loops=100))

    suite.upload()
Example #4
def main(suite):
    suite.addLink('Grafana', 'http://grafana.localdomain/')
    suite.addLink('Login page', 'http://192.168.100.3/login')

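    # Describe the test environment: four physical hosts running KVM VMs, plus the test client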
    s1 = suite.addNode(
        ptHost("s1",
               ip="192.168.0.1",
               hostname="server1.localdomain",
               version="CentOS 7.4",
               cpus=32,
               ram_gb=128))
    s2 = suite.addNode(
        ptHost("s2",
               ip="192.168.0.2",
               hostname="server2.localdomain",
               version="CentOS 7.4",
               cpus=32,
               ram_gb=128))
    s3 = suite.addNode(
        ptHost("s3",
               ip="192.168.0.3",
               hostname="server3.localdomain",
               version="CentOS 7.4",
               cpus=32,
               ram_gb=128))
    s4 = suite.addNode(
        ptHost("s4",
               ip="192.168.0.4",
               hostname="server4.localdomain",
               version="CentOS 7.4",
               cpus=16,
               ram_gb=64))

    vm1 = s1.addNode(
        ptVM("vm1",
             ip="192.168.100.1",
             version="CentOS 7.4",
             virt_type="KVM VM",
             cpus=4,
             ram_gb=32))
    vm2 = s1.addNode(
        ptVM("vm2",
             ip="192.168.100.2",
             version="CentOS 7.4",
             virt_type="KVM VM",
             cpus=4,
             ram_gb=32))
    vm3 = s2.addNode(
        ptVM("vm3",
             ip="192.168.100.3",
             version="CentOS 7.4",
             virt_type="KVM VM",
             cpus=8,
             ram_gb=64))
    vm4 = s3.addNode(
        ptVM("vm4",
             ip="192.168.100.4",
             version="CentOS 7.4",
             virt_type="KVM VM",
             cpus=8,
             ram_gb=64))

    suite.addNode(ptHost("client", params="Python3", scan_info=True))

    vm1.addNode(ptComponent("database", version="1.0.12"))
    vm2.addNode(ptComponent("backend", version="1.0.12"))
    vm3.addNode(ptComponent("UI#1", version="1.0.13"))
    vm4.addNode(ptComponent("UI#2", version="1.0.13"))

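    # Latency tests: two fixed login tests plus several randomly generated test series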
    g = "Latency tests"

    suite.addTest(
        ptTest(
            "Simple user login test",
            less_better=True,
            description=
            "Login under a user, 1 parallel client, time includes navigation to home page",
            group=g,
            metrics="sec",
            scores=[0.6, 0.72, 0.65 + random.randint(0, 10) / 10.0],
            deviations=[0.05, 0.12, 0.03],
            loops=100,
            links={"repo": "https://github.com/perfguru87/perftracker-client"},
            attribs={"version": str(__version__)}))
    suite.addTest(
        ptTest(
            "Simple admin login test",
            less_better=True,
            description="Login under admin, 1 parallel client",
            group=g,
            metrics="sec",
            scores=[0.8, 0.9, 1.2 + random.randint(0, 10) / 10.0],
            deviations=[0.03, 0.09, 0.08],
            loops=100,
            links={"repo": "https://github.com/perfguru87/perftracker-client"},
            attribs={"version": str(__version__)}))

    for p in range(1, 5 + random.randint(0, 2)):
        suite.addTest(
            ptTest("Login time",
                   group=g,
                   metrics="sec",
                   less_better=True,
                   category="%d parallel users" % (2**p),
                   scores=[0.3 + sqrt(p) + random.randint(0, 20) / 40.0]))

    for p in range(1, 20 + random.randint(0, 10)):
        suite.addTest(
            ptTest(
                "Pages response time, 1 parallel client",
                group=g,
                metrics="sec",
                less_better=True,
                category="page #%3d" % p,
                scores=[0.3 + random.randint(0, 20) / 40.0],
                errors=['4xx error'] if random.randint(0, 30) == 0 else [],
                warnings=['HTTP 500'] if random.randint(0, 20) == 0 else [],
                status="FAILED" if random.randint(0, 25) == 0 else "SUCCESS"))

    for p in range(1, 100 + random.randint(0, 100)):
        suite.addTest(
            ptTest(
                "Home page response time",
                group=g,
                metrics="sec",
                less_better=True,
                category="DB size %d GB" % p,
                scores=[0.3 + (sqrt(p) + random.randint(0, 20)) / 40],
                errors=['4xx error'] if random.randint(0, 30) == 0 else [],
                warnings=['HTTP 500'] if random.randint(0, 20) == 0 else [],
                status="FAILED" if random.randint(0, 25) == 0 else "SUCCESS"))

    for p in range(1, 100 + random.randint(0, 100)):
        suite.addTest(
            ptTest(
                "Dashboard page response time",
                group=g,
                metrics="sec",
                less_better=True,
                category="DB size %d GB" % p,
                scores=[0.8 + (sqrt(p) + random.randint(0, 20)) / 40],
                errors=['4xx error'] if random.randint(0, 30) == 0 else [],
                warnings=['HTTP 500'] if random.randint(0, 20) == 0 else [],
                status="FAILED" if random.randint(0, 25) == 0 else "SUCCESS"))

    suite.upload()

    g = "Throughput tests"

    for p in range(1, 5 + random.randint(0, 2)):
        suite.addTest(
            ptTest("Home page throughput",
                   group=g,
                   metrics="pages/sec",
                   category="%d parallel clients" % (2**p),
                   scores=[10 + sqrt(p) + random.randint(0, 20) / 5]))

    suite.upload()

    g = "Download throughput"

    for p in range(1, 5 + random.randint(0, 2)):
        suite.addTest(
            ptTest("Download throughput",
                   group=g,
                   metrics="Bytes/sec",
                   category="%d parallel clients" % (2**p),
                   scores=[
                       10000000000 + random.randint(10 * p, 20 * p) * 5000000
                   ]))

    suite.upload()

    g = "Upgrade"

    suite.addTest(
        ptTest("Upgrade with small database",
               group=g,
               metrics="seconds",
               scores=[124, 125, 123]))
    suite.addTest(
        ptTest("Upgrade with large database",
               group=g,
               metrics="seconds",
               scores=[344, 329, 351]))

    suite.upload()
Example #5
def main(suite):
    s1 = suite.addNode(
        ptHost("s1",
               ip="192.168.0.1",
               hostname="s1.mydomain",
               version="ESX 7.0",
               cpus=32,
               ram_gb=128))
    vm1 = s1.addNode(
        ptVM("account-server-vm",
             ip="192.168.100.1",
             version="CentOS 7.4",
             cpus=16,
             ram_gb=64))
    vm2 = s1.addNode(
        ptVM("test-client-vm",
             ip="192.168.100.2",
             version="CentOS 7.4",
             cpus=16,
             ram_gb=64))

    chunk = 50

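    # Sweep the database size from 0 to 1,000,000 accounts in 50,000-account steps; all scores below are fake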
    for accounts_in_db in range(0, 1050, chunk):

        volume = "%d accounts in DB" % (1000 * accounts_in_db)

        group = "Provisioning tests"

        suite.addTest(
            ptTest(
                "Create accounts in 1 thread",
                group=group,
                category=volume,
                metrics="accounts/sec",
                duration_sec=900,
                description="POST /accounts in 1 thread (create %d accounts)" %
                (chunk * 0.1),
                scores=[60 / sqrt(sqrt(sqrt(accounts_in_db + 100)))
                        ]))  # fake score

        suite.addTest(
            ptTest(
                "Create accounts in 16 threads",
                group=group,
                category=volume,
                metrics="accounts/sec",
                duration_sec=3200,
                description="POST /accounts in 16 threads (create %d accounts)"
                % (chunk * 0.9),
                scores=[600 / (1.1 * sqrt(accounts_in_db + 100))
                        ]))  # fake score

        group = "Read-only tests"

        suite.addTest(
            ptTest("GET information for some account in 1 thread",
                   group=group,
                   category=volume,
                   metrics="accounts/sec",
                   duration_sec=30,
                   description="GET /account/1 in 1 thread (30 sec)",
                   scores=[120 - sqrt(accounts_in_db + 10)]))  # fake score

        suite.addTest(
            ptTest("GET information for some account in 8 threads",
                   group=group,
                   category=volume,
                   metrics="accounts/sec",
                   duration_sec=30,
                   description="GET /account/1 in 8 threads (30 sec)",
                   scores=[430 - 2 * sqrt(accounts_in_db + 10)]))  # fake score

        suite.addTest(
            ptTest("GET information for some account in 64 threads",
                   group=group,
                   category=volume,
                   metrics="accounts/sec",
                   duration_sec=30,
                   description="GET /account/1 in 64 threads (30 sec)",
                   scores=[850 - 1.5 * sqrt(accounts_in_db + 10)
                           ]))  # fake score

        suite.addTest(
            ptTest("GET list of 10 first accounts in 1 thread",
                   group=group,
                   category=volume,
                   metrics="lists/sec",
                   duration_sec=30,
                   description="GET /accounts/?limit=10 in 1 thread (30 sec)",
                   scores=[95 - 0.3 * sqrt(accounts_in_db + 10)
                           ]))  # fake score

        suite.addTest(
            ptTest("GET list of 10 first accounts in 8 thread",
                   group=group,
                   category=volume,
                   metrics="lists/sec",
                   duration_sec=30,
                   description="GET /accounts/?limit=10 in 1 thread (30 sec)",
                   scores=[105 - 0.4 * sqrt(accounts_in_db + 10)
                           ]))  # fake score

        suite.addTest(
            ptTest("GET list of 10 first accounts in 64 thread",
                   group=group,
                   category=volume,
                   metrics="lists/sec",
                   duration_sec=30,
                   description="GET /accounts/?limit=10 in 64 thread (30 sec)",
                   scores=[115 - 0.3 * sqrt(accounts_in_db + 10)
                           ]))  # fake score

        group = "Modificating tests"

        suite.addTest(
            ptTest(
                "Update accounts in 1 thread",
                group=group,
                category=volume,
                metrics="accounts/sec",
                duration_sec=89,
                description="PUT /accounts in 1 thread (update 100 accounts)",
                scores=[110 - 0.2 * sqrt(accounts_in_db + 10)]))  # fake score

        suite.addTest(
            ptTest("Update accounts in 16 threads",
                   group=group,
                   category=volume,
                   metrics="accounts/sec",
                   duration_sec=970,
                   description=
                   "PUT /accounts in 16 threads (update 1600 accounts)",
                   scores=[130 - 0.3 * sqrt(accounts_in_db + 10)
                           ]))  # fake score

        suite.addTest(
            ptTest(
                "Delete accounts in 1 thread",
                group=group,
                category=volume,
                metrics="accounts/sec",
                duration_sec=520,
                description="PUT /accounts in 1 thread (update 100 accounts)",
                scores=[20 - 0.2 * sqrt(accounts_in_db + 10)]))  # fake score

        suite.addTest(
            ptTest("Delete accounts in 16 threads",
                   group=group,
                   category=volume,
                   metrics="accounts/sec",
                   duration_sec=540,
                   description=
                   "PUT /accounts in 16 threads (update 1600 accounts)",
                   scores=[320 - 0.1 * sqrt(accounts_in_db + 10)
                           ]))  # fake score

        suite.upload()
Example #6
def run(suite, as_json, from_file, cmdline):

    if from_file:
        with open(from_file, 'r') as f:
            out = f.read()
    else:
        status, out, err = execute(cmdline)
        if status:
            print("command line failed with status %d" % status)
            sys.exit(-1)

    out = out.strip()
    if isinstance(out, bytes):
        out = out.decode()

    def validate(test, line):
        if not test.tag:
            logging.debug("'tag' is not found: %s" % line)
            return False

        if len(test.scores) == 0:
            logging.warning("'scores' is not found: %s" % line)
            return False

        logging.info("parsed: %s" % repr(test))
        return True

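    # The input can be either a JSON array of test dictionaries or plain text lines of
    # ';'-separated 'key: value' pairs; hypothetical examples of both formats:
    #   JSON:  [{"tag": "cpu_benchmark", "score": 123.4, "duration_sec": 10.0, "loops": 1000, "metrics": "loops/sec"}]
    #   text:  tag: cpu_benchmark; score: 123.4; duration_sec: 10.0; loops: 1000; metrics: loops/sec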
    if as_json:
        data = json.loads(out)
        for d in data:
            try:
                t = ptTest(d['tag'],
                           scores=[float(d['score'])]
                           if d.get('score', None) else d['scores'],
                           deviations=[float(d['deviation'])] if d.get(
                               'deviation', None) else d.get('deviations', []),
                           duration_sec=float(d['duration_sec']),
                           loops=int(d['loops']),
                           metrics=d['metrics'],
                           cmdline=d.get('cmdline', ''),
                           less_better=_bool(d.get('less_better', False)),
                           errors=int(d.get('errors', 0)),
                           warnings=int(d.get('warnings', 0)),
                           group=d.get('group', ''),
                           category=d.get('category', ''),
                           description=d.get('description', ''))
            except TypeError as e:
                logging.error("can't parse: %s" % (str(d)))
                raise

            if validate(t, str(d)):
                suite.addTest(t)
    else:
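        # Plain-text mode: each line holds ';'-separated 'key: value' pairs matched by this regex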
        r = re.compile(r"(?P<tag>\w+):\s+(?P<val>.*)$")

        for line in out.split("\n"):
            test = ptTest("")

            for kv in line.split(";"):
                m = r.match(kv.strip())
                if not m:
                    continue
                tag = m.group('tag').lower()
                val = m.group('val')

                try:
                    if tag == 'score':
                        test.scores = [float(val)]
                    elif tag == 'deviation':
                        test.deviations = [float(val)]
                    elif tag in ('scores', 'deviations'):
                        test.__dict__[tag] = [
                            float(v) for v in json.loads(val)
                        ]
                    elif tag in ('duration_sec', ):
                        test.__dict__[tag] = float(val)
                    elif tag in ('loops', 'errors', 'warnings'):
                        test.__dict__[tag] = int(val)
                    elif tag in ('tag', 'metrics', 'cmdline', 'group',
                                 'category', 'description'):
                        test.__dict__[tag] = val
                    elif tag in ('less_better', ):
                        test.__dict__[tag] = _bool(val)
                except ValueError as e:
                    logging.error("error in line: %s" % line)
                    logging.error(str(e))
                    sys.exit(-1)

            if validate(test, line):
                suite.addTest(test)

    logging.info("%d tests found" % len(suite.tests))
    if not len(suite.tests):
        print("aborting")
        sys.exit(-1)

    suite.upload()