Пример #1
0
    def execute(self):
        """Run the load test, streaming results to a file chosen via env vars.

        SLAVES_LDJSON selects line-delimited JSON output of slave reports;
        JTL selects CSV output of per-request records. Exactly one of the
        two environment variables must be set.

        Raises:
            ValueError: if neither JTL nor SLAVES_LDJSON is set.
        """
        if os.getenv("SLAVES_LDJSON"):
            fname = os.getenv("SLAVES_LDJSON")
            is_csv = False
        elif os.getenv("JTL"):
            fname = os.getenv("JTL")
            is_csv = True
        else:
            raise ValueError("Please specify JTL or SLAVES_LDJSON environment variable")

        # The handle is bound to self.fhd so the event callbacks registered
        # below can write to it while main.main() runs.
        with open(fname, 'wt') as self.fhd:
            if is_csv:
                # Probe __getrec with empty args to discover the CSV columns.
                fieldnames = list(self.__getrec(None, None, None, None).keys())
                dialect = guess_csv_dialect(",".join(fieldnames))
                self.writer = csv.DictWriter(self.fhd, fieldnames=fieldnames, dialect=dialect)
                self.writer.writeheader()
                self.fhd.flush()
            else:
                self.writer = None  # FIXME: bad code design, have zero object for it

            # Register our handlers on the Locust event hooks before starting.
            events.request_success += self.__on_request_success
            events.request_failure += self.__on_request_failure
            events.locust_error += self.__on_exception
            events.slave_report += self.__on_slave_report
            events.quitting += self.__on_quit

            # Blocks until the Locust run finishes.
            main.main()
            self.fhd.flush()
Пример #2
0
def execute():
    """Run Locust, streaming results to the file named by an env var.

    JTL selects CSV output of per-request records; SLAVES_LDJSON selects
    line-delimited JSON dumps of slave reports. Exactly one must be set.

    Raises:
        ValueError: when neither environment variable is set.
    """
    ldjson_path = os.getenv("SLAVES_LDJSON")
    jtl_path = os.getenv("JTL")
    if ldjson_path:
        fname, is_csv = ldjson_path, False
    elif jtl_path:
        fname, is_csv = jtl_path, True
    else:
        raise ValueError("Please specify JTL or SLAVES_LDJSON environment variable")

    with open(fname, "wt") as fhd:
        if is_csv:
            # Probe getrec with empty args to discover the CSV field names.
            writer = csv.DictWriter(fhd, getrec(None, None, None, None).keys())
            writer.writeheader()
            fhd.flush()
        else:
            writer = None  # FIXME: bad code design, have zero object for it

        # NOTE: parameter names below must match the keyword arguments the
        # Locust event hooks fire with — do not rename them.
        def on_request_success(request_type, name, response_time, response_length):
            # One CSV row per successful request, flushed immediately.
            writer.writerow(getrec(request_type, name, response_time, response_length))
            fhd.flush()

        def on_request_failure(request_type, name, response_time, exception):
            # Failures are recorded with zero response length plus the exception.
            writer.writerow(getrec(request_type, name, response_time, 0, exception))
            fhd.flush()

        def on_slave_report(client_id, data):
            # Master mode: write each non-empty slave report as one JSON line.
            if data["stats"] or data["errors"]:
                data["client_id"] = client_id
                fhd.write("%s\n" % json.dumps(data))
                fhd.flush()

        events.request_success += on_request_success
        events.request_failure += on_request_failure
        events.slave_report += on_slave_report

        # Blocks until the Locust run finishes, then flush a final time.
        main.main()
        fhd.flush()
Пример #3
0
    def execute(self):
        """Run the load test, writing results to a file chosen via env vars.

        SLAVES_LDJSON selects line-delimited JSON output; JTL selects CSV
        output. Exactly one of the two environment variables must be set.

        Raises:
            ValueError: if neither JTL nor SLAVES_LDJSON is set.
        """
        if os.getenv("SLAVES_LDJSON"):
            fname = os.getenv("SLAVES_LDJSON")
            is_csv = False
        elif os.getenv("JTL"):
            fname = os.getenv("JTL")
            is_csv = True
        else:
            raise ValueError("Please specify JTL or SLAVES_LDJSON environment variable")

        # Bound to self.fhd so the registered event handlers can write to it.
        with open(fname, 'wt') as self.fhd:
            if is_csv:
                # Probe __getrec with empty args to discover the CSV columns.
                self.writer = csv.DictWriter(self.fhd, self.__getrec(None, None, None, None).keys())
                self.writer.writeheader()
                self.fhd.flush()
            else:
                self.writer = None  # FIXME: bad code design, have zero object for it

            # Register handlers on the Locust event hooks before starting.
            events.request_success += self.__on_request_success
            events.request_failure += self.__on_request_failure
            events.slave_report += self.__on_slave_report

            # Blocks until the Locust run finishes.
            main.main()
            self.fhd.flush()
Пример #4
0
def start_master(sys_argv):
    """Run Locust in master mode: add --master to argv and start the CLI."""
    sys_argv += ["--master"]  # in-place extend: caller's list sees the flag too
    sys.argv = sys_argv
    main()
Пример #5
0
def start_locust_main():
    """Hand control to the Locust CLI entry point."""
    from locust.main import main as locust_entry
    locust_entry()
Пример #6
0
def main():
    """Fetch a locustfile, assemble a Locust command line, and run the test.

    Configuration comes from parse_options(). On the primary node, final
    stats are printed as JSON lines and, when a CloudWatch namespace is
    configured, pushed to CloudWatch metrics via an atexit hook.
    """
    options = parse_options()

    ssh_pvt_key_ssm_param_name = options.ssh_pvt_key_ssm_param_name

    def get_ssh_identity_file():
        # Materialize the SSH private key stored in SSM into a temp file
        # (used for git checkouts); returns None when no parameter was set.
        if ssh_pvt_key_ssm_param_name is not None:
            ssm = boto3.client('ssm')
            ssh_pvt_key = ssm.get_parameter(Name=ssh_pvt_key_ssm_param_name, WithDecryption=True)['Parameter']['Value']
            fd, file_name = tempfile.mkstemp()
            with open(file_name, 'w') as file:
                file.write(ssh_pvt_key)
                os.close(fd)  # mkstemp's fd is unused; the key was written via a second handle
                return file_name
        return None

    ssh_identity_file = get_ssh_identity_file()

    locustfile_selector = LocustFileSelectorPipeline(
        [GitLocustFileSelectorMiddleware(ssh_identity_file=ssh_identity_file)])

    locustfile_source = locustfile_selector.select(options.locustfile_source)

    # NOTE(review): "locusfile" (sic) — typo, but used consistently below.
    locusfile = locustfile_source.fetch()

    def get_percentiles(stat_entry):
        # One "<p>%" key per configured percentile value.
        return {str(e) + '%': stat_entry.get_response_time_percentile(e) for e in PERCENTILES_TO_REPORT}

    def print_formatted_stats_on_primary_node(stats):
        # Emit per-entry stats as JSON lines; skipped entirely on slaves.
        if options.master_host is not None:  # Slave mode. Do not print stats on slaves
            return
        for key in sorted(six.iterkeys(stats.entries)):
            item = stats.entries[key]
            console_logger.info(json.dumps({**{
                "locust_stat_type": "standard",
                "rps": item.total_rps
            }, **item.serialize()}))

        percentile_stats = [{**{
            "locust_stat_type": "percentile",
            "name": stats.entries[key].name,
            "method": stats.entries[key].method,
            "num_request": stats.entries[key].num_requests
        }, **get_percentiles(stats.entries[key])} for key in sorted(six.iterkeys(stats.entries))]
        for item in percentile_stats:
            console_logger.info(json.dumps(item))

    def create_standard_metric_data(stat_entry):
        # CloudWatch metric datums for throughput, latency and count stats.
        return [
            {
                'MetricName': 'Req/s', 'Value': stat_entry.total_rps,
                'Unit': 'Count/Second'
            },
            {
                'MetricName': 'Min Response Time',
                'Value': stat_entry.min_response_time,
                'Unit': 'Milliseconds'
            },
            {
                'MetricName': 'Max Response Time',
                'Value': stat_entry.max_response_time,
                'Unit': 'Milliseconds'
            },
            {
                'MetricName': 'Avg Response Time',
                'Value': stat_entry.avg_response_time,
                'Unit': 'Milliseconds'
            },
            {
                'MetricName': 'Median Response Time',
                'Value': stat_entry.median_response_time,
                'Unit': 'Milliseconds'
            },
            {
                'MetricName': 'Total Requests',
                'Value': stat_entry.num_requests,
                'Unit': 'Count'
            },
            {
                'MetricName': 'Total Failed Requests',
                'Value': stat_entry.num_failures,
                'Unit': 'Count'
            },
        ]

    def create_percentile_metric_data(stat_entry):
        # NOTE(review): latency percentile values are reported with Unit
        # 'Percent'; 'Milliseconds' looks like the intent — confirm.
        return [{
            'MetricName': str(p * 100) + '% Latency',
            'Value': stat_entry.get_response_time_percentile(p),
            'Unit': 'Percent'
        } for p in PERCENTILES_TO_REPORT]

    def create_metric_data(stat_entry):
        # Stamp every datum with one shared timestamp and Method/Name/Host
        # dimensions, merging standard and percentile datums.
        timestamp = datetime.utcnow()

        return list(map(lambda e: {**e, **{
            'Timestamp': timestamp,
            'Dimensions': [
                {
                    'Name': 'Method',
                    'Value': stat_entry.method
                },
                {
                    'Name': 'Name',
                    'Value': stat_entry.name
                },
                {
                    'Name': 'Host',
                    'Value': options.host
                }]}}, create_standard_metric_data(stat_entry) + create_percentile_metric_data(stat_entry)))

    cloudwatch = None

    def ensure_cloudwatch_client_created():
        # Create the CloudWatch client lazily, only when metrics get reported.
        nonlocal cloudwatch
        if cloudwatch is not None:
            return
        cloudwatch = boto3.client('cloudwatch')

    def report_to_cloudwatch_metrics(stats):
        namespace = options.cloudwatch_metric_ns
        if namespace is None:
            return

        # NOTE(review): master_host is set only in slave mode (see the
        # '--slave' branch below), so this skips reporting on slaves; the
        # original inline comment claimed the opposite.
        if options.master_host is not None:
            #  Metrics are reported from the primary node only
            return
        ensure_cloudwatch_client_created()
        for entry in six.itervalues(stats.entries):
            metric_data = create_metric_data(entry)
            cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data)

    def on_exit(**kwargs):
        # atexit hook: remove the temp SSH key, clean up the locustfile
        # source, then dump/report the final stats.
        if ssh_identity_file is not None:
            os.remove(ssh_identity_file)
        locustfile_source.cleanup()
        print_formatted_stats_on_primary_node(runners.locust_runner.stats)
        report_to_cloudwatch_metrics(runners.locust_runner.stats)
        console_logger.info('exiting')

    # Build the Locust CLI argv. NOTE(review): num_clients/hatch_rate are not
    # str()-wrapped like the other numeric options — confirm parse_options
    # yields strings for them (sys.argv entries must be strings).
    argv = [sys.argv[0], '-f', locusfile, '--no-web', '-c', options.num_clients, '-r', options.hatch_rate,
            '-H', options.host]

    if options.expect_slaves is not None:
        argv += ['--master', '--expect-slaves', str(options.expect_slaves)]

    if options.master_host is not None:
        # A configured master_host means this process runs as a slave.
        argv += ['--slave', '--master-host', str(options.master_host)]
        if options.step_time:
            argv += ['--step-time', options.step_time, '--step-clients', str(options.step_clients)]
    else:
        argv += ['--run-time', options.run_time]

    if options.step_load:
        argv += ['--step-load']

    sys.argv = argv

    atexit.register(on_exit)
    console_logger.info('starting')
    locust_main.main()
Пример #7
0
                # NOTE(review): fragment — the enclosing method header and
                # loop are outside this snippet; response_queue, usercount,
                # cw_logs_batch, cw_metrics_batch and result come from there.
                request_response = self.response_queue.get()
                cw_logs_batch.append(request_response.get_cw_logs_record())
                cw_metrics_batch.append(request_response.get_cw_metrics_status_record())
                cw_metrics_batch.append(request_response.get_cw_metrics_count_record())
                if self.usercount: cw_metrics_batch.append(self.usercount.get_metric_data())

                self.response_queue.task_done()
            log.debug("Queue size:["+str(self.response_queue.qsize())+"]")
        # Return both batches so the caller can ship them to CloudWatch.
        result['cw_logs_batch']=cw_logs_batch
        result['cw_metrics_batch']=cw_metrics_batch
        return result



if __name__ == "__main__":
   # Parse Locust's CLI options to discover the target host.
   parser, options, arguments = main.parse_options()

   host = ''
   if options.host:
       host = options.host

   # Stream Locust results into CloudWatch Logs/Metrics via the connector,
   # then run the Locust CLI.
   cwconn = CloudWatchConnector(host=host, namespace=CW_METRICS_NAMESPACE,loggroup=CW_LOGS_LOG_GROUP,logstream=CW_LOGS_LOG_STREAM)
   events.locust_start_hatching += cwconn.on_locust_start_hatching
   events.request_success += cwconn.on_request_success
   events.request_failure += cwconn.on_request_failure
   events.hatch_complete += cwconn.on_hatch_complete

   main.main()


Пример #8
0
def start_slave(sys_argv):
    """Run Locust in slave mode, ensuring --slave appears in the argv."""
    if "--slave" not in sys_argv:
        sys_argv.append("--slave")  # single-flag append == extend(["--slave"])

    sys.argv = sys_argv
    main()
Пример #9
0
def start_master(sys_argv):
    """Run Locust as a master node: append --master and invoke the CLI."""
    sys_argv.extend(("--master",))  # mutates the caller's list in place
    sys.argv = sys_argv
    main()
            # NOTE(review): fragment — the opening "def execute():",
            # "with open(...) as fhd:" and "if is_csv:" lines are outside
            # this snippet; fhd, getrec, events and main come from there.
            writer = csv.DictWriter(fhd, getrec(None, None, None, None).keys())
            writer.writeheader()
            fhd.flush()
        else:
            writer = None  # FIXME: bad code design, have zero object for it


        def on_request_success(request_type, name, response_time, response_length):
            # Append one CSV record per successful request, flushed at once.
            writer.writerow(getrec(request_type, name, response_time, response_length))
            fhd.flush()


        def on_request_failure(request_type, name, response_time, exception):
            # Failures are recorded with zero response length plus the exception.
            writer.writerow(getrec(request_type, name, response_time, 0, exception))
            fhd.flush()


        def on_slave_report(client_id, data):
            # Master mode: write each non-empty slave report as one JSON line.
            if data['stats'] or data['errors']:
                data['client_id'] = client_id
                fhd.write("%s\n" % json.dumps(data))
                fhd.flush()


        # Register the handlers, then block in the Locust run.
        events.request_success += on_request_success
        events.request_failure += on_request_failure
        events.slave_report += on_slave_report

        main.main()
        fhd.flush()
Пример #11
0
    # Database credentials pulled from the externally-defined db_params dict.
    dbname = db_params['dbname']
    user = db_params['user']
    password = db_params['password']

    pool_size = 2  # maximum number of concurrent queries per locust before they block
    redshift_cache_query_results = True  # set False for Redshift only

    # Wait-time bounds between tasks, in milliseconds.
    min_wait = 500
    max_wait = 1000

    # Task set the enclosing Locust class runs — TODO confirm enclosing class.
    task_set = PostgresLocustClientTasks


if __name__ == "__main__":
    # Web UI (when enabled) would live at http://localhost:8089/
    locusts = 5  # number of Locust users to spawn (-c)
    hatch_rate = 1  # users spawned per second (-r)

    # Starting with locust 0.9, -t stops after the given amount of time
    # (e.g. 300s, 20m, 3h, 1h30m, ...).

    # Headless run against this very file, dumping CSV results.
    args = [
        '-f', os.path.basename(__file__),
        '--no-web',
        '--csv=locust_results',
        '-c', str(locusts),
        '-r', str(hatch_rate),
    ]
    old_sys_argv = sys.argv
    sys.argv = [old_sys_argv[0]] + args
    main()
Пример #12
0
def start_slave(sys_argv):
    """Start Locust in slave mode with stats preserved across hatches.

    Echoes the argv for debugging, appends the --slave and
    --no-reset-stats flags, installs the list as sys.argv, and invokes
    the Locust CLI entry point.
    """
    # Fix: the original Python 2 `print sys_argv` statement is a
    # SyntaxError on Python 3; the call form works on both.
    print(sys_argv)
    sys_argv.append("--slave")
    sys_argv.append("--no-reset-stats")
    sys.argv = sys_argv
    main()
Пример #13
0
def start_master(sys_argv):
    """Run Locust as a master: inject the locustfile name and master flags."""
    # Append in the same order the original flags were added.
    for flag in ("yjktask.py", "--master", "--no-reset-stats"):
        sys_argv.append(flag)
    sys.argv = sys_argv
    main()
Пример #14
0
#!/Users/cary/Documents/python/locust/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys

from locust.main import main

if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix so argv[0] matches the
    # console-script name, then exit with the Locust CLI's status.
    prog = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = prog
    sys.exit(main())
Пример #15
0
def start_slave(sys_argv):
    """Switch the given argv into Locust slave mode and run the CLI."""
    sys_argv.append("--slave")  # same in-place mutation as extend(["--slave"])
    sys.argv = sys_argv
    main()
Пример #16
0
def start_slave(sys_argv):
    """Run Locust as a slave, adding --slave only if the caller did not."""
    needs_flag = "--slave" not in sys_argv
    if needs_flag:
        sys_argv.extend(["--slave"])

    sys.argv = sys_argv
    main()
def start_locust_main():
    """Delegate to the Locust CLI entry point (locust_main.main)."""
    locust_main.main()
Пример #18
0
def start_slave(sys_argv):
    """Append --slave to the argv, install it, and start Locust."""
    sys_argv += ["--slave"]  # in-place extend keeps the caller's list updated
    sys.argv = sys_argv
    main()