Example 1
    def _write_schedule_file(self, load, scheduler, sfd):
        cnt = 0
        payload_entry_count = None
        pbar = None
        start_time = time.time()
        for item in scheduler.generate():
            # item : (time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len)
            time_offset, payload_len, _, payload, marker, _, _ = item

            if scheduler.iterations > 1 and payload_entry_count is None:
                payload_entry_count = scheduler.count
                estimated_size = self._estimate_schedule_size(load, payload_entry_count)
                self.log.debug("Estimated schedule size: %s", estimated_size)
                if estimated_size:
                    pbar = IncrementableProgressBar(maxval=estimated_size)
                    pbar.catchup(start_time, cnt)

            if time_offset < 0:  # special case, run worker with no delay
                time_offset = 0.0

            sfd.write(b("%s %s %s%s" % (payload_len, int(1000 * time_offset), marker, self.NEWLINE)))
            sfd.write(b("%s%s" % (payload, self.NEWLINE)))

            cnt += 1
            if pbar:
                pbar.increment()
        self.log.debug("Actual schedule size: %s", cnt)
        if pbar:
            pbar.finish()
Example 2
    def _write_schedule_file(self, load, scheduler, sfd):
        cnt = 0
        payload_entry_count = None
        pbar = None
        start_time = time.time()
        for item in scheduler.generate():
            # item : (time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len)
            time_offset, payload_len, _, payload, marker, _, _ = item

            if scheduler.iterations > 1 and payload_entry_count is None:
                payload_entry_count = scheduler.count
                estimated_size = self._estimate_schedule_size(
                    load, payload_entry_count)
                self.log.debug("Estimated schedule size: %s", estimated_size)
                if estimated_size:
                    pbar = IncrementableProgressBar(maxval=estimated_size)
                    pbar.catchup(start_time, cnt)

            if time_offset < 0:  # special case, run worker with no delay
                time_offset = 0.0

            sfd.write(
                b("%s %s %s%s" % (payload_len, int(
                    1000 * time_offset), marker, self.NEWLINE)))
            sfd.write(b("%s%s" % (payload, self.NEWLINE)))

            cnt += 1
            if pbar:
                pbar.increment()
        self.log.debug("Actual schedule size: %s", cnt)
        if pbar:
            pbar.finish()
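
A note on the b() helper that every snippet on this page funnels text through before writing to sockets or files: a minimal sketch of such a helper, assuming it behaves like six.b, coercing text to UTF-8 bytes and passing bytes through untouched, would be:

    def b(string):
        # Hypothetical stand-in for the b() helper used in these examples;
        # bzt ships its own. Bytes pass through, text is UTF-8 encoded.
        if isinstance(string, bytes):
            return string
        return string.encode("utf-8")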
Example 3
 def start(self):
     self.socket.send(
         b("interval:%s\n" % self.interval if self.interval > 0 else 1))
     command = "metrics:%s\n" % self._metrics_command
     self.log.debug("Sending metrics command: %s", command)
     self.socket.send(b(command))
     self.socket.setblocking(False)
Example 4
    def connect(self):
        try:
            self.socket.connect((self.address, self.port))
            self.socket.send(b("test\n"))
            resp = self.socket.recv(4)
            assert resp == b("Yep\n")
            self.log.debug("Connected to serverAgent at %s:%s successfully",
                           self.address, self.port)
        except BaseException as exc:
            self.log.warning("Error during connecting to agent at %s:%s: %s",
                             self.address, self.port, exc)
            msg = "Failed to connect to serverAgent at %s:%s" % (self.address,
                                                                 self.port)
            raise TaurusNetworkError(msg)

        if self.config.get("logging", False):
            if not PY3:
                self.log.warning("Logging option doesn't work on python2.")
            else:
                self.logs_file = self.engine.create_artifact(
                    "SAlogs_{}_{}".format(self.address, self.port), ".csv")
                with open(self.logs_file, "a", newline='') as sa_logs:
                    logs_writer = csv.writer(sa_logs, delimiter=',')
                    metrics = ['ts'] + sorted(self._result_fields)
                    logs_writer.writerow(metrics)
Example 5
    def test_server_agent(self):
        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({
            "server-agent": [{
                "address": "127.0.0.1:4444",
                "logging": "True",
                "metrics": [
                    "cpu",
                    "disks"
                ]
            }, {
                "address": "10.0.0.1",
                "metrics": [
                    "something1",
                    "something2"
                ]
            }]
        })

        listener = LoggingMonListener()
        obj.add_listener(listener)

        widget = obj.get_widget()
        obj.add_listener(widget)

        crit_conf = BetterDict.from_dict({"condition": ">", "threshold": 5, "subject": "127.0.0.1:4444/cpu"})
        criteria = MonitoringCriteria(crit_conf, obj)
        obj.add_listener(criteria)

        obj.client_classes = {'server-agent': ServerAgentClientEmul}

        obj.prepare()
        obj.startup()

        for i in range(1, 10):
            obj.clients[0].socket.recv_data += b("%s\t%s\t\n" % (i, i*10))
            obj.check()
            ROOT_LOGGER.debug("Criteria state: %s", criteria)
            time.sleep(obj.engine.check_interval)

        obj.shutdown()
        obj.post_process()

        self.assertEquals(b("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n"), obj.clients[0].socket.sent_data)

        if PY3:
            self.assertIsNotNone(obj.clients[0].logs_file)
            with open(obj.clients[0].logs_file) as serveragent_logs:
                logs_reader = csv.reader(serveragent_logs)
                logs_reader = list(logs_reader)
            self.assertEqual(['ts', 'cpu', 'disks'], logs_reader[0])
            for i in range(1, 10):
                self.assertEqual([str(i), str(i * 10)], logs_reader[i][1:])
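
Read together with the start() and connect() snippets above, this test pins down the small line-based protocol the clients speak: a "test\n" probe answered by "Yep\n", then "interval:<seconds>\n", a tab-separated "metrics:...\n" list, and finally "exit\n". A sketch of one such session over a plain socket (hypothetical address; a real serverAgent must be listening):

    import socket

    def sample_session(address="127.0.0.1", port=4444):
        # Sketch of the exchange implied by the sent_data assertion above;
        # not the agent's documented API.
        sock = socket.create_connection((address, port))
        try:
            sock.sendall(b"test\n")                # handshake probe
            assert sock.recv(4) == b"Yep\n"        # agent acknowledges
            sock.sendall(b"interval:1\n")          # seconds between reports
            sock.sendall(b"metrics:cpu\tdisks\n")  # tab-separated metric names
            print(sock.recv(1024))                 # one tab-separated sample line
        finally:
            sock.sendall(b"exit\n")                # end the session
            sock.close()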
Example 6
 def connect(self):
     try:
         self.socket.connect((self.address, self.port))
         self.socket.send(b("test\n"))
         resp = self.socket.recv(4)
         assert resp == b("Yep\n")
         self.log.debug("Connected to serverAgent at %s:%s successfully", self.address, self.port)
     except BaseException as exc:
         self.log.warning("Error during connecting to agent at %s:%s: %s", self.address, self.port, exc)
         msg = "Failed to connect to serverAgent at %s:%s" % (self.address, self.port)
         raise TaurusNetworkError(msg)
Example 7
    def _payload_reader(self):
        self.iterations = 1
        rec_type = self.REC_TYPE_SCHEDULE
        while True:
            payload_offset = self.payload_fhd.tell()
            line = self.payload_fhd.readline()
            if not line:  # rewind
                self.payload_fhd.seek(0)
                self.iterations += 1

                if self.need_start_loop is not None and self.need_start_loop and not self.iteration_limit:
                    self.need_start_loop = False
                    self.iteration_limit = self.iterations
                    rec_type = self.REC_TYPE_LOOP_START

                if self.iteration_limit and self.iterations > self.iteration_limit:
                    self.log.debug("Schedule iterations limit reached: %s", self.iteration_limit)
                    break

                continue

            if not line.strip():  # we're fine to skip empty lines between records
                continue

            parts = line.split(b(' '))
            if len(parts) < 2:
                raise RuntimeError("Wrong format for meta-info line: %s", line)

            payload_len, marker = parts
            marker = marker.decode()
            payload_len = int(payload_len)
            payload = self.payload_fhd.read(payload_len).decode()
            yield payload_len, payload_offset, payload, marker.strip(), len(line), rec_type
            rec_type = self.REC_TYPE_SCHEDULE
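
The format this reader expects, and which the scheduler tests on this page follow (e.g. b("4 test\ntest\n")), is a meta-info line "<payload_len> <marker>" followed by exactly payload_len bytes of payload. A self-contained round-trip of that layout:

    import io

    # One record in the payload format assumed by _payload_reader:
    # "<payload_len> <marker>\n" followed by the payload itself.
    fhd = io.BytesIO(b"4 test\ntest\n")
    meta = fhd.readline()                 # b"4 test\n"
    payload_len, marker = meta.split(b" ")
    payload = fhd.read(int(payload_len))  # read exactly 4 bytes
    assert (marker.strip(), payload) == (b"test", b"test")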
Example 8
 def test_schedule_concurrency(self):
     self.obj.execution.merge({"concurrency": 5, "ramp-up": 10, "hold-for": 5})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertEqual(8, len(items))
     self.assertEqual(-1, items[5][0])  # instance became unlimited
     self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload
Example 9
    def _write_schedule_file(self, load, pbar, scheduler, sfd):
        prev_offset = 0
        accum_interval = 0.0
        cnt = 0
        for item in scheduler.generate():
            time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len = item

            if cnt % 5000 == 0:  # arbitrary step count, used to throttle progress updates
                if time_offset >= 0:
                    progress = time_offset
                elif prev_offset >= 0:
                    progress = prev_offset
                else:  # both time_offset and prev_offset are < 0
                    progress = 0
                pbar.update(progress if 0 <= progress < load.duration else load.duration)
            cnt += 1

            if time_offset >= 0:
                accum_interval += 1000 * (time_offset - prev_offset)
                interval = int(math.floor(accum_interval))
                accum_interval -= interval
            else:
                interval = 0xFFFFFF

            type_and_delay = struct.pack("I", interval)[:-1] + b(chr(record_type))
            payload_len_bytes = struct.pack('I', overall_len)
            payload_offset_bytes = struct.pack('Q', payload_offset)

            sfd.write(type_and_delay + payload_len_bytes + payload_offset_bytes)
            prev_offset = time_offset
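
The header packing above merits a note: struct.pack("I", interval)[:-1] keeps only the three low-order bytes of the millisecond delay (assuming little-endian native byte order), leaving the fourth byte of the record header for the record type; 0xFFFFFF is the "no delay" sentinel. A small check of that layout:

    import struct

    interval = 1234      # millisecond delay; must fit in 24 bits
    record_type = 0      # e.g. Scheduler.REC_TYPE_SCHEDULE
    header = struct.pack("I", interval)[:-1] + bytes([record_type])
    assert len(header) == 4
    # The low 24 bits carry the delay, the high byte the record type.
    assert struct.unpack("I", header)[0] & 0xFFFFFF == interval
    assert struct.unpack("I", header)[0] >> 24 == record_type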
Example 10
 def test_schedule_concurrency(self):
     self.configure({ScenarioExecutor.EXEC: {"concurrency": 5, "ramp-up": 10, "hold-for": 5}})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertEqual(8, len(items))
     self.assertEqual(-1, items[5][0])  # instance became unlimited
     self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload
Example 11
 def test_schedule_empty(self):
     # concurrency: 1, iterations: 1
     scheduler = self.get_scheduler(b("4 test\ntest\n"))
     items = list(scheduler.generate())
     for item in items:
         ROOT_LOGGER.debug("Item: %s", item)
     self.assertEqual(1, len(items))
Example 12
File: cli.py Project: VegiS/taurus
def main():
    """
    This function is used as entrypoint by setuptools
    """
    usage = "Usage: bzt [options] [configs] [-aliases]"
    dsc = "BlazeMeter Taurus Tool v%s, the configuration-driven test running engine" % bzt.VERSION
    parser = OptionParserWithAliases(usage=usage, description=dsc, prog="bzt")
    parser.add_option("-l", "--log", action="store", default="bzt.log", help="Log file location")
    parser.add_option("-o", "--option", action="append", help="Override option in config")
    parser.add_option("-q", "--quiet", action="store_true", help="Only errors and warnings printed to console")
    parser.add_option("-v", "--verbose", action="store_true", help="Prints all logging messages to console")
    parser.add_option("-n", "--no-system-configs", action="store_true", help="Skip system and user config files")

    parsed_options, parsed_configs = parser.parse_args()

    executor = CLI(parsed_options)

    if not is_windows():
        readable = select([sys.stdin], [], [], 0.1)[0]
        for stream in readable:
            stdin = stream.read()
            if stdin:
                with NamedTemporaryFile(prefix="stdin_", suffix=".config", delete=False) as fhd:
                    fhd.write(b(stdin))
                    parsed_configs.append(fhd.name)

    try:
        code = executor.perform(parsed_configs)
    except BaseException as exc_top:
        logging.error("%s: %s", type(exc_top).__name__, exc_top)
        logging.debug("Exception: %s", traceback.format_exc())
        code = 1

    exit(code)
Example 13
    def test_schedule_rps(self):
        self.obj.engine.config.merge({"provisioning": "test"})
        rps = 9
        rampup = 12
        self.obj.execution.merge({
            "throughput": rps,
            "ramp-up": rampup,
            "steps": 3,
            "hold-for": 0
        })
        scheduler = self.get_scheduler(b("4 test\ntest\n"))

        cnt = 0
        cur = 0
        currps = 0
        for item in scheduler.generate():
            # logging.debug("Item: %s", item)
            if int(math.ceil(item[0])) != cur:
                # self.assertLessEqual(currps, rps)
                cur = int(math.ceil(item[0]))
                logging.debug("RPS: %s", currps)
                currps = 0

            cnt += 1
            currps += 1

        logging.debug("RPS: %s", currps)
Example 14
 def test_schedule_concurrency_steps(self):
     self.obj.execution.merge({"concurrency": 5, "ramp-up": 10, "steps": 3})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertEqual(8, len(items))
     self.assertEqual(-1, items[5][0])  # instance became unlimited
     self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload
Example 15
        def test_schedule_rps(self):
            executor = PBenchExecutor()
            executor.engine = EngineEmul()
            executor.engine.config.merge({"provisioning": "test"})
            rps = 9
            rampup = 12
            executor.execution.merge({
                "throughput": rps,
                "ramp-up": rampup,
                "steps": 3,
                "hold-for": 0
            })
            obj = Scheduler(executor.get_load(),
                            io.BytesIO(b("4 test\ntest\n")),
                            logging.getLogger(""))

            cnt = 0
            cur = 0
            currps = 0
            for item in obj.generate():
                # logging.debug("Item: %s", item)
                if int(math.ceil(item[0])) != cur:
                    # self.assertLessEqual(currps, rps)
                    cur = int(math.ceil(item[0]))
                    logging.debug("RPS: %s", currps)
                    currps = 0

                cnt += 1
                currps += 1

            logging.debug("RPS: %s", currps)
Example 16
    def test_schedule_rps(self):
        rps = 9
        rampup = 12
        self.configure({
            "provisioning": "test",
            EXEC: {
                "throughput": rps,
                "ramp-up": rampup,
                "steps": 3,
                "hold-for": 0
            }
        })
        scheduler = self.get_scheduler(b("4 test\ntest\n"))

        cnt = 0
        cur = 0
        currps = 0
        for item in scheduler.generate():
            if int(math.ceil(item[0])) != cur:
                # self.assertLessEqual(currps, rps)
                cur = int(math.ceil(item[0]))
                ROOT_LOGGER.debug("RPS: %s", currps)
                currps = 0

            cnt += 1
            currps += 1

        ROOT_LOGGER.debug("RPS: %s", currps)
Example 17
    def _payload_reader(self):
        self.iterations = 1
        rec_type = self.REC_TYPE_SCHEDULE
        while True:
            payload_offset = self.payload_fhd.tell()
            line = self.payload_fhd.readline()
            if not line:  # rewind
                self.payload_fhd.seek(0)
                self.iterations += 1

                if self.need_start_loop is not None and self.need_start_loop and not self.iteration_limit:
                    self.need_start_loop = False
                    self.iteration_limit = self.iterations
                    rec_type = self.REC_TYPE_LOOP_START

                if self.iteration_limit and self.iterations > self.iteration_limit:
                    self.log.debug("Schedule iterations limit reached: %s", self.iteration_limit)
                    break

                continue

            if not line.strip():  # we're fine to skip empty lines between records
                continue

            parts = line.split(b(' '))
            if len(parts) < 2:
                raise TaurusInternalException("Wrong format for meta-info line: %s" % line)

            payload_len, marker = parts
            marker = marker.decode()
            payload_len = int(payload_len)
            payload = self.payload_fhd.read(payload_len).decode()
            yield payload_len, payload_offset, payload, marker.strip(), len(line), rec_type
            rec_type = self.REC_TYPE_SCHEDULE
Example 18
 def test_schedule_concurrency_steps(self):
     self.configure({ScenarioExecutor.EXEC: {"concurrency": 5, "ramp-up": 10, "steps": 3}})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertEqual(8, len(items))
     self.assertEqual(-1, items[5][0])  # instance became unlimited
     self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload
Example 19
 def test_schedule_throughput_only(self):
     executor = PBenchExecutor()
     executor.engine = EngineEmul()
     executor.execution.merge({"throughput": 5})
     obj = Scheduler(executor.get_load(),
                     io.BytesIO(b("5 test1\ntest1\n5 test2\ntest2\n")), logging.getLogger(""))
     items = list(obj.generate())
     self.assertTrue(len(items) > 0)
Example 20
    def form_as_bytes(self):
        """
        represents form contents as bytes in python3 or 8-bit str in python2
        """
        result_list = []
        for item in self.__convert_to_list():
            # if (8-bit str (2.7) or bytes (3.x), then no processing, just add, else - encode)
            if isinstance(item, binary_type):
                result_list.append(item)
            elif isinstance(item, text_type):
                result_list.append(item.encode())
            else:
                raise TaurusInternalException("Unhandled form data type: %s" % type(item))

        res_bytes = b("\r\n").join(result_list)
        res_bytes += b("\r\n")
        return res_bytes
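
The joining step is what makes the result a valid multipart body: every part, whether it started as text or bytes, ends up CRLF-delimited, with a trailing CRLF. A tiny illustration with hypothetical part values:

    # Hypothetical parts list; text is encoded first, as form_as_bytes does,
    # then everything is joined with CRLF.
    parts = [b"--boundary", "Content-Disposition: form-data", b"", "value"]
    normalized = [p if isinstance(p, bytes) else p.encode() for p in parts]
    body = b"\r\n".join(normalized) + b"\r\n"
    assert body == b"--boundary\r\nContent-Disposition: form-data\r\n\r\nvalue\r\n"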
Example 21
 def disconnect(self):
     self.log.debug("Closing connection with agent at %s:%s...", self.address, self.port)
     try:
         self.socket.send(b("exit\n"))
     except BaseException as exc:
         self.log.warning("Error during disconnecting from agent at %s:%s: %s", self.address, self.port, exc)
     finally:
         self.socket.close()
Example 22
    def form_as_bytes(self):
        """
        represents form contents as bytes in python3 or 8-bit str in python2
        """
        result_list = []
        for item in self.__convert_to_list():
            # if (8-bit str (2.7) or bytes (3.x), then no processing, just add, else - encode)
            if isinstance(item, binary_type):
                result_list.append(item)
            elif isinstance(item, text_type):
                result_list.append(item.encode())
            else:
                raise BaseException("Unhandled form data type: %s" % type(item))

        res_bytes = b("\r\n").join(result_list)
        res_bytes += b("\r\n")
        return res_bytes
Example 23
    def test_server_agent(self):
        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({
            "server-agent": [{
                "address": "127.0.0.1:4444",
                "metrics": [
                    "cpu",
                    "disks"
                ]
            }, {
                "address": "10.0.0.1",
                "metrics": [
                    "something1",
                    "something2"
                ]
            }]
        })

        listener = LoggingMonListener()
        obj.add_listener(listener)

        widget = obj.get_widget()
        obj.add_listener(widget)

        crit_conf = BetterDict()
        crit_conf.merge({"threshold": 5, "subject": "127.0.0.1:4444/cpu"})
        criteria = MonitoringCriteria(crit_conf, obj)
        obj.add_listener(criteria)

        obj.client_classes = {'server-agent': ServerAgentClientEmul}

        obj.prepare()
        obj.startup()

        for _ in range(1, 10):
            obj.clients[0].socket.recv_data += b("%s\t%s\n" % (random.random(), random.random()))
            obj.check()
            logging.debug("Criteria state: %s", criteria)
            time.sleep(1)

        obj.shutdown()
        obj.post_process()

        self.assertEquals(b("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n"), obj.clients[0].socket.sent_data)
Example 24
 def test_schedule_empty(self):
     executor = PBenchExecutor()
     executor.engine = EngineEmul()
     # concurrency: 1, iterations: 1
     obj = Scheduler(executor.get_load(), io.BytesIO(b("4 test\ntest\n")), logging.getLogger(""))
     items = list(obj.generate())
     for item in items:
         logging.debug("Item: %s", item)
     self.assertEqual(1, len(items))
Example 25
 def test_schedule_concurrency_steps(self):
     executor = PBenchExecutor()
     executor.engine = EngineEmul()
     executor.execution.merge({"concurrency": 5, "ramp-up": 10, "steps": 3})
     obj = Scheduler(executor.get_load(), io.BytesIO(b("5 test1\ntest1\n5 test2\ntest2\n")), logging.getLogger(""))
     items = list(obj.generate())
     self.assertEqual(8, len(items))
     self.assertEqual(-1, items[5][0])  # instance became unlimited
     self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload
Example 26
 def test_schedule_with_no_rampup(self):
     self.obj.execution.merge({
         "concurrency": 10,
         "ramp-up": None,
         "steps": 3,
         "hold-for": 10
     })
     # this line shouldn't throw an exception
     self.get_scheduler(b("4 test\ntest\n"))
Example 27
 def test_schedule_concurrency_steps(self):
     executor = PBenchExecutor()
     executor.engine = EngineEmul()
     executor.execution.merge({"concurrency": 5, "ramp-up": 10, "steps": 3})
     obj = Scheduler(executor.get_load(),
                     io.BytesIO(b("5 test1\ntest1\n5 test2\ntest2\n")), logging.getLogger(""))
     items = list(obj.generate())
     self.assertEqual(8, len(items))
     self.assertEqual(-1, items[5][0])  # instance became unlimited
     self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload
Example 28
 def test_schedule_with_no_rampup(self):
     self.configure({
         EXEC: {
             "concurrency": 10,
             "ramp-up": None,
             "steps": 3,
             "hold-for": 10
         }
     })
     # this line shouldn't throw an exception
     self.get_scheduler(b("4 test\ntest\n"))
Example 29
def main():
    """
    This function is used as entrypoint by setuptools
    """
    usage = "Usage: bzt [options] [configs] [-aliases]"
    dsc = "BlazeMeter Taurus Tool v%s, the configuration-driven test running engine" % bzt.VERSION
    parser = OptionParserWithAliases(usage=usage, description=dsc, prog="bzt")
    parser.add_option('-l',
                      '--log',
                      action='store',
                      default="bzt.log",
                      help="Log file location")
    parser.add_option('-o',
                      '--option',
                      action='append',
                      help="Override option in config")
    parser.add_option('-q',
                      '--quiet',
                      action='store_true',
                      help="Only errors and warnings printed to console")
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help="Prints all logging messages to console")
    parser.add_option('-n',
                      '--no-system-configs',
                      action='store_true',
                      help="Skip system and user config files")

    parsed_options, parsed_configs = parser.parse_args()

    executor = CLI(parsed_options)

    if not is_windows():
        readable = select([sys.stdin], [], [], 0.1)[0]
        for stream in readable:
            stdin = stream.read()
            if stdin:
                with NamedTemporaryFile(prefix="stdin_",
                                        suffix=".config",
                                        delete=False) as fhd:
                    fhd.write(b(stdin))
                    parsed_configs.append(fhd.name)

    try:
        code = executor.perform(parsed_configs)
    except BaseException as exc_top:
        logging.error("%s: %s", type(exc_top).__name__, exc_top)
        logging.debug("Exception: %s", traceback.format_exc())
        code = 1

    exit(code)
Example 30
 def test_schedule_with_no_rampup(self):
     executor = PBenchExecutor()
     executor.engine = EngineEmul()
     executor.execution.merge({
         "concurrency": 10,
         "ramp-up": None,
         "steps": 3,
         "hold-for": 10
     })
     # this line shouldn't throw an exception
     obj = Scheduler(executor.get_load(),
                     io.BytesIO(b("4 test\ntest\n")),
                     logging.getLogger(""))
Example 31
    def test_server_agent_encoding(self):
        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({
            "server-agent": [{
                "address": "127.0.0.1:4444",
                "metrics": ["cpu", "disks"]
            }]
        })

        obj.client_classes = {'server-agent': ServerAgentClientEmul}
        obj.prepare()

        self.assertEquals(b("test\n"), obj.clients[0].socket.sent_data)
Example 32
    def test_server_agent_encoding(self):
        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({
            "server-agent": [{
                "address": "127.0.0.1:4444",
                "metrics": [
                    "cpu",
                    "disks"
                ]
            }]
        })

        obj.client_classes = {'server-agent': ServerAgentClientEmul}
        obj.prepare()

        self.assertEquals(b("test\n"), obj.clients[0].socket.sent_data)
Example 33
    def _write_schedule_file(self, load, scheduler, sfd):
        prev_offset = 0
        accum_interval = 0.0
        cnt = 0
        payload_entry_count = None
        pbar = None
        start_time = time.time()
        for item in scheduler.generate():
            # item : (time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len)
            time_offset, _, payload_offset, _, _, record_type, overall_len = item

            if scheduler.iterations > 1 and payload_entry_count is None:
                payload_entry_count = scheduler.count
                estimated_size = self._estimate_schedule_size(
                    load, payload_entry_count)
                self.log.debug("Estimated schedule size: %s", estimated_size)
                if estimated_size:
                    pbar = IncrementableProgressBar(maxval=estimated_size)
                    pbar.catchup(start_time, cnt)

            if time_offset >= 0:
                accum_interval += 1000 * (time_offset - prev_offset)
                interval = int(math.floor(accum_interval))
                accum_interval -= interval
            else:
                interval = 0xFFFFFF

            type_and_delay = struct.pack("I", interval)[:-1] + b(
                chr(record_type))
            payload_len_bytes = struct.pack('I', overall_len)
            payload_offset_bytes = struct.pack('Q', payload_offset)

            sfd.write(type_and_delay + payload_len_bytes +
                      payload_offset_bytes)

            if pbar:
                pbar.increment()
            cnt += 1
            prev_offset = time_offset
        self.log.debug("Actual schedule size: %s", cnt)
        if pbar:
            pbar.finish()
Example 34
    def test_schedule_rps(self):
        self.obj.engine.config.merge({"provisioning": "test"})
        rps = 9
        rampup = 12
        self.obj.execution.merge({"throughput": rps, "ramp-up": rampup, "steps": 3, "hold-for": 0})
        scheduler = self.get_scheduler(b("4 test\ntest\n"))

        cnt = 0
        cur = 0
        currps = 0
        for item in scheduler.generate():
            if int(math.ceil(item[0])) != cur:
                # self.assertLessEqual(currps, rps)
                cur = int(math.ceil(item[0]))
                ROOT_LOGGER.debug("RPS: %s", currps)
                currps = 0

            cnt += 1
            currps += 1

        ROOT_LOGGER.debug("RPS: %s", currps)
Example 35
    def _write_schedule_file(self, load, scheduler, sfd):
        prev_offset = 0
        accum_interval = 0.0
        cnt = 0
        payload_entry_count = None
        pbar = None
        start_time = time.time()
        for item in scheduler.generate():
            # item : (time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len)
            time_offset, _, payload_offset, _, _, record_type, overall_len = item

            if scheduler.iterations > 1 and payload_entry_count is None:
                payload_entry_count = scheduler.count
                estimated_size = self._estimate_schedule_size(load, payload_entry_count)
                self.log.debug("Estimated schedule size: %s", estimated_size)
                if estimated_size:
                    pbar = IncrementableProgressBar(maxval=estimated_size)
                    pbar.catchup(start_time, cnt)

            if time_offset >= 0:
                accum_interval += 1000 * (time_offset - prev_offset)
                interval = int(math.floor(accum_interval))
                accum_interval -= interval
            else:
                interval = 0xFFFFFF

            type_and_delay = struct.pack("I", interval)[:-1] + b(chr(record_type))
            payload_len_bytes = struct.pack('I', overall_len)
            payload_offset_bytes = struct.pack('Q', payload_offset)

            sfd.write(type_and_delay + payload_len_bytes + payload_offset_bytes)

            if pbar:
                pbar.increment()
            cnt += 1
            prev_offset = time_offset
        self.log.debug("Actual schedule size: %s", cnt)
        if pbar:
            pbar.finish()
Example 36
        def test_schedule_rps(self):
            executor = PBenchExecutor()
            executor.engine = EngineEmul()
            executor.engine.config.merge({"provisioning": "test"})
            rps = 9
            rampup = 12
            executor.execution.merge({"throughput": rps, "ramp-up": rampup, "steps": 3, "hold-for": 0})
            obj = Scheduler(executor.get_load(), io.BytesIO(b("4 test\ntest\n")), logging.getLogger(""))

            cnt = 0
            cur = 0
            currps = 0
            for item in obj.generate():
                # logging.debug("Item: %s", item)
                if int(math.ceil(item[0])) != cur:
                    # self.assertLessEqual(currps, rps)
                    cur = int(math.ceil(item[0]))
                    logging.debug("RPS: %s", currps)
                    currps = 0

                cnt += 1
                currps += 1

            logging.debug("RPS: %s", currps)
Example 37
 def __init__(self, family=AF_INET, atype=SOCK_STREAM, proto=0, _sock=None):
     self.recv_data = b("")
     self.sent_data = b("")
Example 38
 def test_schedule_throughput_only(self):
     self.configure({EXEC: {"throughput": 5}})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertTrue(len(items) > 0)
Example 39
 def test_schedule_throughput_only(self):
     self.configure({ScenarioExecutor.EXEC: {"throughput": 5}})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertTrue(len(items) > 0)
Example 40
 def __init__(self, parent_logger, label, config, engine):
     super(ServerAgentClientEmul, self).__init__(parent_logger, label, config, engine)
     self.socket = SocketEmul()
     self.socket.recv_data = b("Yep\n")
     self.select = self.select_emul
Example 41
 def test_schedule_with_no_rampup(self):
     executor = PBenchExecutor()
     executor.engine = EngineEmul()
     executor.execution.merge({"concurrency": 10, "ramp-up": None, "steps": 3, "hold-for": 10})
     # this line shouldn't throw an exception
     obj = Scheduler(executor.get_load(), io.BytesIO(b("4 test\ntest\n")), logging.getLogger(""))
Example 42
 def test_schedule_with_no_rampup(self):
     self.obj.execution.merge({"concurrency": 10, "ramp-up": None, "steps": 3, "hold-for": 10})
     # this line shouldn't throw an exception
     self.get_scheduler(b("4 test\ntest\n"))
Example 43
 def start(self):
     self.socket.send(b("interval:%s\n" % self.interval))
     command = "metrics:%s\n" % self._metrics_command
     self.log.debug("Sending metrics command: %s", command)
     self.socket.send(b(command))
     self.socket.setblocking(False)
Example 44
 def __init__(self, parent_logger, label, config, engine):
     super(ServerAgentClientEmul, self).__init__(parent_logger, label,
                                                 config, engine)
     self.socket = SocketEmul()
     self.socket.recv_data = b("Yep\n")
     self.select = self.select_emul
Example 45
 def test_schedule_with_no_rampup(self):
     self.configure({ScenarioExecutor.EXEC: {"concurrency": 10, "ramp-up": None, "steps": 3, "hold-for": 10}})
     # this line shouldn't throw an exception
     self.get_scheduler(b("4 test\ntest\n"))
Example 46
 def test_schedule_throughput_only(self):
     self.obj.execution.merge({"throughput": 5})
     scheduler = self.get_scheduler(b("5 test1\ntest1\n5 test2\ntest2\n"))
     items = list(scheduler.generate())
     self.assertTrue(len(items) > 0)