Example #1
    async def cycle(self) -> None:
        # "ceph pg dump" may prefix its output with a status line; strip it
        data = (await self.cli.run_json_raw("pg dump")).strip()
        if data.startswith("dumped all"):
            data = data.replace("dumped all", "", 1).lstrip()
        rec = pack_record(RecId.pgdump, data)
        if rec:
            self.record_file.write_record(*rec)
            logger.debug(f"Pg dump provides {b2ssize(len(rec[1]))}B")
Example #2
async def get_historic(osd_ids: List[int],
                       size: int,
                       duration: float,
                       ceph_extra_args: List[str],
                       cmd_timeout: float,
                       release_i: int,
                       min_duration: int = 0) -> bytes:
    cli = CephCLI(node=None,
                  extra_params=ceph_extra_args,
                  timeout=cmd_timeout,
                  release=CephRelease(release_i))
    all_ops: Dict[int, List[CephOp]] = {}
    curr_ops: Set[str] = set()  # op descriptions seen on this pass; compared against the module-level previous_ops

    for osd_id in osd_ids:
        try:
            raw_ops = await cli.get_historic(osd_id)
        except (subprocess.CalledProcessError, OSError):
            continue  # skip OSDs we cannot query

        if raw_ops['size'] != size or raw_ops['duration'] != duration:
            raise RuntimeError(
                f"Historic ops settings changed for osd {osd_id}. "
                f"Expected: duration={duration}, size={size}. "
                f"Got: duration={raw_ops['duration']}, size={raw_ops['size']}")

        for raw_op in raw_ops['ops']:
            # op durations are reported in seconds; min_duration is in milliseconds
            if min_duration > int(raw_op.get('duration', 0) * 1000):
                continue
            try:
                _, op = CephOp.parse_op(raw_op)
                if not op:
                    continue
            except Exception:
                continue

            if op.tp is not None and op.description not in previous_ops:
                op.pack_pool_id = op.pool_id
                all_ops.setdefault(osd_id, []).append(op)
                curr_ops.add(op.description)

    # remember this cycle's ops so the next call skips already-seen duplicates
    previous_ops.clear()
    previous_ops.update(curr_ops)
    chunks = []
    for osd_id, ops in all_ops.items():
        tpl = (osd_id, int(time.time()), ops,
               HistoricFields.compact | HistoricFields.with_names)
        chunks.append(pack_record(RecId.ops, tpl)[1])
    return msgpack.packb(chunks, use_bin_type=True)
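For reference, a minimal sketch of how the bytes returned by get_historic could be decoded on the receiving side. decode_historic is hypothetical; it only assumes that unpack_record mirrors pack_record the way test_packing in Example #6 exercises it:

import msgpack

def decode_historic(blob: bytes) -> None:
    # get_historic() msgpack-packs one pack_record() payload per OSD
    for chunk in msgpack.unpackb(blob, raw=False):
        # unpack_record(RecId.ops, ...) yields dicts, per test_packing below
        for op in unpack_record(RecId.ops, chunk):
            print(op["osd_id"], op["time"], op["obj_name"])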
Example #3
    async def cycle(self) -> None:
        logger.debug(f"Run cluster info: {self.cfg.extra_cmd}")
        output = {'time': int(time.time())}

        for cmd in self.cfg.extra_cmd:
            try:
                output[cmd] = await self.cli.run_no_ceph(cmd)
            except subprocess.SubprocessError as exc:
                logger.error("Cmd failed: %s", exc)

        if len(output) > 1:  # at least one command succeeded ('time' is always present)
            rec = pack_record(RecId.cluster_info, output)
            if rec:
                self.record_file.write_record(*rec)
                logger.debug(f"Cluster info provides {b2ssize(len(rec[1]))}B")
Example #4
    async def cycle(self) -> None:
        total_size = 0
        async for rec_id, data in self.dump_historic():
            if self.record_file:
                if rec_id is RecId.ops:
                    # ops records carry the field set along with the data tuple
                    data = *data, HistoricFields.compact
                rec = pack_record(rec_id, data)
                if rec:
                    total_size += len(rec[1])
                    self.record_file.write_record(*rec, flush=False)

        if self.record_file:
            self.record_file.flush()

        logger.debug(f"Dump osd provides {b2ssize(total_size)}B")
Example #5
    def start(self) -> None:
        params = {
            'hostname': socket.gethostname(),
            'config': self.cfg.__dict__
        }
        self.record_file.write_record(*pack_record(RecId.params, params))

        assert not self.active_loops_tasks
        self.historic = DumpHistoric(self.cli, self.cfg, self.record_file)

        recorders = [(self.cfg.duration, self.historic)]  # (interval, recorder) pairs

        info_dumper = InfoDumper(self.cli, self.cfg, self.record_file)
        pg_dumper = DumpPGDump(self.cli, self.cfg, self.record_file)
        recorders.extend([(self.cfg.extra_dump_timeout, info_dumper),
                          (self.cfg.pg_dump_timeout, pg_dumper)])
        self.any_failed = False
        self.active_loops_tasks = {
            asyncio.create_task(self.loop(timeout, recorder))
            for timeout, recorder in recorders
        }
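The loop() coroutine started above is not shown in these examples. A hypothetical sketch of the wiring it implies (run each recorder's cycle() forever at its configured interval, noting failures in any_failed), purely to illustrate how the pieces fit:

    async def loop(self, timeout: float, recorder) -> None:
        # hypothetical sketch, not the original implementation
        while True:
            try:
                await recorder.cycle()
            except Exception:
                logger.exception("Recorder %s failed", recorder.__class__.__name__)
                self.any_failed = True
            await asyncio.sleep(timeout)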
Example #6
def test_packing():
    ops = []
    # fc is assumed to be a test fixture holding raw "dump_historic_ops" JSON
    for op_j in json.loads(fc)['ops']:
        _, op = CephOp.parse_op(op_j)
        if op:
            op.pack_pool_id = op.pool_id
            ops.append(op)

    data = pack_historic(ops,
                         fields=HistoricFields.compact | HistoricFields.with_names,
                         extra=(1, 2))
    extra, itr = unpack_historic(data)
    assert extra == [1, 2]
    uops = list(itr)

    assert len(ops) == len(uops)

    ops.sort(key=lambda x: x.obj_name)
    uops.sort(key=lambda x: x["obj_name"])

    for op, uop in zip(ops, uops):
        assert op.pg == uop["pg"]
        assert op.pack_pool_id == uop["pack_pool_id"]
        assert op.obj_name == uop["obj_name"]

    tpl = (1, 2, ops, HistoricFields.compact | HistoricFields.with_names)
    _, data2 = pack_record(RecId.ops, tpl)
    uops2 = unpack_record(RecId.ops, data2)

    assert len(ops) == len(uops2)

    uops2.sort(key=lambda x: x["obj_name"])

    for op, uop2 in zip(ops, uops2):
        assert op.pg == uop2["pg"]
        assert op.pack_pool_id == uop2["pack_pool_id"]
        assert op.obj_name == uop2["obj_name"]
        assert 1 == uop2["osd_id"]
        assert 2 == uop2["time"]