Exemplo n.º 1
0
 def testLocalhost(self, _get_local_ip):
     """Resolving "localhost" should return the machine's own LAN IP.

     `_get_local_ip` is the patched-in local-IP lookup; we stub it to a
     fixed address and check `resolve_to_ip` surfaces it as an Ok result.
     """
     _get_local_ip.return_value = Ok("192.168.1.2")
     result = lib.resolve_to_ip("localhost")
     self.assertTrue(result.is_ok())
     # assertEqual gives a useful diff on failure, unlike assertTrue(a == b).
     self.assertEqual(result.value, ip_address("192.168.1.2"))
Exemplo n.º 2
0
async def downloadFile(path, session):
    """GET *path* with *session* and return the response body wrapped in Ok.

    Raises (via raise_for_status) if the server responds with an error code.
    """
    response = await session.request(method="GET", url=path)
    response.raise_for_status()
    body = await response.text()
    return Ok(body)
 def check_safety(cls, block, state, validator_set) -> Result[Error, bool]:
     """Report whether *block* is safe to accept given *state* and *validator_set*.

     NOTE(review): currently a stub that unconditionally reports safe —
     the real safety check is still to be implemented (see TODO).
     """
     # TODO: implement
     return Ok(True)
Exemplo n.º 4
0
 def TotalAllocatedMB(self) -> FailableFloat:
     """Total MB allocated across GCs (sum of per-GC AllocedSinceLastGCMB)."""
     def alloced_mb(gc):
         return Ok(gc.AllocedSinceLastGCMB)
     return self.Sum(alloced_mb)
Exemplo n.º 5
0
def format_block_device(brick_device: BrickDevice,
                        filesystem: Filesystem) -> AsyncInit:
    """
    Format a block device with a given filesystem asynchronously.
    :param brick_device: BrickDevice.
    :param filesystem: Filesystem.
    :return: AsyncInit.  Starts formatting immediately and gives back a handle
    to access it.  (The handle is wrapped in Ok; an unrecognized filesystem
    type yields Err.)
    """
    device = brick_device.dev_path
    if type(filesystem) is Btrfs:
        filesystem = typing.cast(Btrfs, filesystem)
        arg_list = [
            "mkfs.btrfs", "-m", filesystem.metadata_profile, "-l",
            filesystem.leaf_size, "-n", filesystem.node_size, device
        ]
        # Check if mkfs.btrfs is installed
        if not os.path.exists("/sbin/mkfs.btrfs"):
            log("Installing btrfs utils")
            apt_install(["btrfs-tools"])

        return Ok(
            AsyncInit(format_child=subprocess.Popen(arg_list),
                      post_setup_commands=[],
                      device=brick_device))
    elif type(filesystem) is Xfs:
        filesystem = typing.cast(Xfs, filesystem)
        arg_list = ["/sbin/mkfs.xfs"]
        if filesystem.inode_size is not None:
            arg_list.append("-i")
            # BUG FIX: was "size{}=" which produces e.g. "size512=" — an
            # invalid mkfs.xfs option value.
            arg_list.append("size={}".format(filesystem.inode_size))

        if filesystem.force:
            arg_list.append("-f")

        if filesystem.block_size is not None:
            block_size = filesystem.block_size
            if not power_of_2(block_size):
                log("block_size {} is not a power of two. Rounding up to "
                    "nearest power of 2".format(block_size))
                block_size = next_power_of_two(block_size)

            arg_list.append("-b")
            # BUG FIX: was filesystem.block_size, discarding the rounded
            # value computed just above.
            arg_list.append("size={}".format(block_size))

        if filesystem.stripe_size is not None and filesystem.stripe_width \
                is not None:
            arg_list.append("-d")
            arg_list.append("su={}".format(filesystem.stripe_size))
            arg_list.append("sw={}".format(filesystem.stripe_width))
        arg_list.append(device)

        # Check if mkfs.xfs is installed
        if not os.path.exists("/sbin/mkfs.xfs"):
            log("Installing xfs utils")
            apt_install(["xfsprogs"])

        format_handle = subprocess.Popen(arg_list)
        return Ok(
            AsyncInit(format_child=format_handle,
                      post_setup_commands=[],
                      device=brick_device))

    elif type(filesystem) is Zfs:
        filesystem = typing.cast(Zfs, filesystem)
        # Check if zfs is installed
        if not os.path.exists("/sbin/zfs"):
            log("Installing zfs utils")
            apt_install(["zfsutils-linux"])

        base_name = device.basename()
        # Mount at /mnt/dev_name
        post_setup_commands = []
        arg_list = [
            "/sbin/zpool", "create", "-f", "-m", "/mnt/{}".format(base_name),
            base_name, device
        ]
        zpool_create = subprocess.Popen(arg_list)

        if filesystem.block_size is not None:
            # Applied only after a successful zpool creation, via the
            # post-setup command list.
            block_size = filesystem.block_size
            # BUG FIX: the rounding log was emitted (and the value rounded)
            # unconditionally; guard it like the Xfs branch so powers of two
            # pass through untouched and unlogged.
            if not power_of_2(block_size):
                log("block_size {} is not a power of two. Rounding up to "
                    "nearest power of 2".format(block_size))
                block_size = next_power_of_two(block_size)
            post_setup_commands.append(
                ("/sbin/zfs",
                 ["set", "recordsize={}".format(block_size), base_name]))
        if filesystem.compression is not None:
            # NOTE(review): any non-None compression setting turns
            # compression on; the specific algorithm is ignored — confirm
            # this is intended.
            post_setup_commands.append(
                ("/sbin/zfs", ["set", "compression=on", base_name]))

        post_setup_commands.append(
            ("/sbin/zfs", ["set", "acltype=posixacl", base_name]))
        post_setup_commands.append(
            ("/sbin/zfs", ["set", "atime=off", base_name]))
        return Ok(
            AsyncInit(format_child=zpool_create,
                      post_setup_commands=post_setup_commands,
                      device=brick_device))

    elif type(filesystem) is Ext4:
        filesystem = typing.cast(Ext4, filesystem)
        arg_list = ["mkfs.ext4", "-m", filesystem.reserved_blocks_percentage]
        if filesystem.inode_size is not None:
            arg_list.append("-I")
            arg_list.append(filesystem.inode_size)

        if filesystem.stride is not None:
            arg_list.append("-E")
            arg_list.append("stride={}".format(filesystem.stride))

        if filesystem.stripe_width is not None:
            arg_list.append("-E")
            arg_list.append("stripe_width={}".format(filesystem.stripe_width))

        arg_list.append(device)

        return Ok(
            AsyncInit(format_child=subprocess.Popen(arg_list),
                      post_setup_commands=[],
                      device=brick_device))
    else:
        # Previously fell off the end and implicitly returned None; make the
        # unsupported case an explicit error the caller can handle.
        return Err("Unsupported filesystem type: {}".format(type(filesystem)))
        MARK_ROOT_TIME_GETTERS,
        MARK_ROOT_PROMOTED_GETTERS,
        ALL_GETTERS_FROM_JOIN_ANALYSIS,
    )

# Registry mapping each SingleHeapMetric to the getter that computes it for
# one heap: built from the per-heap-history getters, their per-generation
# variants, and one hand-written metric defined inline below.
SINGLE_HEAP_METRIC_GETTERS: Mapping[
    SingleHeapMetric, PerHeapGetter] = combine_mappings(
        _PER_HEAP_HISTORY_GETTERS,
        _get_per_heap_history_getters_for_gens(),
        {
            SingleHeapMetric(
                "TotalStolenMSec",
                doc="Sum of each time the processor was stolen for this heap's thread.",
            ):
            lambda hp:
            # TODO: use new join analysis instead
            # Sums Item1 of each entry in the lost-CPU breakdown for this
            # heap's GC; unwrap asserts server_gc_history is present.
            Ok(
                sum(kv.Value.Item1
                    for kv in hp.clr.Analysis.GetLostCpuBreakdownForHeap(
                        hp.gc.trace_gc, unwrap(hp.server_gc_history))))
        },
    )

# Every SingleHeapMetric that has a registered getter (iterating a mapping
# yields its keys).
ALL_SINGLE_HEAP_METRICS: Sequence[SingleHeapMetric] = tuple(
    SINGLE_HEAP_METRIC_GETTERS)


def get_single_heap_stat(hp: ProcessedHeap,
                         metric: SingleHeapMetric) -> FailableValue:
    """Compute *metric* for the single heap *hp* via the getter registry.

    Raises KeyError if *metric* has no registered getter.
    """
    getter = SINGLE_HEAP_METRIC_GETTERS[metric]
    return getter(hp)
Exemplo n.º 7
0
 def HeapSizePeakMB_Max(self) -> FailableFloat:
     """Largest per-GC peak heap size in MB, as a failable float."""
     def peak_mb(gc):
         return Ok(gc.HeapSizePeakMB)
     return self.Max(peak_mb)
Exemplo n.º 8
0
def _stats_list_for_proc(proc: ProcessedTrace, run_metrics: RunMetrics) -> FailableValues:
    """Compute every metric in *run_metrics* for *proc*.

    Fails with Err when the trace recorded no GCs at all.
    """
    if is_empty(proc.gcs):
        return Err("no gcs")
    stats = [stat_for_proc(proc, metric) for metric in run_metrics]
    return Ok(stats)
Exemplo n.º 9
0
 def handle_success(value: float) -> FailableFloat:
     """Weight *value* by the scorer, relative to par when par is set.

     When every par is set, scores the signed fractional difference from
     par; otherwise weights the raw value directly.
     """
     if not all_par_set:
         return Ok(value * scorer.weight)
     diff_fraction = get_signed_diff_fraction(value, non_null(scorer.par))
     return Ok(diff_fraction * scorer.weight)
Exemplo n.º 10
0
    fn_of_property,
    FailableInt,
    NamedRunMetric,
    ProcessedGC,
    ProcessInfo,
    ProcessedTrace,
    RunMetric,
    RunMetrics,
    run_metric_must_exist_for_name,
    ScoreRunMetric,
    FailableValue,
    FailableValues,
)

_GCPERFSIM_RESULT_GETTERS: Mapping[NamedRunMetric, Callable[[GCPerfSimResult], FailableValue]] = {
    NamedRunMetric("InternalSecondsTaken", is_from_test_status=True): lambda g: Ok(g.seconds_taken),
    NamedRunMetric("FinalHeapSizeGB", is_from_test_status=True): lambda g: Err(
        "final_heap_size_bytes was not in test result, this can happen on runtimes < 3.0"
    )
    if g.final_heap_size_bytes is None
    else Ok(bytes_to_gb(g.final_heap_size_bytes)),
    NamedRunMetric("FinalFragmentationGB", is_from_test_status=True): lambda g: Err(
        "final_fragmentation_bytes was not in test result, this can happen on runtimes < 3.0"
    )
    if g.final_fragmentation_bytes is None
    else Ok(bytes_to_gb(g.final_fragmentation_bytes)),
    NamedRunMetric("FinalTotalMemoryGB", is_from_test_status=True): lambda g: Ok(
        bytes_to_gb(g.final_total_memory_bytes)
    ),
    NamedRunMetric("Gen0CollectionCount", is_from_test_status=True): lambda g: Ok(
        g.collection_counts[0]
def _bytes_allocated_between_gcs(prev: Optional[ProcessedGC], cur: ProcessedGC,
                                 gen: Gens) -> Result[str, int]:
    """Bytes allocated in *gen* between the end of *prev* and the start of *cur*.

    With no preceding GC the baseline is taken as zero bytes.  Fails if
    either byte count is unavailable.
    """
    baseline = Ok(0) if prev is None else prev.total_bytes_after(gen)
    current = cur.total_bytes_before(gen)
    # current - baseline, but only when both sides are Ok.
    return map_ok_2(current, baseline, sub)
    GC_REASON_METRICS,
    GC_HEAP_COMPACT_REASON_METRICS,
    GC_HEAP_EXPAND_REASON_METRICS,
    {
        SingleGCMetric("IsGen0", type=MetricType.bool):
        ok_of_property(ProcessedGC.IsGen0),
        SingleGCMetric("IsGen1", type=MetricType.bool):
        ok_of_property(ProcessedGC.IsGen1),
        SingleGCMetric("IsGen2", type=MetricType.bool):
        ok_of_property(ProcessedGC.IsGen2),
        SingleGCMetric("IsBlockingGen2", type=MetricType.bool):
        ok_of_property(ProcessedGC.IsBlockingGen2),
        SingleGCMetric("IsEphemeral", type=MetricType.bool):
        ok_of_property(ProcessedGC.IsEphemeral),
        SingleGCMetric("IsNonBackground", type=MetricType.bool):
        lambda gc: Ok(not gc.IsBackground),
    },
)
# All single-GC metrics registered as boolean, in getter-declaration order.
GC_BOOLEAN_METRICS = tuple(GC_BOOLEAN_METRICS_GETTERS.keys())
# Sanity check at import time: every metric in the boolean registry must
# actually carry the bool metric type.
for bool_metric in GC_BOOLEAN_METRICS:
    assert bool_metric.type == MetricType.bool, f"Metric {bool_metric} should be type bool"


def _get_pinned_object_percentage(gc: ProcessedGC) -> FailableFloat:
    """Return the GC's pinned-object percentage, or Err when unavailable."""
    percentage = gc.PinnedObjectPercentage
    # A present value must be a sane percentage.
    assert percentage is None or (0 <= percentage <= 100)
    return option_to_result(percentage, lambda: "GetPinnedObjectPercentage() failed")


def _get_total_gc_time(gc: ProcessedGC) -> FailableFloat:
    t = gc.TotalGCTime