Example No. 1
from typing import Optional

from pandas import DataFrame


def guesstimate(
    task: list[str],
    best: list[int],
    expected: list[int],
    worst: list[int],
    simulations: int,
    random_seed: Optional[int] = None,
) -> DataFrame:
    """Estimate the duration of a project based on three-point estimation of breakdown
    tasks or milestones using a Monte Carlo simulation.
    The project duration is measured in the same unit used to estimate the duration of
    the tasks or milestones. It can be a unit of time (e.g. days, weeks) or story
    points.
    Eeach simulation is the sum of the duration of each tasks picked at random from a
    modified-PERT distribution computed using the best-case, expected and worst-case
    estimates provided.
    In order to make test reproducible, an optional parameter `random_state` has been
    introduced.
    """
    simulate_project = generate_project_simulator(
        task=task,
        best=best,
        expected=expected,
        worst=worst,
        random_seed=random_seed,
    )
    duration = [simulate_project() for _ in range(simulations)]
    return compute_stats(duration)
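The per-task sampling above is delegated to `generate_project_simulator`, whose body is not shown here. As a rough illustration of what a modified-PERT draw could look like, here is a minimal sketch using the common beta-distribution parameterization with shape parameter lambda = 4; the helper name `sample_pert` and the use of `numpy` are assumptions for illustration, not dumbpm's shown API.

import numpy


def sample_pert(
    best: float, expected: float, worst: float, rng: numpy.random.Generator
) -> float:
    """Draw one duration from a modified-PERT distribution.

    The three-point estimates are mapped onto a scaled Beta distribution
    whose mode is the expected value.
    """
    if best == worst:
        # Degenerate case: the distribution collapses to a single point.
        return float(best)
    spread = worst - best
    alpha = 1 + 4 * (expected - best) / spread
    beta = 1 + 4 * (worst - expected) / spread
    return best + rng.beta(alpha, beta) * spread


# One simulated project duration is then the sum of one draw per task:
rng = numpy.random.default_rng(1234)
duration = sum(
    sample_pert(b, e, w, rng)
    for b, e, w in zip([5, 6, 1], [10, 12, 13], [20, 40, 24])
)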
Example No. 2
def test_cmd_guesstimate() -> None:
    parser = create_parser()
    args = parser.parse_args(
        ["guesstimate", "tests/guess/csvs/tasks.csv", "--simulations", "10"]
    )
    actual = cmd_guesstimate(args, random_seed=1234)
    expected = compute_stats([88, 92, 82, 93, 80, 97, 84, 95, 102, 86])
    pandas.testing.assert_frame_equal(expected, actual)
Example No. 3
def test_guesstimate() -> None:
    actual = guesstimate(
        task=["Task A", "Task B", "Task C", "Task D", "Task E", "Task F"],
        best=[5, 6, 1, 10, 5, 12],
        expected=[10, 12, 13, 13, 7, 25],
        worst=[20, 40, 24, 15, 12, 34],
        simulations=10,
        random_seed=1234,
    )
    expected = compute_stats([88, 92, 82, 93, 80, 97, 84, 95, 102, 86])
    pandas.testing.assert_frame_equal(expected, actual)
Example No. 4
def test_guesstimate_best_equal_worst() -> None:
    actual = guesstimate(
        task=["Task A", "Task B"],
        best=[5, 6],
        expected=[5, 6],
        worst=[5, 6],
        simulations=10,
        random_seed=1234,
    )
    expected = compute_stats([11, 11, 11, 11, 11, 11, 11, 11, 11, 11])
    pandas.testing.assert_frame_equal(expected, actual)
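The test above exercises the degenerate case: when the best-case, expected and worst-case estimates coincide, each task's PERT distribution collapses to a single point, so every one of the ten simulations returns the deterministic sum 5 + 6 = 11.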
Example No. 5
def test_estimate_normal_no_changes() -> None:
    actual = estimate(
        scope=100,
        velocity=[5.0, 6.0, 10.0],
        change=[0, 0, 0],
        normal=True,
        simulations=10,
        random_seed=1234,
    )
    expected = compute_stats([14, 14, 15, 13, 12, 13, 15, 14, 15, 13])
    pandas.testing.assert_frame_equal(expected, actual)
Example No. 6
def test_estimate_normal() -> None:
    actual = estimate(
        scope=100,
        velocity=[5.0, 6.0, 10.0],
        change=[1.0, 2.0, 3.0],
        normal=True,
        simulations=10,
        random_seed=1234,
    )
    expected = compute_stats([21, 18, 17, 22, 19, 20, 23, 18, 19, 18])
    pandas.testing.assert_frame_equal(expected, actual)
Example No. 7
def test_cmd_estimate() -> None:
    parser = create_parser()
    args = parser.parse_args(
        ["estimate", "tests/est/csvs/sprints.csv", "100", "--simulations", "10"]
    )
    actual = cmd_estimate(args, random_seed=1234)
    expected = compute_stats([6, 7, 8, 8, 8, 7, 9, 8, 6, 7])
    pandas.testing.assert_frame_equal(expected, actual)
    args = parser.parse_args(
        [
            "estimate",
            "tests/est/csvs/sprints.csv",
            "100",
            "--normal",
            "--simulations",
            "10",
        ]
    )
    actual = cmd_estimate(args, random_seed=1234)
    expected = compute_stats([7, 7, 7, 8, 8, 7, 8, 7, 7, 7])
    pandas.testing.assert_frame_equal(expected, actual)
Example No. 8
File: est.py Project: poros/dumbpm
from typing import Optional

from pandas import DataFrame


def estimate(
    scope: int,
    velocity: list[float],
    change: list[float],
    normal: bool,
    simulations: int,
    random_seed: Optional[int] = None,
) -> DataFrame:
    """Estimate the duration of a project based on past sprints velocity and scope
    changes using a Monte Carlo simulation.
    The duration estimate is measured in number of sprints.
    Every simulations is composed by several iterations, each of which represents a
    sprint.
    By default, velocity and scope change for each iteration are picked at random
    following a uniform probability distribution from the provided historical data.
    If `normal` is True, the input will be modelled as normal distribution from which
    velocity and scope changes will be derived.
    In order to make test reproducible, an optional parameter `random_state` has been
    introduced.
    """
    duration = []
    max_sprints = compute_max_sprints(scope=scope, velocity=velocity, change=change)
    simulate_sprints = generate_sprints_simulator(
        velocity=velocity,
        change=change,
        max_sprints=max_sprints,
        normal=normal,
        random_seed=random_seed,
    )
    for _ in range(simulations):
        rn_velocity, rn_change = simulate_sprints()
        duration.append(
            compute_duration(
                scope=scope,
                velocity=rn_velocity,
                change=rn_change,
            )
        )
    return compute_stats(duration)
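Neither the sprint sampler returned by `generate_sprints_simulator` nor `compute_duration` is shown above, and `compute_max_sprints` presumably caps runaway simulations in which sampled scope changes outpace velocity. A minimal sketch of the loop these names imply, assuming uniform resampling of the historical data (the function `simulate_one` and the inline burn-down arithmetic are illustrative, not dumbpm's actual internals):

import numpy


def simulate_one(
    scope: float,
    velocity: list[float],
    change: list[float],
    max_sprints: int,
    rng: numpy.random.Generator,
) -> int:
    """Count sprints until the (possibly growing) scope is burned down."""
    remaining = scope
    for sprint in range(1, max_sprints + 1):
        # Uniform resampling: each sprint reuses one historical observation.
        remaining += rng.choice(change)    # scope added during the sprint
        remaining -= rng.choice(velocity)  # work completed during the sprint
        if remaining <= 0:
            return sprint
    return max_sprints


rng = numpy.random.default_rng(1234)
durations = [
    simulate_one(100, [5.0, 6.0, 10.0], [0.0, 0.0, 0.0], 200, rng)
    for _ in range(10)
]

With `normal=True`, one would instead fit a mean and standard deviation to each history and draw from `rng.normal(mean, std)`; that, too, is an assumption about the implementation rather than a fact taken from the source.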