Example #1
 def test_datetime_to_epoch(self):
     """
     `datetime_to_epoch` returns EPOCH seconds for given datetime
     """
     self.assertEqual(
         timestamp.datetime_to_epoch(datetime(1970, 1, 1, 0, 0, 0)), 0.0)
     self.assertEqual(
         timestamp.datetime_to_epoch(datetime(2015, 5, 1, 4, 51, 12,
                                              78580)), 1430455872.078580)
Example #2
 def test_datetime_to_epoch(self):
     """
     `datetime_to_epoch` returns EPOCH seconds for given datetime
     """
     self.assertEqual(
         timestamp.datetime_to_epoch(datetime(1970, 1, 1, 0, 0, 0)),
         0.0)
     self.assertEqual(
         timestamp.datetime_to_epoch(
             datetime(2015, 5, 1, 4, 51, 12, 78580)),
         1430455872.078580)
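
The two assertions above pin down the expected behaviour: a naive UTC datetime maps to float seconds since 1970-01-01, with the microsecond field preserved as the fractional part. A minimal sketch of an implementation that satisfies both assertions (an assumption for illustration, not necessarily otter's actual timestamp.datetime_to_epoch):

import calendar
from datetime import datetime

def datetime_to_epoch(dt):
    # Naive UTC datetime -> float seconds since the epoch; timegm() treats the
    # time tuple as UTC, and the microseconds become the fractional part.
    return calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6

# datetime_to_epoch(datetime(1970, 1, 1)) == 0.0
# datetime_to_epoch(datetime(2015, 5, 1, 4, 51, 12, 78580)) == 1430455872.07858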
Example #3
def group_steps(group):
    """
    Return Effect of list of steps that would be performed on the group
    if convergence is triggered on it with desired=actual
    """
    now_dt = yield Effect(Func(datetime.utcnow))
    all_data_eff = convergence_exec_data(
        group["tenantId"], group["groupId"], now_dt, get_executor)
    all_data = yield Effect(TenantScope(all_data_eff, group["tenantId"]))
    (executor, scaling_group, group_state, desired_group_state,
     resources) = all_data
    desired_group_state.desired = len(resources['servers'])
    steps = executor.plan(desired_group_state, datetime_to_epoch(now_dt),
                          3600, **resources)
    yield do_return(steps)
Example #4
def group_steps(group):
    """
    Return Effect of list of steps that would be performed on the group
    if convergence is triggered on it with desired=actual
    """
    now_dt = yield Effect(Func(datetime.utcnow))
    all_data_eff = convergence_exec_data(group["tenantId"], group["groupId"],
                                         now_dt, get_executor)
    all_data = yield Effect(TenantScope(all_data_eff, group["tenantId"]))
    (executor, scaling_group, group_state, desired_group_state,
     resources) = all_data
    desired_group_state.desired = len(resources['servers'])
    steps = executor.plan(desired_group_state, datetime_to_epoch(now_dt), 3600,
                          **resources)
    yield do_return(steps)
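
group_steps is written in the effect library's generator style (presumably decorated with effect.do's @do, which is not shown in the excerpt): each yield Effect(...) suspends until a dispatcher performs the intent (Func, TenantScope) and sends the result back in, and yield do_return(...) ends the generator with its result. A minimal, self-contained sketch of that pattern using effect's built-in Func intent (plan_sum and its lambdas are illustrative only, not otter code):

from effect import Effect, Func, base_dispatcher, sync_perform
from effect.do import do, do_return

@do
def plan_sum():
    # Each yielded Effect is performed by the dispatcher and its result is
    # sent back into the generator, as in group_steps above.
    a = yield Effect(Func(lambda: 20))
    b = yield Effect(Func(lambda: 22))
    yield do_return(a + b)

# base_dispatcher knows how to perform core intents such as Func.
print(sync_perform(base_dispatcher, plan_sum()))  # prints 42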
Example #5
def group_steps(group):
    """
    Return Effect of list of steps that would be performed on the group
    if convergence is triggered on it with desired=actual. Also returns
    current delta of desired and actual
    """
    now_dt = yield Effect(Func(datetime.utcnow))
    all_data_eff = convergence_exec_data(
        group["tenantId"], group["groupId"], now_dt, get_executor)
    try:
        all_data = yield Effect(TenantScope(all_data_eff, group["tenantId"]))
    except Exception as e:
        yield do_return((e, 0))
    (executor, scaling_group, group_state, desired_group_state,
     resources) = all_data
    delta = desired_group_state.capacity - len(resources['servers'])
    desired_group_state.capacity = len(resources['servers'])
    steps = executor.plan(desired_group_state, datetime_to_epoch(now_dt),
                          3600, {}, **resources)
    yield do_return((steps, delta))
Example #6
def execute_convergence(tenant_id,
                        group_id,
                        build_timeout,
                        waiting,
                        limited_retry_iterations,
                        step_limits,
                        get_executor=get_executor):
    """
    Gather data, plan a convergence, save active and pending servers to the
    group state, and then execute the convergence.

    :param str tenant_id: the tenant ID for the group to converge
    :param str group_id: the ID of the group to be converged
    :param number build_timeout: number of seconds to wait for servers in the
        building state before they are timed out and deleted
    :param Reference waiting: pmap of waiting groups
    :param int limited_retry_iterations: number of iterations to wait for
        LIMITED_RETRY steps
    :param dict step_limits: Mapping of step class to number of executions
        allowed in a convergence cycle
    :param callable get_executor: like :func:`get_executor`, used for testing.

    :return: Effect of :obj:`ConvergenceIterationStatus`.
    :raise: :obj:`NoSuchScalingGroupError` if the group doesn't exist.
    """
    clean_waiting = _clean_waiting(waiting, group_id)
    # Gather data
    yield msg("begin-convergence")
    now_dt = yield Effect(Func(datetime.utcnow))
    all_data = yield msg_with_time(
        "gather-convergence-data",
        convergence_exec_data(tenant_id,
                              group_id,
                              now_dt,
                              get_executor=get_executor))
    (executor, scaling_group, group_state, desired_group_state,
     resources) = all_data

    # prepare plan
    steps = executor.plan(desired_group_state, datetime_to_epoch(now_dt),
                          build_timeout, step_limits, **resources)
    yield log_steps(steps)

    # Execute plan
    yield msg('execute-convergence',
              steps=steps,
              now=now_dt,
              desired=desired_group_state,
              **resources)
    worst_status, reasons = yield _execute_steps(steps)

    if worst_status != StepResult.LIMITED_RETRY:
        # If we're not waiting any more, there's no point in keeping track of
        # the group
        yield clean_waiting

    # Handle the status from execution
    if worst_status == StepResult.SUCCESS:
        result = yield convergence_succeeded(executor, scaling_group,
                                             group_state, resources, now_dt)
    elif worst_status == StepResult.FAILURE:
        result = yield convergence_failed(scaling_group, reasons)
    elif worst_status is StepResult.LIMITED_RETRY:
        # We allow further iterations to proceed as long as we haven't been
        # waiting for a LIMITED_RETRY for N consecutive iterations.
        current_iterations = (yield waiting.read()).get(group_id, 0)
        if current_iterations > limited_retry_iterations:
            yield msg('converge-limited-retry-too-long')
            yield clean_waiting
            # Prefix "Timed out" to all limited retry reasons
            result = yield convergence_failed(scaling_group, reasons, True)
        else:
            yield waiting.modify(lambda group_iterations: group_iterations.set(
                group_id, current_iterations + 1))
            result = ConvergenceIterationStatus.Continue()
    else:
        result = ConvergenceIterationStatus.Continue()
    yield do_return(result)
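
The LIMITED_RETRY branch only keeps per-group bookkeeping: it reads the group's counter from the waiting map, gives up once the counter exceeds limited_retry_iterations, and otherwise bumps the counter and continues iterating. A standalone sketch of that bookkeeping on a persistent map (assuming pyrsistent's pmap for the waiting state; the snippet is illustrative, not otter code):

from pyrsistent import pmap

waiting_state = pmap()          # group_id -> consecutive LIMITED_RETRY iterations
group_id = "group-1"
limited_retry_iterations = 3

current_iterations = waiting_state.get(group_id, 0)
if current_iterations > limited_retry_iterations:
    print("converge-limited-retry-too-long: fail the group")
else:
    # same shape as waiting.modify(lambda g: g.set(group_id, current + 1)) above
    waiting_state = waiting_state.set(group_id, current_iterations + 1)
    print(dict(waiting_state))  # {'group-1': 1}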
Example #7
File: service.py Project: rackerlabs/otter
def execute_convergence(tenant_id, group_id, build_timeout, waiting,
                        limited_retry_iterations, step_limits,
                        get_executor=get_executor):
    """
    Gather data, plan a convergence, save active and pending servers to the
    group state, and then execute the convergence.

    :param str tenant_id: the tenant ID for the group to converge
    :param str group_id: the ID of the group to be converged
    :param number build_timeout: number of seconds to wait for servers in the
        building state before they are timed out and deleted
    :param Reference waiting: pmap of waiting groups
    :param int limited_retry_iterations: number of iterations to wait for
        LIMITED_RETRY steps
    :param dict step_limits: Mapping of step class to number of executions
        allowed in a convergence cycle
    :param callable get_executor: like :func:`get_executor`, used for testing.

    :return: Effect of :obj:`ConvergenceIterationStatus`.
    :raise: :obj:`NoSuchScalingGroupError` if the group doesn't exist.
    """
    clean_waiting = _clean_waiting(waiting, group_id)

    # Begin convergence by updating group status to ACTIVE
    yield msg("begin-convergence")
    try:
        yield Effect(LoadAndUpdateGroupStatus(tenant_id, group_id,
                                              ScalingGroupStatus.ACTIVE))
    except NoSuchScalingGroupError:
        # Expected for DELETING group. Ignore.
        pass

    # Gather data
    now_dt = yield Effect(Func(datetime.utcnow))
    try:
        all_data = yield msg_with_time(
            "gather-convergence-data",
            convergence_exec_data(tenant_id, group_id, now_dt,
                                  get_executor=get_executor))
        (executor, scaling_group, group_state, desired_group_state,
         resources) = all_data
    except FirstError as fe:
        if fe.exc_info[0] is NoSuchEndpoint:
            result = yield convergence_failed(
                tenant_id, group_id, [ErrorReason.Exception(fe.exc_info)])
            yield do_return(result)
        raise fe

    # prepare plan
    steps = executor.plan(desired_group_state, datetime_to_epoch(now_dt),
                          build_timeout, step_limits, **resources)
    yield log_steps(steps)

    # Execute plan
    yield msg('execute-convergence',
              steps=steps, now=now_dt, desired=desired_group_state,
              **resources)
    worst_status, reasons = yield _execute_steps(steps)

    if worst_status != StepResult.LIMITED_RETRY:
        # If we're not waiting any more, there's no point in keeping track of
        # the group
        yield clean_waiting

    # Handle the status from execution
    if worst_status == StepResult.SUCCESS:
        result = yield convergence_succeeded(
            executor, scaling_group, group_state, resources)
    elif worst_status == StepResult.FAILURE:
        result = yield convergence_failed(tenant_id, group_id, reasons)
    elif worst_status is StepResult.LIMITED_RETRY:
        # We allow further iterations to proceed as long as we haven't been
        # waiting for a LIMITED_RETRY for N consecutive iterations.
        current_iterations = (yield waiting.read()).get(group_id, 0)
        if current_iterations > limited_retry_iterations:
            yield msg('converge-limited-retry-too-long')
            yield clean_waiting
            # Prefix "Timed out" to all limited retry reasons
            result = yield convergence_failed(tenant_id, group_id, reasons,
                                              True)
        else:
            yield waiting.modify(
                lambda group_iterations:
                    group_iterations.set(group_id, current_iterations + 1))
            result = ConvergenceIterationStatus.Continue()
    else:
        result = ConvergenceIterationStatus.Continue()
    yield do_return(result)
Example #8
File: service.py Project: glyph/otter
def execute_convergence(tenant_id, group_id, build_timeout, waiting,
                        limited_retry_iterations, get_executor=get_executor):
    """
    Gather data, plan a convergence, save active and pending servers to the
    group state, and then execute the convergence.

    :param str tenant_id: the tenant ID for the group to converge
    :param str group_id: the ID of the group to be converged
    :param number build_timeout: number of seconds to wait for servers in the
        building state before they are timed out and deleted
    :param Reference waiting: pmap of waiting groups
    :param int limited_retry_iterations: number of iterations to wait for
        LIMITED_RETRY steps
    :param callable get_executor: like :func:`get_executor`, used for testing.

    :return: Effect of :obj:`ConvergenceIterationStatus`.
    :raise: :obj:`NoSuchScalingGroupError` if the group doesn't exist.
    """
    clean_waiting = _clean_waiting(waiting, group_id)
    # Gather data
    yield msg("begin-convergence")
    now_dt = yield Effect(Func(datetime.utcnow))
    all_data = yield msg_with_time(
        "gather-convergence-data",
        convergence_exec_data(tenant_id, group_id, now_dt,
                              get_executor=get_executor))
    (executor, scaling_group, group_state, desired_group_state,
     resources) = all_data

    # prepare plan
    steps = executor.plan(desired_group_state, datetime_to_epoch(now_dt),
                          build_timeout, **resources)
    yield log_steps(steps)

    # Execute plan
    yield msg('execute-convergence',
              steps=steps, now=now_dt, desired=desired_group_state,
              **resources)
    worst_status, reasons = yield _execute_steps(steps)

    if worst_status != StepResult.LIMITED_RETRY:
        # If we're not waiting any more, there's no point in keeping track of
        # the group
        yield clean_waiting

    # Handle the status from execution
    if worst_status == StepResult.SUCCESS:
        result = yield convergence_succeeded(
            executor, scaling_group, group_state, resources, now_dt)
    elif worst_status == StepResult.FAILURE:
        result = yield convergence_failed(scaling_group, reasons)
    elif worst_status is StepResult.LIMITED_RETRY:
        # We allow further iterations to proceed as long as we haven't been
        # waiting for a LIMITED_RETRY for N consecutive iterations.
        current_iterations = (yield waiting.read()).get(group_id, 0)
        if current_iterations > limited_retry_iterations:
            yield msg('converge-limited-retry-too-long')
            yield clean_waiting
            result = yield convergence_failed(scaling_group, reasons)
        else:
            yield waiting.modify(
                lambda group_iterations:
                    group_iterations.set(group_id, current_iterations + 1))
            result = ConvergenceIterationStatus.Continue()
    else:
        result = ConvergenceIterationStatus.Continue()
    yield do_return(result)
Example #9
File: metrics.py Project: glyph/otter
def update_last_info(fname, tenants_len, time):
    """
    Write the tenant count and the epoch timestamp of `time` to `fname`,
    logging any error instead of propagating it.
    """
    eff = Effect(
        WriteFileLines(
            fname, [tenants_len, datetime_to_epoch(time)]))
    return eff.on(error=lambda e: err(e, "error updating number of tenants"))
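
WriteFileLines is an intent whose performer lives elsewhere in the project; the function above only builds the effect and attaches an error logger. A hypothetical sketch of how such an intent and a synchronous performer could be wired up with effect's TypeDispatcher (the intent definition and file format are assumptions for illustration, not otter's actual implementation):

import attr
from effect import Effect, TypeDispatcher, sync_perform, sync_performer

@attr.s
class WriteFileLines(object):
    # Hypothetical intent: write each item of lines to fname, one per line.
    fname = attr.ib()
    lines = attr.ib()

@sync_performer
def perform_write_file_lines(dispatcher, intent):
    with open(intent.fname, "w") as f:
        f.writelines("{0}\n".format(line) for line in intent.lines)

dispatcher = TypeDispatcher({WriteFileLines: perform_write_file_lines})
sync_perform(dispatcher,
             Effect(WriteFileLines("last_info.txt", [12, 1430455872.0])))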