# Example #1
# 0
def computeExecutionStatus(qFilter, startSprint, endSprint, tqParams, tqxParams, props_only=False):
    """
    Given a Q object and a pair of starting and ending sprints, calculate the
    test execution status and return the google visualization response.

    Args:
        qFilter: Django Q object restricting which Planned_Exec/Protocol rows
            are counted.
        startSprint, endSprint: sprint model instances bounding the reporting
            window (ignored when tqParams["sprint"] == "custom").
        tqParams: parsed "tq" request parameters; keys read here: "sprint",
            "start"/"end" (custom range, "%m-%d-%Y"), "responseHandler".
        tqxParams: parsed "tqx" request parameters; "reqId" is echoed back in
            the gviz response.
        props_only: when True, return the computed properties dict instead of
            an HttpResponse.

    Returns:
        The props dict when props_only is True, otherwise an HttpResponse
        containing the gviz JSON for the per-day execution-status table.
    """
    # Get start and end of 'custom sprint' or actual sprints
    if tqParams["sprint"] == "custom":
        sprintStartDate = datetime.datetime.strptime(tqParams["start"], "%m-%d-%Y").date()
        endDate = datetime.datetime.strptime(tqParams["end"], "%m-%d-%Y").date()
        startName = "Custom"
        endName = ""
    else:
        sprintStartDate = startSprint.start_date
        endDate = endSprint.end_date
        startName = startSprint.name
        endName = endSprint.name if startSprint != endSprint else ""

    # Create lookup table for when each planned execution passed
    allPlannedExecs = Planned_Exec.active.filter(qFilter)
    allLogs = (
        TestLog.objects.filter(
            testplan__in=allPlannedExecs,
            test_starttime__range=(
                sprintStartDate,
                endDate + datetime.timedelta(days=1),
            ),  # Without extra day, logs generated on last day of sprint aren't included
        )
        .order_by("test_starttime")
        .values("passed", "test_starttime", "testplan_id")
    )
    logLookupByPE = defaultdict(list)
    for log in allLogs:
        logLookupByPE[log["testplan_id"]].append({"passed": log["passed"], "datetime": log["test_starttime"].date()})

    # Get list of relevant Planned Executions, convert datetimes
    plannedExecs = allPlannedExecs.values("id", "create_date", "end_date")
    dates = defaultdict(Counter)
    # For every planned execution, count each day it was active toward its
    # status on that day ("NR" not-run until the first log, then the outcome
    # of its most recent log: "PA" passed / "FA" failed).
    for pe in plannedExecs:
        peLogs = logLookupByPE[pe["id"]]  # logs sorted by start time, oldest first
        # Active stretch runs from max(when the PE was created, sprint start)
        # to min(when the PE was end_dated, sprint end).
        pleStart = max([pe["create_date"].date(), sprintStartDate])
        # Deal with case where test executed before Planned Exec was created:
        begin = pleStart if not peLogs else min([pleStart, peLogs[0]["datetime"]])
        end = endDate if not pe["end_date"] else min([pe["end_date"].date(), endDate])

        # Iterate through all days planned exec was active during this stretch
        status = "NR"
        for date in dateRange(begin, end):
            # Consume every log recorded on this day; the day's status is the
            # outcome of the last such log, and it carries forward until the
            # next log changes it.
            while peLogs and peLogs[0]["datetime"] == date:
                status = "PA" if peLogs[0]["passed"] else "FA"
                peLogs.pop(0)
            dates[date][status] += 1

    # Build one row per day in the window: (date, fail, pass, executed, total).
    # Counters are pre-initialized so the props lookups below cannot raise
    # NameError when the window is empty (endDate before sprintStartDate).
    data = []
    fail = pass_ = executed = total = 0
    for date in dateRange(sprintStartDate, endDate):
        fail = dates[date]["FA"]
        pass_ = dates[date]["PA"]
        executed = fail + pass_
        total = executed + dates[date]["NR"]
        data.append((date, fail, pass_, executed, total))

    # if debug/execution, chop off the start and ending stretches of non-changing data
    if tqParams["sprint"] == "debugexecution":
        # Remove empty data at start, but preserve last empty day
        while data and not any(data[0][1:]):
            if len(data) > 1 and not any(data[1][1:]):
                data.pop(0)
            else:
                break
        # Remove days in the future. Use the class explicitly — the original
        # called .today() on the leaked loop variable `date`, which broke with
        # NameError whenever the loop above never ran.
        today = datetime.date.today()
        while data and data[-1][0] > today:
            data.pop(-1)

    # final count equals last daily total
    count = total

    # Done with the loop, setup the dataTable with some properties of the last
    # data point to give the current status for the bar chart/table
    # NOTE(review): the first column spec ("date", "Date") doesn't match the
    # (id, type) pattern of the number columns — presumably intended as
    # ("date", "date", "Date"); confirm against gviz_api before changing.
    description = [
        ("date", "Date"),
        ("Fail", "number"),
        ("Pass", "number"),
        ("Executed", "number"),
        ("Count", "number"),
    ]
    props = {
        "start": "%s (%s)" % (startName, sprintStartDate.strftime("%m/%d/%Y")),
        "end": "%s (%s)" % (endName, endDate.strftime("%m/%d/%Y")),
        "pass_cnt": pass_,
        "fail_cnt": fail,
        "exec_cnt": executed,
        "notrun_cnt": count - executed,
        "count": count,
    }

    # percent method deals with 0 case
    props["pass_pct"] = percent(pass_, count)
    props["fail_pct"] = percent(fail, count)
    props["exec_pct"] = percent(executed, count)
    props["notrun_pct"] = percent(count - executed, count)

    # Let's do some protocol status as well
    protocols = Protocol.active.filter(qFilter).values_list("state", "prebaselined")
    pcount = len(protocols)
    # Frequency of each (state, prebaselined) pair
    protocolStates = Counter(protocols)

    props["prot_total"] = pcount
    for state in Protocol.PROTOCOL_COMPLIANCE_STATES:
        numInState = protocolStates[(state[0], False)]
        props["prot_" + state[0] + "_cnt"] = numInState
        props["prot_" + state[0] + "_pct"] = percent(numInState, pcount)
        props["prot_" + state[0] + "_name"] = state[1]

    # Also add prebaselined states (manifested as baselined state, with prebaselined=True)
    numInState = protocolStates[(Protocol.STATE_PROTOCOL_BASELINED, True)]
    props["prot_" + "P" + Protocol.STATE_PROTOCOL_BASELINED + "_cnt"] = numInState
    props["prot_" + "P" + Protocol.STATE_PROTOCOL_BASELINED + "_pct"] = percent(numInState, pcount)
    props["prot_" + "P" + Protocol.STATE_PROTOCOL_BASELINED + "_name"] = "Prior Baseline"

    # And finally, let's do some test development status as well for semi-auto and full-auto
    # Todo This SQL could be removed since we're just filtering an existing result
    pes = Planned_Exec.active.filter(
        qFilter, case__exec_type__in=[Case.EXECUTION_SEMI_AUTO, Case.EXECUTION_FULL_AUTO]
    ).values_list("case__auto_state", "case__prebaselined")
    peCount = len(pes)
    # Frequency of each (auto_state, prebaselined) pair
    peStates = Counter(pes)

    # Total
    props["test_total"] = peCount

    for state in Case.AUTOMATION_STATES:
        numInState = peStates[(state[0], False)]
        props["test_" + state[0] + "_cnt"] = numInState
        props["test_" + state[0] + "_pct"] = percent(numInState, peCount)
        props["test_" + state[0] + "_name"] = state[1]

    # Also add prebaselined states (manifested as baselined state, with prebaselined=True)
    numInState = peStates[(Case.STATE_CASE_BASELINED, True)]
    props["test_" + "P" + Case.STATE_CASE_BASELINED + "_cnt"] = numInState
    props["test_" + "P" + Case.STATE_CASE_BASELINED + "_pct"] = percent(numInState, peCount)
    props["test_" + "P" + Case.STATE_CASE_BASELINED + "_name"] = "Prior Baseline"

    # Create the datatable and fill it with data
    dataTable = gviz_api.DataTable(description, custom_properties=props)

    # Finished, load the data
    dataTable.LoadData(data)

    if props_only:
        return props

    else:
        # Return the JSON response of the datatable
        if "responseHandler" in tqParams:
            return HttpResponse(
                dataTable.ToJSonResponse(req_id=tqxParams["reqId"], response_handler=tqParams["responseHandler"]),
                mimetype="application/json; charset=utf8",
            )

        else:
            return HttpResponse(
                dataTable.ToJSonResponse(req_id=tqxParams["reqId"]), mimetype="application/json; charset=utf8"
            )