# NOTE: the benchmark helpers used below (GetListofErrorsFor, BenchMarkOnTimesFor,
# python_sum, numpy_sum, khan_sum, python_fsum, rational_sum) are project-specific
# and assumed to be defined elsewhere in this module.
import matplotlib.pyplot as pyplt
from quick_json import quick_json as qj  # same import style as in the later examples


def BenchMarkOnErrors():
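        """Measure the summation error of python_sum and numpy_sum over increasing
        list sizes (30 trials per size via GetListofErrorsFor), plot the mean error
        of each together with mean +/- standard-deviation curves, and save both the
        figure and the raw data ("errors.json")."""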
        ErrorsMeanPythonSum, Sizes = [], list(range(10, 2000, 10))
        ErrorsMeanNumpySum = []
        ErrorMeanStdPythonSum, ErrorMeanStdNumpySum = [], []
        for I in Sizes:
            LoopTempVar = GetListofErrorsFor(python_sum, trials=30, listSize=I)
            ErrorsMeanPythonSum.append(LoopTempVar[0])
            ErrorMeanStdPythonSum.append(LoopTempVar[1])
            LoopTempVar = GetListofErrorsFor(numpy_sum, trials=30, listSize=I)
            ErrorsMeanNumpySum.append(LoopTempVar[0])
            ErrorMeanStdNumpySum.append(LoopTempVar[1])
        # Plot the Mean of Errors --------------------------------------------------------------------------------------
        fig, ax = pyplt.subplots()
        pyplt.scatter(Sizes,
                      ErrorsMeanPythonSum,
                      label="Default Sum Mean Errors")
        pyplt.scatter(Sizes,
                      ErrorsMeanNumpySum,
                      color="r",
                      label="Numpy Sum Mean Errors")
        legend = ax.legend(loc='upper left', shadow=True, fontsize='small')
        ax.set_xlabel("Array Size")
        ax.set_ylabel("Errors")

        # Plot the Std curve -------------------------------------------------------------------------------------------
        PythonSumStds = ([
            M + D for M, D in zip(ErrorsMeanPythonSum, ErrorMeanStdPythonSum)
        ], [M - D for M, D in zip(ErrorsMeanPythonSum, ErrorMeanStdPythonSum)])
        NumpySumStds = ([
            M + D for M, D in zip(ErrorsMeanNumpySum, ErrorMeanStdNumpySum)
        ], [M - D for M, D in zip(ErrorsMeanNumpySum, ErrorMeanStdNumpySum)])
        pyplt.plot(Sizes, PythonSumStds[0], color="b")
        pyplt.plot(Sizes, PythonSumStds[1], color="b")
        pyplt.plot(Sizes, NumpySumStds[0], color="r")
        pyplt.plot(Sizes, NumpySumStds[1], color="r")

        # Plot and save these things -----------------------------------------------------------------------------------
        pyplt.savefig("Error plots for Numpy, and default python.png", dpi=400)
        pyplt.show()
        JsonData = {}
        JsonData["Sizes"] = Sizes
        JsonData["ErrorsMeanPythonSum"] = ErrorsMeanPythonSum
        JsonData["ErrorsMeanNumpySum"] = ErrorsMeanNumpySum
        JsonData["PythonSumStdUpper"], JsonData[
            "PythonSumStdLower"] = PythonSumStds[0], PythonSumStds[1]
        JsonData["NumpySumStdUpper"], JsonData[
            "NumpySumStdLower"] = NumpySumStds[0], NumpySumStds[1]
        qj.json_encode(JsonData, "errors.json")


def PlotTheExecutionTime():
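        """Time khan_sum, python_fsum, numpy_sum, and rational_sum over increasing
        array sizes via BenchMarkOnTimesFor, scatter-plot the mean execution times,
        and save both the figure ("Execution time.png") and the raw data
        ("Execution time.json")."""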
        # Prepare the Json to store things -----------------------------------------------------------------------------
        JsonData = {}

        fig, ax = pyplt.subplots()
        Xs = list(range(10, 2000, 10))
        JsonData["Sizes"] = Xs
        # Calling the modules and scatter plot the data ----------------------------------------------------------------
        Means, Upper, Lower = BenchMarkOnTimesFor(fxn=khan_sum, sizes=Xs)
        ax.scatter(Xs, Means, color="r", s=0.5, label="Kahan sum")
        JsonData["Kahan Sum"] = {
            "Means": Means,
            "Upper": Upper,
            "Lower": Lower
        }

        Means, Upper, Lower = BenchMarkOnTimesFor(fxn=python_fsum, sizes=Xs)
        ax.scatter(Xs, Means, color="b", s=0.5, label="python fsum")
        JsonData["Python Fsum"] = {
            "Means": Means,
            "Upper": Upper,
            "Lower": Lower
        }

        Means, Upper, Lower = BenchMarkOnTimesFor(fxn=numpy_sum, sizes=Xs)
        ax.scatter(Xs, Means, color="g", s=0.5, label="numpy sum")
        JsonData["Numpy Sum"] = {
            "Means": Means,
            "Upper": Upper,
            "Lower": Lower
        }

        Means, Upper, Lower = BenchMarkOnTimesFor(fxn=rational_sum, sizes=Xs)
        ax.scatter(Xs, Means, color="black", s=0.5, label="rational sum")
        JsonData["Rational Sum"] = {
            "Means": Means,
            "Upper": Upper,
            "Lower": Lower
        }

        legend = ax.legend(loc='upper left', shadow=True, fontsize='small')
        ax.set_xlabel("Array Size")
        ax.set_ylabel("Execution time/sec")
        pyplt.savefig("Execution time.png", dpi=400)
        # showing and saving stuff: ------------------------------------------------------------------------------------
        fig.show()
        qj.json_encode(JsonData, "Execution time.json")
Example #3
def bench_bb_with_dp(trials: int):
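    """Run the same set of randomly generated knapsack problems (rand_problem_ints
    and rand_problem_exponential, `trials` of each) through both the branch-and-bound
    solver and the dual DP solver, record execution time and optimal value for each,
    and save the results to "bb, dp bench.csv" and "bb, dp bench.json"."""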
    def dp_solve(P, W, B):
        _, Opt = knapsack_dp_dual(P, W, B)
        return Opt

    def bb_solve(P, W, B):
        _, Opt = branch_and_bound(P, W, B)
        return Opt

    ItemCount = 20
    ItemProfitsWeightsRange = 1000
    KnapSackSparseness = 0.1
    ProblemList = [
        rand_problem_ints(ItemProfitsWeightsRange, ItemCount,
                          KnapSackSparseness) for P in range(trials)
    ]
    ProblemList += [
        rand_problem_exponential(ItemProfitsWeightsRange, ItemCount,
                                 KnapSackSparseness) for P in range(trials)
    ]
    bb_time, bb_opt = run_solve_on(ProblemList, bb_solve)
    dp_time, dp_opt = run_solve_on(ProblemList, dp_solve)
    CSVHeader = ["bb_time", "dp_time", "bb_opt", "dp_opt"]
    CSVCols = [bb_time, dp_time, bb_opt, dp_opt]

    def MakeJsonResults():
        Json = {}
        Json["bb_time"], Json["dp_time"] = bb_time, dp_time
        return Json

    core.csv_col_save("bb, dp bench.csv", colHeader=CSVHeader, cols=CSVCols)
    quick_json.json_encode(MakeJsonResults(), filename="bb, dp bench.json")

    print("Tests Detailed: ")
    print(
        f"The same set of problem is run on both BB and DP, time and optimal value is recored. "
    )
    print(
        "The optimal value for both solver should be the same and the time cost is the interests. "
    )
    print(
        f"Item Count: {ItemProfitsWeightsRange}, Item's Profits and Weight range: (0, {ItemProfitsWeightsRange}), "
        f"Knapsack Sparseness: {KnapSackSparseness}")
Example #4
    def CompareSolversForProblemSize():
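        """Benchmark the PuLP-based simplex solver (ks.EknapsackSimplex) against the
        greedy branch-and-bound solver (ks.EknapsackGreedy) on the same list of
        extended-knapsack problems, then dump the execution times and objective
        values to a JSON file."""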
        problemSize = 80
        ProblemList = get_problem_list(30, problemSize, 0.2, 3)

        def PulpSolver(p, w, c, b):
            SolverInstance = ks.EknapsackSimplex(p, w, c, b)
            return SolverInstance.solve()

        def GreedyBBSolver(p, w, c, b):
            SolverInstance = ks.EknapsackGreedy(p, w, c, b)
            return SolverInstance.solve()

        ExecutionTimePulp, ObjectivePulp = benchmark_solver_for(
            PulpSolver, ProblemList)
        ExecutionTimeGreed, ObjectiveGreed = benchmark_solver_for(
            GreedyBBSolver, ProblemList)

        print(ExecutionTimePulp)
        print(ExecutionTimeGreed)
        print(ObjectivePulp)
        print(ObjectiveGreed)

        JsonData = {}
        JsonData["Problem_size"] = problemSize
        JsonData["PulpSolver"] = {}
        JsonData["GreedyBBSolver"] = {}
        JsonData["PulpSolver"]["Execution_Time"] = ExecutionTimePulp
        JsonData["PulpSolver"]["Objective_value"] = ObjectivePulp
        JsonData["GreedyBBSolver"]["Execution_Time"] = ExecutionTimeGreed
        JsonData["GreedyBBSolver"]["Objective_value"] = ObjectiveGreed
        from quick_json import quick_json as qj
        qj.json_encode(
            obj=JsonData,
            filename=
            f"Extended_knapsack_benchmark_results_problemsize{problemSize}.json"
        )