Code example #1
File: test_approximate.py  Project: winterdl/util
def test_approximate():
    from util.approximate.testing import test_plot
    from util.approximate import condition, LSHEP, Voronoi, Delaunay, \
        NearestNeighbor, KNN, Shepard
    from util.approximate.delaunay import DelaunayP1, DelaunayP2

    print("Adding surface to plot..")
    # model = LSHEP()
    # model = condition(Voronoi, method="MPCA")
    # model = condition(Delaunay, method="MPCA", scale=True)
    model = DelaunayP2()
    # model = NearestNeighbor(k=4, method=Voronoi)
    # model = condition(KNN, display=True)(display=True)
    # model = condition(Shepard, display=True)()
    # model = condition(Voronoi, method="MPCA", display=True)()
    # model = LSHEP()
    # f = lambda x: (x[0] - .5)**2 + x[1]
    p, _, _ = test_plot(
        model,
        N=100,
        D=2,
        low=-.1,
        upp=1.1,
        noise=0,  # fun=f,
        random=False,
        plot_points=4000,
        classifier=False)  # 6, 8
    # model.errors()
    print("Generating plot HTML..")
    p.show()
Code example #2
File: __init__.py  Project: winterdl/util
        indices = []
        weights = []
        idx = np.arange(self.points.shape[1])
        start = time.time()
        for i, pt in enumerate(points):
            # Update user if time has elapsed
            if ((time.time() - start) > display_wait_sec):
                start = time.time()
                print(f" {100.*i/len(points):.2f}%", end="\r", flush=True)
            # Calculate the support at this point
            _, support, error = self.voronoi.predict(self.points, self.dots,
                                                     np.asarray(pt, order='F'))
            if (error == 1):
                raise (OverflowError(
                    "The scale of the data caused a squared float to overflow, consider normalizing data."
                ))
            elif (error == 2):
                raise (DuplicateInterpolationPoints(
                    "Some fit points were duplicated."))
            supported_points = idx[support > 0]
            supported_weights = support[supported_points]
            indices.append(supported_points)
            weights.append(supported_weights)
        return indices, weights


if __name__ == "__main__":
    from util.approximate.testing import test_plot
    model = Voronoi()
    p, x, y = test_plot(model, random=True, N=20)
    p.show()
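
The loop above only returns, for each query point, the indices of the supporting fit points and their weights. As a rough illustration (not part of this project; the helper name and the assumption that the weights form a convex combination are mine), the sketch below turns such (indices, weights) pairs into predictions by a weighted average of the fitted response values:

import numpy as np

def weighted_predict(indices, weights, y):
    # For each query point, average the fit values `y` at the supporting
    # indices, weighted by the (assumed nonnegative) support weights.
    out = []
    for idx, w in zip(indices, weights):
        w = np.asarray(w, dtype=float)
        out.append(np.dot(w, y[np.asarray(idx)]) / w.sum())
    return np.array(out)
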
Code example #3
File: __init__.py  Project: winterdl/util
# ==================================
#      Support Vector Regressor
# ==================================
class SVR(Approximator):
    def __init__(self, *args, kernel=SVR_KERNEL, **kwargs):
        # Disable the future warnings that SVR gives.
        import warnings
        warnings.filterwarnings("ignore", category=FutureWarning)
        # Import the model from sklearn.
        from sklearn.svm import SVR
        self.SVR = SVR
        kwargs.update(dict(kernel=kernel))
        self.svr = self.SVR(*args, **kwargs)

    def _fit(self, x, y, *args, **kwargs):
        if (y.shape[1] > 1): raise (Exception("SVR only supports 1D output."))
        y = y[:, 0]
        return self.svr.fit(x, y, *args, **kwargs)

    def _predict(self, *args, **kwargs):
        output = self.svr.predict(*args, **kwargs)
        if (len(output.shape) == 1): output = output[:, None]
        return output


if __name__ == "__main__":
    from util.approximate.testing import test_plot
    m = SVR()
    p, x, y = test_plot(m, random=True, N=200)
    p.show()
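
The `_fit` method above rejects multi-column targets because scikit-learn's SVR is a single-output regressor. A minimal sketch of the usual workaround, fitting one SVR per output column with scikit-learn's MultiOutputRegressor (shown only for comparison; it is not used by the project above):

import numpy as np
from sklearn.svm import SVR
from sklearn.multioutput import MultiOutputRegressor

x = np.random.random((50, 3))
y = np.random.random((50, 2))                  # two output columns
model = MultiOutputRegressor(SVR(kernel="rbf"))
model.fit(x, y)
print(model.predict(x[:5]).shape)              # (5, 2)
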
Code example #4
        indices = []
        weights = []
        # Keep only the supporting (positive-weight) indices and weights for each point.
        for i in range(len(all_weights)):
            to_keep = idx[all_weights[i,:] > 0]
            indices.append( to_keep )
            weights.append( all_weights[i,to_keep] )
        return indices, weights


if __name__ == "__main__":
    from util.plot import Plot
    from util.approximate.testing import test_plot
    model = BoxMesh()

    p,x,y = test_plot(model)
    p.plot(show=False)
    x = model.points.T
    print(x)


    # ==============================================
    #      Display the boxes that were computed     
    # ==============================================
    p = Plot()
    # Get the extreme points.
    min_x = np.min(x[:,0]) - .1
    max_x = np.max(x[:,0]) + .1
    min_y = np.min(x[:,1]) - .1
    max_y = np.max(x[:,1]) + .1
    # Get the box edges (about the centers).
Code example #5
    # Find the k with the lowest mean error.
    best_k = min(k_values.items(), key=lambda i: i[1])[0]
    if display:
        name = "mean" if mean else "minimum"
        from math import log10, ceil
        print('-' * 52)
        print(" Estimated " + name + " error for various choices of 'k':")
        for k in sorted(k_values):
            extra = "  <-- chosen 'k'" if k == best_k else ""
            print(f"  k = {k:{ceil(log10(max_k))}d} ~ {k_values[k]:.4e}" +
                  extra)
        print('-' * 52)
    # Return the "k" with the minimum mean difference
    return best_k


if __name__ == "__main__":
    TEST_AUTO = False
    if TEST_AUTO:
        d = 10
        n = 511
        points = np.random.random(size=(n, d))
        values = np.random.random(size=(n, d))
        k = auto(points, values, display=True)

    from util.approximate import condition
    m = condition(NearestNeighbor, method="MPCA", display=True)()
    from util.approximate.testing import test_plot
    p, x, y = test_plot(m, N=30, fun=lambda x: x[0]**3, plot_points=4000)
    p.show()
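
The `auto` snippet above assumes a `k_values` dict mapping each candidate `k` to an estimated error. One possible way to build such a dict, sketched here with a brute-force leave-one-out k-nearest-neighbor estimate (the function name and error measure are assumptions, not this project's implementation; `values` is taken to be 2D as in the `__main__` block above):

import numpy as np

def estimate_k_errors(points, values, max_k=16):
    # Pairwise distances, excluding each point from its own neighbor list.
    dists = np.linalg.norm(points[:, None, :] - points[None, :, :], axis=-1)
    np.fill_diagonal(dists, np.inf)
    order = np.argsort(dists, axis=1)
    k_values = {}
    for k in range(1, max_k + 1):
        # Leave-one-out prediction: mean of the k nearest fit values.
        preds = values[order[:, :k]].mean(axis=1)
        k_values[k] = float(np.mean(np.linalg.norm(preds - values, axis=-1)))
    return k_values
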
Code example #6
        # interpolation points.
        return indices, weights

    # Print and return a summary of the errors experienced
    def errors(self):
        print("%i normal returns." % self.ierrors.get(0, 0))
        print(("%i points outside the radius of influence " +
               "of all other points.") % self.ierrors.get(1, 0))
        return self.ierrors.get(0, 0), self.ierrors.get(1, 0)


if __name__ == "__main__":
    from util.approximate.testing import test_plot

    model = ShepMod()
    p, x, y = test_plot(model, random=True, N=200)  #, low=-1, upp=2)
    p.show()
    print()
    model.errors()

    # # Test when predictions are outside of the radius of influence.
    # x = np.random.random((102, 100))
    # model.fit(x)
    # print(model.rw)
    # print(np.linalg.norm(x[0] - x, axis=1))
    # print(np.linalg.norm(np.random.random(100) - x, axis=1))
    # model(np.random.random((1000, 100)))

    # # Test distribution prediction.
    # from util.plot import Plot
    # from util.stats import cdf_fit