Example #1
def _lstsq(X, y, rcond):
    "Do least squares on the arguments"
    try:
        return lstsq(X, y, rcond)
    except TypeError:
        get_linalg_funcs()
        return lstsq(X, y, rcond)
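For orientation, a minimal sketch of what a direct lstsq call returns, using numpy.linalg.lstsq (an assumption here; numpy.dual, used by some of the examples below, has since been removed from NumPy): it solves an overdetermined system in the least-squares sense and also reports residuals, rank, and singular values.

import numpy as np

# Fit y ≈ m*t + c to four points (the classic overdetermined case).
t = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([-1.0, 0.2, 0.9, 2.1])
A = np.column_stack([t, np.ones_like(t)])

coeffs, residuals, rank, sing_vals = np.linalg.lstsq(A, y, rcond=None)
print(coeffs)  # approximately [1.0, -0.95]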
Example #3
    def prep_extrapolator(self, z, bbox=None):
        if bbox is None:
            bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
        minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
        minx = min(minx, np.minimum.reduce(self.x))
        miny = min(miny, np.minimum.reduce(self.y))
        maxx = max(maxx, np.maximum.reduce(self.x))
        maxy = max(maxy, np.maximum.reduce(self.y))
        M = max((maxx - minx) / 2, (maxy - miny) / 2)
        midx = (minx + maxx) / 2.0
        midy = (miny + maxy) / 2.0

        xp, yp = np.array([[midx + 3 * M, midx, midx - 3 * M],
                           [midy, midy + 3 * M, midy - 3 * M]])
        x1 = np.hstack((self.x, xp))
        y1 = np.hstack((self.y, yp))
        newtri = self.__class__(x1, y1)

        # do a least-squares fit to a plane to make pseudo-data
        xy1 = np.ones((len(self.x), 3), np.float64)
        xy1[:, 0] = self.x
        xy1[:, 1] = self.y
        from numpy.dual import lstsq  # numpy.dual is deprecated; numpy.linalg.lstsq is the modern equivalent
        c, res, rank, s = lstsq(xy1, z)
        zp = np.hstack((z, xp * c[0] + yp * c[1] + c[2]))

        return newtri, zp
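Read in isolation, the "least-squares fit to a plane" step above amounts to the following sketch (numpy.linalg.lstsq stands in for the deprecated numpy.dual import; the data are made up):

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(20)
y = rng.random(20)
z = 2.0 * x - 1.0 * y + 0.5                    # points on the plane z = 2x - y + 0.5

# Same design matrix as xy1 above: columns [x, y, 1].
xy1 = np.column_stack([x, y, np.ones_like(x)])
c, res, rank, s = np.linalg.lstsq(xy1, z, rcond=None)
print(c)                                       # approximately [2.0, -1.0, 0.5]

# Pseudo-data at extrapolation points (xp, yp), exactly as in prep_extrapolator:
xp, yp = np.array([0.0, 1.0]), np.array([1.0, 0.0])
zp = xp * c[0] + yp * c[1] + c[2]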
Example #4
    def predict(self, x):
        """Calculates a predicted value of the response based on the current
        trained model for the supplied list of inputs.

        :param x: Point at which the surrogate is evaluated.
        """

        super(KrigingSurrogate, self).predict(x)

        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)
        r = exp(-thetas.dot(square((x - X).T)))

        if self.R_fact is not None:
            # Cholesky Decomposition
            sol = cho_solve(self.R_fact, r).T
        else:
            # Linear Least Squares
            sol = lstsq(self.R, r)[0].T

        f = self.mu + dot(r, self.R_solve_ymu)
        term1 = dot(r, sol)

        # Note: sum(sol) should be 1, since Kriging is an unbiased
        # estimator. This measures the effect of numerical instabilities.
        bias = (1.0 - sum(sol))**2. / sum(self.R_solve_one)

        mse = self.sig2 * (1.0 - term1 + bias)
        rmse = sqrt(abs(mse))

        return f, rmse
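The Cholesky branch and the lstsq fallback above solve the same system R·sol = r; a rough equivalence check with made-up data, assuming scipy.linalg's cho_factor/cho_solve and a symmetric positive-definite R:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(1)
A = rng.random((5, 5))
R = A @ A.T + 5.0 * np.eye(5)                  # symmetric positive definite
r = rng.random(5)

sol_chol = cho_solve(cho_factor(R), r)
sol_lstsq = np.linalg.lstsq(R, r, rcond=None)[0]
print(np.allclose(sol_chol, sol_lstsq))        # True, up to round-off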
Example #5
    def predict(self, x):
        """
        Calculates a predicted value of the response based on the current
        trained model for the supplied list of inputs.

        Args
        ----
        x : array-like
            Point at which the surrogate is evaluated.
        """

        super(KrigingSurrogate, self).predict(x)

        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)
        r = exp(-thetas.dot(square((x - X).T)))

        if self.R_fact is not None:
            # Cholesky Decomposition
            sol = cho_solve(self.R_fact, r).T
        else:
            # Linear Least Squares
            sol = lstsq(self.R, r)[0].T

        f = self.mu + dot(r, self.R_solve_ymu)
        term1 = dot(r, sol)

        # Note: sum(sol) should be 1, since Kriging is an unbiased
        # estimator. This measures the effect of numerical instabilities.
        bias = (1.0 - sum(sol)) ** 2. / sum(self.R_solve_one)

        mse = self.sig2 * (1.0 - term1 + bias)
        rmse = sqrt(abs(mse))

        return f, rmse
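The line r = exp(-thetas.dot(square((x - X).T))) computes one correlation value per training point; a small sketch of that broadcasting, with made-up numbers:

import numpy as np

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])   # 3 training points, 2 dims
thetas = np.array([1.0, 0.5])                         # one width parameter per dimension
x = np.array([0.5, 1.0])                              # new point

# (x - X).T has shape (2, 3); square it, weight each dimension by theta, sum, exponentiate.
r = np.exp(-thetas.dot(np.square((x - X).T)))
print(r.shape)                                        # (3,): one correlation per training point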
Example #6
    def _calculate_log_likelihood(self):
        """
        Calculates the log-likelihood (up to a constant) for a given
        self.theta.

        """
        R = zeros((self.n, self.n))
        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)

        # exponentially weighted distance formula
        for i in range(self.n):
            R[i, i + 1:self.n] = exp(
                -thetas.dot(square(X[i, ...] - X[i + 1:self.n, ...]).T))

        R *= (1.0 - self.nugget)
        R += R.T + eye(self.n)
        self.R = R

        one = ones(self.n)
        rhs = column_stack([Y, one])
        try:
            # Cholesky Decomposition
            self.R_fact = cho_factor(R)
            sol = cho_solve(self.R_fact, rhs)
            solve = lambda x: cho_solve(self.R_fact, x)
            det_factor = log(abs(prod(diagonal(self.R_fact[0]))**2) + 1.e-16)

        except (linalg.LinAlgError, ValueError):
            # Since Cholesky failed, try linear least squares
            self.R_fact = None  # reset this to none, so we know not to use Cholesky
            sol = lstsq(self.R, rhs)[0]
            solve = lambda x: lstsq(self.R, x)[0]
            det_factor = slogdet(self.R)[1]

        self.mu = dot(one, sol[:, :-1]) / dot(one, sol[:, -1])
        y_minus_mu = Y - self.mu
        self.R_solve_ymu = solve(y_minus_mu)
        self.R_solve_one = sol[:, -1]
        self.sig2 = dot(y_minus_mu.T, self.R_solve_ymu) / self.n

        if isinstance(self.sig2, ndarray):
            self.log_likelihood = -self.n/2. * slogdet(self.sig2)[1] \
                                  - 1./2.*det_factor
        else:
            self.log_likelihood = -self.n/2. * log(self.sig2) \
                                  - 1./2.*det_factor
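Both branches above compute log|R|: from the squared product of the Cholesky diagonal when the factorization succeeds, and from slogdet otherwise. A quick consistency sketch with a made-up SPD matrix:

import numpy as np
from scipy.linalg import cho_factor

rng = np.random.default_rng(2)
A = rng.random((4, 4))
R = A @ A.T + 4.0 * np.eye(4)                  # SPD, so the Cholesky factorization succeeds

c, lower = cho_factor(R)
via_chol = np.log(abs(np.prod(np.diagonal(c)) ** 2) + 1.e-16)
via_slogdet = np.linalg.slogdet(R)[1]
print(np.isclose(via_chol, via_slogdet))       # True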
Example #7
    def _calculate_log_likelihood(self):
        """
        Calculates the log-likelihood (up to a constant) for a given
        self.theta.

        """
        R = zeros((self.n, self.n))
        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)

        # exponentially weighted distance formula
        for i in range(self.n):
            R[i, i+1:self.n] = exp(-thetas.dot(square(X[i, ...] - X[i+1:self.n, ...]).T))

        R *= (1.0 - self.nugget)
        R += R.T + eye(self.n)
        self.R = R

        one = ones(self.n)
        rhs = column_stack([Y, one])
        try:
            # Cholesky Decomposition
            self.R_fact = cho_factor(R)
            sol = cho_solve(self.R_fact, rhs)
            solve = lambda x: cho_solve(self.R_fact, x)
            det_factor = log(abs(prod(diagonal(self.R_fact[0])) ** 2) + 1.e-16)

        except (linalg.LinAlgError, ValueError):
            # Since Cholesky failed, try linear least squares
            self.R_fact = None  # reset this to none, so we know not to use Cholesky
            sol = lstsq(self.R, rhs)[0]
            solve = lambda x: lstsq(self.R, x)[0]
            det_factor = slogdet(self.R)[1]

        self.mu = dot(one, sol[:, :-1]) / dot(one, sol[:, -1])
        y_minus_mu = Y - self.mu
        self.R_solve_ymu = solve(y_minus_mu)
        self.R_solve_one = sol[:, -1]
        self.sig2 = dot(y_minus_mu.T, self.R_solve_ymu) / self.n

        if isinstance(self.sig2, ndarray):
            self.log_likelihood = -self.n/2. * slogdet(self.sig2)[1] \
                                  - 1./2.*det_factor
        else:
            self.log_likelihood = -self.n/2. * log(self.sig2) \
                                  - 1./2.*det_factor
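The mu line works because rhs stacks Y and a column of ones: sol[:, :-1] is R^-1 Y and sol[:, -1] is R^-1 1, so the ratio of the two dot products is the generalized-least-squares mean (1^T R^-1 Y) / (1^T R^-1 1). A small numeric sketch with made-up data:

import numpy as np

rng = np.random.default_rng(3)
A = rng.random((4, 4))
R = A @ A.T + 4.0 * np.eye(4)
Y = rng.random(4)
one = np.ones(4)

sol = np.linalg.solve(R, np.column_stack([Y, one]))
mu_stacked = one.dot(sol[:, :-1]) / one.dot(sol[:, -1])

Rinv = np.linalg.inv(R)
mu_direct = one @ Rinv @ Y / (one @ Rinv @ one)
print(np.allclose(mu_stacked, mu_direct))      # True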
Example #8
    def train(self, x, y):
        """ Calculate response surface equation coefficients using least
        squares regression.

        Args
        ----
        x : array-like
            Training input locations

        y : array-like
            Model responses at given inputs.
        """

        super(ResponseSurface, self).train(x, y)

        m = self.m = x.shape[0]
        n = self.n = x.shape[1]

        X = zeros((m, ((n + 1) * (n + 2)) // 2))

        # Modify X to include constant, squared terms and cross terms

        # Constant Terms
        X[:, 0] = 1.0

        # Linear Terms
        X[:, 1:n+1] = x

        # Quadratic Terms
        X_offset = X[:, n + 1:]
        for i in range(n):
            # Z = einsum('i,ij->ij', X, Y) is equivalent to, but much faster and
            # memory efficient than, diag(X).dot(Y) for vector X and 2D array Y.
            # I.e. Z[i,j] = X[i]*Y[i,j]
            X_offset[:, :n - i] = einsum('i,ij->ij', x[:, i], x[:, i:])
            X_offset = X_offset[:, n-i:]

        # Determine response surface equation coefficients (betas) using least squares
        self.betas, rs, r, s = lstsq(X, y)
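The einsum identity claimed in the comment above can be checked directly; a minimal sketch:

import numpy as np

rng = np.random.default_rng(4)
x = rng.random(5)                              # vector
Y = rng.random((5, 3))                         # 2D array

Z = np.einsum('i,ij->ij', x, Y)                # Z[i, j] = x[i] * Y[i, j]
print(np.allclose(Z, np.diag(x) @ Y))          # True, without ever forming diag(x)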
Example #9
    def train(self, x, y):
        """ Calculate response surface equation coefficients using least
        squares regression.

        Args
        ----
        x : array-like
            Training input locations

        y : array-like
            Model responses at given inputs.
        """

        super(ResponseSurface, self).train(x, y)

        m = self.m = x.shape[0]
        n = self.n = x.shape[1]

        X = zeros((m, ((n + 1) * (n + 2)) // 2))

        # Modify X to include constant, squared terms and cross terms

        # Constant Terms
        X[:, 0] = 1.0

        # Linear Terms
        X[:, 1:n + 1] = x

        # Quadratic Terms
        X_offset = X[:, n + 1:]
        for i in range(n):
            # Z = einsum('i,ij->ij', X, Y) is equivalent to, but much faster and
            # memory efficient than, diag(X).dot(Y) for vector X and 2D array Y.
            # I.e. Z[i,j] = X[i]*Y[i,j]
            X_offset[:, :n - i] = einsum('i,ij->ij', x[:, i], x[:, i:])
            X_offset = X_offset[:, n - i:]

        # Determine response surface equation coefficients (betas) using least squares
        self.betas, rs, r, s = lstsq(X, y)
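To evaluate the fitted surface at a new point, the same column ordering (constant, linear, then the x[i]*x[i:] blocks) has to be reproduced before taking the dot product with self.betas. A hypothetical helper sketching that layout (quad_features is not part of the original class):

import numpy as np

def quad_features(x):
    """Feature row in the same order as the columns of X built in train()."""
    x = np.asarray(x, dtype=float)
    n = x.size
    row = [1.0]                                # constant term
    row.extend(x)                              # linear terms
    for i in range(n):                         # quadratic and cross terms: x[i] * x[i:]
        row.extend(x[i] * x[i:])
    return np.array(row)                       # length (n + 1) * (n + 2) // 2

# With a trained surface, the prediction would be quad_features(x_new).dot(betas).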
Example #10
    trainX = []
    trainY = []
    arrayX, arrayY = (prepare_data(df))

    for (df_x, sr_y) in zip(np.asarray(arrayX), np.asarray(arrayY)):
        trainX.append(np.asmatrix(df_x))
        trainY.append(np.asmatrix(sr_y))

    # Prepare the theoretically optimal W & b in advance (least-squares baseline)
    tmpArrayX = pd.DataFrame(arrayX)
    tmpArrayX['b'] = 1
    arrayXWithB = tmpArrayX.values  # DataFrame.as_matrix() has been removed; .values is the equivalent
    # print("arrayX:", tmpArrayX.values, "\narrayY:", np.asmatrix(arrayX))
    # print("trainX:", trainX, "\ntrainY:", trainY)

    WR = lstsq(arrayXWithB, np.asmatrix(arrayY).T)[0]
    print("WR:", WR, " size:", len(WR))
    print("lstsq:", np.sum(np.abs(np.subtract(np.asmatrix(arrayY).T, np.matmul(tmpArrayX, WR)))))

    # Start the training loop
    for epoch in range(training_epochs):
        for (x, y) in zip(trainX, trainY):
            _, summary = sess.run([optimizer, merged_summary_op], feed_dict={X: x, Y: y})
            train_writer.add_summary(summary, epoch)

        if epoch % display_step == 0:
            result = 0
            for (x, y) in zip(trainX, trainY):
                tmp = sess.run(lossS, feed_dict={X: x, Y: y})
                result += tmp
            # Display the w & b result every 1000 training iterations
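Because the extra 'b' column of ones is appended last, the final entry of WR is the bias and the rest are the weights. A plain-NumPy sketch of that closed-form baseline (names and data are illustrative):

import numpy as np
from numpy.linalg import lstsq

rng = np.random.default_rng(5)
features = rng.random((50, 3))
targets = features @ np.array([2.0, -1.0, 0.5]) + 0.25        # true W and b

design = np.column_stack([features, np.ones(len(features))])  # bias column appended last
WR = lstsq(design, targets, rcond=None)[0]
W, b = WR[:-1], WR[-1]
print(W, b)                                    # approximately [2.0, -1.0, 0.5] and 0.25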
def on_same_plane(xyz_ls):
    # Fit a plane A*x + B*y + C*z = 1 through the points in the least-squares
    # sense (a zero right-hand side would only return the trivial all-zero
    # solution, telling us nothing about coplanarity).
    r, _, _, _ = lstsq(xyz_ls, [1] * len(xyz_ls))
    A, B, C = r
    # For coplanar points each of these inner products is close to 1.
    return [inner((A, B, C), xyz) for xyz in xyz_ls]
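A quick sanity check of the idea (assuming the plane does not pass through the origin, so fitting A*x + B*y + C*z = 1 is well posed; the points are made up):

import numpy as np
from numpy.linalg import lstsq

# Four points on the plane x + 2*y + 3*z = 1.
pts = np.array([[1.0, 0.0, 0.0],
                [0.0, 0.5, 0.0],
                [0.0, 0.0, 1.0 / 3.0],
                [0.5, 0.25, 0.0]])
coeffs, *_ = lstsq(pts, np.ones(len(pts)), rcond=None)
print(pts @ coeffs)                            # every entry close to 1.0 -> coplanar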