# Code example #1
def test():
    """Verify lg.power matches np.power when writing the result in place via out=."""
    base_np = np.random.randn(4, 5)
    exponent = random.randint(1, 13)
    base_lg = lg.array(base_np)

    # Apply the power in place on both implementations.
    np.power(base_np, exponent, out=base_np)
    lg.power(base_lg, exponent, out=base_lg)

    assert lg.allclose(base_lg, base_np)
# Code example #2
def test():
    """Check lg.power agrees with np.power for a scalar on either side."""
    arr_np = np.random.randn(4, 5)
    exp = random.randint(1, 13)
    arr = lg.array(arr_np)

    # scalar exponent on the right-hand side
    assert lg.allclose(lg.power(arr, exp), np.power(arr_np, exp))

    # scalar base on the left-hand side
    assert lg.allclose(lg.power(exp, arr), np.power(exp, arr_np))
# Code example #3
def test():
    """Compare in-place elementwise power when the exponent is itself an array."""
    base_np = np.random.randn(4, 5)

    # Integer-valued exponents keep negative bases well-defined (no fractional powers).
    exp_np = np.random.randint(10, size=(4, 5)).astype(np.float64)

    base = lg.array(base_np)
    exp = lg.array(exp_np)

    # Write the result back into the base array on both implementations.
    np.power(base_np, exp_np, out=base_np)
    lg.power(base, exp, out=base)

    assert lg.allclose(base, base_np)
# Code example #4
def test():
    """Scalar ** scalar: lg.power must equal np.power exactly."""
    random.seed(13)
    base = random.randint(1, 42)
    exp = random.randint(1, 3)

    # Exact equality is expected for small integer operands.
    assert np.array_equal(lg.power(base, exp), np.power(base, exp))
# Code example #5
def linear_regression(T,
                      features,
                      target,
                      steps,
                      learning_rate,
                      sample,
                      add_intercept=False):
    """Fit linear weights to *target* by batch gradient descent on squared error.

    T: dtype used for the weight vector (and intercept column, if added).
    features: 2-D sample matrix, one row per observation.
    target: 1-D vector of desired outputs.
    steps: number of gradient-descent iterations.
    learning_rate: step size applied to the gradient each iteration.
    sample: print the squared-error total every `sample` steps.
    add_intercept: when True, prepend a column of ones so a bias is learned.

    Returns the learned weight vector.
    """
    if add_intercept:
        ones = np.ones((features.shape[0], 1), dtype=T)
        features = np.hstack((ones, features))

    weights = np.zeros(features.shape[1], dtype=T)
    n_samples = len(features)

    for step in range(steps):
        residual = np.dot(features, weights) - target

        # weights -= lr * (1/n) * X^T residual, written as adding the
        # negated mean gradient of the squared error.
        weights += learning_rate * (-(1.0 / n_samples) * residual.dot(features))

        if step % sample == 0:
            print("Error of step " + str(step) + ": " +
                  str(np.sum(np.power(residual, 2))))

    return weights