Example #1
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))  # materialize so the data can be iterated more than once (zip is lazy in Python 3)
    theta = theta_0
    alpha = alpha_0
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0

    # stop if we ever go 100 iterations with no improvement
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1
            alpha *= 0.9

        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))
    theta = theta_0  # initial guess
    alpha = alpha_0  # initial step size
    min_theta, min_value = None, float('inf')  # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try
            # shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9

        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
def minimize_stochastic(errorCuadratico,
                        gradient_fn,
                        caracteristicasPropiedades,
                        valoresPropiedades,
                        theta_0,
                        alpha_0=0.01):
    data = list(zip(caracteristicasPropiedades, valoresPropiedades))
    theta = theta_0
    alpha = alpha_0
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0
    while iterations_with_no_improvement < 100:
        value = sum(
            errorCuadratico(caracteristicasPropiedad_i, valorPropiedad_i,
                            theta)
            for caracteristicasPropiedad_i, valorPropiedad_i in data)
        if value < min_value:
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1
            alpha *= 0.9
        for caracteristicasPropiedad_i, valorPropiedad_i in in_random_order(
                data):
            gradient_i = gradient_fn(caracteristicasPropiedad_i,
                                     valorPropiedad_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
    return min_theta
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    data = list(zip(x, y))
    theta = theta_0  # initial guess
    alpha = alpha_0  # initial step size
    min_theta, min_value = None, float("inf")  # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9

        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = la.vector_subtract(theta,
                                       la.scalar_multiply(alpha, gradient_i))

    return min_theta
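As a quick illustration of how this minimize_stochastic might be called, here is a minimal sketch for fitting a line with stochastic gradient descent. The squared_error and squared_error_gradient helpers and the toy data are assumptions made for this example (they are not part of the snippet above), and the linear_algebra helpers plus in_random_order used above are assumed to be in scope.

def squared_error(x_i, y_i, theta):
    # hypothetical per-point loss for a line y ~ intercept + slope * x
    intercept, slope = theta
    return (y_i - (slope * x_i + intercept)) ** 2

def squared_error_gradient(x_i, y_i, theta):
    # hypothetical per-point gradient with respect to (intercept, slope)
    intercept, slope = theta
    error = y_i - (slope * x_i + intercept)
    return [-2 * error, -2 * error * x_i]

x = [1, 2, 3, 4, 5]
y = [3, 5, 7, 9, 11]   # lies exactly on y = 1 + 2x
theta_hat = minimize_stochastic(squared_error, squared_error_gradient, x, y, [0.0, 0.0])
# theta_hat should end up close to [1, 2]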
Example #5
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))  # materialize; zip returns a one-shot iterator in Python 3
    theta = theta_0
    alpha = alpha_0
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0

    # stop if we ever go 100 iterations with no improvement
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1
            alpha *= 0.9

        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))
    theta = theta_0                             # initial guess
    alpha = alpha_0                             # initial step size
    min_theta, min_value = None, float("inf")   # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9

        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
def find_eigenvector(A, tolerance=0.00001):
    guess = [1 for __ in A]

    while True:
        result = matrix_operate(A, guess)
        length = magnitude(result)
        next_guess = scalar_multiply(1/length, result)
        
        if distance(guess, next_guess) < tolerance:
            return next_guess, length # eigenvector, eigenvalue
        
        guess = next_guess
def find_eigenvector(A, tolerance=0.00001):
    guess = [1 for __ in A]

    while True:
        result = matrix_operate(A, guess)
        length = magnitude(result)
        next_guess = scalar_multiply(1 / length, result)

        if distance(guess, next_guess) < tolerance:
            return next_guess, length  # eigenvector, eigenvalue

        guess = next_guess
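As a small sanity check of find_eigenvector, the power-iteration sketch below uses a 2x2 symmetric matrix; it assumes the matrix_operate, magnitude, scalar_multiply, and distance helpers called above are in scope, and the matrix values are chosen only for illustration.

A = [[3.0, 1.0],
     [1.0, 3.0]]
eigenvector, eigenvalue = find_eigenvector(A)
# the dominant eigenvalue of this matrix is 4 with eigenvector along [1, 1],
# so eigenvector should be close to [0.7071, 0.7071] and eigenvalue close to 4.0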
Example #9
def find_eigenvector(A, tolerance=0.00001):
    guess = [1 for __ in A]

    while True:
        # compute the result vector A * guess
        result = matrix_operate(A, guess)
        # the magnitude of that result
        length = magnitude(result)

        # next guess: rescale result to unit length (multiply by the scalar 1/length)
        next_guess = scalar_multiply(1/length, result)

        # if the distance between successive guesses is below the tolerance,
        # return the converged vector and its magnitude
        if distance(guess, next_guess) < tolerance:
            return next_guess, length # eigenvector, eigenvalue
        
        guess = next_guess
Example #10
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    theta = theta_0  # initial guess
    alpha = alpha_0  # initial step size
    min_theta = None
    min_value = float("inf")  # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 35 iterations with no improvement, stop
    while iterations_with_no_improvement < 35:
        k = 0
        value = 0
        while k < len(x):
            value += target_fn(x[k], y[k], theta)
            k += 1
        print(value < min_value)
        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
            print(min_theta)
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            print(iterations_with_no_improvement)
            alpha *= 0.95

        i = 0
        while i < len(x):
            gradient_i = gradient_fn(x[i], y[i], theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
            i += 1

    return min_theta
Example #11
def gradient_step(v, gradient, step_size):
    assert (len(v) == len(gradient))
    step = scalar_multiply(step_size, gradient)
    return add(v, step)
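To show how gradient_step is typically used, here is a minimal gradient-descent sketch that walks toward the minimum of f(v) = sum of v_i squared. The sum_of_squares_gradient helper and the starting point are assumptions for this example, and add and scalar_multiply are assumed to be in scope.

def sum_of_squares_gradient(v):
    # gradient of f(v) = sum(v_i ** 2) is simply 2 * v
    return [2 * v_i for v_i in v]

v = [3.0, -2.0, 5.0]                    # arbitrary starting point
for _ in range(1000):
    grad = sum_of_squares_gradient(v)
    v = gradient_step(v, grad, -0.01)   # negative step size: move against the gradient
# v should now be very close to [0.0, 0.0, 0.0]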
Example #12
print("*** Test Module <linear_algebra> ***")
print("*** vector ......")

print("vector A = ", A)
print("vector B = ", B)

C = la.vector_add(A, B)
print("A + B = ", C)

C = la.vector_subtract(A, B)
print("A - B = ", C)

C = la.vector_sum([A, B])
print("A and B summary = ", C)

C = la.scalar_multiply(10, A)
print("10 * A = ", C)

C = la.vector_mean([A, B])
print("A and B mean = ", C)

C = la.dot(A, B)
print("A dot B = ", C)

C = la.sum_of_squares(A)
print("A^2's summary = ", C)

C = la.magnitude(A)
print("A's magnitude = ", C)

C = la.distance(A, B)
print("A to B distance = ", C)
Example #13
def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
    assert len(v) == len(gradient)
    step = scalar_multiply(step_size, gradient)
    return add(v, step)
Example #14
def project(v: Vector, w: Vector) -> Vector:
    """return Proj_w(v)"""
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)  # scale the direction w by the projection length
def test_scalar_multiply(self):
    self.assertEqual([2, 4, 6], scalar_multiply(2, [1, 2, 3]))
def project(v, w):
    """return the projection of v onto w"""
    coefficient = dot(v, w)
    return scalar_multiply(coefficient, w)
Example #17
def project(v, w):
    # return the vector obtained by projecting v onto the direction of w
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
Example #18
import sqlite3 as lite
import linear_algebra as la

last_yr = [
    39500, 59500, 61000, 63000, 70000, 70000, 78500, 79000, 86000, 89000,
    91500, 94000
]

con = lite.connect("UMBC.db")
cur = con.cursor()
myStmt = "select salary from instructor order by salary"
cur.execute(myStmt)
data = cur.fetchall()
salaries = []
for rec in data:
    salaries.append(rec[0])
con.close()
salaries.sort()
print "Last year salaries", last_yr
print "Current salaries", salaries

raises = la.vector_subtract(salaries, last_yr)
print "Raises from last year", raises

adjustment = la.scalar_multiply(1.05, salaries)
print "Adjusted for cost of living", adjustment
Example #19
def project(v, w):
    """return the projection of v onto w"""
    coefficient = dot(v, w)
    return scalar_multiply(coefficient, w)
def project(v, w):
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
Example #21
def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
    """Moves `step_size` in the `gradient` direction from `v`"""
    assert len(v) == len(gradient)
    step = scalar_multiply(step_size, gradient)
    return add(v, step)
def project(v, w):
    """returns the projection of v onto the direction w"""
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
    """Moves step_size in the direction of the gradient direction from v"""
    assert len(v) == len(gradient)
    step = scalar_multiply(step_size, gradient)
    return add(v, step)
Example #24
def project(v, w):
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)
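For reference, a small usage sketch of these project variants (assuming dot and scalar_multiply are in scope; the vectors below are chosen only for illustration). Note that they compute dot(v, w) * w, so w is expected to already be a unit-length direction vector; for a non-unit w the result is scaled by the squared magnitude of w.

v = [3, 4]
w = [1, 0]              # unit-length direction
print(project(v, w))    # [3, 0]: the component of v along w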