Example No. 1
import linear_algebra as la

def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    data = list(zip(x, y))
    theta = theta_0  # initial guess
    alpha = alpha_0  # initial step size
    min_theta, min_value = None, float("inf")  # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9

        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = la.vector_subtract(theta,
                                       la.scalar_multiply(alpha, gradient_i))

    return min_theta
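
These examples all lean on a few small helpers from the book-style linear_algebra module (imported as la above). A minimal sketch of the versions assumed here:

import random

def vector_subtract(v, w):
    """subtracts corresponding elements of two equal-length vectors"""
    return [v_i - w_i for v_i, w_i in zip(v, w)]

def scalar_multiply(c, v):
    """multiplies every element of v by the scalar c"""
    return [c * v_i for v_i in v]

def in_random_order(data):
    """yields the elements of data in shuffled order"""
    indexes = [i for i, _ in enumerate(data)]
    random.shuffle(indexes)
    for i in indexes:
        yield data[i]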
Example No. 2
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))
    theta = theta_0                             # initial guess
    alpha = alpha_0                             # initial step size
    min_theta, min_value = None, float("inf")   # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9

        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
Example No. 3
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))
    theta = theta_0  # initial guess
    alpha = alpha_0  # initial step size
    min_theta, min_value = None, float("inf")  # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # otherwise we're not improving, so try
            # shrinking the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9

        # and take a gradient step for each of the data points
        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
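
To see how these functions are meant to be called, here is a hypothetical driver that fits a line y ≈ theta[0] + theta[1] * x by least squares; squared_error and squared_error_gradient are illustrative names, not taken from the examples above:

import random

def squared_error(x_i, y_i, theta):
    # squared prediction error of the line y = theta[0] + theta[1] * x
    return (y_i - (theta[0] + theta[1] * x_i)) ** 2

def squared_error_gradient(x_i, y_i, theta):
    # gradient of squared_error with respect to theta
    error = y_i - (theta[0] + theta[1] * x_i)
    return [-2 * error, -2 * error * x_i]

x = list(range(10))
y = [3 + 2 * x_i + random.uniform(-0.1, 0.1) for x_i in x]

theta = minimize_stochastic(squared_error, squared_error_gradient,
                            x, y, [random.random(), random.random()])
print(theta)  # should land near [3, 2]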
Example No. 4
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))  # materialize, since we iterate over data repeatedly
    theta = theta_0
    alpha = alpha_0
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0

    #stop if we go over 100 iterations with no improvement
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1
            alpha *= 0.9

        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
Example No. 5
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    data = list(zip(x, y))  # materialize, since we iterate over data repeatedly
    theta = theta_0
    alpha = alpha_0
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0

    #stop if we go over 100 iterations with no improvement
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)

        if value < min_value:
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1
            alpha *= 0.9

        for x_i, y_i in in_random_order(data):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
Example No. 6
def minimize_stochastic(errorCuadratico,
                        gradient_fn,
                        caracteristicasPropiedades,
                        valoresPropiedades,
                        theta_0,
                        alpha_0=0.01):
    # same algorithm as above, with Spanish identifiers:
    # errorCuadratico = squared error, caracteristicasPropiedades /
    # valoresPropiedades = property features / property values
    data = list(zip(caracteristicasPropiedades, valoresPropiedades))
    theta = theta_0
    alpha = alpha_0
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0

    while iterations_with_no_improvement < 100:
        value = sum(
            errorCuadratico(caracteristicasPropiedad_i, valorPropiedad_i,
                            theta)
            for caracteristicasPropiedad_i, valorPropiedad_i in data)

        if value < min_value:
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            iterations_with_no_improvement += 1
            alpha *= 0.9

        for caracteristicasPropiedad_i, valorPropiedad_i in in_random_order(
                data):
            gradient_i = gradient_fn(caracteristicasPropiedad_i,
                                     valorPropiedad_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))

    return min_theta
Example No. 7
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):

    theta = theta_0  # initial guess
    alpha = alpha_0  # initial step size
    min_theta = None
    min_value = float("inf")  # the minimum so far
    iterations_with_no_improvement = 0

    # if we ever go 35 iterations with no improvement, stop
    while iterations_with_no_improvement < 35:
        k = 0
        value = 0
        while k < len(x):
            value += target_fn(x[k], y[k], theta)
            k += 1
        print(value < min_value)
        if value < min_value:
            # if we've found a new minimum, remember it
            # and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
            print(min_theta)
        else:
            # otherwise we're not improving, so try shrinking the step size
            iterations_with_no_improvement += 1
            print(iterations_with_no_improvement)
            alpha *= 0.95

        i = 0
        while i < len(x):
            gradient_i = gradient_fn(x[i], y[i], theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
            i += 1

    return min_theta
Example No. 8
def remove_projection_from_vector(v, w):
    """projects v onto w and subtracts the result from v"""
    return vector_subtract(v, project(v, w))
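
project is assumed to come from the same vector helpers; a minimal sketch, assuming w is a unit-length direction vector:

def dot(v, w):
    """sum of elementwise products"""
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def project(v, w):
    """returns the projection of v onto the direction w (w assumed unit length)"""
    projection_length = dot(v, w)
    return scalar_multiply(projection_length, w)

For instance, remove_projection_from_vector([3, 4], [1, 0]) strips the x-component and returns [0, 4].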
Example No. 9
import linear_algebra as la
import stats as st

A = [1, 3, 5, 7, 9]
B = [6, 4, 8, 2, 10]

print("*** Test Module <linear_algebra> ***")
print("*** vector ......")

print("vector A = ", A)
print("vector B = ", B)

C = la.vector_add(A, B)
print("A + B = ", C)

C = la.vector_subtract(A, B)
print("A - B = ", C)

C = la.vector_sum([A, B])
print("A and B summary = ", C)

C = la.scalar_multiply(10, A)
print("10 * A = ", C)

C = la.vector_mean([A, B])
print("A and B mean = ", C)

C = la.dot(A, B)
print("A dot B = ", C)

C = la.sum_of_squares(A)
print("A sum of squares = ", C)
Example No. 10
def remove_projection_from_vector(v, w):
    """projects v onto w and subtracts the result from v"""
    return vector_subtract(v, project(v, w))
Example No. 11
def remove_projection_from_vector(v, w):
    # remove the projection of v onto w from v
    return vector_subtract(v, project(v, w))
Example No. 12
import sqlite3 as lite
import linear_algebra as la

last_yr = [
    39500, 59500, 61000, 63000, 70000, 70000, 78500, 79000, 86000, 89000,
    91500, 94000
]

con = lite.connect("UMBC.db")
cur = con.cursor()
myStmt = "select salary from instructor order by salary"
cur.execute(myStmt)
data = cur.fetchall()
salaries = []
for rec in data:
    salaries.append(rec[0])
con.close()
salaries.sort()
print("Last year salaries", last_yr)
print("Current salaries", salaries)

raises = la.vector_subtract(salaries, last_yr)
print("Raises from last year", raises)

adjustment = la.scalar_multiply(1.05, salaries)
print("Adjusted for cost of living", adjustment)
Example No. 13
def test_vector_subtract(self):
    self.assertEqual([1, 0, 3], vector_subtract([3, 2, 6], [2, 2, 3]))
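
A minimal unittest harness to run this assertion, assuming vector_subtract can be imported from the linear_algebra module used above:

import unittest
from linear_algebra import vector_subtract

class TestLinearAlgebra(unittest.TestCase):
    def test_vector_subtract(self):
        self.assertEqual([1, 0, 3], vector_subtract([3, 2, 6], [2, 2, 3]))

if __name__ == "__main__":
    unittest.main()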
Example No. 14
def remove_projection_from_vector(v, w):
    """projects v onto w and subtracts the result from v"""
    return vector_subtract(v, project(v, w))