Example #1
def steepest_descent(fun, maxiters, minstep, a, h, start):
    """
    Steepest-descent numerical minimizer.
    :param fun: function to minimize
    :param maxiters: maximum number of iterations used if minstep is not reached
    :param minstep: stop once the step length is smaller than this
    :param a: step-size parameter
    :param h: dx, the step size for the gradient function
    :param start: point where the search starts
    :return: coordinates of the minimum found
    """
    next_coord = start
    # loop over the iterations
    for i in range(maxiters):
        current_coord = next_coord
        grad = gradient(fun, h, current_coord)
        # gamma based on lecture materials, formula 31
        gamma = a / (len(grad) + 1)
        # next coordinate based on lecture materials, formula 32
        next_coord = current_coord - gamma * grad
        # calculate the step size
        step = np.linalg.norm(next_coord - current_coord)
        # check if step is too small
        if step < minstep:
            break
    return next_coord
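The gradient(fun, h, coord) helper that steepest_descent relies on is not shown. A minimal central-difference sketch consistent with how it is called here (the signature and the NumPy return type are assumptions):

import numpy as np

def gradient(fun, h, coord):
    """Assumed helper: central-difference estimate of the gradient of fun at coord."""
    coord = np.asarray(coord, dtype=float)
    grad = np.zeros_like(coord)
    for k in range(coord.size):
        step = np.zeros_like(coord)
        step[k] = h
        grad[k] = (fun(coord + step) - fun(coord - step)) / (2 * h)
    return grad

With this helper, steepest_descent(lambda p: p[0]**2 + p[1]**2, 1000, 1e-8, 1.0, 1e-6, np.array([3.0, 4.0])) shrinks the iterate toward the origin, since gamma stays at a / (n + 1) = 1/3 for a 2-D problem.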
Example #2
    def test_gradient(self):
        initial_guess = numpy.array([[3]])
        derivative = gradient(self.parabola, initial_guess)
        self.assertTrue(self.matrices_equal(derivative, [[4]]))

        initial_guess = numpy.array([[1]])
        derivative = gradient(self.parabola, initial_guess)
        self.assertTrue(self.matrices_equal(derivative, [[0]]))

        initial_guess = numpy.array([[1], [2]])
        derivative = gradient(self.elliptic_paraboloid, initial_guess)
        self.assertTrue(self.matrices_equal(derivative, [[4], [8]]))
Example #3
def marquardt(x0, m, eps, f):
    k = 0
    lam = 100000.0
    x = x0.copy()
    new_grad = True
    while k < m:
        if new_grad:
            grad = gradient(f, x, eps)
            new_grad = False
        if norm(grad) < eps:
            break
        h = hesse(f, x, eps)
        for i in range(len(h)):
            h[i][i] += lam
        h = inverse_matrix(h)
        x1 = x.copy()
        for i in range(len(x1)):
            for j in range(len(h)):
                x1[i] -= h[i][j]*grad[j]
        if f(x1) < f(x):
            lam /= 2
            k += 1
            new_grad = True
            x = x1.copy()
        else:
            lam *= 2
    return x, f(x)
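marquardt() leans on four helpers (gradient, hesse, inverse_matrix, norm) that are not shown. A self-contained usage sketch with NumPy-based stand-ins, written only to match how marquardt() indexes them (all of the helper definitions below are assumptions):

import numpy as np

def norm(v):
    return float(np.linalg.norm(v))

def gradient(f, x, eps):
    # Central-difference gradient
    x = np.asarray(x, dtype=float)
    g = np.zeros_like(x)
    for k in range(x.size):
        d = np.zeros_like(x)
        d[k] = eps
        g[k] = (f(x + d) - f(x - d)) / (2 * eps)
    return g

def hesse(f, x, eps):
    # Finite-difference Hessian; adequate for a smooth test function
    x = np.asarray(x, dtype=float)
    n = x.size
    h = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            di = np.zeros_like(x)
            dj = np.zeros_like(x)
            di[i] = eps
            dj[j] = eps
            h[i][j] = (f(x + di + dj) - f(x + di - dj)
                       - f(x - di + dj) + f(x - di - dj)) / (4 * eps * eps)
    return h

def inverse_matrix(h):
    return np.linalg.inv(h)

def sphere(p):
    return float(p[0] ** 2 + p[1] ** 2)

x_min, f_min = marquardt(np.array([4.0, -3.0]), m=200, eps=1e-4, f=sphere)
print(x_min, f_min)   # expected to approach (0, 0) and 0

The large initial lam makes the first steps behave like small gradient steps; each accepted step halves lam, so the iteration gradually turns into a Newton step.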
Example #4
def main():
    data = pd.read_csv('data/ex2data1.csv')
    initial_X = np.array(data.iloc[:,[0, 1]])
    y = np.array(data.iloc[:,2])
    X = normalize(initial_X)

    # Concatenate column of ones
    m, n = X.shape
    ones = np.array([np.ones(m)])
    concat_X = np.concatenate((ones.T, X), axis=1)

    costs = []
    theta = np.zeros((n+1, 1))

    for i in range(0, ITERATIONS):
        costs.append(cost(theta, concat_X, y))
        theta = gradient(theta, concat_X, y, ALPHA)


    prepare_scatter(X, y)

    print(theta)

    final_func = lambda x: (-theta[1]*x - theta[0])/theta[2]
    x_axis_values = [ i/1000 for i in range(0, 1000) ]
    y_axis_values = [ final_func(x) for x in x_axis_values]
    plt.plot(x_axis_values, y_axis_values, 'r--')
    plt.show()
Example #5
def main():
    data = pd.read_csv('data/ex2data2.csv')
    initial_X = np.array(data.iloc[:, [0, 1]])
    y = np.array(data.iloc[:, 2])
    X = normalize(initial_X)
    mapped_X = np.array([[
        1.0, x[0], x[1], x[0] * x[1], x[0]**2, x[1]**2, x[0] * x[1]**2,
        x[1] * x[0]**2
    ] for x in X])

    costs = []
    m, n = mapped_X.shape
    theta = np.zeros((n, 1))

    for i in range(0, ITERATIONS):
        costs.append(cost(theta, mapped_X, y, LAMBDA))
        theta = gradient(theta, mapped_X, y, ALPHA, LAMBDA)

    # Testing train data
    hits = 0
    errors = 0
    for i in range(len(X)):
        y_try = 1 if hypothesis(theta, mapped_X[i]) >= 0.5 else 0
        if (y_try == y[i]):
            hits += 1
        else:
            errors += 1

    print("Acertos: {0}".format(hits))
    print("Erros: {0}".format(errors))
    print("Total: {0}".format(hits + errors))
    print("Acurácia: {0}%".format(hits / (hits + errors) * 100))

    # prepare_scatter(X, y)
    plt.show()
Example #6
def fit_(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float,
         max_iter: int) -> np.ndarray:
    """
    Description:
        Fits the model to the training dataset contained in x and y.
    Args:
        x: a vector of dimension m * 1: (number of training examples, 1).
        y: a vector of dimension m * 1: (number of training examples, 1).
        theta: a vector of dimension 2 * 1.
        alpha: has to be a float, the learning rate
        max_iter: has to be an int, the number of iterations done
    Returns:
        new_theta: numpy.ndarray, a vector of dimension 2 * 1.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exception.
    """
    if x.shape != y.shape or theta.shape != (2, 1):
        return None
    theta = theta.astype("float64")
    while max_iter > 0:
        new_theta = gradient(x, y, theta)
        theta[0][0] -= alpha * new_theta[0][0]
        theta[1][0] -= alpha * new_theta[1][0]
        max_iter -= 1
    return theta
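A usage sketch for this fit_ on a tiny univariate dataset. The gradient(x, y, theta) it calls is not shown; the least-squares definition below is an assumption made so the example runs end to end:

import numpy as np

def gradient(x, y, theta):
    """Assumed helper: mean least-squares gradient for y ≈ theta[0] + theta[1] * x."""
    m = x.shape[0]
    err = theta[0][0] + theta[1][0] * x - y          # shape (m, 1)
    return np.array([[float(np.sum(err)) / m],
                     [float(np.sum(err * x)) / m]])

x = np.arange(0.0, 10.0).reshape(-1, 1)
y = 3.0 + 2.0 * x                                     # noiseless line y = 3 + 2x
theta = fit_(x, y, np.array([[0.0], [0.0]]), alpha=0.05, max_iter=5000)
print(theta)                                          # expected to approach [[3.], [2.]]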
Example #7
def output(cursor, by_binary_list, whitelist_avg, blacklist_avg):
    """ Print out a report for the output of malfunction

    cursor - database cursor
    by_binary_list - list of strong matching binaries
    whitelist_avg - the whitelist score
    blacklist_avg - the blacklist score"""

    score = whitelist_avg - blacklist_avg

    print("Whitelist Average: " + str(whitelist_avg))
    print("Blacklist Average: " + str(blacklist_avg))
    print("            Score: " + str(score))
    gradient.gradient(score)

    possible_filenames = []
    possible_authors = []
    comments = []
    for binary_id in by_binary_list:
        cursor.execute(
            "SELECT author,filenames,comment FROM "
            "binaries WHERE binaryID=?", (binary_id, ))
        binary_entry = cursor.fetchone()
        if binary_entry[0] not in possible_authors and binary_entry[0]:
            possible_authors.append(binary_entry[0])
        if binary_entry[1] not in possible_filenames and binary_entry[1]:
            possible_filenames.append(binary_entry[1])
        if binary_entry[2] not in comments and binary_entry[2]:
            comments.append(binary_entry[2])
    if possible_authors:
        print("***Possible Authors of this binary***")
        for author in possible_authors:
            print(author, end=" - ")
    print("\n")
    if possible_filenames:
        print("***Possible Filenames this binary could go by***")
        for filename in possible_filenames:
            print(filename, end=" - ")
    print("\n")
    if comments:
        print("***Comments about similar binaries***")
        for comment in comments:
            print(comment)
Example #8
def graddes(theta, X, y, alpha, num_iters, greek_lambda):
    # print(y)
    J_history = []

    for _ in range(num_iters):
        c = costfunction.cost(theta, X, y, greek_lambda)
        grad = gradient.gradient(theta, X, y, greek_lambda)
        theta = theta - (alpha * grad)
        J_history.append(c)
    return theta, J_history
Example #9
def output(cursor, by_binary_list, whitelist_avg, blacklist_avg):
    """ Print out a report for the output of malfunction

    cursor - database cursor
    by_binary_list - list of strong matching binaries
    whitelist_avg - the whitelist score
    blacklist_avg - the blacklist score"""

    score = whitelist_avg - blacklist_avg

    print("Whitelist Average: " + str(whitelist_avg))
    print("Blacklist Average: " + str(blacklist_avg))
    print("            Score: " + str(score))
    gradient.gradient(score)

    possible_filenames = []
    possible_authors = []
    comments = []
    for binary_id in by_binary_list:
        cursor.execute("SELECT author,filenames,comment FROM "
                       "binaries WHERE binaryID=?", (binary_id, ))
        binary_entry = cursor.fetchone()
        if binary_entry[0] not in possible_authors and binary_entry[0]:
            possible_authors.append(binary_entry[0])
        if binary_entry[1] not in possible_filenames and binary_entry[1]:
            possible_filenames.append(binary_entry[1])
        if binary_entry[2] not in comments and binary_entry[2]:
            comments.append(binary_entry[2])
    if possible_authors:
        print("***Possible Authors of this binary***")
        for author in possible_authors:
            print(author, end=" - ")
    print("\n")
    if possible_filenames:
        print("***Possible Filenames this binary could go by***")
        for filename in possible_filenames:
            print(filename, end=" - ")
    print("\n")
    if comments:
        print("***Comments about similar binaries***")
        for comment in comments:
            print(comment)
Example #10
def test_gradient():
    importlib.reload(gradient)

    def func(x):
        return (x - 5)**2

    point = (5, )

    result = gradient.gradient(func, point)

    print(f"Expected: 0\nActual: {result}")
Example #11
def fit_(self, X, Y, alpha, n_cycle):
    """
    Operates a gradient descent on theta, with n_cycle being the number of iterations.
    """
    # Prepend a column of ones for the intercept term
    X_N = np.full((X.shape[0], X.shape[1] + 1), 1, float)
    X_N[:, 1:] = X
    for _ in range(n_cycle):
        # Copy theta so every component is updated from the same parameter vector
        theta_tmp = self.theta.copy()
        for j in range(self.theta.size):
            self.theta[j] = self.theta[j] - alpha * gradient(X_N, Y, theta_tmp)[j]
    return self.theta
Example #12
def test():
    from median import median3x3
    from gradient import gradient
    from hsi import rgb2hsi, hsi2rgb, joinChannels, splitChannels
    
    # Create a noisy image with an embedded white square
    image = np.zeros((201,199),dtype=np.float32)
    width,height = image.shape
    x, y = width // 2, height // 2  # integer centre coordinates, needed for slicing below
    offset = 10
    image[x-offset:x+offset,y-offset:y+offset] = 2
    image += np.random.random_sample(image.shape)
    
    filtered = median3x3(image, 100)

    showArray("Noisy",image)
    showArray("Filtered",filtered)
    
    image = np.float32(imread("test.jpg"))
    image /= 256.

    showArray("Test HSI",image)
    r,g,b = splitChannels(image)
    h,s,i = rgb2hsi(r,g,b)
    showArray("I",i)
    showArray("S",s)
    showArray("H",h)

    from gaussian import gaussImage
    blur = gaussImage(i, 3)
    showArray("Blur", blur)
    blurmore = gaussImage(i,4)
    dog = blur-blurmore
    showArray("DOG", dog)
    
    g,a = gradient(i,5)
    showArray("Gradient",g)
    showArray("Angle", a)
    sat = np.ones_like(i)
    gimg = joinChannels(*hsi2rgb(a,sat,g))
    showArray("Color gradient with angle", gimg)
    showArrayGrad("Grad angle", image, a)
    showArrayGrad("Grad vectors", image, a,g*10)
Example #13
def genStyles():
    styles = b""
    randList = [
        ("background: " + gradient.gradient() + ";").encode("utf-8"),
        ("position: absolute; top: " + common.number() + "em; left: " +
         common.number() + "em;").encode("utf-8"),
        ("position: relative; top: " + common.number() + "em; left: " +
         common.number() + "em;").encode("utf-8"),
        ("height: " + common.length() + ";").encode("utf-8"),
        ("height: " + common.percent() + ";").encode("utf-8"),
        ("width: " + common.length() + ";").encode("utf-8"),
        ("width: " + common.percent() + ";").encode("utf-8"),
        ("max-width: " + common.length() + ";").encode("utf-8"),
        ("max-height: " + common.length() + ";").encode("utf-8"),
        ("transition: all " + common.time() + ";").encode("utf-8"),
    ]
    for j in range(10):
        styles += random.choice(randList)
    return styles
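genStyles() treats gradient.gradient() as returning a CSS gradient value. A minimal sketch of such a helper, purely an assumption for illustration (the real module is not shown):

import random

def gradient():
    """Hypothetical helper: return a random CSS linear-gradient() value."""
    def color():
        return "#%06x" % random.randint(0, 0xFFFFFF)
    return "linear-gradient(%ddeg, %s, %s)" % (random.randint(0, 359), color(), color())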
Example #14
def fit_(x, y, theta, alpha, n_cycles):
    """
    Description:
    Fits the model to the training dataset contained in x and y.
    Args:
    x: has to be a numpy.ndarray, a matrix of dimension m * n: (number of training examples, number of features).
    y: has to be a numpy.ndarray, a vector of dimension m * 1: (number of training examples, 1).
    theta: has to be a numpy.ndarray, a vector of dimension (n + 1) * 1: (number of features + 1, 1).
    alpha: has to be a float, the learning rate
    n_cycles: has to be an int, the number of iterations done during the gradient descent
    Returns:
    new_theta: numpy.ndarray, a vector of dimension (number of features + 1, 1).
    None if there is a matching dimension problem.
    Raises:
    This function should not raise any Exception.
    """
    if theta.shape != (theta.shape[0],):
        theta = np.squeeze(theta)
    for _ in range(n_cycles):
        theta = theta - (alpha * gradient(x, y, theta))
    return theta
Example #15
def fit_(x, y, theta, alpha, n_cycles):
    """
    Description:
    Fits the model to the training dataset contained in x and y.
    Args:
    x: has to be a numpy.ndarray, a matrix of dimension m * n: (number of training examples, number of features).
    y: has to be a numpy.ndarray, a vector of dimension m * 1: (number of training examples, 1).
    theta: has to be a numpy.ndarray, a vector of dimension (n + 1) * 1: (number of features + 1, 1).
    alpha: has to be a float, the learning rate
    n_cycles: has to be an int, the number of iterations done during the gradient descent
    Returns:
    new_theta: numpy.ndarray, a vector of dimension (number of features + 1, 1).
    None if there is a matching dimension problem.
    Raises:
    This function should not raise any Exception.
    """
    if x is None or y is None or len(x) < 1 or len(y) < 1 or len(theta) < 1 or x.shape[0] != y.shape[0]:
        return None
    #x_norm = np.zeros(x.shape)
    #i = 0
    #while i < x.shape[1]:
    #	x_norm[:,i] = (x[:,i] - x[:,i].mean()) / x[:,i].std()
    #	i += 1
    #y_norm = (y - y.mean()) / y.std()
    for _ in range(n_cycles):
        theta -= (gradient(x, y, theta) * alpha)
    #res = theta[0]
    #i = 1
    #while i < len(theta):
    #	res -= (theta[i] * x[:,i - 1].mean() / x[:,i - 1].std())
    #	i += 1
    #theta[0] = (res * y.std()) + y.mean()
    #i = 1
    #while i < len(theta):
    #	theta[i] = (theta[i] * y.std() / x[:,i - 1].std())
    #	i += 1
    return theta
Example #16
def nonmax(image, sigma):
    grads, directions = gradient(image, sigma)

    height, width = grads.shape
    buffer_img = np.zeros(shape=(height + 2, width + 2), dtype=np.float64)

    buffer_img[1:height + 1, 1:width + 1] = grads

    buffer_img[0] = buffer_img[1]
    buffer_img[-1] = buffer_img[-2]
    buffer_img[:, 0] = buffer_img[:, 1]
    buffer_img[:, -1] = buffer_img[:, -2]

    nonmaxed = np.zeros(shape=(height, width), dtype=np.float64)

    for i in range(height):
        for j in range(width):
            m = 0
            if (directions[i, j] == 0):
                continue
            elif directions[i, j] == 128:
                m = max(buffer_img[i, j + 1], buffer_img[i + 2, j + 1])
            elif directions[i, j] == 64:
                m = max(buffer_img[i + 1, j], buffer_img[i + 1, j + 2])
            elif directions[i, j] == 255:
                m = max(buffer_img[i, j + 2], buffer_img[i + 2, j])
            elif directions[i, j] == 192:
                m = max(buffer_img[i + 2, j + 2], buffer_img[i, j])
            if grads[i, j] > m:
                nonmaxed[i, j] = grads[i, j]

    nonmaxed -= np.min(nonmaxed)
    m = np.max(nonmaxed)
    if m > 0:
        nonmaxed /= m

    return nonmaxed
Example #17
def gradient_descent(func, v, abs_error = 1e-5):
    epsilon = 1e-5
    absolute_error = abs_error

    v_old = numpy.array(v)
    v_old = make_column_vector(v_old)
    v_new = v_old

    f_old = func(v_old)
    f_new = f_old + absolute_error * 2

    while numpy.absolute(f_new - f_old) > absolute_error:
        v_new, f_new, epsilon = adapt_learning_rate(
            v_old, v_new, f_old, f_new, epsilon
        )

        v_old = v_new
        f_old = f_new

        G = gradient(func, v_old)
        v_new = v_old - epsilon * G
        f_new = func(v_new)
        
    return v_new
Example #18
def log_cost(theta, x, y, hyper_p):
    """
        Logistic regression cost function with regularization.

        Parameters
        ----------
        theta : array_like
            Shape (1, n+1). Parameter values for function.

        x : array_like
            Shape (m, n+1). Features in model.

        y : array_like
            Shape (m, 1). Labels for each example.

        hyper_p : float
            Value of the hyperparameter for regularization.

        Returns
        -------
        cost : float
            Value of cost function at given parameters.

        grad : array_like
            Shape (1, n+1). The gradient for each parameter.
    """

    theta = np.reshape(theta, (1, x.shape[1]))
    size = y.shape[0]

    h = sigmoid(x @ theta.T)
    reg = (hyper_p / (2 * size)) * np.sum(theta[:, 1:theta.shape[1]] ** 2)

    cost = -((1 / size) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))) + reg
    grad = gradient(theta, x, y, hyper_p)
    return cost, grad
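log_cost delegates to a gradient(theta, x, y, hyper_p) that is not shown. A sketch consistent with the shapes documented above, using the standard regularized form (this definition is an assumption; sigmoid is the same helper log_cost already uses):

import numpy as np

def gradient(theta, x, y, hyper_p):
    """Assumed helper: regularized logistic-regression gradient, shape (1, n+1)."""
    size = y.shape[0]
    theta = np.reshape(theta, (1, x.shape[1]))
    h = sigmoid(x @ theta.T)                 # shape (m, 1)
    grad = (1 / size) * ((h - y).T @ x)      # shape (1, n+1)
    reg = (hyper_p / size) * theta
    reg[0, 0] = 0                            # the intercept term is not regularized
    return grad + reg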
Example #19
def zcsdog(a, scale,clearmargin=True,frame=True, res=True):
    if res:
        g = getKernel(scale)
        g1 = getKernel(scale+1)
        g.write(a)
        smaller = np.zeros_like(a)
        larger = np.zeros_like(a)
        tmp = np.zeros_like(a)
        zca = np.zeros_like(a)
        g.write(smaller)
        g.write(larger)
        g.write(tmp)
        g.write(zca)
        g.res(0,a,tmp)
        g.res(1,tmp,smaller)
        g1.res(0,a,tmp)
        g1.res(1,tmp,larger)
        sub_res(larger,smaller,tmp)
        zcs_res(tmp,zca)
        g.read(zca)
        g.read(smaller)
    else:
        smaller = gaussImage(a,scale)
        larger = gaussImage(a,scale+1)
        dog = smaller - larger
        zca = zcs(dog)
    # Zero out of bounds and create a frame.
    grad,theta = gradient(smaller)
    margin = getGaussianWidth(scale)+1
    if clearmargin:
        setMargin(zca, margin)
        setMargin(grad,margin)
        setMargin(theta,margin)
    if frame:
        setFrame(zca, margin)
    return zca.copy(),grad.copy(),theta.copy()
Example #20
time.sleep(1.5)  # pause for 1.5 secs

# =========== Regularized Logistic Regression ============
# Add Polynomial Features
print('Feature Mapping of the training Data\n')

phi = mapFeature.mapFeature(X[:, 0], X[:, 1])

#Initialize fitting parameters
initial_theta = np.zeros((phi.shape[1], 1))

# Set regularization parameter lambda to 1.
l = 1

cost = costFunctionreg.costFunctionreg(initial_theta, phi, Y, l)
grad = gradient.gradient(initial_theta, phi, Y, l)

print(f'Cost at initial theta (zeros): \n{cost}\n')
print('Expected cost (approx): 0.693\n')
print('Gradient at initial theta (zeros) - first five values only:\n')
print(f'{grad[0:5]}\n', )
print('Expected gradients (approx) - first five values only:\n')
print(' 0.0085\n 0.0188\n 0.0001\n 0.0503\n 0.0115\n')

# Compute and display cost and gradient with all-ones theta and lambda = 10
test_theta = np.ones((phi.shape[1], 1))
costs = costFunctionreg.costFunctionreg(test_theta, phi, Y, 10)
grads = gradient.gradient(test_theta, phi, Y, 10)

print(f'\nCost at test theta (with lambda = 10): \n {costs[0,0]}')
print('Expected cost (approx): 3.16\n')
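The costFunctionreg and gradient modules used in this script are not shown. Sketches of implementations consistent with the expected values above (these are assumptions based on the standard regularized logistic-regression formulas):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def costFunctionreg(theta, X, y, lam):
    m = y.shape[0]
    h = sigmoid(X @ theta)
    reg = (lam / (2 * m)) * np.sum(theta[1:] ** 2)
    return (-(1 / m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))).item() + reg

def gradient(theta, X, y, lam):
    m = y.shape[0]
    grad = (1 / m) * (X.T @ (sigmoid(X @ theta) - y))
    grad[1:] += (lam / m) * theta[1:]      # do not regularize the intercept term
    return grad

At theta = 0 every prediction is 0.5, so the unregularized cost is -log(0.5) ≈ 0.693 for any dataset, which matches the expected value printed above.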
Example #21
def test_gradient_array():
    p1 = np.array([0, 0, 1])
    p2 = np.array([0, 2, 5])
    obs = gradient(p1, p2)
    expect = 2
    assert obs == expect
Example #22
import numpy as np
import math
from gradient import gradient
def func(x):
    return x[0] + x[1]*x[1] + math.log(x[2])

x = np.zeros(3)
x[0] = 1
x[1] = 3
x[2] = 180
gr = gradient(0.01)
print(gr.derive(func, x))
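In this example gradient is a class: its constructor takes a step size and derive() returns a numerical gradient. A sketch of such a class (an assumption, the real gradient module is not shown):

import numpy as np

class gradient:
    """Hypothetical finite-difference helper matching gradient(0.01).derive(func, x)."""
    def __init__(self, h):
        self.h = h

    def derive(self, func, x):
        x = np.asarray(x, dtype=float)
        out = np.zeros_like(x)
        for k in range(x.size):
            d = np.zeros_like(x)
            d[k] = self.h
            out[k] = (func(x + d) - func(x - d)) / (2 * self.h)
        return out

For the func and x above this prints approximately [1.0, 6.0, 0.00556], i.e. the partial derivatives 1, 2*x[1], and 1/x[2].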
Example #23
if __name__ == '__main__':
    # from sys import argv
    # if len(argv) < 2:
    #     print("Usage: python %s <image>" % argv[0])
    #     exit()
    im_path = '/tfshare/PycharmProjects/canny/valve.png'
    im = array(Image.open(im_path))
    subplot(1, 2, 1)
    imshow(im)
    axis('off')
    title('Original')

    im = im[:, :, 0]
    gim = gaussian(im)
    grim, gphase = gradient(gim)
    gmax = maximum(grim, gphase)
    thres = thresholding(gmax)
    edge = tracking(thres)

    gray()
    # subplot(1, 2, 1)
    # imshow()
    # axis('off')
    # title('double')

    subplot(1, 2, 2)
    imshow(edge.im)
    axis('off')
    title('Edges')
Example #24
import gradient as g
import loop as l
import copy
import evaluate as e
import cv2

# Read the image as grayscale so it becomes a 2-D matrix. Even though the source image is
# already grayscale, this step is still needed; otherwise the result would be a 3-D matrix.
peppers = cv2.imread('raw_peppers.png', 0)

# Use a Gaussian filter to remove noise.
# (5, 5) is the Gaussian kernel size; 0 is sigmaX, and 0 means the matching sigma is computed automatically.
peppers_b = cv2.GaussianBlur(peppers, (5, 5), 0)

peppers_t = copy.deepcopy(peppers_b)

# Returns the matrix after non-maximum suppression.
peppers_g = g.gradient(peppers_t)

# Returns the optimal threshold after iteration.
peppers_T = l.loop(peppers_g)

# ret is the returned optimal threshold, image is the processed image.
# THRESH_BINARY is supposedly the triangle method (this claim is doubtful; I forget where I read it,
# and in practice it seems to just use the threshold that was passed in).
# THRESH_OTSU is Otsu's method.
# This call picks the better threshold based on the histogram, i.e. Otsu's or the one passed in,
# and it can be read from the return value ret.
# peppers_ret, new1 = cv2.threshold(peppers_b, peppers_T, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# The line below only uses the threshold computed by Otsu's method, but Otsu has a drawback:
# it may work poorly when the histogram has no clear double peak.
peppers_ret, image = cv2.threshold(peppers_b, peppers_T, 255, cv2.THRESH_OTSU)

# Take the smaller of the two because I want more edges to be drawn (my own preference, possibly without theoretical justification).
peppers_T = min(peppers_ret, peppers_T)
Example #25
temp_theta = np.array([-2, -1, 1, 2]).reshape(4, 1)
temp_X = np.array([np.linspace(0.1, 1.5, 15)]).reshape(3, 5).T
'''
Without the .T, temp_X would be:
        [[0.1 0.2 0.3 0.4 0.5]
         [0.6 0.7 0.8 0.9 1. ]
         [1.1 1.2 1.3 1.4 1.5]]
'''
temp_X = np.hstack((np.ones((5, 1)), temp_X))
temp_y = np.array([1, 0, 1, 0, 1]).reshape(5, 1)
J = costfunction.cost(temp_theta, temp_X, temp_y, 3)
print(J)

grad = gradient.gradient(temp_theta, temp_X, temp_y, 3)
print(grad)

print("Running gradient descent...(estimated 5 seconds)")
greek_lambda = 0.1
num_labels = 10
all_theta, all_J = onevsall.onevall(X, y, num_labels, greek_lambda)

plot_j.plot(all_J)

print(np.shape(all_theta))

prediction = predict.predict(all_theta, X)
print(np.shape(prediction))
acc = sum(prediction[:, np.newaxis] == y)[0] * 100 / 5000
print("Accuracy:", acc, "%")
Example #26
# Add Polynomial Features
# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled

X = mapFeature(X[:,0], X[:,1])


# Initialize fitting parameters
initial_theta = np.zeros((X.shape[1], 1))

# Set regularization parameter lambda to 1
reg_lambda = 1

# Compute and display initial cost and gradient for regularized logistic
# regression
cost, grad = costFunction(initial_theta, X, y, reg_lambda), gradient(initial_theta, X, y, reg_lambda)

print('Cost at initial theta (zeros):', cost)
print('Expected cost (approx): 0.693\n')
print('Gradient at initial theta (zeros) - first five values only:\n')
print(grad[0:5])
print('Expected gradients (approx) - first five values only:\n')
print(' 0.0085\n 0.0188\n 0.0001\n 0.0503\n 0.0115\n')

print('\nProgram paused. Press enter to continue.\n')
pause()

# Compute and display cost and gradient
# with all-ones theta and lambda = 10
test_theta = np.ones((X.shape[1],1))
cost, grad = costFunction(test_theta, X, y, 10), gradient(test_theta, X, y, 10)
pause()


"""## Part 2: Compute Cost and Gradient """

#  Setup the data matrix appropriately, and add ones for the intercept term
m, n = X.shape

#Add intercept term to x and X_test
X = np.c_[np.ones((m, 1)), X]

#Initialize fitting parameters
initial_theta = np.zeros((n + 1, 1))

#Compute and display initial cost and gradient
cost, grad = costFunction(initial_theta, X, y), gradient(initial_theta, X, y)

print("Cost at initial theta (zeros): ", cost, "\n")
print("Expected cost (approx): 0.693\n")
print('Gradient at initial theta (zeros): \n')
print(grad)
print("Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628\n")

#Compute and display cost and gradient with non-zero theta
test_theta = np.array([[-24], [0.2], [0.2]])
cost, grad = costFunction(test_theta, X, y), gradient(test_theta, X, y)

print("\nCost at test theta:", cost, "\n")
print("Expected cost (approx): 0.218\n")
print("Gradient at test theta: \n")
print(grad)
Example #28
def fit_(x, y, theta, alpha, n_cycles):
    for _ in range(n_cycles):
        theta -= alpha * gradient(x, y, theta)
    return theta
Example #29
from process_X import process_X
from cost_function import cost

alpha = 0.0000001
num_iters = 400
theta = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])

#get X, y as numpy array
(x1, x2, y) = getX_y()
X = process_X(x1, x2)
# print "shape(X):", np.shape(X)
# print "shape(theta):", np.shape(theta)
# print h(X,theta)
print "Cost before:", cost(X, y, theta)

theta = gradient(X, y, theta, alpha, num_iters)

print "Cost after:", cost(X, y, theta)
h = h(X, theta)

#print "X:", X
#print "Theta:", theta
plt.figure()
plt.title("Sales Prediction")
plt.xlabel("month(from Jun, 2016 to Sept,2017)")
plt.ylabel("sales amount")
plt.plot(X[:, 2], y, 'ro')
plt.plot(X[:, 2], h, 'b+')
#plt.show()

mpl.rcParams['legend.fontsize'] = 10
Example #30
def test_negative():
    p1 = [-1, 1, 1]
    p2 = [1, 1, 5]
    obs = gradient(p1, p2)
    expect = 2
    assert obs == expect
Example #31
            gmax[i][j] = det[i][j]
        # 135 degrees
        if (phase[i][j] >= 112.5 and phase[i][j] < 157.5) or (phase[i][j] >= 292.5 and phase[i][j] < 337.5):
          if det[i][j] >= det[i - 1][j - 1] and det[i][j] >= det[i + 1][j + 1]:
            gmax[i][j] = det[i][j]
  return gmax

if __name__ == '__main__':
  from sys import argv
  if len(argv) < 2:
      print "Usage: python %s <image>" % argv[0]
      exit()
  im = array(Image.open(argv[1]))
  im = im[:, :, 0]
  gim = gaussian(im)
  grim, gphase = gradient(gim)
  gmax = maximum(grim, gphase)

  gray()

  subplot(2, 2, 1)
  imshow(im)
  axis('off')
  title('Original')

  subplot(2, 2, 2)
  imshow(gim)
  axis('off')
  title('Gaussian')

  subplot(2, 2, 3)
Example #32
plt.ylabel('Exam 2 score')
plt.title('Exam scores')
plt.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
plt.show()

time.sleep(2)  # pause for 2 secs

m, n = np.shape(X)
phi = np.concatenate((np.ones((m, 1)), X), axis=1)

# initial theta
initial_theta = np.zeros((n + 1, 1))

#compute and display initial  cost and gradient
J = costFunction.costFunction(initial_theta, phi, Y)
grad = gradient.gradient(initial_theta, phi, Y)

print(f'Cost at initial theta (zeros): {round(J,2)}')
print('Expected cost (approx): 0.693\n')
print('Gradient at initial theta (zeros): \n')
print(f' {grad}')
print('Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628\n')

# Compute and display cost and gradient with non-zero theta
test_theta = np.array([[-24, 0.2, 0.2]]).T

Ja = costFunction.costFunction(test_theta, phi, Y)
grada = gradient.gradient(test_theta, phi, Y)
print(f'\nCost at test theta: \n {Ja}\n')
print('Expected cost (approx): 0.218\n')
print('Gradient at test theta: \n')
Example #33
#1.1
mask = y == 1
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
adm = plt.plot(X[mask][0], X[mask][1], 'k+', label='Admitted')
not_adm = plt.plot(X[~mask][0], X[~mask][1], 'yo', label='Not admitted')

#1.2
ones = np.ones((m, 1))
y = y[:, np.newaxis]
X = np.hstack((ones, X))
theta = np.zeros([3, 1])
J = costFunction(theta, X, y)
print(J)

grad = gradient(theta, X, y)
print(grad)

theta_optimized = fmintnc(theta, X, y)
print(theta_optimized)

J_theta_optimized = costFunction(theta_optimized[:, np.newaxis], X, y)
print(J_theta_optimized)

#TestCase
Xtest = np.array([[1, 45, 85]])
test = sigmoid(np.dot(Xtest, theta_optimized))
print(test)

pred = predict(theta_optimized[:, np.newaxis], X, m)
acc = np.mean(pred == y)
Example #34
from gradient import gradient
import numpy as np
X = np.array([[-7, -9],
              [-2, 14],
              [4, -1],
              [4, 6],
              [-9, 6],
              [-5, 11],
              [-11, 8]])
Y = np.array([2, 14, -13, 5, 12, 4])
Z = np.array([3,0.5])

print(gradient(X, Y, Z))
Example #35
def image():
	if ri(0,1) == 0:
		return url()
	else:
		return gradient()
Example #36
plot_data(X, y)
plt.xlabel('Exam 1 Score')
plt.ylabel('Exam 2 Score')
plt.legend(['Admitted', 'Not admitted'])

input('Program paused. Press enter to continue.')

#============ Part 2: Compute Cost and Gradient ============

m, n = X.shape
X = map_feature(X)

initial_theta = np.zeros((n + 4, 1))

cost = cost_function(initial_theta, X, y)
grad = gradient(initial_theta, X, y)

print('Cost at initial theta (zeros):', cost)
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros)', grad)

test_theta = np.array([-24, 0.2, 0.2, 0, 0.1, 0]).reshape((6, 1))
cost = cost_function(test_theta, X, y)
grad = gradient(test_theta, X, y)

print('Cost at test theta:', cost)
print('Gradient at test theta', grad)

# ============= Part 3: Optimizing using fminunc  =============

fmin_result = fmin_bfgs(cost_function, initial_theta, gradient, (X, y), maxiter=400, full_output=True)
Example #37
# Add Polynomial Features
# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled

X = mapFeature(X[:, 0], X[:, 1])

# Initialize fitting parameters
initial_theta = np.zeros((X.shape[1], 1))

# Set regularization parameter lambda to 1
reg_lambda = 1

# Compute and display initial cost and gradient for regularized logistic
# regression
cost, grad = costFunction(initial_theta, X, y,
                          reg_lambda), gradient(initial_theta, X, y,
                                                reg_lambda)

print('Cost at initial theta (zeros):', cost)
print('Expected cost (approx): 0.693\n')
print('Gradient at initial theta (zeros) - first five values only:\n')
print(grad[0:5])
print('Expected gradients (approx) - first five values only:\n')
print(' 0.0085\n 0.0188\n 0.0001\n 0.0503\n 0.0115\n')

print('\nProgram paused. Press enter to continue.\n')
pause()

# Compute and display cost and gradient
# with all-ones theta and lambda = 10
test_theta = np.ones((X.shape[1], 1))
cost, grad = costFunction(test_theta, X, y, 10), gradient(test_theta, X, y, 10)
print("\nProgram paused. Press enter to continue.\n")
pause()

#============ Part 2: Compute Cost and Gradient ============

#  Setup the data matrix appropriately, and add ones for the intercept term
m, n = X.shape

#Add intercept term to x and X_test
X = np.c_[np.ones((m, 1)), X]

#Initialize fitting parameters
initial_theta = np.zeros((n + 1, 1))

#Compute and display initial cost and gradient
cost, grad = costFunction(initial_theta, X, y), gradient(initial_theta, X, y)

print("Cost at initial theta (zeros): ", cost, "\n")
print("Expected cost (approx): 0.693\n")
print('Gradient at initial theta (zeros): \n')
print(grad)
print("Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628\n")

#Compute and display cost and gradient with non-zero theta
test_theta = np.array([[-24], [0.2], [0.2]])
cost, grad = costFunction(test_theta, X, y), gradient(test_theta, X, y)

print("\nCost at test theta:", cost, "\n")
print("Expected cost (approx): 0.218\n")
print("Gradient at test theta: \n")
print(grad)
Example #39
import numpy as np
from gradient import gradient

X = np.array([[-6, -7, -9], [13, -2, 14], [-7, 14, -1], [-8, -4, 6],
              [-5, -9, 6], [1, -5, 11], [9, -11, 8]])
Y = np.array([2, 14, -13, 5, 12, 4, -19])
Z = np.array([3, 0.5, -6])
print(gradient(X, Y, Z))
# array([-37.35714286, 183.14285714, -393.])

W = np.array([0, 0, 0])
print(gradient(X, Y, W))
# array([0.85714286, 23.28571429, -26.42857143])

gradient(X, X.dot(Z), Z)
print(gradient(X, X.dot(Z), Z))
# array([0., 0., 0.])
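The printed arrays are consistent with the plain least-squares gradient (1/m) * X.T @ (X @ theta - y) with no intercept column. A sketch of a gradient() that reproduces all three outputs above (an assumption; the imported module is not shown):

import numpy as np

def gradient(x, y, theta):
    """Assumed definition: mean least-squares gradient."""
    m = x.shape[0]
    return x.T @ (x @ theta - y) / m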
Example #40
import numpy as np
from scipy import optimize
from sigmoid import sigmoid
from onevsall import onevsall
from gradient import gradient
from lrcostfunction import lrcostfunction

#checking with a small data set given below
theta_t = np.array([[-2], [-1], [1], [2]])
X_t = np.array([[1, 0.1, 0.6, 1.1], [1, 0.2, 0.7, 1.2], [1, 0.3, 0.8, 1.3],
                [1, 0.4, 0.9, 1.4], [1, 0.5, 1, 1.5]])
y_t = np.array([[1], [0], [1], [0], [1]])
lamda_t = 3
args = (X_t, y_t, lamda_t)
print(lrcostfunction(theta_t, *args))
print(gradient(theta_t, *args))

#checkgradient using check grad function
from scipy.optimize import check_grad
from sklearn.datasets import make_classification

X_examples, Y_labels = make_classification()
X_examples = np.insert(X_examples, 0, 1, axis=1)
lamda = 0.1
#print(check_grad(lrcostfunction,gradient, np.zeros(np.size(X_examples,1)), X_examples, Y_labels.flatten(),lamda))

import scipy.io as sio

mat_contents = sio.loadmat('ex4data1.mat')
#training data
Example #41
import numpy as np
from gradient import gradient

x = np.array([[-6, -7, -9], [13, -2, 14], [-7, 14, -1], [-8, -4, 6],
              [-5, -9, 6], [1, -5, 11], [9, -11, 8]])
y = np.array([2, 14, -13, 5, 12, 4, -19])
theta1 = np.array([0, 3, 0.5, -6])

# Example :
print(gradient(x, y, theta1))
# Output:
# array([ -37.35714286,  183.14285714, -393.        ])

# Example :
theta2 = np.array([0, 0, 0, 0])
print(gradient(x, y, theta2))
# Output:
# array([  0.85714286,  23.28571429, -26.42857143])
def test_gradient_list():
    p1 = [0, 0, 1]
    p2 = [0, 2, 5]
    obs = gradient(p1, p2)
    expect = 2
    assert obs == expect