Example 1
def average_slope_intercept(lines):
    left_lines = []  # (slope, intercept)
    left_weights = []  # (length,)
    right_lines = []  # (slope, intercept)
    right_weights = []  # (length,)

    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # ignore a vertical line
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - slope * x1
            length = np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            if slope < 0:  # y is reversed in image
                left_lines.append((slope, intercept))
                left_weights.append(length)
            else:
                right_lines.append((slope, intercept))
                right_weights.append(length)

    # add more weight to longer lines
    left_lane = np.dot(left_weights, left_lines) / np.sum(left_weights) if len(
        left_weights) > 0 else None
    right_lane = np.dot(right_weights, right_lines) / np.sum(
        right_weights) if len(right_weights) > 0 else None

    return left_lane, right_lane  # (slope, intercept), (slope, intercept)
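A hedged usage sketch: assuming lines comes from cv2.HoughLinesP (segments shaped [[x1, y1, x2, y2]]), the averaged (slope, intercept) pairs can be turned back into pixel endpoints for drawing. make_line_points is a hypothetical helper, not part of the original snippet:

import numpy as np

def make_line_points(y1, y2, lane):
    # convert a (slope, intercept) pair into two pixel endpoints between rows y1 and y2
    if lane is None:
        return None
    slope, intercept = lane
    x1 = int((y1 - intercept) / slope)  # invert y = slope * x + intercept
    x2 = int((y2 - intercept) / slope)
    return ((x1, int(y1)), (x2, int(y2)))

# lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 20, minLineLength=20, maxLineGap=300)
# left_lane, right_lane = average_slope_intercept(lines)
# left_points = make_line_points(image_height, 0.6 * image_height, left_lane)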
Example 2
    def train(self, X, y, learning_rate=0.01, update_size=None):

        assert len(np.shape(X)) == 2, "Invalid input shape. The input must be 2d (batch, input_dim)"
        batch_size, input_dim = np.shape(X)
        assert self.input_dim == input_dim, "input_dim mismatch."

        X = np.column_stack((X, np.ones((batch_size, 1))))
        y = y.reshape((-1, self.output_dim))

        if self.updateMethod == "closed-form":
            # X is (batch, input_dim + 1), so test invertibility of X^T X, not X itself
            assert np.linalg.det(np.dot(X.T, X)) != 0, "X^T X is singular and cannot be inverted."

            # closed-form (normal equation) update: w = (X^T X)^-1 X^T y
            self.w = np.linalg.inv(np.dot(X.T, X))
            self.w = np.dot(self.w, X.T)
            self.w = np.dot(self.w, y)

            # predict on the training set
            Y_predict = np.dot(X, self.w)
            # mean absolute error on the training set
            loss_train = np.average(np.abs(Y_predict - y))

            return Y_predict, loss_train
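A side note on the closed-form branch: the three chained np.dot calls implement the normal equation w = (X^T X)^-1 X^T y. A minimal sketch (with made-up data) showing the same least-squares solve via np.linalg.lstsq, which avoids forming the explicit inverse and is usually the numerically safer choice:

import numpy as np

# made-up data: y = 3x + 1 exactly
X = np.column_stack((np.arange(5.0), np.ones(5)))
y = (3.0 * np.arange(5.0) + 1.0).reshape(-1, 1)

# same least-squares solution as inv(X^T X) X^T y, without the explicit inverse
w, residuals, rank, singular_values = np.linalg.lstsq(X, y, rcond=None)
print(w)  # approximately [[3.], [1.]]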
def reflection_matrix(point, normal):
    """Return matrix to mirror at plane 
    defined by point and normal vector.

    >>> v0 = np.random.random(4) - 0.5
    >>> v0[3] = 1.
    >>> v1 = np.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> np.allclose(2, np.trace(R))
    True
    >>> np.allclose(v0, np.dot(R, v0))
    True
    >>> v2 = v0.copy()
    >>> v2[:3] += v1
    >>> v3 = v0.copy()
    >>> v3[:3] -= v1
    >>> np.allclose(v2, np.dot(R, v3))
    True

    """
    normal = unit_vector(normal[:3])
    M = np.identity(4)
    M[:3, :3] -= 2.0 * np.outer(normal, normal)
    M[:3, 3] = (2.0 * np.dot(point[:3], normal)) * normal
    return M
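Note that reflection_matrix depends on unit_vector, which appears in a later snippet. As a quick sanity check of the Householder construction M[:3, :3] = I - 2 n n^T (the numbers below are made up for illustration):

import numpy as np

point = np.array([0.0, 0.0, 1.0, 1.0])   # a point on the plane z = 1
normal = np.array([0.0, 0.0, 1.0])       # plane normal
R = reflection_matrix(point, normal)

# mirroring across z = 1 sends z = 2 to z = 0 ...
assert np.allclose(R @ np.array([0.0, 0.0, 2.0, 1.0]),
                   np.array([0.0, 0.0, 0.0, 1.0]))
# ... and reflecting twice returns the original point
v = np.array([1.0, 2.0, 3.0, 1.0])
assert np.allclose(v, R @ (R @ v))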
Example 4
    def train_weights(self):

        self.patterns = 2 * self.patterns - 1  # map {0, 1} patterns to {-1, +1}
        w_values = np.dot(self.patterns.T, self.patterns)
        for i in range(w_values.shape[0]):
            w_values[i, i] = 0  # no self-connections in a Hopfield network
        self.W = w_values
Example 5
def computeCost(mytheta, X, y):  # cost function
    """
    mytheta is an n-dimensional vector of theta parameters
    X is a matrix with n columns and m rows
    y is a matrix with m rows and 1 column
    (relies on the globals m, the number of training examples,
    and h, the linear hypothesis function shown in Example 20)
    """
    # note to self: *.shape is (rows, columns)
    return float((1. / (2 * m)) * np.dot((h(mytheta, X) - y).T, (h(mytheta, X) - y)))
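computeCost reads m (the number of training examples) and h (the linear hypothesis, defined in Example 20) from the enclosing scope. A self-contained sketch with made-up data:

import numpy as np

def h(theta, X):  # linear hypothesis, as in Example 20
    return np.dot(X, theta)

X = np.array([[1., 1.], [1., 2.], [1., 3.]])  # first column is the bias term
y = np.array([[1.], [2.], [3.]])
m = y.shape[0]
theta = np.zeros((2, 1))

print(computeCost(theta, X, y))  # 7/3 for this toy data, since h(theta, X) is all zeros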
Example 6
    def predict(self, X):
        assert len(np.shape(X)) == 2, "Invalid input shape. The input must be 2d (batch, input_dim)"
        batch_size, input_dim = np.shape(X)
        assert self.input_dim == input_dim, "input_dim mismatch."

        X = np.column_stack((X, np.ones((batch_size, 1))))
        y_predict = np.dot(X, self.w)
        return y_predict
def updateLayer(layer, otherLayerValues, biases, weights, activationFun,
                binary=False):

  bias = biases[layer]
  size = otherLayerValues.shape[0]

  if layer == Layer.VISIBLE:
    activation = np.dot(otherLayerValues, weights.T)
  else:
    activation = np.dot(otherLayerValues, weights)

  probs = activationFun(np.tile(bias, (size, 1)) + activation)

  if binary:
    # Sample from the distributions
    return sampleAll(probs)

  return probs
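updateLayer and the two contrastive-divergence functions below lean on several helpers that are not part of this snippet. A minimal sketch of plausible definitions, purely as assumptions inferred from the call sites (Layer constants, a sigmoid activationFun, and Bernoulli samplers sample/sampleAll):

import numpy as np

class Layer(object):
    # assumed enum-like constants for the two RBM layers
    VISIBLE = 0
    HIDDEN = 1

def sigmoid(x):
    # a typical choice for activationFun
    return 1.0 / (1.0 + np.exp(-x))

def sampleAll(probs):
    # draw one Bernoulli sample per unit from its activation probability
    return (np.random.uniform(size=probs.shape) < probs).astype(np.float64)

def sample(p, shape):
    # binary mask where each entry is 1 with probability p (used for dropout)
    return (np.random.uniform(size=shape) < p).astype(np.float64)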
def modelAndDataSampleDiffs(batchData, biases, weights, activationFun,
                            dropout, cdSteps):
  # Compute the hidden unit activations from the data
  hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
                       binary=True)

  # Choose the units to be active at this point
  # (a different set for each element in the mini-batch)
  on = sample(dropout, hidden.shape)
  dropoutHidden = on * hidden
  hiddenReconstruction = dropoutHidden

  for i in range(cdSteps - 1):
    visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
                                        biases, weights, activationFun,
                                        binary=False)
    hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
                                       biases, weights, activationFun,
                                       binary=True)
    # sample the hidden units active (for dropout)
    hiddenReconstruction = hiddenReconstruction * on

  # Do the last reconstruction from the probabilities in the last phase
  visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
                                      biases, weights, activationFun,
                                      binary=False)
  hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
                                     biases, weights, activationFun,
                                     binary=False)

  hiddenReconstruction = hiddenReconstruction * on
  # here it should be hidden * on - hiddenreconstruction
  # also below in the hidden bias
  weightsDiff = np.dot(batchData.T, dropoutHidden) -\
                np.dot(visibleReconstruction.T, hiddenReconstruction)
  assert weightsDiff.shape == weights.shape

  visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
  assert visibleBiasDiff.shape == biases[0].shape

  hiddenBiasDiff = np.sum(dropoutHidden - hiddenReconstruction, axis=0)
  assert hiddenBiasDiff.shape == biases[1].shape

  return weightsDiff, visibleBiasDiff, hiddenBiasDiff
def modelAndDataSampleDiffsPCD(batchData, biases, weights, activationFun,
                            dropout, steps, fantasyParticles):
  # Compute the hidden unit activations from the data
  hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
                       binary=True)

  # Choose the units to be active at this point
  # (a different set for each element in the mini-batch)
  # on = sample(dropout, hidden.shape)
  # dropoutHidden = on * hidden
  # hiddenReconstruction = dropoutHidden

  # Gibbs-sample the persistent chain, chaining the reconstructions so
  # that each of the `steps` updates continues from the previous one
  hiddenReconstruction = fantasyParticles[1]
  for i in range(steps):
    visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
                                        biases, weights, activationFun,
                                        binary=False)
    hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
                                       biases, weights, activationFun,
                                       binary=True)

    # sample the hidden units active (for dropout)
    # hiddenReconstruction = hiddenReconstruction * on

  fantasyParticles = (visibleReconstruction, hiddenReconstruction)

  # here it should be hidden * on - hiddenReconstruction
  # also below in the hidden bias
  weightsDiff = np.dot(batchData.T, hidden) -\
                np.dot(visibleReconstruction.T, hiddenReconstruction)
  assert weightsDiff.shape == weights.shape

  visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
  assert visibleBiasDiff.shape == biases[0].shape

  hiddenBiasDiff = np.sum(hidden - hiddenReconstruction, axis=0)
  assert hiddenBiasDiff.shape == biases[1].shape

  return weightsDiff, visibleBiasDiff, hiddenBiasDiff, fantasyParticles
Example 10
def train(training_inputs, yd, training_iterations):
    w = np.random.randn(2, 1)
    # w = np.array([[3.0],[1.0]])
    for iteration in range(training_iterations):
        # forward pass: compute the output
        y = sigmoid(np.dot(training_inputs, w))
        # compute the error
        error = yd - y
        # adjust the weights (inputs weighted by error * sigmoid_derivative)
        adjustments = np.dot(training_inputs.T, error * sigmoid_derivative(y))
        w += learning_rate * adjustments

        # plot the current fit (note: x and learning_rate are globals here)
        plt.figure(figsize=(10, 7))
        plt.scatter(x[:, 0], yd, s=100, alpha=0.3)
        xs = np.linspace(0, 70, 200)
        ys2 = w[1] + w[0] * xs
        plt.plot(xs, ys2, 'g', linewidth=1)

        plt.show()
    return w
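train relies on sigmoid, sigmoid_derivative, and the globals x and learning_rate. A sketch of the two functions as they are typically paired with this update rule; note sigmoid_derivative receives the already-activated output y, matching the call above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_derivative(y):
    # derivative written in terms of the sigmoid output y = sigmoid(z)
    return y * (1.0 - y)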
def forward_kinematics(l1, l2, l3, q1, q2, q3, q4):
    # Create the initial transformation matrices relating each joint position
    # to the global frame. The global frame is the center of the robot.

    # T is a 4x4 homogeneous transformation matrix:
    #   T[0:3, 0:3] is a rotation matrix
    #   T[0:3, 3] is the x, y, z position of the frame
    #   e.g. the x position of the second joint is T[0, 3], its y position is T[1, 3]
    #   T[3, :] is used for scaling and remains 0 0 0 1 for no scaling

    T10 = dh(q1, 0, 0, pi / 2)  # Base rotation relative to global frame
    T21 = dh(
        q2, 0, l1,
        0)  # frame of reference at the end of the first link, relative to Base
    T32 = dh(q3, 0, l2, 0)  # second link end relative to first link
    T43 = dh(
        q4, 0, l3, 0
    )  # end effector relative to second link; can be considered a third link

    T20 = np.dot(T10, T21)
    T30 = np.dot(T20, T32)
    T40 = np.dot(T30, T43)

    # Create matrix where each row is a joint position, i.e
    # joint positions = [x1 y1 z1
    #                   x2  y2 z2]

    # assume 0 index notation
    # T10=numpy.transpose(T10)
    # print(T10)
    # print(T20)
    p_joints = [T10[0:3, 3], T20[0:3, 3], T30[0:3, 3], T40[0:3, 3]]

    p_joints = np.transpose(p_joints)

    return p_joints
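dh is not defined in this snippet. From the call sites, the parameter order appears to be dh(theta, d, a, alpha); a sketch of the standard Denavit-Hartenberg transform under that assumption:

import numpy as np
from numpy import pi

def dh(theta, d, a, alpha):
    # standard DH homogeneous transform (assumed convention)
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array([
        [ct, -st * ca,  st * sa, a * ct],
        [st,  ct * ca, -ct * sa, a * st],
        [0.0,      sa,       ca,      d],
        [0.0,     0.0,      0.0,    1.0],
    ])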
Example 12
    def recall(self, input, update_order):
        self.no_nodes = self.W.shape[0]
        no_stable_nodes = 0
        for i in update_order:
            if no_stable_nodes == self.no_nodes:
                break
            else:
                new_node = step(np.dot(self.W[:, i], input))

                if input[i] == new_node:
                    no_stable_nodes += 1
                else:
                    no_stable_nodes = 0
                    input[i] = new_node

        return input
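step is the threshold activation this Hopfield-style recall assumes; a minimal sketch for bipolar (+1/-1) states:

def step(x):
    # bipolar threshold: +1 for non-negative activations, -1 otherwise
    return 1 if x >= 0 else -1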
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, 
    i.e. Euclidean norm, along axis.

    >>> v0 = np.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> np.allclose(v1, v0 / np.linalg.norm(v0))
    True
    >>> v0 = np.random.rand(5, 4, 3)
    >>> v1 = unit_vector(v0, axis=-1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = unit_vector(v0, axis=1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = np.empty((5, 4, 3))
    >>> unit_vector(v0, axis=1, out=v1)
    >>> np.allclose(v1, v2)
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1]))
    [1.0]

    """
    if out is None:
        data = np.array(data, dtype=np.float64, copy=True)
        if data.ndim == 1:
            data /= np.sqrt(np.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = np.array(data, copy=False)
        data = out
    length = np.atleast_1d(np.sum(data * data, axis))
    np.sqrt(length, length)
    if axis is not None:
        length = np.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
Example 14
class Linear_Regression(Model):
    def __init__(self, arg):

        self.input_dim = arg["input_dim"]
        self.output_dim = arg["output_dim"]
        self.w = np.random.normal(1, 1, size=(self.input_dim + 1, self.output_dim))
        self.updateMethod = arg["update"].lower() if "update" in arg else "sgd"

    def train(self, X, y, learning_rate=0.01, update_size=None):

        assert len(np.shape(X)) == 2, "Invalid input shape. The input must be 2d (batch, input_dim)"
        batch_size, input_dim = np.shape(X)
        assert self.input_dim == input_dim, "input_dim mismatch."

        X = np.column_stack((X, np.ones((batch_size, 1))))
        y = y.reshape((-1, self.output_dim))

        if self.updateMethod == "closed-form":
            # X is (batch, input_dim + 1), so test invertibility of X^T X, not X itself
            assert np.linalg.det(np.dot(X.T, X)) != 0, "X^T X is singular and cannot be inverted."

            # closed-form (normal equation) update: w = (X^T X)^-1 X^T y
            self.w = np.linalg.inv(np.dot(X.T, X))
            self.w = np.dot(self.w, X.T)
            self.w = np.dot(self.w, y)

            # predict on the training set
            Y_predict = np.dot(X, self.w)
            # mean absolute error on the training set
            loss_train = np.average(np.abs(Y_predict - y))

            return Y_predict, loss_train

        elif self.updateMethod == "sgd":
            if update_size is None:
                update_size = batch_size
            assert update_size <= batch_size, "Update size larger than batch size!"
            y_predict = np.dot(X, self.w)
            diff = y_predict - y
Example 15
            Y_predict = np.dot(X, self.w)
            # mean absolute error on the training set
            loss_train = np.average(np.abs(Y_predict - y))

            return Y_predict, loss_train

        elif self.updateMethod == "sgd":
            if update_size is None:
                update_size = batch_size
            assert update_size <= batch_size, "Update size larger than batch size!"
            y_predict = np.dot(X, self.w)
            diff = y_predict - y
            # sample a random subset of the batch (randint's high bound is exclusive)
            randind = np.random.randint(0, X.shape[0], size=update_size)

            # G = X^T X w - X^T y: the gradient of the squared error on the subset
            G = -np.dot(X[randind].T.reshape(-1, update_size), y[randind].reshape(update_size, -1))
            G += np.dot(X[randind].T.reshape(-1, update_size), X[randind].reshape(update_size, -1)).dot(self.w)
            # negate so that adding learning_rate * G takes a descent step
            G = -G
            self.w += learning_rate * G

            y_predict_selected = np.dot(X[randind], self.w)
            loss_train = np.average(np.abs(y_predict_selected - y[randind]))

            return y_predict, loss_train

    def predict(self, X):
        assert len(np.shape(X)) == 2, "Invalid input shape. The input must be 2d (batch, input_dim)"
        batch_size, input_dim = np.shape(X)
        assert self.input_dim == input_dim, "input_dim mismatch."
Example 16
    def my_action(self, img):
        img_np = np.array(img)
        # scale pixel intensities by 0.5 to darken the image, then cast back to integers
        b = np.dot(img_np, 0.5).astype(int)
        # cv.imshow('original', img)
        return b
def fabrik(l1, l2, l3, x_prev, y_prev, z_prev, x_command, y_command, z_command,
           tol_limit, max_iterations):

    # Base rotation is simply based on angle made within the x-y plane

    q1_prev = np.arctan2(y_prev[3], x_prev[3])
    q1 = np.arctan2(y_command, x_command)

    base_rotation = q1 - q1_prev  # rotation the base must make to go from the initial position to the commanded position

    # Base rotation matrix about z
    R_z = np.array([[np.cos(base_rotation), -np.sin(base_rotation), 0.0],
                    [np.sin(base_rotation),
                     np.cos(base_rotation), 0.0], [0.0, 0.0, 1.0]])

    # Rotate the location of each joint by the base rotation.
    # This forces the FABRIK algorithm to solve only in two dimensions;
    # otherwise each joint would move as if it had a 3 DOF range of motion.
    p4 = np.dot(R_z, [x_prev[3], y_prev[3], z_prev[3]])
    p3 = np.dot(R_z, [x_prev[2], y_prev[2], z_prev[2]])
    p2 = np.dot(R_z, [x_prev[1], y_prev[1], z_prev[1]])
    p1 = np.dot(R_z, [x_prev[0], y_prev[0], z_prev[0]])

    # Store the (x,y,z) position of each joint

    p4x = p4[0]
    p4y = p4[1]
    p4z = p4[2]

    p3x = p3[0]
    p3y = p3[1]
    p3z = p3[2]

    p2x = p2[0]
    p2y = p2[1]
    p2z = p2[2]

    p1x = p1[0]
    p1y = p1[1]
    p1z = p1[2]

    # Starting point of each joint
    p1x_o = p1x
    p1y_o = p1y
    p1z_o = p1z

    iterations = 0
    for j in range(1, max_iterations + 1):

        if np.sqrt(
                np.power(x_command, 2) + np.power(y_command, 2) +
                np.power(z_command, 2)) > (l1 + l2 + l3):
            print('desired point is likely out of reach')

        [p3x, p3y, p3z] = project_along_vector(x_command, y_command, z_command,
                                               p3x, p3y, p3z, l3)
        [p2x, p2y, p2z] = project_along_vector(p3x, p3y, p3z, p2x, p2y, p2z,
                                               l2)
        [p1x, p1y, p1z] = project_along_vector(p2x, p2y, p2z, p1x, p1y, p1z,
                                               l1)

        [p2x, p2y, p2z] = project_along_vector(p1x_o, p1y_o, p1z_o, p2x, p2y,
                                               p2z, l1)
        [p3x, p3y, p3z] = project_along_vector(p2x, p2y, p2z, p3x, p3y, p3z,
                                               l2)
        [p4x, p4y, p4z] = project_along_vector(p3x, p3y, p3z, x_command,
                                               y_command, z_command, l3)

        # check how close the FABRIK position is to the commanded position
        tolx = p4x - x_command
        toly = p4y - y_command
        tolz = p4z - z_command

        tol = np.sqrt(
            np.power(tolx, 2) + np.power(toly, 2) + np.power(tolz, 2))
        iterations = iterations + 1

        # stop iterating once the tolerance is within the specified limit
        if tol <= tol_limit:
            break

    # Re-organize points into a big matrix for plotting elsewhere
    p_joints = np.array([[p1x, p2x, p3x, p4x], [p1y, p2y, p3y, p4y],
                         [p1z, p2z, p3z, p4z]])

    v21 = np.array([p2x - p1x, p2y - p1y, p2z - p1z])
    v32 = np.array([p3x - p2x, p3y - p2y, p3z - p2z])
    v43 = np.array([p4x - p3x, p4y - p3y, p4z - p3z])

    q2 = np.arctan2(
        (p2z - p1z),
        np.sqrt(np.power(p2x - p1x, 2) + np.power(p2y - p1y, 2)))

    q3 = -1 * angle_from_dot_product(v21, v32)
    q4 = -1 * angle_from_dot_product(v32, v43)

    q_joints = np.array([q1, q2, q3, q4])

    return q_joints
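project_along_vector and angle_from_dot_product are not shown in this snippet. Judging from how they are called, the first returns the point at a given distance from one point toward another (the core FABRIK projection step), and the second returns the unsigned angle between two vectors; a sketch under those assumptions:

import numpy as np

def project_along_vector(x1, y1, z1, x2, y2, z2, length):
    # point at `length` from (x1, y1, z1) in the direction of (x2, y2, z2)
    v = np.array([x2 - x1, y2 - y1, z2 - z1], dtype=float)
    v /= np.linalg.norm(v)
    return np.array([x1, y1, z1]) + length * v

def angle_from_dot_product(v1, v2):
    # unsigned angle between two vectors
    cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.arccos(np.clip(cos_angle, -1.0, 1.0))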
Example 18
import numpy as np

# 1 Write a NumPy program to compute the multiplication of two given matrices
x = [[1, 2], [5, 6]]
y = [[4, 5], [9, 5]]
print("original:")
print(x)
print(y)
result = np.dot(x, y)
print("Multiplication:")
print(result, "\n")

# 2 Write a NumPy program to compute the determinant of an array
x = np.array([[1, 2], [3, 4]])
print("original arr:")
print(x)
result = np.linalg.det(x)
print("determinant:")
print(result, '\n')

# 3 Write a NumPy program to compute the sum of the diagonal elements of a given array
x = np.array([[2, 4, 5], [8, 9, 10], [12, 15, 16]])
print(x, "matrix:")
diag = np.diagonal(x)
print("\nDiagonal matrix:")
print(diag)
print("\nSum of diagonal matrix:")
print(sum(diag))
Example 19
from django.test import TestCase
import numpy as np

# Create your tests here.
X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
y = np.array([[0, 1, 1, 0]]).T
syn0 = 2 * np.random.random((3, 4)) - 1  # first-layer weights in [-1, 1)
syn1 = 2 * np.random.random((4, 1)) - 1  # second-layer weights in [-1, 1)
for j in range(60000):
    # forward pass through two sigmoid layers
    l1 = 1 / (1 + np.exp(-(np.dot(X, syn0))))
    l2 = 1 / (1 + np.exp(-(np.dot(l1, syn1))))
    # backpropagate the error through the sigmoid derivatives
    l2_delta = (y - l2) * (l2 * (1 - l2))
    l1_delta = l2_delta.dot(syn1.T) * (l1 * (1 - l1))
    syn1 += l1.T.dot(l2_delta)
    syn0 += X.T.dot(l1_delta)
Example 20
def h(theta, X):  # linear hypothesis function
    return np.dot(X, theta)