Example #1
import numpy as np

def average_slope_intercept(lines):
    left_lines = []  # (slope, intercept)
    left_weights = []  # (length,)
    right_lines = []  # (slope, intercept)
    right_weights = []  # (length,)

    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # ignore a vertical line
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - slope * x1
            length = np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            if slope < 0:  # y is reversed in image
                left_lines.append((slope, intercept))
                left_weights.append(length)
            else:
                right_lines.append((slope, intercept))
                right_weights.append(length)

    # add more weight to longer lines
    left_lane = (np.dot(left_weights, left_lines) / np.sum(left_weights)
                 if len(left_weights) > 0 else None)
    right_lane = (np.dot(right_weights, right_lines) / np.sum(right_weights)
                  if len(right_weights) > 0 else None)

    return left_lane, right_lane  # (slope, intercept), (slope, intercept)
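
A minimal usage sketch; the hypothetical array below mimics the (N, 1, 4) segment format produced by cv2.HoughLinesP:

lines = np.array([[[10, 50, 30, 10]],   # left-leaning segment (negative slope)
                  [[60, 10, 90, 55]]])  # right-leaning segment (positive slope)
left_lane, right_lane = average_slope_intercept(lines)
print(left_lane, right_lane)  # each is (slope, intercept) or None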
Example #2
def modelAndDataSampleDiffs(batchData, biases, weights, activationFun,
                            dropout, cdSteps):
  # Compute the hidden unit activations from the data
  hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
                       binary=True)

  # Choose the units that will be active at this point;
  # a different dropout mask for each element in the mini-batch
  on = sample(dropout, hidden.shape)
  dropoutHidden = on * hidden
  hiddenReconstruction = dropoutHidden

  for _ in range(cdSteps - 1):
    visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
                                        biases, weights, activationFun,
                                        binary=False)
    hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
                                       biases, weights, activationFun,
                                       binary=True)
    # keep only the hidden units that are active under the dropout mask
    hiddenReconstruction = hiddenReconstruction * on

  # Do the final reconstruction from the probabilities rather than samples
  visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
                                      biases, weights, activationFun,
                                      binary=False)
  hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
                                     biases, weights, activationFun,
                                     binary=False)

  hiddenReconstruction = hiddenReconstruction * on
  # here it should be hidden * on - hiddenReconstruction,
  # and likewise below in the hidden bias update
  weightsDiff = np.dot(batchData.T, dropoutHidden) -\
                np.dot(visibleReconstruction.T, hiddenReconstruction)
  assert weightsDiff.shape == weights.shape

  visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
  assert visibleBiasDiff.shape == biases[0].shape

  hiddenBiasDiff = np.sum(dropoutHidden - hiddenReconstruction, axis=0)
  assert hiddenBiasDiff.shape == biases[1].shape

  return weightsDiff, visibleBiasDiff, hiddenBiasDiff
Example #3
import numpy as np

def getMSErr(imageA, imageB):
    # the 'Mean Squared Error' between the two images is the
    # sum of the squared differences between the two images,
    # normalized by the number of pixels;
    # NOTE: the two images must have the same dimensions
    err = np.sum((imageA.astype("float") - imageB.astype("float"))**2)
    err /= float(imageA.shape[0] * imageA.shape[1])

    # return the MSE, the lower the error, the more "similar"
    # the two images are
    return err
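
A quick usage sketch on synthetic arrays standing in for two same-sized grayscale images:

rng = np.random.default_rng(0)
img_a = rng.integers(0, 256, (64, 64)).astype("uint8")
img_b = img_a.copy()
img_b[0, 0] = 255 - img_b[0, 0]  # flip one pixel
print(getMSErr(img_a, img_a))  # 0.0 for identical images
print(getMSErr(img_a, img_b))  # > 0 once the images differ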
Example #4
    def work_on_detected(self):
        cpy = np.copy(self.channels["laser"])
        height, width = cpy.shape

        indices = np.transpose(np.where(cpy > 0))
        if len(indices) > 0:  # any bright pixels detected?
            sum_y, sum_x = np.sum(indices, axis=0)
            self.ave_y, self.ave_x = sum_y / len(indices), sum_x / len(indices)

            # print("average x:", self.ave_x, "average y:", self.ave_y)
            self.manager.update_player_place((self.ave_x, self.ave_y))
Example #5
def modelAndDataSampleDiffsPCD(batchData, biases, weights, activationFun,
                               dropout, steps, fantasyParticles):
  # Compute the hidden unit activations from the data
  hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
                       binary=True)

  # Choose the units that will be active at this point;
  # a different dropout mask for each element in the mini-batch
  # on = sample(dropout, hidden.shape)
  # dropoutHidden = on * hidden
  # hiddenReconstruction = dropoutHidden

  # Advance the persistent chain: start from the stored fantasy hidden state,
  # then keep sampling from the chain's own reconstructions
  hiddenReconstruction = fantasyParticles[1]
  for _ in range(steps):
    visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
                                        biases, weights, activationFun,
                                        binary=False)
    hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
                                       biases, weights, activationFun,
                                       binary=True)

    # sample the hidden units active (for dropout)
    # hiddenReconstruction = hiddenReconstruction * on

  fantasyParticles = (visibleReconstruction, hiddenReconstruction)

  # here it should be hidden * on - hiddenReconstruction
  # also below in the hidden bias
  weightsDiff = np.dot(batchData.T, hidden) -\
                np.dot(visibleReconstruction.T, hiddenReconstruction)
  assert weightsDiff.shape == weights.shape

  visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
  assert visibleBiasDiff.shape == biases[0].shape

  hiddenBiasDiff = np.sum(hidden - hiddenReconstruction, axis=0)
  assert hiddenBiasDiff.shape == biases[1].shape

  return weightsDiff, visibleBiasDiff, hiddenBiasDiff, fantasyParticles
Example #6
import cv2
import numpy as np

def getMSErrThresholded(imageA, imageB):
    imageA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    imageB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
    imageA = cv2.adaptiveThreshold(imageA, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 7, 2)
    imageB = cv2.adaptiveThreshold(imageB, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 7, 2)

    # the 'Mean Squared Error' between the two images is the
    # sum of the squared differences between the two images,
    # normalized by the number of pixels;
    # NOTE: the two images must have the same dimensions
    err = np.sum((imageA.astype("float") - imageB.astype("float"))**2)
    err /= float(imageA.shape[0] * imageA.shape[1])

    # return the MSE, the lower the error, the more "similar"
    # the two images are
    return err
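
A usage sketch; the file names are hypothetical placeholders for two same-sized BGR frames:

frame_a = cv2.imread("frame_a.png")  # hypothetical path
frame_b = cv2.imread("frame_b.png")  # hypothetical path
print(getMSErrThresholded(frame_a, frame_b))  # lower means more similar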
Example #7
import numpy as np

def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. Euclidean norm, along axis.

    >>> v0 = np.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> np.allclose(v1, v0 / np.linalg.norm(v0))
    True
    >>> v0 = np.random.rand(5, 4, 3)
    >>> v1 = unit_vector(v0, axis=-1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = unit_vector(v0, axis=1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = np.empty((5, 4, 3))
    >>> unit_vector(v0, axis=1, out=v1)
    >>> np.allclose(v1, v2)
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1]))
    [1.0]

    """
    if out is None:
        data = np.array(data, dtype=np.float64, copy=True)
        if data.ndim == 1:
            data /= np.sqrt(np.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = np.array(data, copy=False)
        data = out
    length = np.atleast_1d(np.sum(data * data, axis))
    np.sqrt(length, length)
    if axis is not None:
        length = np.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
Example #8
import numpy as np

def conv_(img, conv_filter):
    filter_size = conv_filter.shape[1]
    result = np.zeros((img.shape))
    #Looping through the image to apply the convolution operation.
    for r in np.uint16(np.arange(filter_size/2.0, 
                          img.shape[0]-filter_size/2.0+1)):
        for c in np.uint16(np.arange(filter_size/2.0, 
                                           img.shape[1]-filter_size/2.0+1)):
            """
            Getting the current region to get multiplied with the filter.
            How to loop through the image and get the region based on 
            the image and filer sizes is the most tricky part of convolution.
            """
            curr_region = img[r-np.uint16(np.floor(filter_size/2.0)):r+np.uint16(np.ceil(filter_size/2.0)), 
                              c-np.uint16(np.floor(filter_size/2.0)):c+np.uint16(np.ceil(filter_size/2.0))]
            #Element-wise multiplication between the current region and the filter.
            curr_result = curr_region * conv_filter
            conv_sum = np.sum(curr_result) #Summing the result of multiplication.
            result[r, c] = conv_sum #Saving the summation in the convolution layer feature map.
            
    #Clipping the unfilled border of the result matrix.
    final_result = result[np.uint16(filter_size/2.0):result.shape[0]-np.uint16(filter_size/2.0), 
                          np.uint16(filter_size/2.0):result.shape[1]-np.uint16(filter_size/2.0)]
    return final_result
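
A usage sketch, assuming a 2D single-channel image and a 3x3 filter (the function reads the filter size from conv_filter.shape[1]):

img = np.random.rand(16, 16)
edge_filter = np.array([[-1.0, 0.0, 1.0],
                        [-2.0, 0.0, 2.0],
                        [-1.0, 0.0, 1.0]])
feature_map = conv_(img, edge_filter)
print(feature_map.shape)  # (14, 14): a one-pixel border is clipped on each side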
Example #9
#!/usr/bin/env python
import re
import numpy as np

final = []
with open('text_numbers.txt') as text:
    for line in text:
        line = line.strip()
        y = re.findall('([0-9]+)', line)

        if len(y) > 0:
            lineVal = sum(map(int, y))
            final.append(lineVal)
            print("line sum = {0}".format(lineVal))

print("Final sum = {0}".format(np.sum(final)))
Example #10
import numpy as np

def meanNonDiag(whitenedMatrix):
    # mean absolute off-diagonal entry of the covariance matrix;
    # close to zero when the data are well whitened
    whitenedCov = np.abs(np.cov(whitenedMatrix))
    sumNonDiag = np.sum(whitenedCov) - np.trace(whitenedCov)
    n = whitenedCov.shape[0]
    return sumNonDiag / (n * n - n)
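
A usage sketch on synthetic data; rows are variables, as np.cov expects by default:

rng = np.random.default_rng(0)
data = rng.standard_normal((5, 1000))  # 5 nearly uncorrelated variables
print(meanNonDiag(data))  # close to zero for well-whitened data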
Example #11
# Lasso regression: a linear regression model trained with L1 regularization.
# Unlike L2 regularization, L1 regularization drives some of the feature
# coefficients in a lasso model to exactly zero.
# In other words, some features are ignored by the model entirely, which can
# be seen as a form of automatic feature selection.
# Driving some coefficients to zero makes the model easier to interpret and
# highlights its most important features.
# scikit-learn also provides a model called the Elastic Net, which combines
# the penalty factors of lasso and ridge regression. In practice this
# combination works best, at the cost of having to tune two parameters: one
# for L1 regularization and one for L2 regularization.

# Import lasso regression
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split

# Fit the data with lasso regression
X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
lasso = Lasso(alpha=0.1, max_iter=100000).fit(X_train, y_train)
print('Lasso score on the training set: {:.2f}'.format(lasso.score(X_train, y_train)))
print('Lasso score on the test set: {:.2f}'.format(lasso.score(X_test, y_test)))
print('Number of features used by lasso: {}'.format(np.sum(lasso.coef_ != 0)))
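
A minimal follow-up sketch, continuing from the split above, since the commentary mentions the Elastic Net; alpha and l1_ratio are illustrative, untuned values:

from sklearn.linear_model import ElasticNet

enet = ElasticNet(alpha=0.1, l1_ratio=0.5, max_iter=100000).fit(X_train, y_train)
print('Elastic Net score on the test set: {:.2f}'.format(enet.score(X_test, y_test)))
print('Number of features used by the Elastic Net: {}'.format(np.sum(enet.coef_ != 0)))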
Example #12
import numpy as np

def gaussian_kernel_1d(sigma):
    kernel_radius = int(np.ceil(sigma) * 3)
    ax = np.arange(-kernel_radius, kernel_radius + 1., dtype=np.float32)
    kernel = np.exp(-(ax**2) / (2. * sigma**2))
    # normalize so the weights sum to 1; return as a (1, N) row vector
    return (kernel / np.sum(kernel)).reshape(1, kernel.shape[0])
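
A quick sanity check of the kernel shape and normalization:

k = gaussian_kernel_1d(1.0)
print(k.shape)           # (1, 7): radius 3 on either side of the center
print(float(np.sum(k)))  # ~1.0, since the kernel is normalized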