def angle_from_dot_product(a, b):
    """Return the angle in radians between 3-vectors a and b via the dot product."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]

    a_mag = np.sqrt(np.power(ax, 2) + np.power(ay, 2) + np.power(az, 2))
    b_mag = np.sqrt(np.power(bx, 2) + np.power(by, 2) + np.power(bz, 2))

    # clamp to arccos' domain to guard against floating-point round-off
    cos_theta = np.clip((ax * bx + ay * by + az * bz) / (a_mag * b_mag), -1.0, 1.0)
    theta = np.arccos(cos_theta)

    return theta
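A quick hedged check of the formula theta = arccos(a.b / (|a||b|)), using made-up orthogonal vectors (assumes numpy is imported as np):

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
theta = angle_from_dot_product(a, b)
print(np.degrees(theta))  # ~90.0 for orthogonal vectors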
Example #2
def average_slope_intercept(lines):
    left_lines = []  # (slope, intercept)
    left_weights = []  # (length,)
    right_lines = []  # (slope, intercept)
    right_weights = []  # (length,)

    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # ignore a vertical line
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - slope * x1
            length = np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            if slope < 0:  # y is reversed in image
                left_lines.append((slope, intercept))
                left_weights.append((length))
            else:
                right_lines.append((slope, intercept))
                right_weights.append((length))

    # add more weight to longer lines
    left_lane = np.dot(left_weights, left_lines) / np.sum(left_weights) if len(
        left_weights) > 0 else None
    right_lane = np.dot(right_weights, right_lines) / np.sum(
        right_weights) if len(right_weights) > 0 else None

    return left_lane, right_lane  # (slope, intercept), (slope, intercept)
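A hypothetical call with two made-up Hough-style segments (each entry is [[x1, y1, x2, y2]], the shape cv2.HoughLinesP returns); this only sketches the expected input, not real detections:

import numpy as np
lines = [
    [[10, 540, 300, 350]],   # negative slope in image coordinates -> left lane
    [[700, 350, 950, 540]],  # positive slope -> right lane
]
left_lane, right_lane = average_slope_intercept(lines)
print(left_lane, right_lane)  # each is (slope, intercept) or None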
Example #3
def mag2d(a1, a2):
    '''Calculate the magnitude of a 2-vector.
    Calling sequence: MAG = mag2d(A,B)
    Where:  A and B are float32 np arrays.
    Result MAG is a float32 np array containing the square root
    of the sum of the squares of A and B.'''
    from numpy import sqrt
    return sqrt(a1 * a1 + a2 * a2)
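A quick sanity check with made-up scalars (numpy's sqrt also accepts plain floats):

print(mag2d(3.0, 4.0))  # 5.0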
Example #4
def mag3d(a1, a2, a3):
    '''Calculate the magnitude of a 3-vector.
    Calling sequence: MAG = mag3d(A,B,C)
    Where:  A, B, and C are float32 np arrays.
    Result MAG is a float32 np array containing the square root
    of the sum of the squares of A, B, and C.'''

    # warn about deprecation instead of raising the warning class,
    # so the function still returns a result
    import warnings
    warnings.warn('Use Mag() instead', DeprecationWarning)
    from numpy import sqrt
    return sqrt(a1 * a1 + a2 * a2 + a3 * a3)
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, 
    i.e. Euclidean norm, along axis.

    >>> v0 = np.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> np.allclose(v1, v0 / np.linalg.norm(v0))
    True
    >>> v0 = np.random.rand(5, 4, 3)
    >>> v1 = unit_vector(v0, axis=-1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = unit_vector(v0, axis=1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = np.empty((5, 4, 3))
    >>> unit_vector(v0, axis=1, out=v1)
    >>> np.allclose(v1, v2)
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1]))
    [1.0]

    """
    if out is None:
        data = np.array(data, dtype=np.float64, copy=True)
        if data.ndim == 1:
            data /= np.sqrt(np.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = np.array(data, copy=False)
        data = out
    length = np.atleast_1d(np.sum(data * data, axis))
    np.sqrt(length, length)
    if axis is not None:
        length = np.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(1, 1)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: np.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height,filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """
        assert image_shape[1] == filter_shape[1]
        self.input = input

        # initialize weight values: the fan-in of each hidden neuron is
        # restricted by the size of the receptive fields.
        fan_in = np.prod(filter_shape[1:])
        W_values = np.asarray(rng.uniform(
              low=-np.sqrt(3./fan_in),
              high=np.sqrt(3./fan_in),
              size=filter_shape), dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values, name='W')

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, name='b')

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input, self.W,
                filter_shape=filter_shape, image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(conv_out, poolsize, ignore_border=True)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will thus
        # be broadcasted across mini-batches and feature map width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]
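As a rough illustration of the fan-in based initialization used above, here is a numpy-only sketch with made-up shapes (not taken from the original layer):

import numpy as np
filter_shape = (20, 1, 5, 5)        # 20 filters, 1 input feature map, 5x5 kernels
fan_in = np.prod(filter_shape[1:])  # 1 * 5 * 5 = 25
bound = np.sqrt(3. / fan_in)        # W is drawn from Uniform(-bound, +bound)
W = np.random.uniform(low=-bound, high=bound, size=filter_shape)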
    def execute(self, arff_data):
        attributes = [attribute[0] for attribute in arff_data['attributes']]
        dataset = arff_data['data']

        X = (arff_data['X'].T.tolist() if 'X' in arff_data else [])

        base_height = np.array(dataset[:, attributes.index('baseHeight')],
                               dtype='float64')
        target_height = np.array(dataset[:,
                                         attributes.index('targetHeight')],
                                 dtype='float64')
        base_width = np.array(dataset[:, attributes.index('baseWidth')],
                              dtype='float64')
        target_width = np.array(dataset[:, attributes.index('targetWidth')],
                                dtype='float64')
        X.append(
            np.minimum(base_height * base_width, target_height * target_width))

        base_x = np.array(dataset[:, attributes.index('baseX')],
                          dtype='float64')
        target_x = np.array(dataset[:, attributes.index('targetX')],
                            dtype='float64')
        base_y = np.array(dataset[:, attributes.index('baseY')],
                          dtype='float64')
        target_y = np.array(dataset[:, attributes.index('targetY')],
                            dtype='float64')
        X.append(
            np.sqrt(
                np.power(np.abs(base_x - target_x), 2) +
                np.power(np.abs(base_y - target_y), 2)))

        X.append(
            np.abs((base_height * base_width) -
                   (target_height * target_width)) / np.maximum(
                       np.minimum(base_height * base_width, target_height *
                                  target_width), np.ones(len(base_height))))

        X.append(dataset[:, attributes.index('chiSquared')])

        arff_data['X'] = np.array(X, dtype='float64').T
        prev_features = (arff_data['features']
                         if 'features' in arff_data else [])
        arff_data['features'] = prev_features + [
            'area', 'displacement', 'sdr', 'chisquared'
        ]
        arff_data['y'] = np.array(
            arff_data['data'][:, attributes.index(self._class_attr)],
            dtype='int16')
        return arff_data
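For reference, the 'sdr' feature built above reduces to |A_base - A_target| / max(min(A_base, A_target), 1); a small numeric check with made-up areas:

import numpy as np
base_area = np.array([200.0])
target_area = np.array([150.0])
sdr = np.abs(base_area - target_area) / np.maximum(
    np.minimum(base_area, target_area), np.ones(1))
print(sdr)  # [0.3333...]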
def project_along_vector(x1, y1, z1, x2, y2, z2, L):
    '''Solve for the point (px, py, pz) that lies a distance L from point 1,
    measured along the unit vector pointing from point 1 toward point 2.

    Parameters
    ----------
    x1, y1, z1 : scalars
        point 1
    x2, y2, z2 : scalars
        point 2
    L : scalar
        distance to step along the vector from point 1 toward point 2

    Returns
    -------
    out : ndarray
        the projected point [px, py, pz]
    '''

    # vector from point 1 to point 2
    vx = x2 - x1
    vy = y2 - y1
    vz = z2 - z1
    v = np.sqrt(np.power(vx, 2) + np.power(vy, 2) + np.power(vz, 2))

    ux = vx / v
    uy = vy / v
    uz = vz / v

    # In FABRIK this is used to project backwards along each link,
    # stepping a distance L from point 1 toward point 2
    px = x1 + L * ux
    py = y1 + L * uy
    pz = z1 + L * uz

    return np.array([px, py, pz])
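A hypothetical call stepping 2 units from the origin toward (0, 0, 10) (assumes numpy is imported as np):

print(project_along_vector(0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 2.0))  # [0. 0. 2.]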
Example #9
def erf_func(x, par):
    x    = x[0]
    norm   = par[0]
    offset = par[1]
    width  = par[2]
    return 0.5*norm*(1 + ROOT.TMath.Erf((x - offset)/(np.sqrt(2)*width)))
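The same curve can be checked without ROOT; a hedged numpy/math equivalent with the same parameter meanings (the function name here is made up):

import numpy as np
from math import erf

def erf_func_plain(x, norm, offset, width):
    # 0.5 * norm * (1 + erf((x - offset) / (sqrt(2) * width)))
    return 0.5 * norm * (1 + erf((x - offset) / (np.sqrt(2) * width)))

print(erf_func_plain(1.0, 2.0, 1.0, 0.5))  # 1.0: at x == offset the erf term is 0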
Example #10
    # check whether the detected objects are circles
    # compute moments to obtain the centroid
    for i in range(len(contours)):
        if (hierarchy[0, i, 3] == -1):  # this handles contours nested inside other contours
            M = cv2.moments(contours[i])
            if M["m00"] != 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                continue  # because we cannot divide by zero
            # fill the array of distances between the contour vertices and the centroid
            for u in contours[i]:
                aX = u[0][0]
                aY = u[0][1]
                dist = np.sqrt((cX - aX)**2 + (cY - aY)**2)
                sredniDist.append(dist)
            # compute the average distance
            sredniaZSredniegoDista = np.mean(sredniDist)
            it = 0

            # compare each vertex distance against the average distance
            for m in contours[i]:
                aX = m[0][0]
                aY = m[0][1]
                dist = np.sqrt((cX - aX)**2 + (cY - aY)**2)
                if (dist > sredniaZSredniegoDista):
                    pom = sredniaZSredniegoDista / dist
                else:
                    pom = dist / sredniaZSredniegoDista
                if (pom > 0.8):
Example #11
def eucl_dist(i, j, M, N):
    return np.sqrt((i - (M / 2)) ** 2 + (j - (N / 2)) ** 2)
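A quick check: the distance of pixel (0, 0) from the centre of a 100x100 image (made-up sizes, assumes numpy is imported as np):

print(eucl_dist(0, 0, 100, 100))  # sqrt(50**2 + 50**2) ~ 70.71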
Example #12
def gaussiannoise(mean, var, data):
    if var == 0:
        return np.zeros(data.shape)
    else:
        return np.random.normal(mean, np.sqrt(var), data.shape)
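A hypothetical usage sketch: zero variance returns an all-zero array, otherwise Gaussian noise with the shape of data:

import numpy as np
data = np.zeros((2, 3))
print(gaussiannoise(0.0, 0.0, data))   # zeros with shape (2, 3)
noise = gaussiannoise(0.0, 1.0, data)  # standard-normal noise, same shape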
def fabrik(l1, l2, l3, x_prev, y_prev, z_prev, x_command, y_command, z_command,
           tol_limit, max_iterations):

    # Base rotation is simply based on angle made within the x-y plane

    q1_prev = np.arctan2(y_prev[3], x_prev[3])
    q1 = np.arctan2(y_command, x_command)

    # rotation the base must make to get from the initial position to the commanded position
    base_rotation = q1 - q1_prev

    # Base rotation matrix about z
    R_z = np.array([[np.cos(base_rotation), -np.sin(base_rotation), 0.0],
                    [np.sin(base_rotation),
                     np.cos(base_rotation), 0.0], [0.0, 0.0, 1.0]])

    # Rotate the location of each joint by the base rotation.
    # This forces the FABRIK algorithm to solve in a single plane;
    # otherwise each joint would move as if it had a 3-DOF range of motion.
    p4 = np.dot(R_z, [x_prev[3], y_prev[3], z_prev[3]])
    p3 = np.dot(R_z, [x_prev[2], y_prev[2], z_prev[2]])
    p2 = np.dot(R_z, [x_prev[1], y_prev[1], z_prev[1]])
    p1 = np.dot(R_z, [x_prev[0], y_prev[0], z_prev[0]])

    # Store the (x,y,z) position of each joint

    p4x = p4[0]
    p4y = p4[1]
    p4z = p4[2]

    p3x = p3[0]
    p3y = p3[1]
    p3z = p3[2]

    p2x = p2[0]
    p2y = p2[1]
    p2z = p2[2]

    p1x = p1[0]
    p1y = p1[1]
    p1z = p1[2]

    # Starting point of each joint
    p1x_o = p1x
    p1y_o = p1y
    p1z_o = p1z

    iterations = 0
    for j in range(1, max_iterations + 1):

        if np.sqrt(
                np.power(x_command, 2) + np.power(y_command, 2) +
                np.power(z_command, 2)) > (l1 + l2 + l3):
            print('desired point is likely out of reach')

        [p3x, p3y, p3z] = project_along_vector(x_command, y_command, z_command,
                                               p3x, p3y, p3z, l3)
        [p2x, p2y, p2z] = project_along_vector(p3x, p3y, p3z, p2x, p2y, p2z,
                                               l2)
        [p1x, p1y, p1z] = project_along_vector(p2x, p2y, p2z, p1x, p1y, p1z,
                                               l1)

        [p2x, p2y, p2z] = project_along_vector(p1x_o, p1y_o, p1z_o, p2x, p2y,
                                               p2z, l1)
        [p3x, p3y, p3z] = project_along_vector(p2x, p2y, p2z, p3x, p3y, p3z,
                                               l2)
        [p4x, p4y, p4z] = project_along_vector(p3x, p3y, p3z, x_command,
                                               y_command, z_command, l3)

        # check how close FABRIK position is to command position
        tolx = p4x - x_command
        toly = p4y - y_command
        tolz = p4z - z_command

        tol = np.sqrt(
            np.power(tolx, 2) + np.power(toly, 2) + np.power(tolz, 2))
        iterations = iterations + 1

        # Stop once the end-effector position is within the specified tolerance
        if tol < tol_limit:
            break

    # Re-organize points into a big matrix for plotting elsewhere
    p_joints = np.array([[p1x, p2x, p3x, p4x], [p1y, p2y, p3y, p4y],
                         [p1z, p2z, p3z, p4z]])

    # Vectors between consecutive joints
    v21 = np.array([p2x - p1x, p2y - p1y, p2z - p1z])
    v32 = np.array([p3x - p2x, p3y - p2y, p3z - p2z])
    v43 = np.array([p4x - p3x, p4y - p3y, p4z - p3z])

    # Joint angles recovered from the final joint positions
    q2 = np.arctan2(
        (p2z - p1z),
        np.sqrt(np.power(p2x - p1x, 2) + np.power(p2y - p1y, 2)))

    q3 = -1 * angle_from_dot_product(v21, v32)
    q4 = -1 * angle_from_dot_product(v32, v43)

    q_joints = np.array([q1, q2, q3, q4])

    return q_joints
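A hypothetical call with three unit-length links lying along the x axis, only to show the argument layout (all values are made up and convergence is not guaranteed):

l1 = l2 = l3 = 1.0
x_prev = [0.0, 1.0, 2.0, 3.0]  # joint x positions of the previous pose
y_prev = [0.0, 0.0, 0.0, 0.0]
z_prev = [0.0, 0.0, 0.0, 0.0]
q = fabrik(l1, l2, l3, x_prev, y_prev, z_prev,
           1.5, 1.0, 0.5, tol_limit=1e-3, max_iterations=100)
print(q)  # [q1, q2, q3, q4] joint angles in radians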