Example #1
import math

def angleFunction(center_x, center_y):
    """
    :param center_x: x coordinate of the center from which the angle is computed
    :param center_y: y coordinate of the center from which the angle is computed
    :return: new function computing the angle from the given center point
    """
    return lambda x, y: 0.99 * math.atan2(center_y - y, center_x - x) / (2.0 * math.pi)
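A minimal usage sketch (the center point and query points below are illustrative, not from the original): the returned closure maps a point to a normalized angle in roughly (-0.495, 0.495], since math.atan2 returns values in (-pi, pi] and the result is scaled by 0.99 / (2 * pi).

angle_from_origin = angleFunction(0.0, 0.0)
print(angle_from_origin(1.0, 0.0))  # atan2(0, -1) scaled: 0.99 * pi / (2*pi) = 0.495
print(angle_from_origin(0.0, 1.0))  # atan2(-1, 0) scaled: -0.99 * (pi/2) / (2*pi) = -0.2475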
Example #2
def goto(self, v, p, tol):
    while True:
        [x, y, theta] = self.getRobotPose()
        distance = math.sqrt((x - p.getX()) ** 2 + (y - p.getY()) ** 2)
        delta_theta = GeometryHelper.diffDegree(math.atan2(p.getY() - y, p.getX() - x), theta)
        if distance < tol:
            return True
        self.move([v, delta_theta])
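GeometryHelper.diffDegree is not shown in this snippet; despite its name, it is fed radians from math.atan2 here. A minimal stand-in, assuming it returns the signed difference between the target bearing and the current heading wrapped to (-pi, pi]:

import math

def diff_angle(target, theta):
    # Hypothetical equivalent of GeometryHelper.diffDegree (an assumption,
    # not the original helper): signed difference target - theta in (-pi, pi].
    d = (target - theta) % (2.0 * math.pi)
    if d > math.pi:
        d -= 2.0 * math.pi
    return d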
Example #3
def senseLandmarks(self, landmarks):
    angles = []
    distances = []
    (posX, posY, _) = self._world.getTrueRobotPose()
    for (landX, landY) in landmarks:
        distance = math.sqrt((landX - posX) ** 2 + (landY - posY) ** 2)
        distance += random.gauss(0, self._sensorNoise)
        angle = math.atan2(posY - landY, posX - landX)
        # angle += random.gauss(0, self._sensorNoise)
        angles.append(angle)
        distances.append(distance)
    return (distances, angles)
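Note that atan2(posY - landY, posX - landX) is the bearing from the landmark back to the robot, not from the robot to the landmark. A measurement can therefore be inverted by stepping from the robot pose along that bearing; a small sketch assuming the (distances, angles) pairs returned above:

import math

def landmark_from_measurement(posX, posY, distance, angle):
    # The robot sits at landmark + distance * (cos(angle), sin(angle)),
    # so solve for the landmark (ignoring the added sensor noise).
    return (posX - distance * math.cos(angle),
            posY - distance * math.sin(angle))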
Example #4
def gotoTurnFirst(robot, v, p, tol):
    # get the actual position of the robot
    [x, y, theta] = robot.getRobotPose()
    # calculate the distance between the robot and the target point
    distance = math.sqrt((x - p.getX()) ** 2 + (y - p.getY()) ** 2)
    delta_theta = GeometryHelper.diffDegree(math.atan2(p.getY() - y, p.getX() - x), theta)
    # point not reached?
    if distance > tol:
        # turn toward the target, then drive the remaining distance
        robot.curveDriveTruePose(0.5, 0, delta_theta)
        robot.straightDriveTruePose(v, distance)
        # call goto again to correct any remaining error
        robot.goto(v, p, tol)
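A hypothetical call site (Point and robot are assumptions for illustration, not part of the snippet), showing the interface gotoTurnFirst expects:

class Point:
    # Minimal stand-in for whatever point type the project uses.
    def __init__(self, x, y):
        self._x, self._y = x, y
    def getX(self):
        return self._x
    def getY(self):
        return self._y

gotoTurnFirst(robot, v=0.5, p=Point(5.0, 3.0), tol=0.1)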
Example #5
def gotoWithObstacleAvoidance(self, v, p, tol, sensorsToUse=9, distanceTol=1, minSpeed=0.2):
    while True:
        [x, y, theta] = self.getRobotPose()
        distance = math.sqrt((x - p.getX()) ** 2 + (y - p.getY()) ** 2)
        delta_theta = GeometryHelper.diffDegree(math.atan2(p.getY() - y, p.getX() - x), theta)
        if distance < tol:
            return True
        if self.isObstacleInWay(sensorsToUse, distanceTol):
            return False
        # Scale speed down for large heading errors and short remaining distances.
        speed = v * max(minSpeed, (1 - abs(delta_theta * 5) / math.pi) * min(1, distance))
        if not self.move([speed, delta_theta]):
            # Move failed (e.g. blocked): back up briefly.
            for _ in range(4):
                self.move([-1, 0])
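A quick numeric check of the speed scaling above (values chosen for illustration), with v = 1.0 and minSpeed = 0.2:

import math

v, minSpeed = 1.0, 0.2
for delta_theta, distance in [(0.0, 2.0), (0.3, 2.0), (0.1, 0.5)]:
    speed = v * max(minSpeed, (1 - abs(delta_theta * 5) / math.pi) * min(1, distance))
    print(delta_theta, distance, round(speed, 3))
# (0.0, 2.0) -> 1.0    straight ahead and far away: full speed
# (0.3, 2.0) -> 0.523  a 0.3 rad heading error roughly halves the speed
# (0.1, 0.5) -> 0.42   near the goal, speed also scales with distance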
Example #6
    def __init__(self, im_gray0, tl, br, estimate_scale=True, estimate_rotation=True):
        self.__estimate_scale = estimate_scale
        self.__estimate_rotation = estimate_rotation

        # Initialise variables
        self.__has_result = False
        self.__outliers = None
        self.__votes = None
        self.__center = None
        self.__scale_estimate = None
        self.__rotation_estimate = None
        self.__tracked_keypoints = None
        self.__keypoints_cv = None
        self.__tl = (nan, nan)
        self.__tr = (nan, nan)
        self.__br = (nan, nan)
        self.__bl = (nan, nan)
        self.__bb = array([nan, nan, nan, nan])

        # Initialise detector, descriptor, matcher
        self.__detector = cv2.BRISK_create()
        self.__descriptor = self.__detector
        self.__matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

        # Get initial keypoints in whole image
        keypoints_cv = self.__detector.detect(im_gray0)

        # Remember keypoints that are in the rectangle as selected keypoints
        ind = util.in_rect(keypoints_cv, tl, br)
        selected_keypoints_cv = list(itertools.compress(keypoints_cv, ind))
        selected_keypoints_cv, self.selected_features = self.__descriptor.compute(im_gray0, selected_keypoints_cv)
        selected_keypoints = util.keypoints_cv_to_np(selected_keypoints_cv)
        num_selected_keypoints = len(selected_keypoints_cv)

        if num_selected_keypoints == 0:
            raise Exception('No keypoints found in selection')

        # Remember keypoints that are not in the rectangle as background keypoints
        background_keypoints_cv = list(itertools.compress(keypoints_cv, ~ind))
        background_keypoints_cv, background_features = self.__descriptor.compute(im_gray0, background_keypoints_cv)

        # Assign each keypoint a class starting from 1, background is 0
        self.selected_classes = array(range(num_selected_keypoints)) + 1
        background_classes = zeros(len(background_keypoints_cv))

        # Stack background features and selected features into database
        self.features_database = vstack((background_features, self.selected_features))

        # Same for classes
        self.database_classes = hstack((background_classes, self.selected_classes))

        # Get all distances between selected keypoints in squareform
        pdist = scipy.spatial.distance.pdist(selected_keypoints)
        self.squareform = scipy.spatial.distance.squareform(pdist)

        # Get all angles between selected keypoints
        angles = np.empty((num_selected_keypoints, num_selected_keypoints))
        for i1, k1 in enumerate(selected_keypoints):
            for i2, k2 in enumerate(selected_keypoints):
                # Compute vector from k1 to k2
                v = k2 - k1

                # Compute angle of this vector with respect to x axis
                angle = math.atan2(v[1], v[0])

                # Store angle
                angles[i1, i2] = angle

        self.angles = angles

        # Find the center of selected keypoints
        center = np.mean(selected_keypoints, axis=0)

        # Remember the rectangle coordinates relative to the center
        self.center_to_tl = np.array(tl) - center
        self.center_to_tr = np.array([br[0], tl[1]]) - center
        self.center_to_br = np.array(br) - center
        self.center_to_bl = np.array([tl[0], br[1]]) - center

        # Calculate springs of each keypoint
        self.springs = selected_keypoints - center

        # Set start image for tracking
        self.im_prev = im_gray0

        # Make keypoints 'active' keypoints
        self.active_keypoints = np.copy(selected_keypoints)

        # Attach class information to active keypoints
        self.active_keypoints = hstack((selected_keypoints, self.selected_classes[:, None]))

        # Remember number of initial keypoints
        self.num_initial_keypoints = len(selected_keypoints_cv)
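The O(n^2) angle loop above can also be expressed as one vectorized call; a sketch of an equivalent computation, assuming selected_keypoints is an (n, 2) numpy array as produced by util.keypoints_cv_to_np:

import numpy as np

# diffs[i, j] is the vector from keypoint i to keypoint j (k2 - k1 above).
diffs = selected_keypoints[None, :, :] - selected_keypoints[:, None, :]
# np.arctan2 matches math.atan2 elementwise, reproducing angles[i1, i2].
angles = np.arctan2(diffs[:, :, 1], diffs[:, :, 0])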
Example #7
File: cmt.py Project: bobbyluig/Eclipse
    def __init__(self, im_gray0, tl, br):

        # Initialize detector, descriptor, and matcher.
        self.detector = cv2.BRISK_create()
        self.descriptor = self.detector
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

        # Get initial keypoints for the entire image.
        keypoints_cv = self.detector.detect(im_gray0)

        # Remember keypoints that are in the rectangle as selected keypoints
        ind = CMT.inRectangle(keypoints_cv, tl, br)
        selected_keypoints_cv = list(itertools.compress(keypoints_cv, ind))
        selected_keypoints_cv, self.selected_features = self.descriptor.compute(im_gray0, selected_keypoints_cv)
        selected_keypoints = CMT.convertKeyPoints(selected_keypoints_cv)
        num_selected_keypoints = len(selected_keypoints_cv)

        if num_selected_keypoints == 0:
            raise Exception('No KeyPoints found in the selection.')

        # Remember keypoints that are not in the rectangle as background keypoints.
        background_keypoints_cv = list(itertools.compress(keypoints_cv, ~ind))
        background_keypoints_cv, background_features = self.descriptor.compute(im_gray0, background_keypoints_cv)
        _ = CMT.convertKeyPoints(background_keypoints_cv)

        # Assign each keypoint a class starting from 1, background is 0
        self.selected_classes = array(range(num_selected_keypoints)) + 1
        background_classes = zeros(len(background_keypoints_cv))

        # Stack background features and selected features into database
        self.features_database = vstack((background_features, self.selected_features))

        # Same for classes
        self.database_classes = hstack((background_classes, self.selected_classes))

        # Get all distances between selected keypoints in squareform
        pdist = scipy.spatial.distance.pdist(selected_keypoints)
        self.squareform = scipy.spatial.distance.squareform(pdist)

        # Get all angles between selected keypoints
        angles = np.empty((num_selected_keypoints, num_selected_keypoints))
        for i1, k1 in enumerate(selected_keypoints):
            for i2, k2 in enumerate(selected_keypoints):

                # Compute vector from k1 to k2
                v = k2 - k1

                # Compute angle of this vector with respect to x axis
                angle = math.atan2(v[1], v[0])

                # Store angle
                angles[i1, i2] = angle

        self.angles = angles

        # Find the center of selected keypoints.
        center = np.mean(selected_keypoints, axis=0)

        # Remember the rectangle coordinates relative to the center.
        self.center_to_tl = np.array(tl) - center
        self.center_to_tr = np.array([br[0], tl[1]]) - center
        self.center_to_br = np.array(br) - center
        self.center_to_bl = np.array([tl[0], br[1]]) - center

        # Calculate springs of each keypoint.
        self.springs = selected_keypoints - center

        # Set start image for tracking.
        self.im_prev = im_gray0

        # Make keypoints 'active' keypoints.
        self.active_keypoints = np.copy(selected_keypoints)

        # Attach class information to active keypoints.
        self.active_keypoints = hstack((selected_keypoints, self.selected_classes[:, None]))

        # Remember number of initial keypoints.
        self.num_initial_keypoints = len(selected_keypoints_cv)
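Since the BRISK object serves as both detector and descriptor here (self.descriptor = self.detector), the initial whole-image detect-then-compute steps could also be collapsed into a single call; a small sketch:

import cv2

brisk = cv2.BRISK_create()
# detectAndCompute runs detection and description in one pass;
# the second argument is an optional mask (None = whole image).
keypoints, descriptors = brisk.detectAndCompute(im_gray0, None)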
Example #8
File: CMT.py Project: dtbaker/pi-web
	def initialise(self, im_gray0, tl, br):

		# Initialise detector, descriptor, matcher
		self.detector = cv2.FeatureDetector_create(self.DETECTOR)
		self.descriptor = cv2.DescriptorExtractor_create(self.DESCRIPTOR)
		self.matcher = cv2.DescriptorMatcher_create(self.MATCHER)

		# Get initial keypoints in whole image
		keypoints_cv = self.detector.detect(im_gray0)

		# Remember keypoints that are in the rectangle as selected keypoints
		ind = util.in_rect(keypoints_cv, tl, br)
		selected_keypoints_cv = list(itertools.compress(keypoints_cv, ind))
		selected_keypoints_cv, self.selected_features = self.descriptor.compute(im_gray0, selected_keypoints_cv)
		selected_keypoints = util.keypoints_cv_to_np(selected_keypoints_cv)
		num_selected_keypoints = len(selected_keypoints_cv)

		if num_selected_keypoints == 0:
			raise Exception('No keypoints found in selection')

		# Remember keypoints that are not in the rectangle as background keypoints
		background_keypoints_cv = list(itertools.compress(keypoints_cv, ~ind))
		background_keypoints_cv, background_features = self.descriptor.compute(im_gray0, background_keypoints_cv)
		_ = util.keypoints_cv_to_np(background_keypoints_cv)

		# Assign each keypoint a class starting from 1, background is 0
		self.selected_classes = array(range(num_selected_keypoints)) + 1
		background_classes = zeros(len(background_keypoints_cv))

		# Stack background features and selected features into database.
		# descriptor.compute returns None when there are no background
		# keypoints; use an empty feature array so the database rows stay
		# aligned with database_classes below.
		if background_features is None:
			background_features = np.zeros(
				(0, self.selected_features.shape[1]),
				dtype=self.selected_features.dtype)
		self.features_database = vstack((background_features, self.selected_features))

		# Same for classes
		self.database_classes = hstack((background_classes, self.selected_classes))

		# Get all distances between selected keypoints in squareform
		pdist = scipy.spatial.distance.pdist(selected_keypoints)
		self.squareform = scipy.spatial.distance.squareform(pdist)

		# Get all angles between selected keypoints
		angles = np.empty((num_selected_keypoints, num_selected_keypoints))
		for i1, k1 in enumerate(selected_keypoints):
			for i2, k2 in enumerate(selected_keypoints):

				# Compute vector from k1 to k2
				v = k2 - k1

				# Compute angle of this vector with respect to x axis
				angle = math.atan2(v[1], v[0])

				# Store angle
				angles[i1, i2] = angle

		self.angles = angles

		# Find the center of selected keypoints
		center = np.mean(selected_keypoints, axis=0)

		# Remember the rectangle coordinates relative to the center
		self.center_to_tl = np.array(tl) - center
		self.center_to_tr = np.array([br[0], tl[1]]) - center
		self.center_to_br = np.array(br) - center
		self.center_to_bl = np.array([tl[0], br[1]]) - center

		# Calculate springs of each keypoint
		self.springs = selected_keypoints - center

		# Set start image for tracking
		self.im_prev = im_gray0

		# Make keypoints 'active' keypoints
		self.active_keypoints = np.copy(selected_keypoints)

		# Attach class information to active keypoints
		self.active_keypoints = hstack((selected_keypoints, self.selected_classes[:, None]))

		# Remember number of initial keypoints
		self.num_initial_keypoints = len(selected_keypoints_cv)
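cv2.FeatureDetector_create, cv2.DescriptorExtractor_create and cv2.DescriptorMatcher_create are the OpenCV 2.4-era factory functions and were removed in OpenCV 3; a hedged compatibility sketch, assuming BRISK as in the earlier examples:

import cv2

try:
    # OpenCV 2.4.x factory API, as used in this example
    detector = cv2.FeatureDetector_create('BRISK')
    descriptor = cv2.DescriptorExtractor_create('BRISK')
except AttributeError:
    # OpenCV 3.x and later: one object acts as detector and extractor
    detector = descriptor = cv2.BRISK_create()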
Example #9
def get_line_data(image, x1, y1, x2, y2, line_w=2, the_z=0, the_c=0, the_t=0):
    """
    Grab pixel data covering the specified line, and rotates it horizontally.

    Uses current rendering settings and returns 8-bit data.
    Rotates it so that x1,y1 is to the left,
    Returning a numpy 2d array. Used by Kymograph.py script.
    Uses PIL to handle rotating and interpolating the data. Converts to numpy
    to PIL and back (may change dtype.)

    @param pixels:          PixelsWrapper object
    @param x1, y1, x2, y2:  Coordinates of line
    @param line_w:          Width of the line we want
    @param the_z:           Z index within pixels
    @param the_c:           Channel index
    @param the_t:           Time index
    """
    size_x = image.getSizeX()
    size_y = image.getSizeY()

    line_x = x2 - x1
    line_y = y2 - y1

    rads = math.atan2(line_y, line_x)

    # How much extra Height do we need, top and bottom?
    extra_h = abs(math.sin(rads) * line_w)
    bottom = int(max(y1, y2) + extra_h/2)
    top = int(min(y1, y2) - extra_h/2)

    # How much extra width do we need, left and right?
    extra_w = abs(math.cos(rads) * line_w)
    left = int(min(x1, x2) - extra_w)
    right = int(max(x1, x2) + extra_w)

    # What's the larger area we need? - Are we outside the image?
    pad_left, pad_right, pad_top, pad_bottom = 0, 0, 0, 0
    if left < 0:
        pad_left = abs(left)
        left = 0
    x = left
    if top < 0:
        pad_top = abs(top)
        top = 0
    y = top
    if right > size_x:
        pad_right = right-size_x
        right = size_x
    w = int(right - left)
    if bottom > size_y:
        pad_bottom = bottom-size_y
        bottom = size_y
    h = int(bottom - top)

    # get the Tile - render single channel white
    image.set_active_channels([the_c + 1], None, ['FFFFFF'])
    jpeg_data = image.renderJpegRegion(the_z, the_t, x, y, w, h)
    pil = Image.open(BytesIO(jpeg_data))

    # pad if we wanted a bigger region
    if pad_left > 0 or pad_right > 0 or pad_top > 0 or pad_bottom > 0:
        img_w, img_h = pil.size
        new_w = img_w + pad_left + pad_right
        new_h = img_h + pad_top + pad_bottom
        canvas = Image.new('RGB', (new_w, new_h), '#ff0000')
        canvas.paste(pil, (pad_left, pad_top))
        pil = canvas

    # Now need to rotate so that x1,y1 is horizontally to the left of x2,y2
    to_rotate = math.degrees(rads)

    # filter=Image.BICUBIC see
    # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2172449/
    rotated = pil.rotate(to_rotate, expand=True)

    # finally we need to crop to the length of the line
    length = int(math.sqrt(math.pow(line_x, 2) + math.pow(line_y, 2)))
    rot_w, rot_h = rotated.size
    # use integer division so the crop box has integer coordinates
    crop_x = (rot_w - length) // 2
    crop_x2 = crop_x + length
    crop_y = (rot_h - line_w) // 2
    crop_y2 = crop_y + line_w
    cropped = rotated.crop((crop_x, crop_y, crop_x2, crop_y2))

    # return numpy array
    rgb_plane = asarray(cropped)
    # greyscale image. r, g, b all same. Just use first
    return rgb_plane[::, ::, 0]
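A quick numeric check of the padding geometry above, using an illustrative 45-degree line with line_w = 2:

import math

x1, y1, x2, y2, line_w = 10, 10, 20, 20, 2
rads = math.atan2(y2 - y1, x2 - x1)      # 45 degrees
extra_h = abs(math.sin(rads) * line_w)   # ~1.414 extra rows top and bottom
extra_w = abs(math.cos(rads) * line_w)   # ~1.414 extra columns left and right
length = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))  # 14: final crop width
print(extra_h, extra_w, length)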
Example #10
    def initialise(self, im_gray0, tl, br):

        # Initialise detector, descriptor, matcher
        self.detector = cv2.FeatureDetector_create(self.DETECTOR)
        self.descriptor = cv2.DescriptorExtractor_create(self.DESCRIPTOR)
        self.matcher = cv2.DescriptorMatcher_create(self.MATCHER)

        # Get initial keypoints in whole image
        keypoints_cv = self.detector.detect(im_gray0)

        # Remember keypoints that are in the rectangle as selected keypoints
        ind = util.in_rect(keypoints_cv, tl, br)
        selected_keypoints_cv = list(itertools.compress(keypoints_cv, ind))
        selected_keypoints_cv, self.selected_features = self.descriptor.compute(im_gray0, selected_keypoints_cv)
        selected_keypoints = util.keypoints_cv_to_np(selected_keypoints_cv)
        num_selected_keypoints = len(selected_keypoints_cv)

        if num_selected_keypoints == 0:
            raise Exception('No keypoints found in selection')

        # Remember keypoints that are not in the rectangle as background keypoints
        background_keypoints_cv = list(itertools.compress(keypoints_cv, ~ind))
        background_keypoints_cv, background_features = self.descriptor.compute(im_gray0, background_keypoints_cv)
        _ = util.keypoints_cv_to_np(background_keypoints_cv)

        # Assign each keypoint a class starting from 1, background is 0
        self.selected_classes = array(range(num_selected_keypoints)) + 1
        background_classes = zeros(len(background_keypoints_cv))

        # Stack background features and selected features into database
        self.features_database = vstack((background_features, self.selected_features))

        # Same for classes
        self.database_classes = hstack((background_classes, self.selected_classes))

        # Get all distances between selected keypoints in squareform
        pdist = scipy.spatial.distance.pdist(selected_keypoints)
        self.squareform = scipy.spatial.distance.squareform(pdist)

        # Get all angles between selected keypoints
        angles = np.empty((num_selected_keypoints, num_selected_keypoints))
        for i1, k1 in enumerate(selected_keypoints):
            for i2, k2 in enumerate(selected_keypoints):
                # Compute vector from k1 to k2
                v = k2 - k1

                # Compute angle of this vector with respect to x axis
                angle = math.atan2(v[1], v[0])

                # Store angle
                angles[i1, i2] = angle

        self.angles = angles

        # Find the center of selected keypoints
        center = np.mean(selected_keypoints, axis=0)

        # Remember the rectangle coordinates relative to the center
        self.center_to_tl = np.array(tl) - center
        self.center_to_tr = np.array([br[0], tl[1]]) - center
        self.center_to_br = np.array(br) - center
        self.center_to_bl = np.array([tl[0], br[1]]) - center

        # Calculate springs of each keypoint
        self.springs = selected_keypoints - center

        # Set start image for tracking
        self.im_prev = im_gray0

        # Make keypoints 'active' keypoints
        self.active_keypoints = np.copy(selected_keypoints)

        # Attach class information to active keypoints
        self.active_keypoints = hstack((selected_keypoints, self.selected_classes[:, None]))

        # Remember number of initial keypoints
        self.num_initial_keypoints = len(selected_keypoints_cv)
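util.in_rect is not shown here; for ~ind to invert the mask elementwise, it must return a numpy boolean array. A minimal stand-in under that assumption (hypothetical, not the project's implementation):

import numpy as np

def in_rect(keypoints_cv, tl, br):
    # True where a keypoint's pt lies inside the rectangle spanned by
    # tl (top-left) and br (bottom-right).
    pts = np.array([kp.pt for kp in keypoints_cv])
    return ((pts[:, 0] >= tl[0]) & (pts[:, 0] <= br[0]) &
            (pts[:, 1] >= tl[1]) & (pts[:, 1] <= br[1]))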
Example #11
    def vinc_dir(f, a, coordinate_a, alpha12, s):
        """

        Returns the lat and long of projected point and reverse azimuth
        given a reference point and a distance and azimuth to project.
        lats, longs and azimuths are passed in decimal degrees
        Returns ( phi2,  lambda2,  alpha21 ) as a tuple

        """

        phi1, lambda1 = coordinate_a.lat, coordinate_a.lon
        piD4 = math.atan(1.0)
        two_pi = piD4 * 8.0
        phi1 = phi1 * piD4 / 45.0
        lambda1 = lambda1 * piD4 / 45.0
        alpha12 = alpha12 * piD4 / 45.0
        if alpha12 < 0.0:
            alpha12 += two_pi
        if alpha12 > two_pi:
            alpha12 -= two_pi

        b = a * (1.0 - f)
        tan_u1 = (1 - f) * math.tan(phi1)
        u1 = math.atan(tan_u1)
        sigma1 = math.atan2(tan_u1, math.cos(alpha12))
        sin_alpha = math.cos(u1) * math.sin(alpha12)
        cos_alpha_sq = 1.0 - sin_alpha * sin_alpha
        u2 = cos_alpha_sq * (a * a - b * b) / (b * b)

        # @todo: look into replacing A and B with vincenty's amendment, see if speed/accuracy is good
        A = 1.0 + (u2 / 16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
        B = (u2 / 1024) * (256 + u2 * (-128 + u2 * (74 - 47 * u2)))

        # Starting with the approx
        sigma = (s / (b * A))
        last_sigma = 2.0 * sigma + 2.0  # something impossible

        # Iterate the following three equations until there is no
        # significant change in sigma (updating two_sigma_m and delta_sigma)
        while abs((last_sigma - sigma) / sigma) > 1.0e-9:
            two_sigma_m = 2 * sigma1 + sigma
            delta_sigma = B * math.sin(sigma) * (
                math.cos(two_sigma_m)
                + (B / 4) * (math.cos(sigma) * (-1 + 2 * math.cos(two_sigma_m) ** 2)
                             - (B / 6) * math.cos(two_sigma_m)
                             * (-3 + 4 * math.sin(sigma) ** 2)
                             * (-3 + 4 * math.cos(two_sigma_m) ** 2)))
            last_sigma = sigma
            sigma = (s / (b * A)) + delta_sigma

        phi2 = math.atan2(
            math.sin(u1) * math.cos(sigma) + math.cos(u1) * math.sin(sigma) * math.cos(alpha12),
            (1 - f) * math.sqrt(sin_alpha ** 2 +
                                (math.sin(u1) * math.sin(sigma) -
                                 math.cos(u1) * math.cos(sigma) * math.cos(alpha12)) ** 2))

        lmbda = math.atan2(math.sin(sigma) * math.sin(alpha12),
                           math.cos(u1) * math.cos(sigma) -
                           math.sin(u1) * math.sin(sigma) * math.cos(alpha12))

        C = (f / 16) * cos_alpha_sq * (4 + f * (4 - 3 * cos_alpha_sq))
        omega = lmbda - (1 - C) * f * sin_alpha * (sigma + C * math.sin(sigma) *
                                                   (math.cos(two_sigma_m) + C * math.cos(sigma) *
                                                    (-1 + 2 * math.pow(math.cos(two_sigma_m), 2))))

        lambda2 = lambda1 + omega
        alpha21 = math.atan2(sin_alpha, (-math.sin(u1) * math.sin(sigma) +
                                         math.cos(u1) * math.cos(sigma) * math.cos(alpha12)))

        alpha21 += two_pi / 2.0
        if alpha21 < 0.0:
            alpha21 += two_pi
        if alpha21 > two_pi:
            alpha21 -= two_pi

        phi2 = phi2 * 45.0 / piD4
        lambda2 = lambda2 * 45.0 / piD4
        alpha21 = alpha21 * 45.0 / piD4
        return Coordinate(lat=phi2, lon=lambda2), alpha21
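A hypothetical call, assuming the WGS-84 ellipsoid constants and a Coordinate named tuple with lat/lon fields (vinc_dir is used here as a plain function or staticmethod):

from collections import namedtuple

Coordinate = namedtuple('Coordinate', ['lat', 'lon'])

a = 6378137.0          # WGS-84 semi-major axis in metres
f = 1 / 298.257223563  # WGS-84 flattening

start = Coordinate(lat=52.0, lon=13.0)
# Project 10 km from start on an azimuth of 45 degrees.
end, reverse_azimuth = vinc_dir(f, a, start, 45.0, 10000.0)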
Example #12
    def vinc_inv(f, a, coordinate_a, coordinate_b):
        """

        Returns the distance between two geographic points on the ellipsoid
        and the forward and reverse azimuths between these points.
        lats, longs and azimuths are in radians, distance in metres

        :param f: flattening of the geodesic
        :param a: the semimajor axis of the geodesic
        :param coordinate_a: decimal coordinate given as named tuple coordinate
        :param coordinate_b: decimal coordinate given as named tuple coordinate
        Note: The problem calculates forward and reverse azimuths as: coordinate_a -> coordinate_b

        """
        phi1 = math.radians(coordinate_a.lat)
        lembda1 = math.radians(coordinate_a.lon)

        phi2 = math.radians(coordinate_b.lat)
        lembda2 = math.radians(coordinate_b.lon)

        if (abs(phi2 - phi1) < 1e-8) and (abs(lembda2 - lembda1) < 1e-8):
            return {'distance': 0.0, 'forward_azimuth': 0.0, 'reverse_azimuth': 0.0}

        two_pi = 2.0 * math.pi

        b = a * (1 - f)

        TanU1 = (1 - f) * math.tan(phi1)
        TanU2 = (1 - f) * math.tan(phi2)

        U1 = math.atan(TanU1)
        U2 = math.atan(TanU2)

        lembda = lembda2 - lembda1
        last_lembda = -4000000.0  # an impossible value
        omega = lembda

        # Iterate the following equations,
        #  until there is no significant change in lembda

        while last_lembda < -3000000.0 or (lembda != 0 and abs((last_lembda - lembda) / lembda) > 1.0e-9):
            sqr_sin_sigma = pow(math.cos(U2) * math.sin(lembda), 2) + \
                            pow((math.cos(U1) * math.sin(U2) -
                                 math.sin(U1) * math.cos(U2) * math.cos(lembda)), 2)

            Sin_sigma = math.sqrt(sqr_sin_sigma)

            Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)

            sigma = math.atan2(Sin_sigma, Cos_sigma)

            Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)
            alpha = math.asin(Sin_alpha)

            Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2))

            C = (f / 16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))

            last_lembda = lembda

            lembda = omega + (1 - C) * f * math.sin(alpha) * (
                sigma + C * math.sin(sigma) *
                (Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2))))

        u2 = pow(math.cos(alpha), 2) * (a * a - b * b) / (b * b)

        A = 1 + (u2 / 16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))

        B = (u2 / 1024) * (256 + u2 * (-128 + u2 * (74 - 47 * u2)))

        delta_sigma = B * Sin_sigma * (
            Cos2sigma_m + (B / 4) *
            (Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2)) -
             (B / 6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) *
             (-3 + 4 * pow(Cos2sigma_m, 2))))

        s = b * A * (sigma - delta_sigma)

        alpha12 = math.atan2(math.cos(U2) * math.sin(lembda),
                             math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda))

        alpha21 = math.atan2(math.cos(U1) * math.sin(lembda),
                             -math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda))

        if alpha12 < 0.0:
            alpha12 += two_pi
        if alpha12 > two_pi:
            alpha12 -= two_pi

        alpha21 += two_pi / 2.0
        if alpha21 < 0.0:
            alpha21 += two_pi
        if alpha21 > two_pi:
            alpha21 -= two_pi

        return {"distance": s, "forward_azimuth": alpha12, "reverse_azimuth": alpha21}
Example #13
def get_line_data(pixels, x1, y1, x2, y2, line_w=2, the_z=0, the_c=0, the_t=0):
    """
    Grabs pixel data covering the specified line, and rotates it horizontally
    so that x1,y1 is to the left,
    Returning a numpy 2d array. Used by Kymograph.py script.
    Uses PIL to handle rotating and interpolating the data. Converts to numpy
    to PIL and back (may change dtype.)

    @param pixels:          PixelsWrapper object
    @param x1, y1, x2, y2:  Coordinates of line
    @param line_w:          Width of the line we want
    @param the_z:           Z index within pixels
    @param the_c:           Channel index
    @param the_t:           Time index
    """

    size_x = pixels.getSizeX()
    size_y = pixels.getSizeY()

    line_x = x2 - x1
    line_y = y2 - y1

    # Note the swapped argument order (x, y): this measures the angle from the
    # y axis, which the "90 - math.degrees(rads)" rotation below compensates for.
    rads = math.atan2(line_x, line_y)

    # How much extra Height do we need, top and bottom?
    extra_h = abs(math.sin(rads) * line_w)
    bottom = int(max(y1, y2) + old_div(extra_h, 2))
    top = int(min(y1, y2) - old_div(extra_h, 2))

    # How much extra width do we need, left and right?
    extra_w = abs(math.cos(rads) * line_w)
    left = int(min(x1, x2) - extra_w)
    right = int(max(x1, x2) + extra_w)

    # What's the larger area we need? - Are we outside the image?
    pad_left, pad_right, pad_top, pad_bottom = 0, 0, 0, 0
    if left < 0:
        pad_left = abs(left)
        left = 0
    x = left
    if top < 0:
        pad_top = abs(top)
        top = 0
    y = top
    if right > size_x:
        pad_right = right - size_x
        right = size_x
    w = int(right - left)
    if bottom > size_y:
        pad_bottom = bottom - size_y
        bottom = size_y
    h = int(bottom - top)
    tile = (x, y, w, h)

    # get the Tile
    plane = pixels.getTile(the_z, the_c, the_t, tile)

    # pad if we wanted a bigger region
    if pad_left > 0:
        data_h, data_w = plane.shape
        pad_data = zeros((data_h, pad_left), dtype=plane.dtype)
        plane = hstack((pad_data, plane))
    if pad_right > 0:
        data_h, data_w = plane.shape
        pad_data = zeros((data_h, pad_right), dtype=plane.dtype)
        plane = hstack((plane, pad_data))
    if pad_top > 0:
        data_h, data_w = plane.shape
        pad_data = zeros((pad_top, data_w), dtype=plane.dtype)
        plane = vstack((pad_data, plane))
    if pad_bottom > 0:
        data_h, data_w = plane.shape
        pad_data = zeros((pad_bottom, data_w), dtype=plane.dtype)
        plane = vstack((plane, pad_data))

    pil = script_utils.numpy_to_image(plane, (plane.min(), plane.max()), int32)

    # Now need to rotate so that x1,y1 is horizontally to the left of x2,y2
    to_rotate = 90 - math.degrees(rads)

    if x1 > x2:
        to_rotate += 180
    # filter=Image.BICUBIC see
    # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2172449/
    rotated = pil.rotate(to_rotate, expand=True)
    # rotated.show()

    # finally we need to crop to the length of the line
    length = int(math.sqrt(math.pow(line_x, 2) + math.pow(line_y, 2)))
    rot_w, rot_h = rotated.size
    crop_x = old_div((rot_w - length), 2)
    crop_x2 = crop_x + length
    crop_y = old_div((rot_h - line_w), 2)
    crop_y2 = crop_y + line_w
    cropped = rotated.crop((crop_x, crop_y, crop_x2, crop_y2))
    return asarray(cropped)
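Despite the swapped atan2 arguments, this version and the get_line_data in Example #9 compute the same rotation, since atan2(x, y) equals pi/2 - atan2(y, x) up to 2*pi wrapping; a quick check with an illustrative 3-4-5 line:

import math

line_x, line_y = 3.0, 4.0
angle_a = math.degrees(math.atan2(line_y, line_x))       # Example #9 convention
angle_b = 90 - math.degrees(math.atan2(line_x, line_y))  # this example's convention
print(angle_a, angle_b)  # both ~53.13 degrees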