Example 1
def svoExecute(fpathL1, fpathR1, fpathL2, fpathR2):
    #initialize
    (imLprev, imRprev) = openImg.getImgs(fpathL1, fpathR1)
    (imLnext, imRnext) = openImg.getImgs(fpathL2, fpathR2)

    size = [2, 2]
    #get key points
    kpLprev, descLprev = features.getFeatures(imLprev, size)
    kpRprev, descRprev = features.getFeatures(imRprev, size)

    #correspondences
    corLprev_St, corRprev_St, matchKeptL_St, matchKeptR_St = features.getCorres(
        descLprev, descRprev, kpLprev, kpRprev)

    #triangulate (inputs must be 2 x N arrays of matched image points)
    [x3dprev, pErrPrev,
     matchKept_3d] = triangulation.triangulate(corLprev_St, corRprev_St)

    #Get temporal correspondences
    kpLnext, descLnext = features.getFeatures(imLnext, size)

    corLprev_T, corLnext_T, matchKeptPrev_T, matchKeptNext_T = features.getCorres(
        descLprev, descLnext, kpLprev, kpLnext)

    (x3dprev_final, x2dnext_final) = idxMerge(matchKeptL_St, matchKept_3d,
                                              x3dprev, corLnext_T,
                                              matchKeptPrev_T, matchKeptNext_T)

    #Get updated camera pose
    [rot, trans] = PnP.camPose(x3dprev_final, x2dnext_final, pErrPrev)

    return (rot, trans, x3dprev_final)
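svoExecute returns the relative pose between two consecutive stereo frames. A minimal sketch of chaining such per-frame motions into a cumulative pose; frame_paths is a hypothetical iterable of path tuples, and the composition assumes rot/trans map previous-frame coordinates into the current frame:

import numpy as np

R_total = np.eye(3)
t_total = np.zeros((3, 1))
for pL1, pR1, pL2, pR2 in frame_paths:  # hypothetical iterable of (L1, R1, L2, R2) paths
    rot, trans, _ = svoExecute(pL1, pR1, pL2, pR2)
    # Accumulate: a point x in the current frame maps to R_total @ x + t_total globally.
    t_total = t_total + R_total @ np.asarray(trans).reshape(3, 1)
    R_total = R_total @ np.asarray(rot)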
Example 2
def triangulate(nodes, randstream, tri_mode):
    """Return the list of edges which achieves a Delaunay triangulation of the specified nodes."""
    triangles = triangulation.triangulate(nodes, randstream, tri_mode)
    edges = set()
    for tri in triangles:
        for edge in triangle_edges(tri):
            edges.add((edge[0], edge[1]))
    return list(edges)
Example 3
def triangulate(nodes, randstream, tri_mode):
	"""Return the list of edges which achieves a Delaunay triangulation of the specified nodes."""
	triangles = triangulation.triangulate(nodes, randstream, tri_mode)
	edges = set()
	for tri in triangles:
		for edge in triangle_edges(tri):
			edges.add((edge[0], edge[1]))
	return list(edges)
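Examples 2 and 3 call a triangle_edges helper that is not shown. A minimal sketch, assuming each triangle is a triple of orderable node ids and that edge direction should not matter:

def triangle_edges(tri):
    """Yield the three undirected edges of triangle (a, b, c)."""
    a, b, c = tri
    for u, v in ((a, b), (b, c), (c, a)):
        # Order each pair so (u, v) and (v, u) deduplicate in the edge set.
        yield (min(u, v), max(u, v))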
Example 4
    def naive_monte_carlo(self):
        # step 1: find paths
        scenario = self.__create_scenario__()

        game_objects = scenario.getGameObjects()
        graph, zones = triangulate(game_objects, self.width, self.height)  
        # subdivide the range of impulses. 
        #action_subdivided_ranges = self.__subdivide_action_range__(IMPULSE_RANGE_X, IMPULSE_RANGE_Y)
        possible_action_ranges = self.solve((IMPULSE_RANGE_X, IMPULSE_RANGE_Y), 0, zones, graph)
        print "possible ranges found: ", possible_action_ranges
Example 5
def stereoVision(detection_boxes, camera_distance_x,
                 camera_distance_y, left_cam_angle_x, right_cam_angle_x,
                 left_cam_angle_y, right_cam_angle_y,
                 left_vision_range_x, right_vision_range_x,
                 left_vision_range_y, right_vision_range_y):
  """
  Detects objects with two cameras for stereo vision
  Arguments:
    detection_boxes - Detection boxes for each camera. Assumes this represents the results of at 
                      least two cameras
    camera_distance_x, camera_distance_y - Horizontal or vertical distance between the two cameras
                                           The unit this is given in is the unit in which results
                                           will be returned
    left_cam_angle_x/_y, right_cam_angle_x/_y - Angles at which the cameras are situated.
                                                Measured north of east in degrees
    left_vision_range_x, right_vision_range_x - Horizontal range of vision of each
                                                camera, measured in degrees
    left_vision_range_y, right_vision_range_y - Vertical range of vision of each
                                                camera, measured in degrees
  Returns:
    object_displacements - Dictionary of x-y-z target displacements from each detected object
                           relative to the origin at the leftmost camera
    camera_depths - Depth (z displacement) relative to each camera
  """
  #Checks which classes were detected by both cameras
  left_classes = set(detection_boxes[0].keys())
  right_classes = set(detection_boxes[1].keys())
  overlap = left_classes.intersection(right_classes)
  object_displacements = {}
  camera_depths = {}
  #Uses stereo vision to detect the object's depth location
  for target_name in list(overlap):
    ax, ay = getCenterPoint(detection_boxes[0][target_name])
    bx, by = getCenterPoint(detection_boxes[1][target_name])
    #Finds the x-z displacement between the left camera and target
    base_triangulation = triangulation.triangulate(ax, bx,
                                                   radians(left_cam_angle_x), radians(right_cam_angle_x),
                                                   radians(left_vision_range_x), radians(right_vision_range_x),
                                                   camera_distance_x)
    object_angle_1, object_angle_2, target_distance_1, target_distance_2 = base_triangulation
    left_x_displacement, left_z_displacement = triangulation.findTargetDistances(object_angle_1, radians(left_cam_angle_x), target_distance_1)
    right_x_displacement, right_z_displacement = triangulation.findTargetDistances(object_angle_2, radians(right_cam_angle_x), target_distance_2)
    left_depth, right_depth = triangulation.findCameraDepths(radians(left_vision_range_x), radians(right_vision_range_x),
                                                             object_angle_1, object_angle_2,
                                                             target_distance_1, target_distance_2)
    #Finds the y displacement between the cameras and target
    #Doesn't use triangulation... yet
    #Will not work if there is any y displacement between cameras
    #TODO: This needs testing
    y_angle = (ay-.5)*radians(left_vision_range_y)
    y_displacement = tan(y_angle)*left_depth
    object_displacements[target_name] = ((left_x_displacement, y_displacement, left_z_displacement),
                                         (right_x_displacement, y_displacement, right_z_displacement))
    camera_depths[target_name] = left_depth, right_depth
  return object_displacements, camera_depths
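A quick numeric sanity check of the y-displacement model above, assuming ay is a normalized vertical image coordinate in [0, 1] and using a hypothetical 40-degree vertical field of view at a depth of 10 units:

from math import radians, tan

ay, fov_y_deg, depth = 0.75, 40.0, 10.0     # hypothetical values
y_angle = (ay - 0.5) * radians(fov_y_deg)   # 0.1745 rad above the optical axis
print(tan(y_angle) * depth)                 # ~1.76 units of upward displacement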
Example 6
    def solve_with_rules(self):

        scenario = self.__create_scenario__()
        game_objects = scenario.getGameObjects()

        # step 1: find simple paths
        self.graph, self.zones = triangulate(game_objects, self.width, self.height)
        simple_paths = nx.all_simple_paths(self.graph, source=self.initial_zone, target=self.target_zone)

        for path in simple_paths:
            self.plan_follow_path(path, scenario, self.zones, self.graph)
            break # break for debug
Example 7
    def test_old_evaluate(self, action):
        scenario = Scenario_Generator(self.width, self.height, self.immobile_objs, self.mobile_objs, self.manipulatable_obj, self.target_obj, showRender=False)
        game_objects = scenario.getGameObjects()
        graph, zones = triangulate(game_objects, self.width, self.height)  
        end_position, shape = scenario.evaluate(action)
        end_position = Point(end_position)
        last_zone = -1
        for i in xrange(len(zones)):
            if zones[i].contains(end_position):
                last_zone = i
                break
        if last_zone == -1:
            return len(zones) # set to the maximum length

        score = nx.shortest_path_length(graph, source=last_zone, target=self.target_zone)
        return score
Example 8
    def solve(self):
        # step 1: find paths
        scenario = self.__create_scenario__()
        game_objects = scenario.getGameObjects()
        graph, zones = triangulate(game_objects, self.width, self.height)
        # find paths:
        simple_paths = nx.all_simple_paths(graph, source=24, target=8)
        for path in simple_paths:
            # detect turning nodes
            nodes_id = self.detect_turning_nodes_id(path, graph)
            # if there is a turning node, check whether there will be any events that can support this change
            # step 1, starting from this node, check if there is any edge that allows bouncing back

            #print nodes_id

        # random sample an action

        #print simple_paths
        return
Example 9
from numpy import array, dot, eye, hstack
from numpy.linalg import svd

def normalized_cameras_from_ematrix(E, feature1, feature2):
	# Extract the camera matrices
	W = array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
	U, s, Vh = svd(E)
	P1 = eye(3, 4)  # P1 = [I | 0]
	P2s = array([   # four candidates: P2 = [R | t], R = U*W*Vh or U*W.T*Vh, t = +/-U[:, -1]
		hstack((dot(U, dot(W, Vh)), U[:, -1].reshape(3, 1))),
		hstack((dot(U, dot(W, Vh)), -U[:, -1].reshape(3, 1))),
		hstack((dot(U, dot(W.T, Vh)), U[:, -1].reshape(3, 1))),
		hstack((dot(U, dot(W.T, Vh)), -U[:, -1].reshape(3, 1)))])

	# Keep the candidate that gives positive depth in both cameras (cheirality test)
	xyz_test = homogeneous(array([triangulate(P1, P2i, feature1, feature2) for P2i in P2s]))
	xyz_P1 = array([dot(xyz_test[i], P1.T) for i in range(4)])
	xyz_P2 = array([dot(xyz_test[i], P2s[i].T) for i in range(4)])
	
	P2 = P2s[(xyz_P1[:, 2] > 0) & (xyz_P2[:, 2] > 0)][0]
	
	return P1, P2
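Example 9 assumes homogeneous and triangulate helpers from the surrounding project. A plausible sketch of the former, assuming triangulate returns one 3-D point per call so the stacked input is an N x 3 array:

from numpy import hstack, ones

def homogeneous(points):
    # Append a unit coordinate: an N x 3 array of points becomes N x 4.
    return hstack((points, ones((points.shape[0], 1))))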
	
Example 10
    def test_evaluate(self, action):
        scenario = Scenario_Generator(self.width, self.height, self.immobile_objs, self.mobile_objs, self.manipulatable_obj, self.target_obj, showRender=False)
        game_objects = scenario.getGameObjects()
        graph, zones = triangulate(game_objects, self.width, self.height)  
        end_position, shape = scenario.evaluate(action)
        radius = shape.radius
        circular_region_ball = end_position.buffer(radius)
        occupied_zones = []
        for i in xrange(len(zones)):
            if zones[i].intersects(circular_region_ball):
                occupied_zones.append(i)
        if len(occupied_zones) == 0:
            return len(zones) # set to the maximum length

        min_d = 9999
        for occupied_zone in occupied_zones:
            length = nx.shortest_path_length(graph, source=occupied_zone, target=self.target_zone)

            if length < min_d:
                min_d = length

        return min_d
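The zone test above appears to use Shapely geometry (buffer on a point, intersects between polygons). A minimal illustration of that idiom with a hypothetical square zone:

from shapely.geometry import Point, Polygon

zone = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
ball = Point(5, 2).buffer(1.2)   # disk of radius 1.2 around the ball's end position
print(zone.intersects(ball))     # True: the disk crosses the zone's right edge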
Example 11
        print(f'No saved detections found at \'{DETECTION_CACHE_PATH}\'. Running detection...')
        detections = detection.detect_traffic_signs(IMAGE_DIR_PATH, debug_output_path=DETECTION_DEBUG_PATH)
        if len(detections) > 0:
            util.pickle_save(DETECTION_CACHE_PATH, detections)
    else:
        print(f'Loaded detections from \'{DETECTION_CACHE_PATH}\'.\n')

    detection_count = sum(len(detections[x]) for x in detections)
    print(f'Detected {detection_count} traffic signs in total.')


    print_heading('Feature matching')

    matches = matching.match_detections(IMAGE_DIR_PATH, detections)

    match_count = len(matches)
    print(f'Found {match_count} matches.')
    # TODO Print how many match clusters there are


    print_heading('Point triangulation')

    gps_full_data = np.load(GPS_MEASUREMENTS_PATH)
    imu_full_data = None
    gt_estimator = GroundTruthEstimator(gps_full_data, imu_full_data, print_kf_progress=True)

    landmark_list = triangulation.triangulate(COLMAP_EXECUTABLE_PATH, IMAGE_DIR_PATH, detections, matches, gt_estimator, COLMAP_WORKING_DIR_PATH)

    # Save landmark map
    util.pickle_save(MAP_OUTPUT_PATH, landmark_list)
Example 12
def triangle_function_one(event):
    d = input.ply2dcel(r'venv\example\one-face.txt')
    triangulation.triangulate(d)
    draw_function(d, 'See full example')
Example 13
def triangle_function_full(event):
    triangulation.triangulate(d)
    draw_function(d, 'See one-face example')
Example 14

import sys
import pdb

import numpy as np
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtGui

import openImg         # project-local helpers (assumed importable as top-level modules)
import features
import triangulation


for m in range(1):

    # get features
    imgL, imgR = openImg.getImgs(leftpaths[m], rightpaths[m])
    kp1, desc1 = features.getFeatures(imgL, [2, 2])
    kp2, desc2 = features.getFeatures(imgR, [2, 2])

    leftCorres, rightCorres, leftCorresidx, rightCorresidx = features.getCorres(desc1, desc2, kp1, kp2)

    x3dSave, perrFin, idxSave = triangulation.triangulate(leftCorres, rightCorres)

    if m==0:
        x3d = x3dSave
    else:
        x3d = np.concatenate((x3d, x3dSave), axis=1)

    print(m)
import pdb; pdb.set_trace()

x3d = x3d.transpose()
pos = x3d.getA()
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
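The script ends right after creating the GL view. A plausible continuation, not part of the original, that renders the triangulated points; it assumes pos is an N x 3 array:

sp = gl.GLScatterPlotItem(pos=pos, size=2.0)  # scatter plot of the 3-D point cloud
w.addItem(sp)
app.exec_()  # start the Qt event loop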