Example #1
    def contains_point(self, point):
        coords = self.coordinates
        intersections = []

        # Ray casting: cast a horizontal, zero-slope ray to the right of the
        # point and count how many polygon edges it crosses.
        ray = Line(m=0, b=point[1], x_min=point[0])  # horizontal ray to the right of point

        for i in range(len(coords) - 1):
            segment = LineSegment([coords[i], coords[i + 1]])
            if ray.intersects_line(segment):
                intersections.append(segment)

        # an odd number of crossings means the point lies inside the polygon
        return len(intersections) % 2 != 0
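Line and LineSegment here are the project's own geometry helpers and are not shown. As a rough, self-contained sketch of the same ray-casting test in plain Python (every name below is invented for the illustration):

def point_in_polygon(point, polygon):
    # Cast a horizontal ray to the right of the point and count how many
    # polygon edges it crosses; an odd count means the point is inside.
    px, py = point
    crossings = 0
    n = len(polygon)
    for i in range(n):
        (x1, y1), (x2, y2) = polygon[i], polygon[(i + 1) % n]
        if (y1 > py) != (y2 > py):  # edge straddles the ray's y level
            x_cross = x1 + (py - y1) * (x2 - x1) / (y2 - y1)
            if x_cross > px:        # crossing lies to the right of the point
                crossings += 1
    return crossings % 2 == 1

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
print(point_in_polygon((0.5, 0.5), square))  # True
print(point_in_polygon((1.5, 0.5), square))  # False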
Example #2
def pipeline(img):
    # resize every frame to one fixed size so the code does not have to handle varying video sizes
    img = cv2.resize(img, (WIDTH, HEIGHT))
    # remove lens distortion using the camera matrix
    img = cam.undistort_image(img, mtx, dist)
    # warp the image from the src to the dst points (defined in config.py) to get a bird's-eye view
    warped, inverse_perspective = warp_perspective(img, src, dst)
    # segment lane lines in the warped image via color and Sobel thresholds
    output = segment_lanes(warped)

    left_line = Line()   # left lane line object
    right_line = Line()  # right lane line object

    # sliding-window search to locate the left and right lanes and fit their positions
    left_line, right_line, window_search_img = window_search(output, left_line, right_line, 4)
    # draw the detected lanes back onto the undistorted image
    line_image = draw_lines_on_undistorted(img, inverse_perspective, left_line, right_line)

    # mean radius of curvature of the two lane lines
    mean_curvature_meter = np.mean([left_line.curvature_meter, right_line.curvature_meter])
    cv2.putText(line_image,
                'Curvature radius: {:.02f}m'.format(mean_curvature_meter),
                (100, 60), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2,
                cv2.LINE_AA)
    # compute the vehicle's offset from the lane center
    center_offset = offset_from_center(line_image, left_line, right_line)
    cv2.putText(line_image,
                'Offset from Center: {:.02f}m'.format(center_offset),
                (100, 120), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2,
                cv2.LINE_AA)
    cv2.imshow("Window search", window_search_img)
    return line_image
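warp_perspective, segment_lanes, window_search and the other helpers come from the project's own modules and config.py, which are not part of this snippet. For orientation, here is a minimal sketch of what the perspective-warp step usually looks like with plain OpenCV; the function name and the src/dst corner values are placeholders, not the project's actual configuration:

import cv2
import numpy as np

def warp_perspective_sketch(img, src, dst):
    # src and dst are four corresponding corners, float32 arrays of shape (4, 2)
    h, w = img.shape[:2]
    M = cv2.getPerspectiveTransform(src, dst)       # road view -> bird's-eye view
    Minv = cv2.getPerspectiveTransform(dst, src)    # inverse, used to project lanes back
    warped = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)
    return warped, Minv

# placeholder trapezoid on the road mapped to a rectangle in the warped image
src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])
dst = np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]])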
Example #3
    def go_to_coord(self, new):
        line_start = None
        while (self.x, self.y) != new:
            #point toward the target
            yield from self.turn_and_back_away(
                math.atan((new[1] - self.y) / (new[0] - self.x)))

            #go to target (if possible)
            distance = math.sqrt((self.y - new[1])**2 + (self.x - new[0])**2)
            curr = time.time()
            l, r, ok = yield self.move_forward(distance)
            self.record_motion(curr, time.time(), distance, ok)

            #didn't work?!
            if not ok:
                #record where we're at and turn the smallest reasonable amount
                line_start = self.location()
                angle = math.atan((l - r) / SENSOR_SEPARATION)
                yield from self.turn_and_back_away(angle)
                # recall whether the obstacle is on the left or right
                o_left = l > r

                #move a bit forward
                curr = time.time()
                l, r, ok = yield self.move_forward(0.1)
                self.record_motion(curr, time.time(), 0.1, ok)

                #we can't move forward - we have a corner case
                if not ok:
                    #turn away from the known wall, along the new one
                    angle = math.atan((l - r) / SENSOR_SEPARATION)
                    if o_left == l < r:
                        angle -= math.pi
                    yield from self.turn_and_back_away(angle)

                    #move along the second wall for a bit.
                    curr = time.time()
                    l, r, ok = yield self.move_forward(0.1)
                    self.record_motion(curr, time.time(), 0.1, ok)
                    #if you're at a third corner, give up
                    if not ok:
                        raise Exception("Andrea, why'd you put it in a box?")
            elif line_start is not None:
                Line(line_start, self.location()).clean_firebase()
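One detail worth noting: math.atan((new[1] - self.y) / (new[0] - self.x)) raises ZeroDivisionError when the target shares the robot's x coordinate, and it cannot tell targets behind the robot from targets in front; math.atan2(dy, dx) avoids both issues. A tiny self-contained sketch of the "heading plus straight-line distance" computation the loop performs (the function name is made up for the illustration):

import math

def heading_and_distance(current, target):
    # absolute heading (radians) and Euclidean distance from current to target
    dx, dy = target[0] - current[0], target[1] - current[1]
    return math.atan2(dy, dx), math.hypot(dx, dy)

angle, dist = heading_and_distance((0.0, 0.0), (1.0, 1.0))
print(math.degrees(angle), dist)  # 45.0  ~1.414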
Example #4
    def __init__(self, startPoint, endPoint, headSize):
        Line.__init__(self, startPoint, endPoint)
        self.headSize = headSize
Example #5
def soccer():
    global pub_command, pub_stop, TARGET_OFFSET, file
    rospy.init_node("soccer_node")
    rospy.Subscriber("/emergency_stop", Empty, closer)
    location.init()
    rospy.on_shutdown(cleanUp)

    file = open("soccer.log", "w")

    print "Connecting..."
    while pub_command.get_num_connections() == 0:
        pass

    x = 0
    y = 0
    theta = 0
    true_angle = 0

    file.write("{} {}\n".format(x, y))
    print["start", x, y]

    # scan from current position for ball and goal
    ball_angle_1, goal_angle_1 = scan(pub_command)

    rospy.sleep(1)

    # turn to face 45 degrees past ball away from goal
    if ball_angle_1 < 0:
        ball_angle_1 += 360
    if goal_angle_1 < 0:
        goal_angle_1 += 360

    direction = 1

    delta = goal_angle_1 - ball_angle_1
    if delta < -180 or (delta > 0 and delta < 180):
        direction = -1

    target_angle = ball_angle_1 + direction * 45

    print "--- SCAN 1 RESULTS ---"
    print "x: {} y: {},  goal @ {}, ball @ {}".format(x, y, goal_angle_1,
                                                      ball_angle_1)
    goal_vector_1 = Line(x=x, y=y, theta=goal_angle_1, useDegrees=True)
    ball_vector_1 = Line(x=x, y=y, theta=ball_angle_1, useDegrees=True)

    _, _, theta = location.currentLocation

    if theta < 0:
        theta += 360

    move_angle = target_angle - theta

    execute(0, move_angle, 0.6)
    rospy.sleep(1)

    # move to new location
    move_dist = 0.3
    execute(move_dist, 0, 0.5)

    print "--- REALIGNING FOR SCAN 2 ---"
    print "odom @ {}, angle from scan 1 start @ {}".format(
        location.currentLocation[2], target_angle)

    true_angle = target_angle

    scan_2_vector = Line(x=0, y=0, theta=true_angle, useDegrees=True)
    x, y = scan_2_vector.findPointFrom(0, 0, move_dist)

    file.write("{} {}\n".format(x, y))

    print["scan_2", x, y]

    # get new position
    _, _, theta = location.currentLocation

    theta += true_angle

    # scan from current position for ball and goal
    ball_angle_2, goal_angle_2 = scan(pub_command)
    ball_angle_2 += true_angle
    goal_angle_2 += true_angle

    goal_vector_2 = Line(x=x, y=y, theta=goal_angle_2, useDegrees=True)
    ball_vector_2 = Line(x=x, y=y, theta=ball_angle_2, useDegrees=True)

    # calculate intersections
    goal_x, goal_y = goal_vector_1.intersect(goal_vector_2)
    ball_x, ball_y = ball_vector_1.intersect(ball_vector_2)

    print "--- SCAN 2 RESULTS ---"
    print "x: {}, y: {},  goal @ {}, ball @ {}".format(x, y, goal_angle_2,
                                                       ball_angle_2)
    print "--- GOAL LOCATION ---"
    print "x: {}, y: {}".format(goal_x, goal_y)
    print "--- BALL LOCATION ---"
    print "x: {}, y: {}".format(ball_x, ball_y)

    file.write("{} {}\n".format(ball_x, ball_y))
    file.write("{} {}\n".format(goal_x, goal_y))

    print["ball", ball_x, ball_y]
    print["goal", goal_x, goal_y]

    # find line from ball to goal
    approach_vector = Line(x1=goal_x, y1=goal_y, x2=ball_x, y2=ball_y)

    TARGET_OFFSET = 0.5
    # select point on approach vector for robot to start
    target_x, target_y = approach_vector.findPointFrom(ball_x, ball_y,
                                                       TARGET_OFFSET)
    alt_x = ball_x * 2 - target_x
    alt_y = ball_y * 2 - target_y

    file.write("{} {}\n".format(target_x, target_y))
    print["kick", target_x, target_y]

    file.write("{} {}\n".format(alt_x, alt_y))
    print["kick_alt", alt_x, alt_y]

    d_ball_target = distance(ball_x, ball_y, target_x, target_y)
    d_ball_goal = distance(ball_x, ball_y, goal_x, goal_y)
    d_goal_target = distance(goal_x, goal_y, target_x, target_y)
    d_goal_alt = distance(goal_x, goal_y, alt_x, alt_y)

    if d_goal_alt > d_goal_target:
        print "--- ALT SELECTED ---"
        target_x = alt_x
        target_y = alt_y
    else:
        print "--- FIRST TARGET SELECTED ---"

    print "x: {}, y: {}".format(target_x, target_y)

    # find way to move from current position to target pt
    dist = distance(x, y, target_x, target_y)
    movement_vector = Line(x=x, y=y, x2=target_x, y2=target_y)
    angle = movement_vector.angle(useDegrees=True)

    if angle < 0:
        angle += 360

    # check if the angle is correct or opposite of correct
    if ball_angle_2 > goal_angle_2:
        print "--- USING ALTERNATE ANGLE ---"
        if angle < ball_angle_2:
            angle += 180
        if angle > ball_angle_2 + 180:
            angle -= 180

    print "--- APPROACHING SHOT LOCATION ---"
    print "angle to shot location @ {}".format(angle)

    _, _, theta = location.currentLocation
    theta += true_angle
    execute(0, angle - theta, 0.6, reset=True)
    execute(dist, 0, 0.6, reset=True)

    print "--- REALIGNING FOR SHOT ---"
    print "odom @ {}, angle from scan 2 start @ {}, angle from scan 1 start @ {}".format(
        location.currentLocation[2], angle, angle + true_angle)

    # scan for ball (and goal)
    ball_angle_fin, goal_angle_fin = scan(pub_command)

    # create background thread to stop robot once ball is hit
    from threading import Thread

    stop_thread = Thread(target=stopper)
    stop_thread.start()

    # make the shot
    execute(TARGET_OFFSET + 2, 0, 1, reset=True)

    stop_thread.join()
    file.close()
    file = None
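The script locates the ball and the goal by scanning from two vantage points and intersecting the two bearing lines (goal_vector_1.intersect(goal_vector_2) and so on). A self-contained sketch of that two-bearing triangulation, with all names invented for the illustration rather than taken from the project's Line class:

import math

def intersect_bearings(p1, theta1_deg, p2, theta2_deg):
    # Intersect two rays, each given as an origin point and a bearing in degrees.
    # Returns None when the bearings are (nearly) parallel.
    t1, t2 = math.radians(theta1_deg), math.radians(theta2_deg)
    d1 = (math.cos(t1), math.sin(t1))
    d2 = (math.cos(t2), math.sin(t2))
    denom = d1[0] * d2[1] - d1[1] * d2[0]
    if abs(denom) < 1e-9:
        return None
    # solve p1 + s*d1 == p2 + t*d2 for s
    s = ((p2[0] - p1[0]) * d2[1] - (p2[1] - p1[1]) * d2[0]) / denom
    return (p1[0] + s * d1[0], p1[1] + s * d1[1])

# ball seen at 45 degrees from (0, 0) and at 90 degrees from (1, 0)  ->  ball at (1, 1)
print(intersect_bearings((0, 0), 45, (1, 0), 90))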
Example #6
    def __init__(self, startPoint, endPoint, headSize):
        Line.__init__(self, startPoint, endPoint)
        self.headSize = headSize
Example #7
screen = pg.display.set_mode(settings.win_size,
                             flags=pg.DOUBLEBUF | pg.NOFRAME)
surf = pg.Surface(settings.win_size)
surf.set_alpha(200)

settings.screen = screen
settings.main_surf = surf

clock = pg.time.Clock()

font = pg.font.SysFont('Times New Roman', 16)

lines = []
for k in range(settings.lines_num):
    line = Line(settings)
    line.draw()
    lines.append(line)

while True:
    surf.fill(settings.win_bg)

    clock.tick(settings.win_fps)

    mouse_rad_rect = pg.draw.circle(settings.main_surf, (255, 0, 0),
                                    settings.mouse_pos, settings.mouse_rad, 1)

    for line_ind in settings.mouse_rect.collidelistall([k.rect for k in lines]):
        line = lines[line_ind]
        if settings.mouse_pos[1] <= line.y:
Example #8
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("configs/camera_params.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# project video
project_video = 'project_video.mp4'

# project_video = 'challenge_video.mp4'

# video output path
video_output_path = 'output_images/project_video.mp4'

# Declare lines
right_line = Line()
left_line = Line()

# create a video capture
video_capture = cv2.VideoCapture(project_video)

# define a video writer
video_writer = cv2.VideoWriter(video_output_path,
                               fourcc=cv2.VideoWriter_fourcc(*'MP4V'),
                               fps=30,
                               frameSize=(1280, 720))

## Parameter settings
# Thresholding params
color_thresh = (170, 255)
sobel_thresh = (50, 100)
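color_thresh and sobel_thresh feed the binary-thresholding stage of the lane pipeline. As a rough sketch of how such parameters are commonly combined (HLS S-channel plus a scaled Sobel-x gradient); the project's actual thresholding code may differ:

import cv2
import numpy as np

def threshold_binary(img, color_thresh=(170, 255), sobel_thresh=(50, 100)):
    # binary mask: a pixel is set when either the S channel or the scaled
    # Sobel-x gradient magnitude falls inside its threshold range
    s_channel = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sobel_x = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))   # gradient along x
    scaled = np.uint8(255 * sobel_x / np.max(sobel_x))

    binary = np.zeros_like(s_channel)
    binary[(s_channel >= color_thresh[0]) & (s_channel <= color_thresh[1])] = 1
    binary[(scaled >= sobel_thresh[0]) & (scaled <= sobel_thresh[1])] = 1
    return binary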
Example #9
    text1 = 'Radius of Curvature: %d(m)'
    text2 = 'Offset from center: %.2f(m)'
    text3 = 'Radius of Curvature: Inf (m)'

    if mean_curv < 3000:
        cv2.putText(result, text1 % (int(mean_curv)),
                    (60, 100), font, 1.0, (255, 255, 255), thickness=2)
    else:
        cv2.putText(result, text3,
                    (60, 100), font, 1.0, (255, 255, 255), thickness=2)
    cv2.putText(result, text2 % (-offset),
                (60, 130), font, 1.0, (255, 255, 255), thickness=2)

    return result


c = Camera(display=False)

# Video pipeline
frame_num = 15   # number of most recent frames used for a good detection
left = Line(False, frame_num)
right = Line(True, frame_num)

video_output = 'result.mp4'
input_path = './test_videos/project_video.mp4'

clip1 = VideoFileClip(input_path).subclip(0, 30)

final_clip = clip1.fl_image(lambda image: process_image(image, c, left, right))
final_clip.write_videofile(video_output, audio=False)
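The "Radius of Curvature" overlay drawn by process_image comes from the fitted lane polynomials. For a second-order fit x = A*y^2 + B*y + C, the radius at a given y is (1 + (2*A*y + B)^2)^(3/2) / |2*A|; a minimal sketch of that calculation in meters, where the meters-per-pixel factors are typical placeholder values, not the project's:

def curvature_radius_m(fit_px, y_eval_px, ym_per_px=30.0 / 720, xm_per_px=3.7 / 700):
    # fit_px holds (A, B, C) for a pixel-space fit x = A*y^2 + B*y + C
    A, B, _ = fit_px
    # rescale the coefficients from pixel space to meters
    A_m = A * xm_per_px / (ym_per_px ** 2)
    B_m = B * xm_per_px / ym_per_px
    y_m = y_eval_px * ym_per_px
    return (1 + (2 * A_m * y_m + B_m) ** 2) ** 1.5 / abs(2 * A_m)

print(curvature_radius_m((1e-4, -0.1, 400), 719))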
Example #10
    f = open('data/distortion_correction.pckl', 'rb')
    mtx = pickle.load(f)
    dist = pickle.load(f)
    M = pickle.load(f)
    Minv = pickle.load(f)
else:
    mtx, dist = calibrate_camera()
    M, Minv = compute_perspective_distortion()
    f = open('data/distortion_correction.pckl', 'wb')
    pickle.dump(mtx, f)
    pickle.dump(dist, f)
    pickle.dump(M, f)
    pickle.dump(Minv, f)

# Line objects to keep track of the detected data
leftLine = Line(niterations=3, height=720, width=1280, M=M)
rightLine = Line(niterations=3, height=720, width=1280, M=M)

cap = cv2.VideoCapture(video_file)
# Variables for inter-prediction filter. Initialised to None since for the very
# first frame there is no temporal information available.
inter_reference = None
plotyy = None
cnt = 0

while cap.isOpened():
    ret, img = cap.read()

    if img is None:
        break
    def side_right(self):
        return Line(vec(self.right, self.top), vec(self.right, self.bottom))

    def side_left(self):
        return Line(vec(self.left, self.top), vec(self.left, self.bottom))

    def side_bottom(self):
        return Line(vec(self.left, self.bottom), vec(self.right, self.bottom))

    def side_top(self):
        return Line(vec(self.left, self.top), vec(self.right, self.top))
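These side_* helpers assume a two-argument Line(start, end) constructor and a vec(x, y) point type from their own project. A minimal stand-in for both, just to make the pattern concrete (the definitions below are assumptions, not the project's actual classes):

from collections import namedtuple

vec = namedtuple('vec', ['x', 'y'])   # stand-in 2D point

class Line:
    # stand-in segment type that only stores its two endpoints
    def __init__(self, start, end):
        self.start, self.end = start, end

    def __repr__(self):
        return 'Line({} -> {})'.format(self.start, self.end)

class Rect:
    def __init__(self, left, top, right, bottom):
        self.left, self.top, self.right, self.bottom = left, top, right, bottom

    def side_top(self):
        return Line(vec(self.left, self.top), vec(self.right, self.top))

print(Rect(0, 0, 4, 3).side_top())  # Line(vec(x=0, y=0) -> vec(x=4, y=0))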