Example #1
def main():
    while True:
        try:
            # List of channel IDs
            ids = []
            liked = getLiked()
            channels = subscriptions()
            # Check if videos have been liked during these 10 minutes
            if len(liked) != 0:
                # Dump the whole list at once so liked.json stays valid JSON
                with open("liked.json", "w") as data:
                    json.dump(liked, data, indent=2)

            # For every subscription
            for channel in channels:
                channel_id = channel.get("id")
                ids.append(channel_id)  # Fill the list with the IDs
            # Print all the channels with their relative id
            for channel in channels:
                name = channel.get("name")
                channel_id = channel.get("id")
                print("{}:{}".format(name, channel_id))
            # Start the tracker on the current subscriptions
            tracker(ids)
            print("Sleeping 10 minutes...")
            print("\n", end='')

            # Sleep 10 minutes and then repeat the control
            sleep(600)
        except KeyboardInterrupt:
            break
Example #2
def test_pipeline():
    calibration_pickle = pickle.load(open("./calibration_pickle.p", "rb"))
    mtx = calibration_pickle["mtx"]
    dist = calibration_pickle["dist"]
    M, Minv, xm_per_pix, ym_per_pix = warp.perspective_matrix_and_inverse()
    img_paths = glob.glob('./test_images/*.jpg')
    lane_centers = tracker.tracker(My_ym=ym_per_pix,
                                   My_xm=xm_per_pix,
                                   Mysmooth_factor=1,
                                   debug=True)
    for fname in img_paths:
        print('working on ', fname)
        img = mpimg.imread(fname)
        result, binary, warped, undistorted = pipeline(img,
                                                       mtx,
                                                       dist,
                                                       M,
                                                       Minv,
                                                       lane_centers,
                                                       debug=True,
                                                       raw_fname=fname)
        saved_name = "./output_images/lane_center_" + os.path.basename(fname)
        mpimg.imsave(saved_name, result)
    # End of for fname
    # illustrate the perspective transform.
    utils.display_side_by_side(undistorted,
                               result,
                               caption="lane_center_test",
                               cmap='gray',
                               save_path="./output_images")
Example #3
 def __init__(self, folder, param_folder, images='local', thresh=0.5):
     # the images parameter says where to request images from ('local' or 'airsim')
     print('Reading Folder Contents...')
     self.folder = folder
     self.param_folder = param_folder
     # reading details about Pareto-optimal SVM weights
     print('Reading Details about Pareto-optimal parameters...')
     with open(param_folder + '/opres.pkl', 'rb') as f:
         opres = pickle.load(f)
     #initializing mdp and tracker
     print('Initializing MDP and tracker...')
     self.mdp_ = mdp_trainer(opres)
     self.mdp_.train()
     self.tracker_ = tracker()
     # initializing relevant SVM weights and edgeboxes (both are lumped
     # inside hog_predictor)
     # we just have to give it proper filename
     print('Initializing edgeboxes, hog-predictors and SVM weights...')
     self.hog_predictors_ = {}
     for k in opres.keys():
         self.hog_predictors_[k] = hog_predictor(
             os.path.join(param_folder, k + '.pkl'))
     # initialize the image_server
     print('Initializing Image Server...')
     self.im_server = image_server(images, folder=folder)
     # set the threshold for retriggering the MDPs
     self.thresh = thresh
Example #4
def main():
    calibration_pickle = pickle.load(open("./calibration_pickle.p", "rb"))
    mtx = calibration_pickle["mtx"]
    dist = calibration_pickle["dist"]
    M, Minv, xm_per_pix, ym_per_pix = warp.perspective_matrix_and_inverse()
    lane_centers = tracker.tracker(My_ym=ym_per_pix,
                                   My_xm=xm_per_pix,
                                   Mysmooth_factor=5)

    def mark_lane_frame(img):
        result, binary, warped, undistorted = pipeline.pipeline(
            img, mtx, dist, M, Minv, lane_centers)
        return result

    video_paths = glob.glob("./*.mp4")

    for v_p in video_paths:
        clip = VideoFileClip(v_p)
        output_clip = clip.fl_image(mark_lane_frame)
        output_clip.write_videofile("./output_images/marked_" +
                                    os.path.basename(v_p),
                                    audio=False)
    # End of for v_p

    return 0
Example #5
    def __init__(self):
    
        self.idx = 0
        self.oldDrawnIdx = 0
        self.cap = None
        self.FPS = 0
        self.FILENAME = ""
        self.SEARCHTERM = b''
        self.locs = []

        self.vlc = VLC()
        #time.sleep(1)

        self.tracker = tracker()

        self.GUIapp = Tk()
        self.GUIapp.geometry("640x480")
        self.GUIapp.title("VLC Search by attributes")
        self.currPlayingStr = StringVar()
        label = Label(self.GUIapp, textvariable=self.currPlayingStr, relief=RAISED)
        label.pack()

        self.textBox = Text(self.GUIapp, height=2, width=10)
        self.textBox.pack()

        Button(self.GUIapp, text="SEARCH", command=self.searchInit).pack()

        self.GUIapp.after(500, self.loopfunction)
        self.GUIapp.mainloop()
Example #6
 def test_sameTimeStamp(self):
     utrackr2 = tracker('192.168.0.18')
     utrackr4 = tracker('192.168.0.19')
     utrackr3 = tracker('192.168.0.31')
     utrackr = tracker('192.168.0.21')
     rpiOneTimestamp = utrackr.timesync.getTimeStamp()
     rpiTwoTimeStamp = utrackr2.timesync.getTimeStamp()
     rpiThreeTimeStamp = utrackr3.timesync.getTimeStamp()
     rpiFourTimeStamp = utrackr4.timesync.getTimeStamp()
     print "RPi Timestamp 1: " + rpiOneTimestamp
     print "RPi Timestamp 2: " + rpiTwoTimeStamp
     print "RPi Timestamp 3: " + rpiThreeTimeStamp
     print "RPi Timestamp 4: " + rpiFourTimeStamp
     self.assertEquals(rpiOneTimestamp, rpiTwoTimeStamp)
     self.assertEquals(rpiTwoTimeStamp, rpiThreeTimeStamp)
     self.assertEquals(rpiThreeTimeStamp, rpiFourTimeStamp)
Example #7
 def compute_tracker(self):
     truck_PO = truck_path(self.truck)
     new_tracker = tracker(self.db_path, self.truck, truck_PO,
                           float(self.argv[0]), float(self.argv[1]),
                           float(self.argv[2]), float(self.argv[3]),
                           float(self.argv[4]), float(self.argv[5]))
     new_tracker.compute_tracker()
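Example #8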
    def __init__(self):
        self.bridge_object = CvBridge()

        # Change the subscriber to get a different feed
        self.image_sub = rospy.Subscriber("/Videocam", Image,
                                          self.camera_callback)

        # attributes used for frame counting
        self.frame_count = 0
        self.start = time.time()
        self.Tracker = tracker(framesBeforeErasure)
Example #9
    def _create_trackers(self):
        self.trackers = []
        #set up the q trackers for each condition
        for i in range(self.n_states):

            state_tracker = tracker.tracker(self.net,
                                            self.prototypes,
                                            self.n_actions,
                                            n_per_state=self.n_per_state,
                                            l_epoch=self.l_epoch,
                                            epsilon=self.epsilon,
                                            recordWeights=self.recordWeights,
                                            recordSpikes=self.recordSpikes)

            self.trackers.append(state_tracker)
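Example #10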
def test(_):
  from tracker import tracker

  # used for generating a random bounding box
  def gen_bbox():
    bbox = np.random.randint(0,99,size=4)
    bbox[0], bbox[2] = min(bbox[0], bbox[2]), max(bbox[0], bbox[2])
    bbox[1], bbox[3] = min(bbox[1], bbox[3]), max(bbox[1], bbox[3])
    return bbox
  
  bbox_list = []
  label_list = []
  tracker_list = []
  for i in range(flags.FLAGS.num):
    bbox = gen_bbox()
    label = i
    tracker_list.append(tracker(tid=i, bbox=bbox, depth=0, est_dict={'label': i}))
    bbox_list.append(bbox)
    label_list.append(label)
  
  logging.info('test perfect matching')
  m, ub, ut = associate(bbox_list, label_list, tracker_list)
  logging.info('m={}, ub={}, ut={}'.format(m, ub, ut))
  
  logging.info('test empty bbox list')
  m, ub, ut = associate((), (), tracker_list)
  logging.info('m={}, ub={}, ut={}'.format(m, ub, ut))
  
  logging.info('test empty tracker list')
  m, ub, ut = associate(bbox_list, label_list, ())
  logging.info('m={}, ub={}, ut={}'.format(m, ub, ut))
  
  bbox_list = []
  for i in range(flags.FLAGS.num):
    bbox_list.append(gen_bbox())
  
  logging.info('test random matching')
  m, ub, ut = associate(bbox_list, label_list, tracker_list)
  logging.info('m={}, ub={}, ut={}'.format(m, ub, ut))
Example #11
 def __init__(self, ksize=3, debug=False):
     data = pickle.load(open('./calibration_pickle.p', 'rb'))
     self.mtx = data['mtx']
     self.dist = data['dist']
     self.kernel = ksize
     self.debug = debug
     self.bot_width = 0.85  # percent of image width for the trapezoid bottom edge
     self.mid_width = 0.12  # percent of image width for the trapezoid top edge
     self.height_pct = 0.62  # percent of image height where the trapezoid top sits
     self.bottom_trim = 0.935  # percent from top to bottom to avoid car hood
     self.threshold = Threshold(3)
     self.first_image = True
     self.window_width = 25
     self.window_height = 80
     self.last_right_lane_average = -1000
     self.last_right_lane = None
     self.curve_centers = tracker(Mywindow_width=self.window_width,
                                  Mywindow_height=self.window_height,
                                  Mymargin=25,
                                  My_ym=10 / 720,
                                  My_xm=4 / 384,
                                  Mysmooth_factor=15)
Example #12
def findLanes(warped, My_ym, My_xm, window_width=25, window_height=80):
    #setup the overall class to do all the tracking
    curve_centers = tracker(Mywindow_width=window_width,
                            Mywindow_height=window_height,
                            Mymargin=25,
                            My_ym=My_ym,
                            My_xm=My_xm,
                            Mysmooth_factor=15)

    window_centroids = curve_centers.find_window_centroids(warped)

    #points used to find the left and right lanes
    rightx = []
    leftx = []

    #go through each level and draw the windows
    for level in range(0, len(window_centroids)):
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])

    leftx = np.array(leftx, dtype=np.int32)  # np.int was removed in modern NumPy
    rightx = np.array(rightx, dtype=np.int32)
    midx = (leftx + rightx) / 2
    return leftx, rightx, midx
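None of these snippets include the tracker class they construct. The sketch below is a reconstruction consistent with the keyword arguments used above (Mywindow_width, Mywindow_height, Mymargin, My_ym, My_xm, Mysmooth_factor) and with how find_window_centroids is called; it is illustrative only, not the class any of these projects actually imports.

import numpy as np

class tracker():
    def __init__(self, Mywindow_width, Mywindow_height, Mymargin,
                 My_ym=1, My_xm=1, Mysmooth_factor=15):
        self.recent_centers = []               # (left, right) centroids from past frames
        self.window_width = Mywindow_width     # pixel width of the convolution window
        self.window_height = Mywindow_height   # pixel height of each search band
        self.margin = Mymargin                 # how far the window may slide per level
        self.ym_per_pix = My_ym                # meters per pixel, vertical
        self.xm_per_pix = My_xm                # meters per pixel, horizontal
        self.smooth_factor = Mysmooth_factor

    def find_window_centroids(self, warped):
        window_centroids = []
        window = np.ones(self.window_width)    # convolution kernel

        # Seed the search with column sums over the bottom quarter of the image
        l_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, :int(warped.shape[1] / 2)], axis=0)
        l_center = np.argmax(np.convolve(window, l_sum)) - self.window_width / 2
        r_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, int(warped.shape[1] / 2):], axis=0)
        r_center = (np.argmax(np.convolve(window, r_sum)) - self.window_width / 2
                    + int(warped.shape[1] / 2))
        window_centroids.append((l_center, r_center))

        # Walk up the image one band at a time, re-centering on the strongest response
        for level in range(1, int(warped.shape[0] / self.window_height)):
            image_layer = np.sum(
                warped[int(warped.shape[0] - (level + 1) * self.window_height):
                       int(warped.shape[0] - level * self.window_height), :], axis=0)
            conv_signal = np.convolve(window, image_layer)
            offset = self.window_width / 2
            l_min = int(max(l_center + offset - self.margin, 0))
            l_max = int(min(l_center + offset + self.margin, warped.shape[1]))
            l_center = np.argmax(conv_signal[l_min:l_max]) + l_min - offset
            r_min = int(max(r_center + offset - self.margin, 0))
            r_max = int(min(r_center + offset + self.margin, warped.shape[1]))
            r_center = np.argmax(conv_signal[r_min:r_max]) + r_min - offset
            window_centroids.append((l_center, r_center))

        self.recent_centers.append(window_centroids)
        # Average over the last smooth_factor frames to damp frame-to-frame jitter
        return np.average(self.recent_centers[-self.smooth_factor:], axis=0)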
Example #13
    def calc_curves(self, image):
        # Undistort the incoming image using the stored camera matrix and
        # distortion coefficients, then run the lane-finding steps on the
        # corrected image
        image = cv2.undistort(image, self.mtx, self.dist, None, self.mtx)

        (warped, M, Minv, src, dst) = self.warp_image(image)
        img_size = (image.shape[1], image.shape[0])

        window_width = 25
        window_height = 80

        # set up the overall class to do all the tracking
        curve_centers = tracker(Mywindow_width=window_width,
                                Mywindow_height=window_height,
                                Mymargin=25,
                                My_ym=10 / 720,
                                My_xm=4 / 384,
                                Mysmooth_factor=15)
        window_centroids = curve_centers.find_window_centroids(warped)

        # points used to draw all the left and right windows
        l_points = np.zeros_like(warped)
        r_points = np.zeros_like(warped)

        # points used to find the left and right lanes
        rightx = []
        leftx = []

        # Go through each level and draw the windows
        for level in range(0, len(window_centroids)):
            # window_mask is a function to draw window areas
            l_mask = self.window_mask(window_width, window_height, warped,
                                      window_centroids[level][0], level)
            r_mask = self.window_mask(window_width, window_height, warped,
                                      window_centroids[level][1], level)
            # add center value found in frame to the list of lane point per left, right
            leftx.append(window_centroids[level][0])
            rightx.append(window_centroids[level][1])

            # Add graphic points from window mask here to total pixels found
            l_points[(l_points == 255) | ((l_mask == 1))] = 255
            r_points[(r_points == 255) | ((r_mask == 1))] = 255

        # Draw the results
        template = np.array(
            r_points + l_points,
            np.uint8)  # add both left and right window pixels together
        zero_channel = np.zeros_like(template)  # create a zero color channel
        template = np.array(cv2.merge((zero_channel, template, zero_channel)),
                            np.uint8)  # make window pixels green
        warpage = np.array(
            cv2.merge((warped, warped, warped)),
            np.uint8)  # making the original road pixels 3 color channels
        result = cv2.addWeighted(
            warpage, 1, template, 0.5,
            0.0)  # overlay the original road image with window results

        # Fit the lane boundaries to the left, right center positions found
        # yvals is height
        yvals = range(0, warped.shape[0])

        # fit to box centers
        res_yvals = np.arange(warped.shape[0] - (window_height / 2), 0,
                              -window_height)

        # polynomial fit to a second order polynomial
        left_fit = np.polyfit(res_yvals, leftx, 2)
        left_fitx = left_fit[0] * yvals * yvals + left_fit[
            1] * yvals + left_fit[2]
        left_fitx = np.array(left_fitx, np.int32)

        # polynomial fit to a second order polynomial
        right_fit = np.polyfit(res_yvals, rightx, 2)
        right_fitx = right_fit[0] * yvals * yvals + right_fit[
            1] * yvals + right_fit[2]
        right_fitx = np.array(right_fitx, np.int32)

        left_lane = np.array(
            list(
                zip(
                    np.concatenate((left_fitx - window_width / 2,
                                    left_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
        right_lane = np.array(
            list(
                zip(
                    np.concatenate((right_fitx - window_width / 2,
                                    right_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
        inner_lane = np.array(
            list(
                zip(
                    np.concatenate((left_fitx + window_width / 2,
                                    right_fitx[::-1] - window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
        middle_marker = np.array(
            list(
                zip(
                    np.concatenate((right_fitx + window_width / 2,
                                    right_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)

        # draw the lane polygons into road; road_bkg holds white fills used
        # to clear those regions in the original image before overlaying
        road = np.zeros_like(image)
        road_bkg = np.zeros_like(image)
        cv2.fillPoly(road, [left_lane], color=[255, 0, 0])
        cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
        cv2.fillPoly(road, [inner_lane], color=[0, 255, 0])
        cv2.fillPoly(road_bkg, [left_lane], color=[255, 255, 255])
        cv2.fillPoly(road_bkg, [right_lane], color=[255, 255, 255])

        # Overlay lines onto the road surface
        road_warped = cv2.warpPerspective(road,
                                          Minv,
                                          img_size,
                                          flags=cv2.INTER_LINEAR)
        road_warped_bkg = cv2.warpPerspective(road_bkg,
                                              Minv,
                                              img_size,
                                              flags=cv2.INTER_LINEAR)

        base = cv2.addWeighted(image, 1.0, road_warped_bkg, -1.0, 0.0)
        result = cv2.addWeighted(base, 1.0, road_warped, 0.7, 0.0)

        # calculate the offset of the car on the road and
        # figure out if it's in the right or left lane
        # and scale to meters
        # the -1 position is closest to the car
        ym_per_pix = curve_centers.ym_per_pix  # meters per pixel in y direction
        xm_per_pix = curve_centers.xm_per_pix  # meters per pixel in x direction

        # we want curvature in meters using the left lane to calculate the value
        # we could do the right, left + right average, or create a whole new middle line
        # see www.intmath.com/applications-differentiation/8-radius-curvature.php
        curve_fit_cr = np.polyfit(
            np.array(res_yvals, np.float32) * ym_per_pix,
            np.array(leftx, np.float32) * xm_per_pix, 2)
        curverad = (
            (1 + (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix +
                  curve_fit_cr[1])**2)**1.5) / np.absolute(2 * curve_fit_cr[0])

        camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
        center_diff = (camera_center - warped.shape[1] / 2) * xm_per_pix

        # if camera center is positive we are on the left side of the road
        side_pos = 'left'
        if center_diff <= 0:
            side_pos = 'right'
        # cv2.resize expects (width, height)
        thumbnail = cv2.resize(
            template,
            (int(0.25 * result.shape[1]), int(0.25 * result.shape[0])))
        result[0:thumbnail.shape[0], result.shape[1] -
               thumbnail.shape[1]:result.shape[1]] = thumbnail
        cv2.putText(result,
                    'Radius of Curvature = ' + str(round(curverad, 3)) + '(m) ',
                    (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(
            result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' +
            side_pos + ' of center', (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
            (255, 255, 255), 2)
        return result
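Example #14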
def process_image(img):
    #read and undistort the image

    img = cv2.undistort(img, mtx, dist, None, mtx)

    # Process image and generate binary pixel of interests
    preprocessImage = np.zeros_like(img[:, :, 0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))
    c_binary = color_threshold(img, sthresh=(100, 255), vthresh=(50, 255))
    preprocessImage[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255

    # Defining perspective transformation area
    height = img.shape[0]
    width = img.shape[1]
    img_size = (width, height)

    top_left_src = (563, 470)
    bottom_left_src = (220, 700)
    top_left_dst = (300, 300)
    bottom_left_dst = (300, 720)

    src = np.float32([[top_left_src[0], top_left_src[1]],
                      [bottom_left_src[0], bottom_left_src[1]],
                      [width - bottom_left_src[0], bottom_left_src[1]],
                      [width - top_left_src[0], top_left_src[1]]])

    dst = np.float32([[top_left_dst[0], top_left_dst[1]],
                      [bottom_left_dst[0], bottom_left_dst[1]],
                      [width - bottom_left_dst[0], bottom_left_dst[1]],
                      [width - top_left_dst[0], top_left_dst[1]]])

    # Start applying the perspective transform
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocessImage,
                                 M,
                                 img_size,
                                 flags=cv2.INTER_LINEAR)

    warped_color = cv2.warpPerspective(img,
                                       M,
                                       img_size,
                                       flags=cv2.INTER_LINEAR)

    #Define the box size for fitting the curvature line
    window_width = 25
    window_height = 80

    #set up the overall class to do the tracking
    curve_centers = tracker(Mywindow_width=window_width,
                            Mywindow_height=window_height,
                            Mymargin=25,
                            My_ym=10 / 720,
                            My_xm=4 / 384,
                            Mysmooth_factor=15)

    window_centroids = curve_centers.find_window_centroids(warped)

    # Points used to draw all the left and right window
    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)

    # Points used to find the left and right lanes
    leftx = []
    rightx = []

    # Go through each level and draw the windows
    for level in range(0, len(window_centroids)):
        # Window_mask is a function to draw window areas
        # Add center value found in frame to the list of lane points per left,right

        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])

        l_mask = window_mask(window_width, window_height, warped,
                             window_centroids[level][0], level)
        r_mask = window_mask(window_width, window_height, warped,
                             window_centroids[level][1], level)

        # Add graphic points from window mask here to total pixels found
        l_points[(l_points == 255) | ((l_mask == 1))] = 255
        r_points[(r_points == 255) | ((r_mask == 1))] = 255

    # Draw the results
    #template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
    #zero_channel = np.zeros_like(template) # create a zero color channel
    #template = np.array(cv2.merge((zero_channel,template,zero_channel)),np.uint8) # make window pixels green
    #warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
    #output = cv2.addWeighted(warpage, 1, template, 0.5, 0.0) # overlay the orignal road image with window results

    #Fit the lane boundaries to the left,right and center positions found
    yvals = range(0, warped.shape[0])

    res_yvals = np.arange(warped.shape[0] - (window_height / 2), 0,
                          -window_height)

    left_fit = np.polyfit(res_yvals, leftx, 2)
    left_fitx = left_fit[0] * yvals * yvals + left_fit[1] * yvals + left_fit[2]
    left_fitx = np.array(left_fitx, np.int32)

    right_fit = np.polyfit(res_yvals, rightx, 2)
    right_fitx = right_fit[0] * yvals * yvals + right_fit[
        1] * yvals + right_fit[2]
    right_fitx = np.array(right_fitx, np.int32)

    left_lane = np.array(
        list(
            zip(
                np.concatenate((left_fitx - window_width / 2,
                                left_fitx[::-1] + window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    right_lane = np.array(
        list(
            zip(
                np.concatenate((right_fitx - window_width / 2,
                                right_fitx[::-1] + window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    inner_lane = np.array(
        list(
            zip(
                np.concatenate((left_fitx + window_width / 2,
                                right_fitx[::-1] - window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)

    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)
    cv2.fillPoly(road, [left_lane], color=[255, 0, 0])
    cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
    cv2.fillPoly(road, [inner_lane], color=[0, 100, 0])
    #cv2.fillPoly(road_bkg,[left_lane],color=[255,255,255])
    #cv2.fillPoly(road_bkg,[right_lane],color=[255,255,255])

    road_warped = cv2.warpPerspective(road,
                                      Minv,
                                      img_size,
                                      flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg,
                                          Minv,
                                          img_size,
                                          flags=cv2.INTER_LINEAR)

    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    final_line = cv2.addWeighted(base, 1.0, road_warped, 0.7, 0.0)

    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix
    curve_fit_cr = np.polyfit(
        np.array(res_yvals, np.float32) * ym_per_pix,
        np.array(leftx, np.float32) * xm_per_pix, 2)
    curverad = (
        (1 +
         (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix + curve_fit_cr[1])**2)**
        1.5) / np.absolute(2 * curve_fit_cr[0])

    #Calculate the offset of the car on the road
    camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
    center_diff = (camera_center - warped.shape[1] / 2) * xm_per_pix
    side_pos = 'left'
    if center_diff <= 0:
        side_pos = 'right'

    cv2.putText(final_line,
                'Radius of Curvature = ' + str(round(curverad, 3)) + '(m)',
                (50, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 3)
    cv2.putText(
        final_line, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' +
        side_pos + ' of center', (50, 140), cv2.FONT_HERSHEY_SIMPLEX, 1.5,
        (255, 255, 255), 3)

    return final_line
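Both process_image variants in this listing call window_mask without defining it. A minimal sketch, assuming the conventional (width, height, img_ref, center, level) signature used above: it returns a binary mask the size of the warped image, with 1s inside the window at the given vertical level (level 0 is the bottom band).

import numpy as np

def window_mask(width, height, img_ref, center, level):
    # 1s inside the window centered at `center` for this vertical level
    output = np.zeros_like(img_ref)
    output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height),
           max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1
    return output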
Example #15
File: server.py Project: jledet/mcc
    def server(self):
        # Parse configuration
        conf = config.config(VERSION)

        # Register cleanup function
        atexit.register(self.cleanup)

        # Get logging instance
        self.mcclog = logging.getLogger("self.mcclog")
        self.mcclog.setLevel(logging.DEBUG)
        logformatter = logging.Formatter("[%(levelname)7s] %(asctime)s %(module)s: %(message)s")
        colorformatter = ColorFormatter("[%(levelname)21s] %(asctime)s %(module)s: %(message)s")
        if conf.verbose:
            loglvl = logging.DEBUG
        else:
            loglvl = logging.INFO

        if conf.daemon:
            # Daemonize
            try:
                daemon.daemonize()
            except Exception as e:
                sys.exit(1)
        else:
            # Enable logging to stdout
            streamhandler = logging.StreamHandler()
            if conf.color:
                streamhandler.setFormatter(colorformatter)
            else:
                streamhandler.setFormatter(logformatter)
            streamhandler.setLevel(loglvl)
            self.mcclog.addHandler(streamhandler)

        # Check if logging to file is enabled
        if conf.logfile is not None:
            filehandler = logging.FileHandler(conf.logfile)
            filehandler.setFormatter(logformatter)
            filehandler.setLevel(loglvl)
            self.mcclog.addHandler(filehandler)

        # Start the program
        self.mcclog.info("AAUSAT3 MCC Server {0}".format(VERSION))
        self.mcclog.info("Copyright (c) 2009-2011 Jeppe Ledet-Pedersen <*****@*****.**>")
        uname = os.uname()
        self.mcclog.info("Running on {0} {1} {2}".format(uname[0], uname[2], uname[4]))

        # Print process info
        self.mcclog.info(
            "pid={0}, ppid={1}, pgrp={2}, sid={3}, uid={4}, euid={5}, gid={6}, egid={7}".format(
                os.getpid(),
                os.getppid(),
                os.getpgrp(),
                os.getsid(0),
                os.getuid(),
                os.geteuid(),
                os.getgid(),
                os.getegid(),
            )
        )

        # Warn if the program is being run as root
        if os.geteuid() == 0:
            self.mcclog.warning("Running the MCC as root is both unnecessary and a very bad idea")

        # Validate interface arguments
        if not conf.csp_enable:
            self.mcclog.warning("CSP interface disabled. Replay only mode.")

        # Connect to Database
        if conf.db_type == "postgresql":
            try:
                self.dbconn = database.postgresqlmanager(self.mcclog, conf)
                self.dbconn.test()
            except Exception as e:
                self.mcclog.error(
                    "Failed to start PostgreSQL Database Manager with host={0}, db={1}, user={2} ({3})".format(
                        conf.db_host, conf.db_name, conf.db_user, str(e).replace("\n\t", " ").replace("\n", "")
                    )
                )
                sys.exit(1)
            self.mcclog.info(
                "Started PostgreSQL Database Manager with host={0}, db={1}, user={2}".format(
                    conf.db_host, conf.db_name, conf.db_user
                )
            )
        elif conf.db_type == "mysql":
            try:
                self.dbconn = database.mysqlmanager(self.mcclog, conf)
                self.dbconn.test()
            except Exception as e:
                self.mcclog.error(
                    "Failed to start MySQL Database Manager with host={0}, db={1}, user={2} ({3})".format(
                        conf.db_host, conf.db_name, conf.db_user, str(e).replace("\n\t", " ").replace("\n", "")
                    )
                )
                sys.exit(1)
            self.mcclog.info(
                "Started MySQL database manager with host={0}, db={1}, user={2}".format(
                    conf.db_host, conf.db_name, conf.db_user
                )
            )
        elif conf.db_type == "sqlite":
            try:
                self.dbconn = database.sqlitemanager(self.mcclog, conf)
                self.dbconn.test()
            except Exception as e:
                self.mcclog.error("Failed to start SQLite Database Manager for {0} ({1})".format(conf.db_file, e))
                sys.exit(1)
            self.mcclog.info("Started SQLite database manager for {0}".format(conf.db_file))
        else:
            self.mcclog.error(
                "{0} is not a valid database type. Use either postgresql, mysql or sqlite".format(conf.db_type)
            )
            sys.exit(1)

        # Initialize CSP
        if conf.csp_enable:
            try:
                self.csp = csp.csp(self.mcclog, self.dbconn, self.inq, self.outq, conf)
            except Exception as e:
                self.mcclog.error("Failed to initialize CSP ({0})".format(e))
                sys.exit(1)
            self.mcclog.info("Initialized CSP for address {0}".format(conf.csp_host))

        # Initialize Tracking
        if conf.track_enable:
            try:
                self.tracker = tracker.tracker(self.mcclog, self.outq, conf)
            except Exception as e:
                self.mcclog.error("Failed to initialize Tracker ({0})".format(e))
                sys.exit(1)
            self.mcclog.info("Initialized Tracker")

        # Initialize Web Interface
        if conf.web_enable:
            try:
                self.web = web.web(conf.web_port, conf.web_auth, conf.web_https, conf.web_certfile)
            except Exception as e:
                self.mcclog.error("Failed to initialize Web Interface ({0})".format(e))
                sys.exit(1)
            self.mcclog.info("Initialized Web Interface on port {0}".format(conf.web_port))

        # Initialize Connection Manager
        try:
            self.connman = connmanager.connectionmanager(self.mcclog, self.dbconn, self.connlist, self.outq, conf)
        except Exception as e:
            self.mcclog.error(
                "Failed to initialize Connection Manager on port {0}, {1} ({2})".format(
                    conf.listen_port,
                    "connection limit: {0}".format(conf.max_users if int(conf.max_users) > 0 else "none"),
                    e,
                )
            )
            sys.exit(1)

        self.mcclog.info(
            "Initialized Connection Manager on port {0}, {1}".format(
                conf.listen_port, "connection limit: {0}".format(conf.max_users if int(conf.max_users) > 0 else "none")
            )
        )

        if not conf.use_tls:
            self.mcclog.warning("TLS encryption is disabled! Eve can read your data...")

        self.mcclog.info("Startup sequence completed - Listening for incoming connections")

        # Enter main loop
        rx_packets = 0
        tx_packets = 0
        while True:
            try:
                packet = self.inq.get(True, 30)
            except Queue.Empty:
                pass
            except KeyboardInterrupt:
                print ""
                self.mcclog.info("Closing AAUSAT3 MCC Server")
                sys.exit(0)
            except Exception as e:
                self.mcclog.error("Caught exception ({0})".format(e))
                sys.exit(1)
            else:
                if packet.source == conf.csp_host and packet.sport >= 17:
                    tx_packets += 1
                else:
                    rx_packets += 1
                self.mcclog.debug("Distributing packet: {0}".format(packet.debug()))
                self.mcclog.debug("Packets transmitted: {0}, Packets received: {1}".format(tx_packets, rx_packets))
                # Add packet to packet queues
                for conn in self.connlist:
                    if conn.enabled and conn.authorized:
                        conn.queue.put(packet)
        print "STOPPED unexpect"
Example #16
# create messageboard for button tracker
# can get messages since creation of MB
# or since last read
mb = messageboard.MessageBoard('tracker')

waitForArm = False
targetReceived = False
target = None
state1 = None
state2 = None
state = None
vis = True

# create tracker
tracker = tracker.tracker(visualize=vis)

while True:

    if not targetReceived:
        # check messageboard once every second
        # time.sleep(1) 
        
        # check messageboard for target
        targetList = mb.readMsg(['nav', 'directions'])

        # if no new commands
        if len(targetList) == 0: continue
        else:
            newestTarget = targetList[-1]
            # if shutdown signal, break
Example #17
	ym_per_pixel = center_tracker.ym_per_pixel
	curv_fit = np.polyfit(np.array(y_centers, np.float32)*ym_per_pixel, np.array(left_centers, np.float32)*xm_per_pixel, 2)
	curv_rad = ((1 + (2*curv_fit[0]*y_values[-1]*ym_per_pixel + curv_fit[1])**2)**(3/2))/np.absolute(2*curv_fit[0])
	# calculate the distance from center
	lane_center = (left_fitx[-1] + right_fitx[-1])/2
	diff = (lane_center - img_size[0]/2)*xm_per_pixel
	if diff <= 0:
		dire = 'right'
	else:
		dire = 'left'

	result = cv2.addWeighted(img, 1.0, lane_warped, 0.5, 0.0)	
	# write curvature and position information onto the images
	cv2.putText(result, "Radius of Curvature = " + str(round(curv_rad, 3)) + '(m)', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
	cv2.putText(result, "Current position is " + str(round(diff,3)) + 'm on the ' + dire + " of the center", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)

	return result

win_width = 30
win_height = 80 # break image into 9 vertical layers since the height of the image is 720
margin = 100 # Slice area for searching
center_tracker = tracker(window_width=win_width, window_height=win_height, margin=margin, ym2pixel=40/720, xm2pixel=4/517, smooth_factor=15)

input_video = 'project_video.mp4'
output_video = 'output_video_test.mp4'

clip = VideoFileClip(input_video)
print("Start processing video...")
video_clip = clip.fl_image(pipeline)
video_clip.write_videofile(output_video, audio=False)
print("Finished")
Example #18
def process_image(img):
    img = cv2.undistort(img,mtx,dist,None,mtx)

    preprocessImage = np.zeros_like(img[:,:,0])
    gradx = abs_sobel_thresh(img, orient= 'x', thresh=(12,255))
    grady = abs_sobel_thresh(img, orient= 'y',thresh=(25,255))
    c_binary = color_threshold(img, sthresh=(100,255),vthresh=(50,255))
    preprocessImage[((gradx == 1) & (grady == 1) | (c_binary == 1) )] = 255

    #work on defining perspective transformation area
    img_size = (img.shape[1],img.shape[0])
    bot_width = 0.76 #percent of image width for the trapezoid bottom edge
    mid_width = 0.08 
    height_pct = 0.62
    bottom_trim = 0.935
    src = np.float32([[img.shape[1]*(0.5-mid_width/2),img.shape[0]*height_pct],[img.shape[1]*(0.5+mid_width/2),img.shape[0]*height_pct],
                      [img.shape[1]*(0.5+bot_width/2),img.shape[0]*bottom_trim],[img.shape[1]*(0.5-bot_width/2),img.shape[0]*bottom_trim]])
    offset = img_size[0]* 0.25
    dst = np.float32([[offset, 0],[img_size[0]-offset,0],[img_size[0]-offset, img_size[1]],[offset, img_size[1]]])

    #perform the transform 
    M = cv2.getPerspectiveTransform(src,dst)
    Minv = cv2.getPerspectiveTransform(dst,src)
    warped = cv2.warpPerspective(preprocessImage, M, img_size, flags=cv2.INTER_LINEAR)

    window_width = 25
    window_height = 80


    #set up the overall class to do all tracking
    curve_centers = tracker(Mywindow_width = window_width, Mywindow_height=window_height, Mymargin = 25, My_ym = 10/720, My_xm = 4/384, Mysmooth_factor=15)

    window_centroids = curve_centers.find_window_centroids(warped)

    #Points used to draw all the left and right windows
    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)

    #points used to find the left and right lanes
    rightx=[]
    leftx=[]     

    #Go through each level and draw the Windows
    for level in range(0, len(window_centroids)):
        #add center value found in frame to the list of lane points per left, right
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])
        l_mask = window_mask(window_width, window_height, warped, window_centroids[level][0],level)
        r_mask = window_mask(window_width, window_height, warped, window_centroids[level][1],level)
        #Add graphic points from window mask here to total pixels found
        l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
        r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255

    #Draw the results
    template = np.array(r_points + l_points, np.uint8) #add both left and right window pixels together
    zero_channel = np.zeros_like(template) #create a zero color channels
    template = np.array(cv2.merge((zero_channel,template,zero_channel)),np.uint8) #make window pixel green
    warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) #making the original road pixel 3 color channels
    result = cv2.addWeighted(warpage, 1, template, 0.5, 0.0) #Overlay the original road image with window results

    #fit the lane boundaries to the left, right, center positions found
    yvals = range(0, warped.shape[0])
    
    res_yvals = np.arange(warped.shape[0]-(window_height/2), 0, -window_height)

    left_fit = np.polyfit(res_yvals, leftx, 2)
    left_fitx = left_fit[0]*yvals*yvals + left_fit[1]*yvals + left_fit[2]
    left_fitx = np.array(left_fitx, np.int32)

    right_fit = np.polyfit(res_yvals, rightx, 2)
    right_fitx = right_fit[0]*yvals*yvals + right_fit[1]*yvals + right_fit[2]
    right_fitx = np.array(right_fitx, np.int32)

    left_lane = np.array(list(zip(np.concatenate((left_fitx-window_width/2, left_fitx[::-1]+window_width/2),axis=0),np.concatenate((yvals,yvals[::-1]),axis=0))), np.int32)
    right_lane = np.array(list(zip(np.concatenate((right_fitx-window_width/2, right_fitx[::-1]+window_width/2),axis=0),np.concatenate((yvals,yvals[::-1]),axis=0))), np.int32)
    inner_lane = np.array(list(zip(np.concatenate((left_fitx+window_width/2, right_fitx[::-1]-window_width/2),axis=0),np.concatenate((yvals,yvals[::-1]),axis=0))), np.int32)

    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)
    cv2.fillPoly(road, [left_lane], color=[255,0,0])
    cv2.fillPoly(road, [right_lane], color=[0,0,255])
    cv2.fillPoly(road, [inner_lane],color=[0,255,0])
    cv2.fillPoly(road_bkg, [left_lane], color=[255,255,255])
    cv2.fillPoly(road_bkg,[right_lane],color=[255,255,255])
 
    road_warped = cv2.warpPerspective(road, Minv, img_size, flags = cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, img_size, flags = cv2.INTER_LINEAR)

    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, 0.7, 0.0)

    ym_per_pix = curve_centers.ym_per_pix #meters per pixel in y dimension
    xm_per_pix = curve_centers.xm_per_pix #meters per pixel in x dimension

    curve_fit_cr = np.polyfit(np.array(res_yvals, np.float32)*ym_per_pix, np.array(leftx,np.float32)*xm_per_pix,2)
    curverad = ((1 + (2*curve_fit_cr[0]*yvals[-1]*ym_per_pix + curve_fit_cr[1])**2)**1.5)/np.absolute(2*curve_fit_cr[0])

    #calculate the offset of the car on the road
    camera_center = (left_fitx[-1] + right_fitx[-1])/2
    center_diff = (camera_center - warped.shape[1]/2)*xm_per_pix
    side_pos = 'left'
    if center_diff <= 0:
        side_pos = 'right'

    #draw the text showing curvature, offset, and speed
    cv2.putText(result, 'Radius of Curvature = '+str(round(curverad,3))+'(m)',(50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
    cv2.putText(result, 'Vehicle is '+str(abs(round(center_diff,3)))+'m '+side_pos+' of center', (50,100), cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)

    return result
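The curverad expressions in these snippets implement the radius-of-curvature formula for a quadratic fit x = A*y^2 + B*y + C, namely R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|, evaluated at the row nearest the car with the fit done on meter-scaled points. A small helper showing the same computation (the function name is illustrative, not from any of these projects):

import numpy as np

def radius_of_curvature(fit_cr, y_eval_px, ym_per_pix):
    # fit_cr: [A, B, C] from np.polyfit on points already scaled to meters
    # y_eval_px: pixel row to evaluate at (typically the bottom of the image)
    A, B = fit_cr[0], fit_cr[1]
    y_m = y_eval_px * ym_per_pix  # convert the evaluation row to meters
    return ((1 + (2 * A * y_m + B) ** 2) ** 1.5) / np.absolute(2 * A)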
Example #19
                      [(img_size[0] * 3 / 4), img_size[1]],
                      [(img_size[0] * 3 / 4), 0]])

    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocessImage,
                                 M,
                                 img_size,
                                 flags=cv2.INTER_LINEAR)

    window_width = 40
    window_height = 60

    curve_centers = tracker(Mywindow_width=window_width,
                            Mywindow_height=window_height,
                            Mymargin=30,
                            My_ym=30 / 720,
                            My_xm=3.7 / 700,
                            Mysmooth_factor=10)

    window_centroids = curve_centers.find_window_centroids(warped)

    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)

    leftx = []
    rightx = []

    for level in range(0, len(window_centroids)):

        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])
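Example #20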
processed_img = np.zeros_like(gradx)
processed_img[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 1
processed_img *= mask_w
processed_img += mask_y

#perspective transformation
warped_img, img_size, Minv = warped(processed_img)

# locate line pixels by sliding window convolution
window_width = 25
window_height = 80  # Break image into 9 vertical layers since image height is 720
margin = 40  # How much to slide left and right for searching

curve_centers = tracker(window_width,
                        window_height,
                        margin,
                        ym=10 / 728,
                        xm=4 / 667,
                        smooth_factor=15)

window_centroids = curve_centers.find_window_centroids(warped_img)

# Points used to draw all the left and right windows
l_points = np.zeros_like(warped_img)
r_points = np.zeros_like(warped_img)

leftx = []
rightx = []

# Go through each level and draw the windows
for level in range(0, len(window_centroids)):
    # Window_mask is a function to draw window areas
Example #21
def process_image(img):
    img = cv2.undistort(img,mtx,dist,None,mtx)

    preprocessImage = np.zeros_like(img[:,:,0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12,255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25,255))
    c_binary = color_threshold(img, sthresh=(50,255), vthresh=(100,255), lthresh=(50,255))
    m_binary = mag_thresh(img, sobel_kernel=3, mag_thresh=(0,25))
    d_binary = dir_threshold(img, sobel_kernel=15, thresh=(0.0,1.5))
    preprocessImage[((gradx == 1) & (grady == 1) | (m_binary == 1) & (d_binary == 1) & (c_binary == 1))] = 255

    img_size = (img.shape[1], img.shape[0])
    bot_width = 0.76
    mid_width = 0.08
    height_pct = 0.62
    bottom_trim = 0.935
    src = np.float32([[img.shape[1]*(0.5-mid_width/2),img.shape[0]*height_pct],[img.shape[1]*(0.5+mid_width/2),img.shape[0]*height_pct],
        [img.shape[1]*(0.5+bot_width/2),img.shape[0]*bottom_trim], [img.shape[1]*(0.5-bot_width/2),img.shape[0]*bottom_trim]])
    offset = img_size[0]*0.80
    dst = np.float32([[offset, 0], [img_size[0]-offset, 0], [img_size[0]-offset, img_size[1]], [offset, img_size[1]]])

    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocessImage,M,img_size,flags=cv2.INTER_LINEAR)

    window_width = 25
    window_height = 88

    curve_centers = tracker(Mywindow_width = window_width, Mywindow_height = window_height, Mymargin = 25, My_ym = 10/720, My_xm = 4/384, Mysmooth_factor = 15)

    window_centroids = curve_centers.find_window_centroids(warped)

    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)

    rightx = []
    leftx = []

    for level in range(0, len(window_centroids)):
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])

        l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
        r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)

        l_points[(l_points == 255) | ((l_mask == 1))] = 255
        r_points[(r_points == 255) | ((r_mask == 1))] = 255

    template = np.array(r_points+l_points,np.uint8)
    zero_channel = np.zeros_like(template)
    template = np.array(cv2.merge((zero_channel,template,zero_channel)),np.uint8)
    warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8)
    result = cv2.addWeighted(warpage,1,template,0.5,0.0)

    yvals = range(0,warped.shape[0])

    res_yvals = np.arange(warped.shape[0]-(window_height/2),0,-window_height)

    left_fit = np.polyfit(res_yvals, leftx, 2)
    left_fitx = left_fit[0]*yvals*yvals + left_fit[1]*yvals + left_fit[2]
    left_fitx = np.array(left_fitx,np.int32)

    right_fit = np.polyfit(res_yvals, rightx, 2)
    right_fitx = right_fit[0]*yvals*yvals + right_fit[1]*yvals + right_fit[2]
    right_fitx = np.array(right_fitx,np.int32)
    '''
    yvals = np.linspace(0, 719, num=720)# to cover same y-range as image
    res_yvals = np.linspace(0, 719, num=720)
    quadratic_coeff = 3e-4 # arbitrary quadratic coefficient

    leftx = np.array([200 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51) for y in yvals])
    rightx = np.array([900 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51) for y in yvals])

    leftx = leftx[::-1]  # Reverse to match top-to-bottom in y
    rightx = rightx[::-1]  # Reverse to match top-to-bottom in y

    left_fit = np.polyfit(yvals, leftx, 2)
    left_fitx = left_fit[0]*yvals**2 + left_fit[1]*yvals + left_fit[2]
    right_fit = np.polyfit(yvals, rightx, 2)
    right_fitx = right_fit[0]*yvals**2 + right_fit[1]*yvals + right_fit[2]
    '''
    left_lane = np.array(list(zip(np.concatenate((left_fitx-window_width/2,left_fitx[::-1]+window_width/2),axis=0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
    right_lane = np.array(list(zip(np.concatenate((right_fitx-window_width/2,right_fitx[::-1]+window_width/2),axis=0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
    middle_marker = np.array(list(zip(np.concatenate((left_fitx+window_width/2,right_fitx[::-1]-window_width/2),axis=0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)

    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)

    cv2.fillPoly(road, [left_lane], color=[255,0,0])
    cv2.fillPoly(road, [right_lane], color=[0,0,255])
    cv2.fillPoly(road, [middle_marker], color=[0,255,0])
    cv2.fillPoly(road_bkg, [left_lane], color=[255,255,255])
    cv2.fillPoly(road_bkg, [right_lane], color=[255,255,255])

    road_warped = cv2.warpPerspective(road,Minv,img_size,flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg,Minv,img_size,flags=cv2.INTER_LINEAR)

    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, 0.7, 0.0)

    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix

    curve_fit_cr = np.polyfit(np.array(res_yvals,np.float32)*ym_per_pix, np.array(leftx,np.float32)*xm_per_pix, 2)
    curverad = ((1 + (2*curve_fit_cr[0]*yvals[-1]*ym_per_pix + curve_fit_cr[1])**2)**1.5) / np.absolute(2*curve_fit_cr[0])

    camera_center = (left_fitx[-1] + right_fitx[-1])/2
    center_diff = (camera_center-warped.shape[1]/2)*xm_per_pix
    side_pos = 'left'
    if center_diff <= 0:
        side_pos = 'right'

    cv2.putText(result, 'Radius of Curvature = ' +str(round(curverad,3))+'(m)',(50,50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2)
    cv2.putText(result,'Vehicle is '+str(abs(round(center_diff,3))) +'m '+side_pos+' of center',(50,100), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2)

    return result
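abs_sobel_thresh and color_threshold are used by every process_image variant in this listing but never shown. The sketches below are plausible implementations consistent with the keyword arguments above (sthresh/vthresh select HLS saturation and HSV value channels; the lthresh variant in Example #21 would add an HLS lightness test). They are assumptions, not these projects' actual helpers.

import cv2
import numpy as np

def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    # Binary image where the scaled absolute Sobel gradient falls inside thresh
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=sobel_kernel))
    scaled = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary

def color_threshold(img, sthresh=(0, 255), vthresh=(0, 255)):
    # AND of an HLS S-channel threshold and an HSV V-channel threshold
    s_channel = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    v_channel = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:, :, 2]
    binary = np.zeros_like(s_channel)
    binary[(s_channel >= sthresh[0]) & (s_channel <= sthresh[1])
           & (v_channel >= vthresh[0]) & (v_channel <= vthresh[1])] = 1
    return binary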
Example #22
    binary_warped = cv2.warpPerspective(preprocessImage,
                                        M,
                                        img_size,
                                        flags=cv2.INTER_LINEAR)

    result1 = binary_warped
    write_warped = './test_images/warped/warped' + str(idx) + '.jpg'
    cv2.imwrite(write_warped, result1)

    window_width = 25
    window_height = 80

    # set up overall class to do all the tracking
    curve_centers = tracker(myWindow_width=window_width,
                            myWindow_height=window_height,
                            myMargin=25,
                            my_ym=10 / 720,
                            my_xm=4 / 384,
                            mySmooth_factor=15)

    window_centroids = curve_centers.find_window_centroids(result1)

    # Points used to draw all the left and right windows
    l_points = np.zeros_like(result1)
    r_points = np.zeros_like(result1)

    # points used to find left and right lanes
    rightx = []
    leftx = []

    # Go through each level and draw the windows
    for level in range(0, len(window_centroids)):
Example #23
	def compute_tracker(self):
		truck_PO = truck_path(self.truck)
		new_tracker = tracker(self.db_path, self.truck, truck_PO, float(self.argv[0]), float(self.argv[1]), float(self.argv[2]), float(self.argv[3]), float(self.argv[4]), float(self.argv[5]))
		new_tracker.compute_tracker()
Example #24
 def configure(self, env):
     import tracker_params
     env.set_params(tracker_params)
     tracker()
Example #25
    def __init__(self, torrent_file_path = None, part_file_path = None):
        '''
        A Torrent object stores overall torrent state such as downloaded,
        uploaded and left byte counts, piece size, number of pieces and the
        peer_id. It can be initialized in two ways: from the path to a
        .torrent file (a newly created torrent) or from an already partially
        downloaded .part file.
        Note: .part is just the extension used for the temporarily saved,
        partially downloaded file.
        This function is not thread safe.
        '''
        # Accessed by both the tracker thread and peer threads, hence protected by a lock
        self.uploaded = 0
        self.downloaded = 0
        self.left = 0
        self.piece_freq = None
        self.my_bitfield = set([])
        # This list stores the number of in-flight (pipelined) requests for
        # each corresponding piece
        self.requested_pieces = None
        # This array stores, per piece, the offset below which blocks are
        # already downloaded
        # This denotes the pieces which this client does not yet have
        self.requestable_pieces = set([])
        # Lock guarding my_bitfield, piece_freq, uploaded, downloaded and left
        self.lock = threading.Lock()

        # Creating peer id
        self.peer_id = 0
        peer_id_sha = hashlib.sha1()
        peer_id_sha.update(str(os.getpid()).encode())
        peer_id_sha.update(str(datetime.now()).encode())
        self.peer_id = peer_id_sha.digest()

        # Creating TCP socket to accept connections from other peers
        self.socket_for_peer = socket(AF_INET, SOCK_STREAM)
        self.socket_for_peer.bind(('', 0))
        self.port_for_peer = self.socket_for_peer.getsockname()[1]
        self.socket_for_peer.listen(1)
        torrent_logger.debug("Listening on port " + str(self.port_for_peer) +\
                " for other peers connection")

        # Initialise using extract from .torrent file
        torrent_file_extract = torrent_file.torrent_file(torrent_file_path)
        self.file_extract = torrent_file_extract.file_extract
        self.piece_len = torrent_file_extract.piece_len
        self.piece_hash = torrent_file_extract.piece
        self.name = torrent_file_extract.name
        self.length = torrent_file_extract.length
        self.number_of_pieces = int(self.length / self.piece_len)
        torrent_logger.debug("Number of pieces :- " + str(self.number_of_pieces))
        self.trackers_list = torrent_file_extract.tracker
        self.left = self.length
        # Initializing all downloaded piece offsets to zero since either
        # 1) this is a new torrent, so no pieces can already be downloaded, or
        # 2) this is a resumed download, and partially downloaded pieces are
        #    not stored in the part file
        self.lock.acquire()
        self.downloaded_piece_offset = [0] * self.number_of_pieces
        self.lock.release()

        # If there is already downloaded file read information from it
        if part_file_path is not None:
            torrent_logger.debug("Reading already downloaded piece info from part file " + part_file_path)
            self.my_bitfield |= part_file.get_piece_data(part_file_path)
            self.part_file = part_file.part_file(part_file_path)
        else:
            self.part_file = part_file.part_file(self.name + ".part")

        self.info_hash = hashlib.sha1()
        info_bencode = self.file_extract[b'info']
        self.info_hash.update(bencodepy.encode(info_bencode))

        # Make a trackers object from tracker_list
        self.trackers = tracker.tracker(self, self.trackers_list)

        # List of peers object
        self.peers = []

        self.lock.acquire()
        self.piece_freq = [0] * self.number_of_pieces
        self.requested_pieces = [0] * self.number_of_pieces
        for i in range(self.number_of_pieces):
            if i not in self.my_bitfield:
                self.requestable_pieces.add(i)
        self.lock.release()

        # A cache for partially downloaded pieces; accessed by the main peer
        # only
        self.part_pieces = dict()
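Per the docstring, this constructor supports two entry points. A hypothetical usage sketch (the class and file names are illustrative; the snippet above only shows __init__):

# Fresh download from a .torrent file
t = Torrent(torrent_file_path="example.torrent")

# Resume from a partially downloaded .part file
t_resumed = Torrent(torrent_file_path="example.torrent",
                    part_file_path="example.part")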
Example #26
def process_image(img, debug=False):
    img = cv2.undistort(img, mtx, dist, None, mtx)

    preprocessImage = np.zeros_like(img[:, :, 0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))
    c_binary = color_threshold(img, sthresh=(100, 255), vthresh=(50, 255))
    preprocessImage[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255

    img_size = (img.shape[1], img.shape[0])
    # bot_width = .76 # percent of bottom trapizoid height
    # mid_width = .08 # percent of middle trapizoid height
    # height_pct = .62 # percent of trapizoid height
    # bottom_trim = .935 # percent from top to bottom to avoid hood
    # src = np.float32([[img.shape[1]*(.5-mid_width/2), img.shape[0]*height_pct],
    #                   [img.shape[1]*(.5+mid_width/2), img.shape[0]*height_pct],
    #                   [img.shape[1]*(.5+bot_width/2), img.shape[0]*bottom_trim],
    #                   [img.shape[1]*(.9-bot_width/2), img.shape[0]*bottom_trim]])
    # offset = img_size[0]*0.25
    # dst = np.float32([[offset, 0],
    #                   [img_size[0]-offset, 0],
    #                   [img_size[0]-offset, img_size[1]],
    #                   [offset, img_size[1]]])
    src = np.float32([[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
                      [((img_size[0] / 6) - 10), img_size[1]],
                      [(img_size[0] * 5 / 6) + 60, img_size[1]],
                      [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
    dst = np.float32([[(img_size[0] / 4), 0], [(img_size[0] / 4), img_size[1]],
                      [(img_size[0] * 3 / 4), img_size[1]],
                      [(img_size[0] * 3 / 4), 0]])
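    # src marks a trapezoid on the road surface; dst maps it to a rectangle,
    # so the warp below produces a bird's-eye view of the lane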

    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocessImage,
                                 M,
                                 img_size,
                                 flags=cv2.INTER_LINEAR)

    # Find Lane Line
    # Sliding Window Search
    window_width = 25
    window_height = 80
    curve_centers = tracker(Mywindow_width=window_width,
                            Mywindow_height=window_height,
                            Mymargin=25,
                            My_ym=30 / 720,
                            My_xm=3.7 / 700,
                            Mysmooth_factor=15)

    window_centroids = curve_centers.find_window_centroids(warped)

    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)

    rightx = []
    leftx = []

    for level in range(0, len(window_centroids)):
        l_mask = window_mask(window_width, window_height, warped,
                             window_centroids[level][0], level)
        r_mask = window_mask(window_width, window_height, warped,
                             window_centroids[level][1], level)
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])
        l_points[(l_points == 255) | (l_mask == 1)] = 255
        r_points[(r_points == 255) | (r_mask == 1)] = 255

    template = np.array(r_points + l_points, np.uint8)
    zero_channel = np.zeros_like(template)
    template = np.array(cv2.merge((zero_channel, template, zero_channel)),
                        np.uint8)
    warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)
    warpage = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)

    # draw the lane lines
    yvals = np.arange(0, warped.shape[0])
    res_yvals = np.arange(warped.shape[0] - (window_height / 2), 0,
                          -window_height)

    left_fit = np.polyfit(res_yvals, leftx, 2)
    left_fitx = np.array(np.polyval(left_fit, yvals), np.int32)

    right_fit = np.polyfit(res_yvals, rightx, 2)
    right_fitx = np.array(np.polyval(right_fit, yvals), np.int32)

    left_lane = np.array(
        list(
            zip(
                np.concatenate((left_fitx - window_width / 2,
                                left_fitx[::-1] + window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    right_lane = np.array(
        list(
            zip(
                np.concatenate((right_fitx - window_width / 2,
                                right_fitx[::-1] + window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    inner_lane = np.array(
        list(
            zip(
                np.concatenate((left_fitx + window_width / 2,
                                right_fitx[::-1] - window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)

    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)
    cv2.fillPoly(road, [left_lane], color=[255, 0, 0])
    cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
    cv2.fillPoly(road, [inner_lane], color=[0, 255, 0])
    cv2.fillPoly(road_bkg, [left_lane], color=[255, 255, 255])
    cv2.fillPoly(road_bkg, [right_lane], color=[255, 255, 255])

    road_warped = cv2.warpPerspective(road,
                                      Minv,
                                      img_size,
                                      flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg,
                                          Minv,
                                          img_size,
                                          flags=cv2.INTER_LINEAR)

    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, 1.0, 0.0)

    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix

    # Measuring Curvature
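    # Fit the left lane in meter space, then evaluate the standard
    # radius-of-curvature formula R = (1 + (2*A*y + B)**2)**1.5 / |2*A|
    # at the bottom of the image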
    curve_fit_cr = np.polyfit(
        np.array(res_yvals, np.float32) * ym_per_pix,
        np.array(leftx, np.float32) * xm_per_pix, 2)
    curverad = (
        (1 +
         (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix + curve_fit_cr[1])**2)**
        1.5) / np.absolute(2 * curve_fit_cr[0])

    camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
    center_diff = (camera_center - warped.shape[1] / 2) * xm_per_pix
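    # center_diff > 0 means the lane center sits right of the image center,
    # i.e. the camera (vehicle) is left of the lane center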
    side_pos = 'left'
    if center_diff <= 0:
        side_pos = 'right'

    cv2.putText(result,
                'Radius of Curvature = ' + str(round(curverad, 3)) + '(m)',
                (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.putText(
        result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' +
        side_pos + ' of center', (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
        (255, 255, 255), 2)

    if debug:
        return result, warpage, warped, preprocessImage, img
    else:
        return result
Exemplo n.º 27
import argparse
from logger import setup_logger
from regressor import regressor
from loader_vot import loader_vot
from tracker import tracker
from tracker_manager import tracker_manager

logger = setup_logger(logfile=None)

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True, help="Path to the prototxt")
ap.add_argument("-m", "--model", required=True, help="Path to the model")
ap.add_argument("-v",
                "--input",
                required=True,
                help="Path to the vot directory")
ap.add_argument("-g", "--gpuID", required=True, help="gpu to use")
args = vars(ap.parse_args())

do_train = False
objRegressor = regressor(args['prototxt'], args['model'], args['gpuID'],
                         do_train, logger)
objTracker = tracker(False, logger)
objLoaderVot = loader_vot(args['input'], logger)
video_frames, annotations = objLoaderVot.get_videos()
objTrackerVis = tracker_manager(video_frames, annotations, objRegressor,
                                objTracker, logger)

objTrackerVis.trackAll(0, 1)
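
# Example invocation (script and file names are illustrative):
#   python track_vot.py -p goturn.prototxt -m goturn.caffemodel -v /data/vot2014 -g 0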
Exemplo n.º 28
bridge = CvBridge()

goal = ("orange", 11, "cat")

cv_image = None
media = []
centro = []
atraso = 1.5E9  # one and a half seconds, in nanoseconds

low = np.array([22, 50, 50], dtype=np.uint8)
high = np.array([36, 255, 255], dtype=np.uint8)
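# Likely HSV bounds selecting yellow tones (OpenCV hue 22-36 of 0-179)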

v = 0.2
w = math.pi / 12

tracker = tracker(v, w)
aruco_tracker = ArucoTracker(goal[1])
claw = claw()

cm_coords = None
cm_coords_creeper = None
center_image = None

area = 0.0  # Holds the area of the largest contour

# Only use if the ROS clocks on the Raspberry Pi and the Linux desktop are synchronized.
# Discards images that arrive too late
check_delay = False

resultados = []  # Global list holding the detections seen so far
Exemplo n.º 29
	src = np.float32([[img.shape[1]*(.5-mid_width/2),img.shape[0]*height_pct],
					[img.shape[1]*(.5+mid_width/2),img.shape[0]*height_pct],
					[img.shape[1]*(.5+bot_width/2),img.shape[0]*bottom_trim],
					[img.shape[1]*(.5-bot_width/2),img.shape[0]*bottom_trim]])
	offset = img_size[0]*.25
	dst = np.float32([[offset,0],
					 [img_size[0]-offset, 0],
					 [img_size[0]-offset, img_size[1]],
					 [offset,img_size[1]]])

	M = cv2.getPerspectiveTransform(src,dst)
	Minv = cv2.getPerspectiveTransform(dst,src)
	warped = cv2.warpPerspective(preprocessImage,M,img_size,flags=cv2.INTER_LINEAR)

	# Find The center points of each lane
	window_width = 25
	window_height = 80
	#Set up overall class to do all the tracking
	curve_centers = tracker(Mywindow_width = window_width, Mywindow_height = window_height, Mymargin = 25, My_ym = 10/720, My_xm = 4/384, Mysmooth_factor = 15)

	window_centroids = curve_centers.find_window_centroids(warped)

	l_points = np.zeros_like(warped)
	r_points = np.zeros_like(warped)
	leftx = []
	rightx = []

	for level in range(0,len(window_centroids)):
		leftx.append(window_centroids[level][0])
		rightx.append(window_centroids[level][1])
		l_mask = window_mask(window_width, window_height, warped, window_centroids[level][0], level)
		r_mask = window_mask(window_width, window_height,warped,window_centroids[level][1], level)

		l_points[(l_points == 255) | (l_mask == 1)] = 255
		r_points[(r_points == 255) | (r_mask == 1)] = 255
Exemplo n.º 30
from threading import Thread
from tracker import tracker
from intersection import intersection
import time

# pi camera 1
utrackr = tracker('192.168.0.23')
# utrackr = tracker('10.24.245.136')

# pi camera 2
utrackr2 = tracker('192.168.0.21')
# utrackr2 = tracker('10.24.160.204')

# pi camera 3
utrackr3 = tracker('192.168.0.22')
# utrackr3 = tracker('10.24.105.183')
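
# Thread is imported so the per-camera loops below can run concurrently; a
# plausible launch (cut off in this excerpt) would be, e.g.:
#   for f in (func1, func2, func3): Thread(target=f).start()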


def func1():
    while (utrackr.cap.isOpened()):
        # print "cam1: " + utrackr.timesync.getTimeStamp() + " x: " + str(utrackr.x) + " y: " + str(utrackr.y)
        time.sleep(1)


def func2():
    utrackr.outputFrame("cam1")


def func3():
    while (utrackr2.cap.isOpened()):
        # print "cam2: " + utrackr2.timesync.getTimeStamp() + " x: " + str(utrackr2.x) + " y: " + str(utrackr2.y)
def process_image(img):
    
    global counter

    # undistort image
    img = cv2.undistort(img, mtx,dist,None,mtx)
    img_orig = img

    # 2. Thresholding
    # Color ranges for yellow and white lane markings
    yellow_low = np.array([0,100,100])
    yellow_high = np.array([50,255,255])
    white_low = np.array([18,0,180])
    white_high = np.array([255,80,255])
    global ref_left 
    global ref_right
    global left_fit
    global right_fit
    
   
    # Process image and generate binary pixel of interests
    imgThres_yellow = hls_color_thresh(img,yellow_low,yellow_high)
    imgThres_white = hls_color_thresh(img,white_low,white_high)
    gradx = abs_sobel_thresh(img,orient='x',sobel_kernel=9,thresh=(80,220)) 
    
    preprocessImage = np.zeros_like(img[:,:,0])
    preprocessImage[((gradx == 1) | (imgThres_yellow==1) | (imgThres_white==1))] = 255
    #preprocessImage[((gradx == 1) & (grady == 1) | (m_binary == 1) & (d_binary == 1) & (c_binary == 1))] = 255
    #c_binary = color_thresh1(img, sthresh=(100,255), vthresh=(50,255))
    #c_binary = color_thresh2(img, sthresh=(50,255), vthresh=(100,255), lthresh=(50,255))
    #m_binary = mag_thresh(img, sobel_kernel=3, mag_thresh=(0,25))
    #d_binary = dir_threshold(img, sobel_kernel=15, thresh=(0.8,1.5))
    
    bin_image = preprocessImage
    
    # Define the region parameters taken from https://github.com/wonjunee/Advanced-Lane-Finding
    # Masking of the binary preprocessed image
    imshape = img.shape
    left_bottom = (100, imshape[0])
    right_bottom = (imshape[1] - 20, imshape[0])
    apex1 = (610, 410)
    apex2 = (680, 410)

    inner_left_bottom = (310, imshape[0])
    inner_right_bottom = (1150, imshape[0])
    inner_apex1 = (700, 480)
    inner_apex2 = (650, 480)

    vertices = np.array([[left_bottom, apex1, apex2, \
                          right_bottom, inner_right_bottom, \
                          inner_apex1, inner_apex2, inner_left_bottom]], dtype=np.int32)
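    # The polygon traces the outer trapezoid and folds back through the
    # inner_* points, so the region between the two lane lines is excluded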
    # Masked area
    preprocessImage = region_of_interest(bin_image, vertices)
    
    
    # Definition for points to use for perspective transform
    img_size = (img.shape[1],img.shape[0])
    # img.shape[1] # This is 1280, Xmax
    # img.shape[0] # This is 720, Ymax
    # Generate Binary Warped Image using perspective transform
    #Tweaking source and destination for 4 corners of the trapezoid per reviewer comment
    src = np.float32([[585, 450], [203, 720], [1127, 720], [685, 450]])
    dst = np.float32([[320, 0], [320, 720], [960,720], [960, 0]])
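    # src picks a road trapezoid in the 1280x720 frame; dst straightens it
    # into a rectangle for a top-down view of the lane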
    
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst,src)
    warped = cv2.warpPerspective(preprocessImage, M, img_size, flags=cv2.INTER_LINEAR)
    ########
    
    if counter==0:
        left_fit, right_fit,out_imgfit = fitlines(warped)
    else:
        left_fit, right_fit = fit_continuous(left_fit, right_fit, warped)
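    # First frame: full sliding-window fit; later frames presumably search
    # only around the previous fit (fit_continuous) for speed and stability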
    
#    left_fit,right_fit,out_img = fitlines(warped)

    status_sanity, d0, d1 = sanity_check(left_fit, right_fit, 0, .55)

    # Compute curvature and center offset
    if status_sanity:
        # Save as the last reliable fit
        ref_left, ref_right = left_fit, right_fit
        counter += 1
    else:
        # Otherwise fall back to the last reliable fit
        left_fit, right_fit = ref_left, ref_right
        
    left_curv, right_curv, center_off = curvature(left_fit, right_fit, warped)

    ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    
    
    
    window_width = 25
    window_height = 80  # based on the Udacity class
    # Offset used for window sliding
    Offset = 25
    
    ## Set up the overall class to do all the tracking
    curve_centers = tracker(Mywindow_width = window_width, Mywindow_height = window_height, Mymargin = Offset, My_ym = 30/720, My_xm = 3.7/700, Mysmooth_factor = 15)
    
    window_centroids = curve_centers.find_window_centroids(warped)
    
    # Points used to draw all the left and right windows
    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)
    
    # points used to find the left and right lanes
    rightx = []
    leftx = []
    
    # Go through each level and draw the windows 	
    for level in range(0,len(window_centroids)):
        # Window_mask is a function to draw window areas
        # add center value found in frame to the list of lane points per left, right
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])

        l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
        r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
        # Add graphic points from window mask here to total pixels found
        l_points[(l_points == 255) | ((l_mask == 1))] = 255
        r_points[(r_points == 255) | ((r_mask == 1))] = 255
   
    # Draw the results
    template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
    zero_channel = np.zeros_like(template) # create a zero color channel
    template = np.array(cv2.merge((zero_channel,template,zero_channel)),np.uint8) # make window pixels green (R,G,B) with R and B being zeros
    warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
    result = cv2.addWeighted(warpage, 1, template, 0.5, 0.0) # overlay the original road image with window results
 
    green_boxes = result # debug holder

    ## Fitting the lane boundaries to the left, right and center positions found
    yvals = range(0,warped.shape[0])
    res_yvals = np.arange(warped.shape[0]-(window_height/2),0,-window_height)


    left_lane = np.array(list(zip(np.concatenate((left_fitx-window_width/2,left_fitx[::-1]+window_width/2),axis=0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
    right_lane = np.array(list(zip(np.concatenate((right_fitx-window_width/2,right_fitx[::-1]+window_width/2),axis=0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)
    middle_marker = np.array(list(zip(np.concatenate((left_fitx+window_width/2,right_fitx[::-1]-window_width/2),axis=0), np.concatenate((yvals,yvals[::-1]),axis=0))),np.int32)

    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)

    cv2.fillPoly(road, [left_lane], color=[255,0,0])
    cv2.fillPoly(road, [right_lane], color=[0,0,255])
    cv2.fillPoly(road, [middle_marker], color=[0,255,0])
    cv2.fillPoly(road_bkg, [left_lane], color=[255,255,255])
    cv2.fillPoly(road_bkg, [right_lane], color=[255,255,255])

    road_warped = cv2.warpPerspective(road,Minv,img_size,flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg,Minv,img_size,flags=cv2.INTER_LINEAR)

    base = cv2.addWeighted(img_orig, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, 1.0, 0.0)  # Setting up final overlaid image with lane markers

    ym_per_pix = curve_centers.ym_per_pix # meters per pixel in y dim
    xm_per_pix = curve_centers.xm_per_pix # meters per pixel in x dim

    
    side_pos = 'left'
    if center_off <= 0:
        side_pos = 'right'

    avg_curv = (left_curv+right_curv)/2
    
    # Draw the text showing curvature and offset
    cv2.putText(result, 'Radius of Curvature = ' +str(round(avg_curv,3))+'(m)',(50,50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2)
    cv2.putText(result,'Vehicle is '+str(abs(round(center_off,3))) +'m '+side_pos+' of center',(50,100), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2)

    # Final combination that works best includes
    # color_thresh1 based on s and v channel and preprocessing which includes gradx, grady and c_binary
    
    #result = bin_image
    #result = road
    #result = green_boxes
    #result = warped
    
    return result
Exemplo n.º 32
    wlan.init(mode=WLAN.AP, ssid='GPSnode', channel=6, antenna=WLAN.INT_ANT)
    network_OK = True

print('Starting Clocks')
if network_OK:
    print('   Syncing RTC to '+ ntp_source)
    rtc.ntp_sync(ntp_source)
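    # brief pause so the NTP sync can complete before reading the RTC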
    utime.sleep_ms(1500)
    print('   RTC Time :', rtc.now())
utime.timezone(TZ_offset_secs)
print('   Local Time:', utime.localtime())


print ("Starting Tracker")
print ("   Open Serial Port")
RockAir = tracker(location_formatting='dd')
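# 'dd' presumably selects decimal-degrees formatting for coordinates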
# get the current location from the tracker
RockAir.getGPS()
if RockAir.valid:
    # check we got some data
    tracker_OK = True
    print('   Tracker OK: ',RockAir._latitude[3],RockAir._longitude[3])
    # create a dictionary object to hold startup message data
    startupData = {}
    startupData["EVT"] = 'STARTUP'
    startupData["ID"] = my_ID
    startupData["SW"] = swVer
    startupData["HW"] = hwVer
    # encode the message data as JSON without spaces
    encodedData = ujson.dumps(startupData).replace(" ", "")
    # send the startup message