Example no. 1
 def _initialize_thresholder(self):
     self._thresholder = Threshold(self._sx_params,
                                   self._sy_params,
                                   self._sdir_params,
                                   self._mag_params,
                                   self._s_color_params)
     print("Initialized thresholder")
Example no. 2
    def __init__(self):

        self.image_shape = [0, 0]

        self.camera_calibration_path = '../camera_cal/'
        self.output_images_path = '../output_images/'
        self.input_video_path = '../input_video/'
        self.output_video_path = '../output_video/'

        self.sobel_kernel_size = 7
        self.sx_thresh = (60, 255)
        self.sy_thresh = (60, 150)
        self.s_thresh = (170, 255)
        self.mag_thresh = (40, 255)
        self.dir_thresh = (.65, 1.05)

        self.wrap_src = np.float32([[595, 450], [686, 450], [1102, 719], [206, 719]])
        self.wrap_dst = np.float32([[320, 0], [980, 0], [980, 719], [320, 719]])

        self.mask_offset = 30
        self.vertices = [np.array([[206-self.mask_offset, 719],
                                   [595-self.mask_offset, 460-self.mask_offset],
                                   [686+self.mask_offset, 460-self.mask_offset],
                                   [1102+self.mask_offset, 719]],
                                  dtype=np.int32)]

        self.mask_offset_inverse = 30
        self.vertices_inverse = [np.array([[206+self.mask_offset_inverse, 719],
                                           [595+self.mask_offset_inverse, 460-self.mask_offset_inverse],
                                           [686-self.mask_offset_inverse, 460-self.mask_offset_inverse],
                                           [1102-self.mask_offset_inverse, 719]],
                                          dtype=np.int32)]

        self.thresh = Threshold()
        self.lane = Lane()
Example no. 3
 def __init__(self, ksize=3, debug=False):
     with open('./calibration_pickle.p', 'rb') as f:
         data = pickle.load(f)
     self.mtx = data['mtx']
     self.dist = data['dist']
     self.kernel = ksize
     self.debug = debug
     self.bot_width = 0.85  # percent of bottom trapezoid width
     self.mid_width = 0.12  # percent of middle trapezoid width
     self.height_pct = 0.62  # percent for trapezoid height
     self.bottom_trim = 0.935  # percent from top to bottom, to avoid the car hood
     self.threshold = Threshold(3)
     self.first_image = True
Example no. 4
def process_image(file_name, path, show):
    """
    Loads a given image path, and applies the pipeline to it

    :param file_name: The name of the file (without extension)
    :param path: Path to the input (image, video) to process.
    :param show: Show the outcome

    :return: None
    """

    # load the image
    image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    height, width = image.shape[:2]

    # initialize the objects
    threshold = Threshold()
    lane = Lane(height=height)
    camera = Camera(image_size=(width, height))
    transform = Transform(width=width, height=height)

    # process the frame
    output = pipeline(image, camera, threshold, transform, lane)

    # save the output
    cv2.imwrite(f'data/processed/output_images/{file_name}.jpg', output)
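
# Hedged usage sketch; the file name and input path below are hypothetical.
process_image('test1', 'data/raw/test_images/test1.jpg', show=False)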
Example no. 5
 def __init__(self, thresholds=(4.0, ), *args, **kwargs):
     super(MLT, self).__init__(*args, **kwargs)
     self.thresholds = tuple(
         Threshold(value) for value in sorted(thresholds))
     self.enable_auto_max_query_terms = strtobool(
         self.config.get('partycrasher.bucket',
                         'enable_auto_max_query_terms'))
     self.initial_mlt_max_query_terms = int(
         self.config.get('partycrasher.bucket',
                         'initial_mlt_max_query_terms'))
     self.auto_max_query_term_maximum_documents = int(
         self.config.get('partycrasher.bucket',
                         'auto_max_query_term_maximum_documents'))
     self.auto_max_query_term_minimum_documents = int(
         self.config.get('partycrasher.bucket',
                         'auto_max_query_term_minimum_documents'))
     self.mlt_min_score = float(
         self.config.get('partycrasher.bucket', 'mlt_min_score'))
     self.strictly_increasing = strtobool(
         self.config.get('partycrasher.bucket', 'strictly_increasing'))
     if self.enable_auto_max_query_terms:
         self.last_max_query_terms = self.initial_mlt_max_query_terms
     self.max_top_match_score = 0
     self.total_top_match_scores = 0
     self.total_matches = 0
Example no. 6
 def __init__(self, weights=DataBuffer(), activation_function="logistic"):
     """Perceptron class constructor.
     -->()
         weights: Optional initial weights.
         activation_function: Sigmoid function to use in learning.
     ()-->
         None.
     """
     self.weights = weights
     self.activation_function = Threshold(activation_function)
Example no. 7
 def sign_threshold(self, m, player, players):
     """
     As the given player out of a list of player indices,
     return a signature share for the given message.
     """
     assert player in players
     r = hash_to_point_Fq2(m).to_jacobian()
     i = players.index(player)
     lambs = Threshold.lagrange_coeffs_at_zero(players)
     return Signature.from_g2(self.value * (r * lambs[i]))
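
# For intuition, a minimal sketch of Lagrange coefficients at zero, computed over
# the rationals for illustration (the library's Threshold.lagrange_coeffs_at_zero
# is assumed to do the same arithmetic modulo the group order):
from fractions import Fraction

def lagrange_coeffs_at_zero(xs):
    # lambda_i = prod_{j != i} x_j / (x_j - x_i), so that
    # f(0) = sum_i lambda_i * f(x_i) for any polynomial f with deg f < len(xs)
    coeffs = []
    for i, x_i in enumerate(xs):
        lamb = Fraction(1)
        for j, x_j in enumerate(xs):
            if j != i:
                lamb *= Fraction(x_j, x_j - x_i)
        coeffs.append(lamb)
    return coeffs

# f(x) = 7 + 3x; shares f(1) = 10 and f(2) = 13 recover the secret f(0) = 7
assert sum(l * s for l, s in zip(lagrange_coeffs_at_zero([1, 2]), [10, 13])) == 7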
Example no. 8
def process_video(file_name, path, show):
    """
    Loads a given video path, and applies the pipeline to it (frame by frame)

    :param file_name: The name of the file (without extension)
    :param path: Path to the input (image, video) to process.
    :param show: Show the outcome

    :return: None
    """
    # initialize the objects
    video = Video(path)
    threshold = Threshold()
    lane = Lane(height=video.height)
    camera = Camera(image_size=(video.width, video.height))
    transform = Transform(width=video.width, height=video.height)

    # define the output video
    out = cv2.VideoWriter(f'data/processed/output_videos/{file_name}.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          video.fps, (video.width, video.height))

    # loop over all frames
    while video.has_frame():

        # capture the next frame
        ret, frame, frame_id = video.get_frame()

        # stop when there is no frame
        if not ret:
            break

        # show progress
        if frame_id % 100 == 0:
            logging.debug(f'Progress: {(frame_id / video.n_frames) * 100:.2f}%')

        # process the frame
        output = pipeline(frame, camera, threshold, transform, lane)

        # write the frame to the output
        out.write(output)

        if show:
            # display the resulting frame
            cv2.imshow('output', output)

            # wait 1 millisecond; stop on Esc (27) keypress
            key_press = cv2.waitKey(1) & 0xFF
            if key_press == 27:
                break

    # when everything is done, release the video capture and writer objects
    del video
    out.release()
    cv2.destroyAllWindows()
Example no. 9
 def get_bucket_id(self, threshold):
     key = Threshold(threshold).to_elasticsearch()
     try:
         buckets = self['buckets']
     except KeyError:
         raise Exception('No assigned buckets for: {!r}'.format(self))
     try:
         return buckets[key]
     except KeyError:
         raise Exception('Buckets threshold {} not assigned for: '
                         '{!r}'.format(key, self))
Example no. 10
 def __init__(self, ksize=3, debug=False):
     with open('./calibration_pickle.p', 'rb') as f:
         data = pickle.load(f)
     self.mtx = data['mtx']
     self.dist = data['dist']
     self.kernel = ksize
     self.debug = debug
     self.bot_width = 0.85  # percent of bottom trapezoid width
     self.mid_width = 0.12  # percent of middle trapezoid width
     self.height_pct = 0.62  # percent for trapezoid height
     self.bottom_trim = 0.935  # percent from top to bottom, to avoid the car hood
     self.threshold = Threshold(3)
     self.first_image = True
     self.window_width = 25
     self.window_height = 80
     self.last_right_lane_average = -1000
     self.last_right_lane = None
     self.curve_centers = tracker(Mywindow_width=self.window_width,
                                  Mywindow_height=self.window_height,
                                  Mymargin=25,
                                  My_ym=10 / 720,
                                  My_xm=4 / 384,
                                  Mysmooth_factor=15)
Example no. 11
    def compute_threshold(self, amount):
        """
        Compute a threshold for this equity group
        For an amount X, the threshold date is when this group's unvested equity will be less than X.
        """
        # We don't expect to have more than 10s of entries, so a linear search is fine
        # Iterate through all vesting dates in ascending order, until we find one where
        # the unvested value is less than the threshold amount.
        # All equity vests _eventually_, at which point unvested will be 0,
        # so we're guaranteed to find an answer.

        sorted_vesting_dates = sorted(self.vesting_dates)
        for vesting_date in sorted_vesting_dates:
            vested_at_threshold = self.total_value() - amount
            if self.value_at(vesting_date) >= vested_at_threshold:
                return Threshold(amount, vesting_date)
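
# Hedged illustration; the EquityGroup constructor, the cumulative semantics of
# value_at(), and the Threshold.date attribute are hypothetical here:
#
#     from datetime import date
#     group = EquityGroup(vesting_dates={date(2024, 1, 1): 300,
#                                        date(2025, 1, 1): 300,
#                                        date(2026, 1, 1): 300})
#     # Unvested equity first drops below 500 once the second tranche vests.
#     assert group.compute_threshold(500).date == date(2025, 1, 1)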
Example no. 12
class Process:
    def __init__(self):

        self._image_count = 0

        # Camera variables
        self._calibration_images_dir = '../camera_cal/calibration*.jpg'
        self._calibration_images_nx = 9
        self._calibration_images_ny = 6
        self._calibration_matrix = None
        self._initialize_calibration_matrix()

        # Thresholding variables
        self._sx_params = {"Active": True, "Orient": 'x', "Thresh": (20, 190)}
        self._sy_params = {"Active": True, "Orient": 'y', "Thresh": (20, 190)}
        self._sdir_params = {"Active": True, "Thresh": (0.8, 1.5), "Sobel_kernel": 15}
        self._mag_params = {"Active": True, "Thresh": (0, 255), "Sobel_kernel": 3}
        self._s_color_params = {"Active": True, "Thresh": (170, 255)}
        self._thresholder = None
        self._initialize_thresholder()

        # Masking
        self._masker = None
        self._initialize_masker()

        # Perspective transform
        self._perspective_transformer = None
        self._initialize_perspective_transformer()

        # WindowSearch
        self._window_searcher = None
        self._initialize_window_searcher()

        # Road
        self._road = Road()

    def _initialize_calibration_matrix(self):
        self._calibration_matrix = Camera(self._calibration_images_nx,
                                          self._calibration_images_ny,
                                          self._calibration_images_dir)
        print("Initialized calibration matrix")

    def _initialize_thresholder(self):
        self._thresholder = Threshold(self._sx_params,
                                      self._sy_params,
                                      self._sdir_params,
                                      self._mag_params,
                                      self._s_color_params)
        print("Initialized thresholder")

    def _initialize_masker(self):
        self._masker = Masking()
        print("Initialized masker")

    def _initialize_perspective_transformer(self):
        self._perspective_transformer = PerpectiveTransform()
        print("Initialized perspective transformer")

    def _initialize_window_searcher(self):
        self._window_searcher = WindowSearch()

    def mark_stats(self, image,
                   left_ROC, right_ROC,
                   left_lane_fit, right_lane_fit,
                   lane_width, delta):
        img = np.copy(image)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, 'left_ROC = {:.2f} m'.format(left_ROC), (10, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(img, 'left_lane_fit = [{0:.5f}, {1:.2f}, {2:.2f}]'.format(left_lane_fit[0], left_lane_fit[1], left_lane_fit[2]),
                    (10, 80), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(img, 'right_ROC = {:.2f} m'.format(right_ROC), (10, 130), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(img, 'right_lane_fit = [{0:.5f}, {1:.2f}, {2:.2f}]'.format(right_lane_fit[0], right_lane_fit[1], right_lane_fit[2]),
                    (10, 160), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(img, 'lane_width = {:.2f} m'.format(lane_width), (10, 210), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(img, 'delta = {:.2f} m'.format(delta), (10, 240), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        return img

    def process_image(self, image, mark_stats_on_image=False):
        """
        :param image: An mpimg-read RGB image
        :param mark_stats_on_image: If True, overlay lane statistics on the output
        :return: The same image with the detected lane superimposed
        """
        img = np.copy(image)

        undistorted_image = self._calibration_matrix.get_undistorted(image)
        img = self._thresholder.apply_threshold(undistorted_image)
        img = self._masker.get_region_of_interest(img)
        img = self._perspective_transformer.get_perspective_transform(img)

        # Get lanes from the image
        current_left_lane, current_right_lane = self._window_searcher.get_lanes(img)
        best_left_ROC, best_right_ROC, best_left_lane_fit, best_right_lane_fit, best_lane_width, best_delta = \
            self._road.get_road_stats(current_left_lane, current_right_lane, img.shape[1], img.shape[0])

        # 1) Make another plot with the lane curves
        # 2) Perform inverse perspective transform on it
        # 3) Stack it on top of the original image
        left_fit = best_left_lane_fit
        right_fit = best_right_lane_fit
        ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])
        leftx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
        rightx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]

        pts_left = np.array([np.transpose(np.vstack([leftx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([rightx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        dst = np.zeros_like(image)
        cv2.fillPoly(dst, np.int_([pts]), (0, 255, 0))

        inv_warp = self._perspective_transformer.get_inverse_perspective_transform(dst)
        dst = cv2.addWeighted(undistorted_image, 1, inv_warp, 0.3, 0)

        self._image_count += 1
        if mark_stats_on_image:
            dst = self.mark_stats(dst,
                                  best_left_ROC, best_right_ROC,
                                  best_left_lane_fit, best_right_lane_fit,
                                  best_lane_width, best_delta)
        return dst

    def process_video(self, input_video, output_video):
        clip = VideoFileClip(input_video)
        clip_with_lane = clip.fl_image(self.process_image)
        clip_with_lane.write_videofile(output_video, audio=False)
Example no. 13
class Image():
    def __init__(self, ksize=3, debug=False):
        with open('./calibration_pickle.p', 'rb') as f:
            data = pickle.load(f)
        self.mtx = data['mtx']
        self.dist = data['dist']
        self.kernel = ksize
        self.debug = debug
        self.bot_width = 0.85  # percent of bottom trapezoid width
        self.mid_width = 0.12  # percent of middle trapezoid width
        self.height_pct = 0.62  # percent for trapezoid height
        self.bottom_trim = 0.935  # percent from top to bottom, to avoid the car hood
        self.threshold = Threshold(3)
        self.first_image = True

    def window_mask(self, width, height, img_ref, center, level):
        # Mark a rectangular window at the given level (counted from the bottom
        # of the image), centered horizontally on `center`.
        output = np.zeros_like(img_ref)
        y_top = int(img_ref.shape[0] - (level + 1) * height)
        y_bottom = int(img_ref.shape[0] - level * height)
        x_left = max(0, int(center - width))
        x_right = min(int(center + width), img_ref.shape[1])
        output[y_top:y_bottom, x_left:x_right] = 1
        return output

    def draw_trapezoid(self, image, src):
        # Draw the four edges of the trapezoid defined by the points in `src`.
        for i in range(4):
            p1, p2 = src[i], src[(i + 1) % 4]
            cv2.line(image, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])),
                     (110, 220, 0), 5)
        return image

    def warp_image(self, image):
        preprocessImage = np.zeros_like(image[:, :, 0])
        # calculate the various thresholds
        gradx = self.threshold.abs_sobel_thresh(image,
                                                orient='x',
                                                thresh=(12, 255))
        grady = self.threshold.abs_sobel_thresh(image,
                                                orient='y',
                                                thresh=(25, 255))
        c_binary = self.threshold.color_threshold(image,
                                                  s_threshold=(100, 255),
                                                  v_threshold=(50, 255))
        # form a combination of thresholds
        preprocessImage[((gradx == 1) & (grady == 1)) | (c_binary == 1)] = 255

        # work on defining perspective transformation area
        img_size = (image.shape[1], image.shape[0])
        src = np.float32([
            [image.shape[1] * (0.535 - self.mid_width / 2), image.shape[0] * self.height_pct],
            [image.shape[1] * (0.5 + self.mid_width / 2), image.shape[0] * self.height_pct],
            [image.shape[1] * (0.495 + self.bot_width / 2), self.bottom_trim * image.shape[0]],
            [image.shape[1] * (0.56 - self.bot_width / 2), self.bottom_trim * image.shape[0]],
        ])

        # for debugging, draw the trapezoid on the image
        if self.debug:
            image = self.draw_trapezoid(image, src)
        offset = img_size[0] * 0.25
        dst = np.float32([[offset, 0], [img_size[0] - offset, 0],
                          [img_size[0] - offset, img_size[1]],
                          [offset, img_size[1]]])
        M = cv2.getPerspectiveTransform(src, dst)
        Minv = cv2.getPerspectiveTransform(dst, src)
        warped = cv2.warpPerspective(preprocessImage,
                                     M,
                                     img_size,
                                     flags=cv2.INTER_LINEAR)
        if self.first_image:
            self.first_image = False
            write_name = 'preprocessed_image.jpg'
            cv2.imwrite(write_name, preprocessImage)
            write_name = 'source_image.jpg'
            image = self.draw_trapezoid(image, src)
            cv2.imwrite(write_name, image)
            write_name = 'sample_warped_image.jpg'
            dest_warped = self.draw_trapezoid(warped, dst)
            cv2.imwrite(write_name, dest_warped)
        return (warped, M, Minv, src, dst)

    def calc_curves(self, image):
        # Undistort the image using the precomputed camera matrix and
        # distortion coefficients, then detect and draw the lane curves.
        image = cv2.undistort(image, self.mtx, self.dist, None, self.mtx)

        (warped, M, Minv, src, dst) = self.warp_image(image)
        img_size = (image.shape[1], image.shape[0])

        window_width = 25
        window_height = 80

        # set up the overall class to do all the tracking
        curve_centers = tracker(Mywindow_width=window_width,
                                Mywindow_height=window_height,
                                Mymargin=25,
                                My_ym=10 / 720,
                                My_xm=4 / 384,
                                Mysmooth_factor=15)
        window_centroids = curve_centers.find_window_centroids(warped)

        # points used to draw all the left and right windows
        l_points = np.zeros_like(warped)
        r_points = np.zeros_like(warped)

        # points used to find the left and right lanes
        rightx = []
        leftx = []

        # Go through each level and draw the windows
        for level in range(0, len(window_centroids)):
            # window_mask is a function to draw window areas
            l_mask = self.window_mask(window_width, window_height, warped,
                                      window_centroids[level][0], level)
            r_mask = self.window_mask(window_width, window_height, warped,
                                      window_centroids[level][1], level)
            # add the center value found in this frame to the left and right lane point lists
            leftx.append(window_centroids[level][0])
            rightx.append(window_centroids[level][1])

            # Add graphic points from window mask here to total pixels found
            l_points[(l_points == 255) | ((l_mask == 1))] = 255
            r_points[(r_points == 255) | ((r_mask == 1))] = 255

        # Draw the results
        # add both left and right window pixels together
        template = np.array(r_points + l_points, np.uint8)
        zero_channel = np.zeros_like(template)  # create a zero color channel
        # make window pixels green
        template = np.array(cv2.merge((zero_channel, template, zero_channel)), np.uint8)
        # give the original road pixels 3 color channels
        warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)
        # overlay the original road image with the window results
        result = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)

        # Fit the lane boundaries to the left, right center positions found
        # yvals spans the image height (np.arange so elementwise math works)
        yvals = np.arange(0, warped.shape[0])

        # fit to box centers
        res_yvals = np.arange(warped.shape[0] - (window_height / 2), 0,
                              -window_height)

        # second-order polynomial fit for the left lane
        left_fit = np.polyfit(res_yvals, leftx, 2)
        left_fitx = left_fit[0] * yvals * yvals + left_fit[1] * yvals + left_fit[2]
        left_fitx = np.array(left_fitx, np.int32)

        # second-order polynomial fit for the right lane
        right_fit = np.polyfit(res_yvals, rightx, 2)
        right_fitx = right_fit[0] * yvals * yvals + right_fit[1] * yvals + right_fit[2]
        right_fitx = np.array(right_fitx, np.int32)

        left_lane = np.array(
            list(
                zip(
                    np.concatenate((left_fitx - window_width / 2,
                                    left_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
        right_lane = np.array(
            list(
                zip(
                    np.concatenate((right_fitx - window_width / 2,
                                    right_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
        inner_lane = np.array(
            list(
                zip(
                    np.concatenate((left_fitx + window_width / 2,
                                    right_fitx[::-1] - window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
        middle_marker = np.array(
            list(
                zip(
                    np.concatenate((right_fitx + window_width / 2,
                                    right_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)

        # draw the colored lane polygons into road; road_bkg holds white copies used to clear that region in the original image
        road = np.zeros_like(image)
        road_bkg = np.zeros_like(image)
        cv2.fillPoly(road, [left_lane], color=[255, 0, 0])
        cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
        cv2.fillPoly(road, [inner_lane], color=[0, 255, 0])
        cv2.fillPoly(road_bkg, [left_lane], color=[255, 255, 255])
        cv2.fillPoly(road_bkg, [right_lane], color=[255, 255, 255])

        # Overlay lines onto the road surface
        road_warped = cv2.warpPerspective(road,
                                          Minv,
                                          img_size,
                                          flags=cv2.INTER_LINEAR)
        road_warped_bkg = cv2.warpPerspective(road_bkg,
                                              Minv,
                                              img_size,
                                              flags=cv2.INTER_LINEAR)

        base = cv2.addWeighted(image, 1.0, road_warped_bkg, -1.0, 0.0)
        result = cv2.addWeighted(base, 1.0, road_warped, 0.7, 0.0)

        # calculate the offset of the car on the road and
        # figure out if it's in the right or left lane
        # and scale to meters
        # the -1 position is closest to the car
        ym_per_pix = curve_centers.ym_per_pix  # meters per pixel in y direction
        xm_per_pix = curve_centers.xm_per_pix  # meters per pixel in x direction

        # we want curvature in meters using the left lane to calculate the value
        # we could do the right, left + right average, or create a whole new middle line
        # see www.intmath.com/applications-differentiation/8-radius-curvature.php
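        # For a second-order fit x = A*y**2 + B*y + C (in meters), the radius of
        # curvature is R(y) = (1 + (2*A*y + B)**2)**1.5 / |2*A|; the expression
        # below evaluates it at the bottom of the image (yvals[-1]).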
        curve_fit_cr = np.polyfit(
            np.array(res_yvals, np.float32) * ym_per_pix,
            np.array(leftx, np.float32) * xm_per_pix, 2)
        curvead = (
            (1 + (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix +
                  curve_fit_cr[1])**2)**1.5) / np.absolute(2 * curve_fit_cr[0])

        camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
        center_diff = (camera_center - warped.shape[1] / 2) * xm_per_pix

        # if camera center is positive we are on the left side of the road
        side_pos = 'left'
        if center_diff <= 0:
            side_pos = 'right'
        thumbnail = cv2.resize(
            template,
            (int(0.25 * result.shape[0]), int(0.25 * result.shape[1])))
        result[0:thumbnail.shape[0], result.shape[1] -
               thumbnail.shape[1]:result.shape[1]] = thumbnail
        cv2.putText(result,
                    'Radius of Curvature = ' + str(round(curvead, 3)) + '(m) ',
                    (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(
            result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' +
            side_pos + ' of center', (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
            (255, 255, 255), 2)
        return result

    def process_images(self, file_name, ksize=3):
        # get the list of names
        images = glob.glob(file_name)
        for idx, fname in enumerate(images):
            # read file and undistort it
            image = cv2.imread(fname)
            result = self.calc_curves(image)
            write_name = './test_images/tracked' + str(idx) + '.jpg'
            cv2.imwrite(write_name, result)
Example no. 14
    'channel': 2,
    'direction': None,
    'thresh': (100, 255)
}, {
    'color_space': 'gray',
    'channel': None,
    'direction': None,
    'thresh': (190, 255)
}]

img = cv2.imread(test_image)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

binary = None
for param in thresh_params:
    th = Threshold(img, param['color_space'], param['channel'])

    th.transform(param['direction'], thresh=param['thresh'])
    if binary is None:
        binary = th.binary
    else:
        binary |= th.binary

    # Visualize the result in each step
    title1 = param['color_space'].upper() + '-' + str(param['channel'])
    if param['direction'] is None:
        title2 = 'color thresh ' + str(param['thresh'])
    else:
        title2 = param['direction'] + ' gradient thresh ' + str(
            param['thresh'])
Example no. 15
def main():
    def mkdate(datestr):
        return datetime.strptime(datestr, '%Y-%m-%d')

    parser = argparse.ArgumentParser(description="Filter Toggl time entries based on duration.")
    parser.add_argument('-t', '--token', type=str, help="Toggl API token for authentication. If not supplied, use --username/--password.")
    parser.add_argument('-u', '--username', type=str, help="Toggl username for authentication. If not supplied, use --token.")
    parser.add_argument('-p', '--password', type=str, help="Toggl password for authentication.")
    parser.add_argument('-w', '--workspace', type=int, help="Toggl workspace id number.")
    parser.add_argument('-l', '--list_workspaces', action='store_true', help="List workspaces. This option prints workspaces and then quits.")
    parser.add_argument('since', nargs='?', type=mkdate, help="Start of the time period to search.")
    parser.add_argument('until', nargs='?', type=mkdate, help="End of the time period to search.")
    parser.add_argument('threshold', nargs='*', help="Time entries longer than this value will be flagged. State this in natural language (e.g. '2 hours', '1 day').")

    args = parser.parse_args()

    if args.token:
        threshold = Threshold(api_token=args.token, app_name="sanitoggl")
    elif args.username and args.password:
        threshold = Threshold(username=args.username, password=args.password, app_name="sanitoggl")
    else:
        print('Must supply either an API token or a username/password pair.')
        sys.exit(-1)

    if args.list_workspaces:
        workspaces = threshold.get_workspaces()
        print('Toggl workspaces:')
        for workspace in workspaces:
            print(workspace)
        sys.exit(-1)

    if not args.workspace:
        print("Workspace id required to process time entries. Use --list_workspaces to retrieve a list.")
        sys.exit(-1)

    since = datetime.strftime(args.since, "%Y-%m-%d")
    until = datetime.strftime(args.until, "%Y-%m-%d")

    diff = datetime.strptime(until, "%Y-%m-%d") - datetime.strptime(since, "%Y-%m-%d")
    if diff > timedelta(days=365):
        print("The Toggl API currently only allows requests for reports up to 1 year.")

    p = parsedatetime.Calendar()
    time_input = datetime.fromtimestamp(mktime(p.parse(''.join(args.threshold))[0]))
    length = time_input - datetime.now()
    threshold_value = length.days * 86400000 + length.seconds * 1000 + length.microseconds / 1000

    print("Getting time entries in the specified range...")
    entries = threshold.time_entries(args.since, args.until, args.workspace)
    filtered = threshold.filter_entries(entries, threshold_value)

    filtered.sort(key=lambda x: datetime.strptime(x['start'].split('T')[0], "%Y-%m-%d"))

    for entry in filtered:
        start = ' '.join([entry['start'].split('T')[0], entry['start'].split('T')[1][:8]])
        end = ' '.join([entry['end'].split('T')[0], entry['end'].split('T')[1][:8]])

        start = datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
        end = datetime.strptime(end, "%Y-%m-%d %H:%M:%S")
        length = end - start

        print('id: {}\tdesc: {:.<40}\t{}\t{}'.format(
            entry['id'],
            entry['description'],
            start,
            '\t{}'.format(length)
        ))
Example no. 16
    def threshold(self, img):
        img_hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS).astype(np.float64)
        img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(np.float64)
        img_luv = cv2.cvtColor(img, cv2.COLOR_BGR2LUV).astype(np.float64)

        threshold = Threshold()

        w_binary = threshold.white(img_luv, min_max=(180, 255))
        l_x = threshold.gradient_magnitude_x(img_luv[:, :, 0], sobel_kernel=5, min_max=(15, 255))
        l_d = threshold.gradient_direction_xy(img_luv[:, :, 0], sobel_kernel=3, min_max=(0.7, 1.3))
        white = np.zeros_like(w_binary)
        white[(img[:, :, 0] >= 200) & (img[:, :, 1] >= 200) & (img[:, :, 2] >= 200)] = 1
        r_x = threshold.gradient_magnitude_x(white, sobel_kernel=5, min_max=(10, 255))
        l_binary = threshold.lightness(img_hls, min_max=(210, 255))
        g_m = threshold.gradient_magnitude_x(l_binary, sobel_kernel=31, min_max=(100, 255))

        white_lane = np.zeros_like(w_binary)
        white_lane[(r_x == 1) | ((l_d == 1) & (l_x == 1) & (w_binary == 1)) | (g_m == 1)] = 1

        y_binary = threshold.blue_yellow(img_lab, min_max=(145, 255))
        y_m = threshold.gradient_magnitude_xy(img_lab[:, :, 2], sobel_kernel=3, min_max=(50, 255))
        s_binary = threshold.saturation(img_hls, min_max=(10, 255))
        s_x = threshold.gradient_magnitude_x(img_hls[:, :, 2], sobel_kernel=31, min_max=(100, 255))
        s_d = threshold.gradient_direction_xy(img_hls[:, :, 2], sobel_kernel=31, min_max=(0.6, 1.2))

        yellow_line = np.zeros_like(y_binary)
        yellow_line[(y_binary == 1) | (y_m == 1) | ((s_d == 1) & (s_x == 1) & (s_binary == 1))] = 1

        result = np.zeros_like(w_binary)
        result[(yellow_line == 1) | (white_lane == 1)] = 1
        return result
Example no. 17
        # Given src and dst points, calculate the perspective transform matrix
        if not inverse:
            M = cv2.getPerspectiveTransform(src, dst)
        else:
            M = cv2.getPerspectiveTransform(dst, src)

        # Warp the image using OpenCV warpPerspective()
        return cv2.warpPerspective(img, M, img.shape[1::-1])


if __name__ == "__main__":
    from calibration import Calibration
    from threshold import Threshold

    cal = Calibration()
    threshold = Threshold()
    pers = Perspective()

    fig = plt.figure(figsize=(10, 5))

    fig.add_subplot(1, 2, 1)
    plt.imshow(threshold.process(
        cal.undistort(plt.imread('test_images/test6.jpg')))[0],
               cmap='gray')
    plt.title("Distorted")

    fig.add_subplot(1, 2, 2)
    plt.imshow(pers.warp(
        threshold.process(cal.undistort(
            plt.imread('test_images/test6.jpg')))[0]),
               cmap='gray')
    plt.title("Warped")
    plt.show()
Example no. 18
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.metrics import f1_score, roc_auc_score

X, y = load_breast_cancer(return_X_y=True)
K = 10

N0, N1 = np.sum(y == 0), np.sum(y == 1)
IR = min(N0, N1) / max(N0, N1)
print('* dataset: N=%d, p=%d, IR=%.2f' % (*X.shape, IR))
print()

models = [
    ('linear svc', LinearSVC(penalty='l1', tol=1e-3, dual=False)),
    ('balanced linear svc',
     LinearSVC(class_weight='balanced', penalty='l1', tol=1e-3, dual=False)),
    ('ranksvm', Threshold(RankSVM())),
    ('adaboost', AdaBoost(100)),
    ('rankboost', Threshold(RankBoost(100))),
    ('stochastic ranknet', Threshold(StochasticRankNet(0.7, 10, maxit=10))),
]

f1_scores = np.zeros(len(models))
roc_auc_scores = np.zeros(len(models))

for k, (tr, ts) in enumerate(StratifiedKFold(n_splits=K, shuffle=True).split(X, y)):
    print('* Fold %d of %d' % (k + 1, K))
    for j, (name, model) in enumerate(models):
        scaler = StandardScaler()
        Xtr = scaler.fit_transform(X[tr])
        Xts = scaler.transform(X[ts])
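        # Hedged sketch of the presumably elided train/evaluate step; it assumes
        # each wrapped model exposes fit()/predict(), plus decision_function()
        # where available, and averages the fold scores into the arrays above.
        model.fit(Xtr, y[tr])
        y_pred = model.predict(Xts)
        if hasattr(model, 'decision_function'):
            y_score = model.decision_function(Xts)
        else:
            y_score = y_pred
        f1_scores[j] += f1_score(y[ts], y_pred) / K
        roc_auc_scores[j] += roc_auc_score(y[ts], y_score) / K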
Example no. 19
File: main.py Project: Leonz18/demo
# -*- coding: utf-8 -*-
__author__ = 'z84105425'

import cv2
from threshold import Threshold
from perspective_transform import PerspectiveTransform

if __name__ == '__main__':
    image_path = "bios.jpg"
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # compute the thresholds
    th = Threshold(gray.shape[1], gray.shape[0], 0.83, 0.03)
    thresh_1 = th.get_thresh_1(gray)
    thresh_2 = th.get_thresh_2(gray)

    # binarize
    ret1, binary1 = cv2.threshold(gray, thresh_1, 255, cv2.THRESH_BINARY)
    ret2, binary2 = cv2.threshold(gray, thresh_2, 255, cv2.THRESH_BINARY)
    cv2.namedWindow("binary1", 0)
    cv2.namedWindow("binary2", 0)
    cv2.imshow("binary1", binary1)
    cv2.imshow("binary2", binary2)
    cv2.imwrite('binary1.jpg', binary1)
    cv2.waitKey(0)

    # perspective transform
    pt = PerspectiveTransform(10000)
    pt.perspective_transform(img, binary1)
Example no. 20
# Read raw data from file
raw_data_frame = Preprocess("raw_data/girlbosskaty_tweets.csv", header=0)

# Select the Time and username column from raw data
data_time_uid = raw_data_frame.get_columns(["Screen_Name", "Time"])

# print(data_time_uid)

# Calculate Activity
act = Activity(data_time_uid)
dic_act = act.export_times()

# print(dic_act)

myThresh = Threshold(dic_act)
# print(myThresh.apply_clock_threshold(start_time="01:00:00", stop_time="05:00:00"),
#       "tweets between %s and %s" % (myThresh.start_time, myThresh.stop_time))
# print(myThresh.ckeck_day_tweets())

WeekDay_counter = myThresh.ckeck_day_tweets()
night_tweet_counter = myThresh.apply_clock_threshold(start_time="01:00:00",
                                                     stop_time="05:00:00")

print(WeekDay_counter)

head = ['Username', 'Night_tweets'] + list(WeekDay_counter.keys())
row = [list(dic_act.keys())[0], night_tweet_counter] + list(WeekDay_counter.values())
Example no. 21
class FindLane(object):
    def __init__(self):

        self.image_shape = [0, 0]

        self.camera_calibration_path = '../camera_cal/'
        self.output_images_path = '../output_images/'
        self.input_video_path = '../input_video/'
        self.output_video_path = '../output_video/'

        self.sobel_kernel_size = 7
        self.sx_thresh = (60, 255)
        self.sy_thresh = (60, 150)
        self.s_thresh = (170, 255)
        self.mag_thresh = (40, 255)
        self.dir_thresh = (.65, 1.05)

        self.wrap_src = np.float32([[595, 450], [686, 450], [1102, 719], [206, 719]])
        self.wrap_dst = np.float32([[320, 0], [980, 0], [980, 719], [320, 719]])

        self.mask_offset = 30
        self.vertices = [np.array([[206-self.mask_offset, 719],
                                   [595-self.mask_offset, 460-self.mask_offset],
                                   [686+self.mask_offset, 460-self.mask_offset],
                                   [1102+self.mask_offset, 719]],
                                  dtype=np.int32)]

        self.mask_offset_inverse = 30
        self.vertices_inverse = [np.array([[206+self.mask_offset_inverse, 719],
                                           [595+self.mask_offset_inverse, 460-self.mask_offset_inverse],
                                           [686-self.mask_offset_inverse, 460-self.mask_offset_inverse],
                                           [1102-self.mask_offset_inverse, 719]],
                                          dtype=np.int32)]

        self.thresh = Threshold()
        self.lane = Lane()

    def warp(self, img, visualize=False):
        img_size = (img.shape[1], img.shape[0])

        perspective_M = cv2.getPerspectiveTransform(self.wrap_src, self.wrap_dst)

        # warped
        top_down = cv2.warpPerspective(img, perspective_M, img_size, flags=cv2.INTER_LINEAR)

        top_down[:, 0:230] = 0
        top_down[:, top_down.shape[1] - 100:top_down.shape[1]] = 0

        if visualize:
            f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, figsize=(24, 9))
            f.tight_layout()
            ax1.imshow(img, cmap='gray')

            # Create a Polygon patch
            rect_src = patches.Polygon(self.wrap_src, fill=False, edgecolor='r', linestyle='solid', linewidth=2.0)
            # Add the patch to the Axes
            ax1.add_patch(rect_src)

            ax1.set_title('Thresholded Image', fontsize=10)
            ax2.imshow(top_down, cmap='gray')

            # Create a Polygon patch
            rect_dst = patches.Polygon(self.wrap_dst, fill=False, edgecolor='r', linestyle='solid', linewidth=2.0)
            # Add the patch to the Axes
            ax2.add_patch(rect_dst)

            ax2.set_title('Warped Image', fontsize=10)
            histogram = np.sum(top_down[top_down.shape[0] // 2:, :], axis=0)
            ax3.plot(histogram)
            ax3.set_title('Histogram', fontsize=10)

        return top_down


    def region_of_interest(self, img, vertices):
        """
        Applies an image mask.

        Only keeps the region of the image defined by the polygon
        formed from `vertices`. The rest of the image is set to black.
        """
        # defining a blank mask to start with
        mask = np.zeros_like(img)

        # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
        if len(img.shape) > 2:
            channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
            ignore_mask_color = (255,) * channel_count
        else:
            ignore_mask_color = 255

        # filling pixels inside the polygon defined by "vertices" with the fill color
        cv2.fillPoly(mask, vertices, ignore_mask_color)

        # returning the image only where mask pixels are nonzero
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image

    def region_of_interest_inverse(self, img, vertices):
        """
        Applies an image mask.

        Only keeps the region of the image defined by the polygon
        formed from `vertices`. The rest of the image is set to black.
        """
        # defining a blank mask to start with
        mask = img.copy()
        mask.fill(255)
        # mask = np.zeros_like(img)

        # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
        if len(img.shape) > 2:
            channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
            ignore_mask_color = (0,) * channel_count
        else:
            ignore_mask_color = 0

        # filling pixels inside the polygon defined by "vertices" with the fill color
        cv2.fillPoly(mask, vertices, ignore_mask_color)

        # plt.imshow(img, cmap='gray')
        # plt.imshow(mask, cmap='gray')

        # returning the image only where mask pixels are nonzero
        masked_image = cv2.bitwise_and(img, mask)

        # plt.imshow(masked_image, cmap='gray')

        return masked_image

    # The pipeline.
    def pipeline(self, img, sobel_kernel_size=3, sx_thresh=(20, 100), sy_thresh=(20, 100), s_thresh=(170, 255),
                 mag_thresh=(10, 255), dir_thresh=(0, 1), test_image_pipeline=False, visualize=False):
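        # Steps: Gaussian blur -> Sobel x/y gradient thresholds -> gradient
        # magnitude/direction thresholds -> color threshold -> region-of-interest
        # mask -> perspective warp -> sliding-window lane search.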

        # Perform Gaussian Blur
        kernel_size = 5
        img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

        gray, gradx, grady = self.thresh.sobel_thresh(img, sx_thresh, sy_thresh, sobel_kernel_size)

        mag_binary = self.thresh.mag_thresh(gray, sobel_kernel=sobel_kernel_size, mag_thresh=mag_thresh)
        dir_binary = self.thresh.dir_threshold(gray, sobel_kernel=sobel_kernel_size, thresh=dir_thresh)

        sobel_combined = np.zeros_like(dir_binary)
        sobel_combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1

        color_thresh = self.thresh.color_thresh(img, s_thresh)

        thresholded_binary = np.zeros_like(sobel_combined)
        thresholded_binary[(color_thresh > 0) | (sobel_combined > 0)] = 1

        # Masked area
        thresholded_binary = self.region_of_interest(thresholded_binary, self.vertices)
        thresholded_binary[0:450, :] = 0

        # thresholded_binary = self.region_of_interest_inverse(thresholded_binary, self.vertices_inverse)

        warped = self.warp(thresholded_binary, visualize)
        out_img, ploty, left_fitx, right_fitx = self.lane.find(warped, test_image_pipeline)

        return thresholded_binary, warped, out_img, ploty, left_fitx, right_fitx

    def calculate_position(self, pts):

        # Find the position of the car from the center
        # It will show if the car is 'x' meters from the left or right
        position = self.image_shape[1] / 2

        try:
            left = np.min(pts[(pts[:, 1] < position) & (pts[:, 0] > 700)][:, 1])
            right = np.max(pts[(pts[:, 1] > position) & (pts[:, 0] > 700)][:, 1])
            center = (left + right) / 2
            # Define conversions in x and y from pixels space to meters
            xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
            return (position - center), (position - center) * xm_per_pix
        except (ValueError, IndexError):
            # np.min/np.max raise ValueError when no lane pixels are found
            return 0, 0

    def calculate_curvatures(self, ploty, left_fitx, right_fitx):
        y_eval = np.max(ploty)

        ym_per_pix = 30 / 720  # meters per pixel in y dimension
        xm_per_pix = 3.7 / 700  # meters per pixel in x dimension

        # Fit new polynomials to x,y in world space
        left_fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)
        right_fit_cr = np.polyfit(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)

        # Calculate the new radius of curvature
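        # For a fit x = A*y**2 + B*y + C (in meters):
        # R(y) = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated at y_eval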
        left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / \
            np.absolute(2 * left_fit_cr[0])
        right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / \
            np.absolute(2 * right_fit_cr[0])

        # Now our radius of curvature is in meters
        return left_curverad, right_curverad

    def plot_dashboard(self, image, ploty, left_fitx, right_fitx, newwarp):
        left_curverad, right_curverad = self.calculate_curvatures(ploty, left_fitx, right_fitx)

        self.lane.left_line.radius_of_curvature = left_curverad
        self.lane.right_line.radius_of_curvature = right_curverad

        # Put text on an image
        font = cv2.FONT_HERSHEY_SIMPLEX

        text = "Radius of Left Line Curvature: {} m".format(int(left_curverad))
        cv2.putText(image, text, (100, 50), font, 1, (255, 255, 255), 2)

        text = "Radius of Right Line Curvature: {} m".format(int(right_curverad))
        cv2.putText(image, text, (100, 100), font, 1, (255, 255, 255), 2)

        # Find the position of the car
        pts = np.argwhere(newwarp[:, :, 1])
        position_pixels, position_meters = self.calculate_position(pts)

        if position_meters < 0:
            text = "Vehicle is {:.2f} m left of center".format(-position_meters)
            self.lane.left_line.line_base_pos = 3.7/2 - position_meters
            self.lane.right_line.line_base_pos = 3.7/2 + position_meters
        else:
            text = "Vehicle is {:.2f} m right of center".format(position_meters)
            self.lane.left_line.line_base_pos = 3.7/2 + position_meters
            self.lane.right_line.line_base_pos = 3.7/2 - position_meters

        cv2.putText(image, text, (100, 200), font, 1, (255, 255, 255), 2)

        # text = "Left diff: {}".format(self.lane.left_line.diffs)
        # cv2.putText(image, text, (100, 200), font, 1, (255, 255, 255), 2)
        #
        # text = "Right diff: {}".format(self.lane.right_line.diffs)
        # cv2.putText(image, text, (100, 250), font, 1, (255, 255, 255), 2)
        #
        # text = "Left fit: {}".format(self.lane.left_line.current_fit)
        # cv2.putText(image, text, (100, 300), font, 1, (255, 255, 255), 2)
        #
        # text = "Right fit: {}".format(self.lane.right_line.current_fit)
        # cv2.putText(image, text, (100, 350), font, 1, (255, 255, 255), 2)

        text = "Left base line pos: {} m".format(round(self.lane.left_line.line_base_pos, 4))
        cv2.putText(image, text, (100, 250), font, 1, (255, 255, 255), 2)

        text = "Right base line pos: {} m".format(round(self.lane.right_line.line_base_pos, 4))
        cv2.putText(image, text, (100, 300), font, 1, (255, 255, 255), 2)

        return image


    def project_back(self, undist, thresholded_binary, warped, ploty, left_fitx, right_fitx):
        # Create an image to draw the lines on
        warp_zero = np.zeros_like(warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        Minv = cv2.getPerspectiveTransform(self.wrap_dst, self.wrap_src)

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        newwarp = cv2.warpPerspective(color_warp, Minv, (thresholded_binary.shape[1], thresholded_binary.shape[0]))

        # Combine the result with the original image
        result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
        # print('result shape', result.shape)
        # plt.imshow(result)

        return result, newwarp

    def execute_image_pipeline(self, visualize=False):
        images = glob.glob(self.output_images_path + 'undist_*.jpg')

        for idx, fname in enumerate(images):
            image_fname = ntpath.basename(fname)

            print("Processing", fname)

            image = mpimg.imread(self.output_images_path + image_fname)

            self.image_shape = image.shape

            thresholded_binary, warped, out_img, ploty, left_fitx, right_fitx = \
                self.pipeline(
                    image,
                    sobel_kernel_size=self.sobel_kernel_size,
                    sx_thresh=self.sx_thresh,
                    sy_thresh=self.sy_thresh,
                    s_thresh=self.s_thresh,
                    mag_thresh=self.mag_thresh,
                    dir_thresh=self.dir_thresh,
                    test_image_pipeline=True,
                    visualize=True)

            result, newwarp = self.project_back(image, thresholded_binary, warped, ploty, left_fitx, right_fitx)

            result = self.plot_dashboard(result, ploty, left_fitx, right_fitx, newwarp)

            mpimg.imsave(self.output_images_path + 'thres_' + image_fname, thresholded_binary, cmap=cm.gray)
            mpimg.imsave(self.output_images_path + 'warp_' + 'thres_' + image_fname, warped, cmap=cm.gray)
            mpimg.imsave(self.output_images_path + 'out_' + 'warp_' + 'thres_' + image_fname, out_img)
            mpimg.imsave(self.output_images_path + 'result_' + 'out_' + 'warp_' + 'thres_' + image_fname, result)

            if visualize:
                # Plot the result
                f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
                f.tight_layout()

                ax1.imshow(image)
                ax1.set_title('Undistorted Image', fontsize=30)

                ax2.imshow(thresholded_binary, cmap='gray')
                ax2.set_title('Thresholded Result', fontsize=30)
                plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

                ax2.imshow(warped, cmap='gray')
                ax2.set_title('Warped Result', fontsize=30)
                plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

                ax2.imshow(out_img, cmap='gray')
                ax2.set_title('Line fit', fontsize=30)
                ax2.plot(left_fitx, ploty, color='yellow')
                ax2.plot(right_fitx, ploty, color='yellow')

                plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

                ax1.imshow(result)
                ax1.set_title('Output Image', fontsize=30)

    def execute_video_pipeline(self, image):

        handle = open(self.camera_calibration_path + "wide_dist_pickle.p", 'rb')
        dist_pickle = pickle.load(handle)

        undist_image = cv2.undistort(image, dist_pickle["mtx"], dist_pickle["dist"], None, dist_pickle["mtx"])

        self.image_shape = undist_image.shape

        # thresholded_binary, warped, out_img, ploty, left_fitx, right_fitx = self.pipeline(undist_image)

        thresholded_binary, warped, out_img, ploty, left_fitx, right_fitx = \
            self.pipeline(
                undist_image,
                sobel_kernel_size=self.sobel_kernel_size,
                sx_thresh=self.sx_thresh,
                sy_thresh=self.sy_thresh,
                s_thresh=self.s_thresh,
                mag_thresh=self.mag_thresh,
                dir_thresh=self.dir_thresh,
                test_image_pipeline=False,
                visualize=False)

        result, newwarp = self.project_back(image, thresholded_binary, warped, ploty, left_fitx, right_fitx)

        return self.plot_dashboard(result, ploty, left_fitx, right_fitx, newwarp)

    def project_video(self):
        # Execute the pipeline for the video file
        in_project_video = self.input_video_path + 'project_video.mp4'

        # project_clip = VideoFileClip(in_project_video).subclip(20, 26)
        # project_clip = VideoFileClip(in_project_video).subclip(35, 42)
        # project_clip = VideoFileClip(in_project_video).subclip(35, 45)
        # project_clip = VideoFileClip(in_project_video).subclip(41, 45)
        # project_clip = VideoFileClip(in_project_video).subclip(20, 45)
        project_clip = VideoFileClip(in_project_video)

        out_project_clip = project_clip.fl_image(self.execute_video_pipeline)  # NOTE: this function expects color images!!

        out_project_video = self.output_video_path + 'out_project_video.mp4'
        out_project_clip.write_videofile(out_project_video, audio=False)
Example no. 22
def test_threshold_instance(T, N):
    commitments = []
    # fragments[i][j] = fragment held by player i,
    #                   received from player j
    fragments = [[None] * N for _ in range(N)]
    secrets = []

    # Step 1 : PrivateKey.new_threshold
    for player in range(N):
        secret_key, commi, frags = PrivateKey.new_threshold(T, N)
        for target, frag in enumerate(frags):
            fragments[target][player] = frag
        commitments.append(commi)
        secrets.append(secret_key)

    # Step 2 : Threshold.verify_secret_fragment
    for player_source in range(1, N + 1):
        for player_target in range(1, N + 1):
            assert Threshold.verify_secret_fragment(
                T, fragments[player_target - 1][player_source - 1],
                player_target, commitments[player_source - 1])

    # Step 3 : master_pubkey = PublicKey.aggregate(...)
    #          secret_share = PrivateKey.aggregate(...)
    master_pubkey = PublicKey.aggregate(
        [PublicKey.from_g1(cpoly[0].to_jacobian()) for cpoly in commitments],
        False)

    secret_shares = [
        PrivateKey.aggregate(map(PrivateKey, row), None, False)
        for row in fragments
    ]

    master_privkey = PrivateKey.aggregate(secrets, None, False)
    msg = 'Test'
    signature_actual = master_privkey.sign(msg)

    # Step 4 : sig_share = secret_share.sign_threshold(...)
    # Check every combination of T players
    for X in combinations(range(1, N + 1), T):
        # X: a list of T indices like [1, 2, 5]

        # Check underlying secret key is correct
        r = Threshold.interpolate_at_zero(
            X, [secret_shares[x - 1].value for x in X])
        secret_cand = PrivateKey(r)
        assert secret_cand == master_privkey

        # Check signatures
        signature_shares = [
            secret_shares[x - 1].sign_threshold(msg, x, X) for x in X
        ]
        signature_cand = Signature.aggregate_sigs_simple(signature_shares)
        assert signature_cand == signature_actual

    # Check that the signature actually verifies the message
    agg_info = AggregationInfo.from_msg(master_pubkey, msg)
    signature_actual.set_aggregation_info(agg_info)
    assert signature_actual.verify()

    # Step 4b : Alternatively, we can add the lagrange coefficients
    # to 'unit' signatures.
    for X in combinations(range(1, N + 1), T):
        # X: a list of T indices like [1, 2, 5]

        # Check signatures
        signature_shares = [secret_shares[x - 1].sign(msg) for x in X]
        signature_cand = Threshold.aggregate_unit_sigs(signature_shares, X, T)
        assert signature_cand == signature_actual
Example no. 23
from add import Add
from multiply import Multiply
from scale_image import ScaleImage
from magnitude import Magnitude
from table_lookup import TableLookup
from or_node import Or
from and_node import And
from warp_affine import WarpAffine
from dubbel_io_test import DubbelIoTest

# Only create node classes once, when the module is first loaded
DEFAULT_DUMMY_NODE = BaseNode()  # used if the name of a node is not in the dictionary

NODE_DICTIONARY = {'HalfScaleGaussian'  : HalfScaleGaussian(),
                   'Subtract'           : Subtract(),
                   'Threshold'          : Threshold(),
                   'Sobel3x3'           : Sobel3x3(),
                   'AbsDiff'            : AbsDiff(),
                   'ConvertDepth'       : ConvertDepth(),
                   'Dilate3x3'          : Dilate3x3(),
                   'Erode3x3'           : Erode3x3(),
                   'Add'                : Add(),
                   'Multiply'           : Multiply(),
                   'ScaleImage'         : ScaleImage(),
                   'Magnitude'          : Magnitude(),
                   'TableLookup'        : TableLookup(),
                   'Or'                 : Or(),
                   'And'                : And(),
                   'WarpAffine'         : WarpAffine(),
                   'DubbelIoTest'       : DubbelIoTest(),  # a dummy node used only for testing the parser.
                   'Dilate2x2'          : Dilate2x2(),