Example 1
    def _get_initial_perspective(self):
        """ Return the initial perspective. """

        methods = [
            # If a default perspective was specified then we prefer that over
            # any other perspective.
            self._get_default_perspective,

            # If there was no default perspective then try the perspective that
            # was active the last time the application was run.
            self._get_previous_perspective,

            # If there was no previous perspective, then try the first one that
            # we know about.
            self._get_first_perspective
        ]

        for method in methods:
            perspective = method()
            if perspective is not None:
                break

        # If we have no known perspectives, make a new blank one up.
        else:
            logger.warning('no known perspectives - creating a new one')
            perspective = Perspective()

        return perspective
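The for/else above is Python's loop-else idiom: the else block runs only when the loop finishes without hitting break, which makes it a compact fallback chain. A minimal standalone sketch of the same pattern, using hypothetical provider callables rather than this class's methods:

def first_result(*providers, default=None):
    # Try each provider in order and keep the first non-None value.
    for provider in providers:
        result = provider()
        if result is not None:
            break
    else:
        # Every provider returned None, so fall back to the default.
        result = default
    return result

print(first_result(lambda: None, lambda: 'previous', default='blank'))  # prints 'previous'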
Example 2
def transform2birdseye(image):
    # Transform it to birdseye
    P = Perspective()
    birdseye = P.to_birdseye(image)
    # cv2.imshow('Birdseye view', birdseye)
    # print('birdseye shape: {}'.format(birdseye.shape))
    return birdseye
Example 3
    def __init__(self):
        self.mtx_file_path = 'camera_matrices/mtx.npz'
        self.camera_matrices = None
        self.thresholding_pipeline = Thresholding()
        self.perspective = Perspective()
        self.load_camera_matrices()
        self.line_search = LineSearch()
Example 4
def get_perspective_score(text):
    p = Perspective(API_KEY)
    try:
        comment = p.score(text, tests=["TOXICITY"])
        score = comment["TOXICITY"].score
    except Exception:
        score = 0
    return score
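A usage sketch for the helper above, assuming API_KEY holds a valid Perspective API key for the same client library used in these examples:

API_KEY = 'YOUR_PERSPECTIVE_API_KEY'  # hypothetical placeholder

# Scores range from 0 to 1. Note that this helper also returns 0 when the
# request fails, so a zero score is ambiguous.
print(get_perspective_score('Thanks, this was really helpful!'))
print(get_perspective_score('Nobody asked for your worthless opinion.'))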
Example 5
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle(App_Name)
        self._menubar = QtGui.QMenuBar(self)
        self._actions = {}
        self._handlers = {}
        self.buildActions()
        self.buildMenus()
        self.setMenuBar(self._menubar)
        self.perspective = Perspective(self)
Example 6
    def __init__(self):
        self.mouse = MouseControl()
        self.XRES, self.YRES = self.mouse.get_screen_resolution()
        self.XRES, self.YRES = float(self.XRES), float(self.YRES)
        self.ABCD = [(0, 0), (self.XRES - 5, 0), (0, self.YRES - 5),
                     (self.XRES - 5, self.YRES - 5)]
        self.perspective = Perspective()
        self.perspective.setdst((0.0, 0.0), (self.XRES, 0.0), (0.0, self.YRES),
                                (self.XRES, self.YRES))
        self.cursor = Cursor(self)
        self.Options = Options()
Example 7
class PerspectiveTests(unittest.TestCase):
    p = Perspective(api_key)

    def test_comment(self):
        for x in range(10):
            test = random.choice(allowed)
            comment = self.p.score(generate_text(),
                                   tests=test,
                                   languages=["EN"])
            self.assertTrue(0 <= comment[test].score <= 1)
            for attr in comment[test].spans:
                self.assertTrue(0 <= attr.score <= 1)
                self.assertTrue(str(attr) in comment.text)
Example 8
    def get_perspective_by_id(self, id):
        """ Return the perspective with the specified Id.

        Return None if no such perspective exists.

        """

        for perspective in self.perspectives:
            if perspective.id == id:
                break

        else:
            if id == Perspective.DEFAULT_ID:
                perspective = Perspective()

            else:
                perspective = None

        return perspective
Example 9
    def __init__(self,
                 src,
                 dst,
                 n_images=1,
                 calibration=None,
                 line_segments=LINE_SEGMENTS,
                 offset=0):
        self.n_images = n_images
        self.cam_calibration = calibration
        self.line_segments = line_segments
        self.image_offset = offset
        self.left_line = None
        self.right_line = None
        self.center_poly = None
        self.curvature = 0.0
        self.offset = 0.0
        self.perspective_src = src
        self.perspective_dst = dst
        self.perspective = Perspective(src, dst)
        self.dists = []
Example 10
    def __init__(self,
                 imagenDeInicializacion,
                 maximoPuntos,
                 poligonoDePartida,
                 poligonoDeLlegada=[[0, 0]]):
        # The polygon
        self.imagenAuxiliarEnGrises = cv2.cvtColor(imagenDeInicializacion,
                                                   cv2.COLOR_BGR2GRAY)
        self.height, self.width = imagenDeInicializacion.shape[:2]
        if poligonoDePartida is None:
            # No mask will be used
            self.mascara = None
            self.regionDeInteres = np.array([[0, 0], [0, 240], [320, 240],
                                             [320, 0]])
        else:
            self.regionDeInteres = np.array(poligonoDePartida)
            self.mascara = np.zeros_like(self.imagenAuxiliarEnGrises)
            cv2.fillPoly(self.mascara, [self.regionDeInteres], 255)

        self.poligonoDeLlegada = np.array(poligonoDeLlegada)
        # Helpers
        self.feature_params = dict(maxCorners=maximoPuntos,
                                   qualityLevel=0.3,
                                   minDistance=7,
                                   blockSize=7)

        self.lk_params = dict(winSize=(15, 15),
                              maxLevel=2,
                              criteria=(cv2.TERM_CRITERIA_EPS
                                        | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

        # A new color is used for each violation
        self.color = np.random.randint(0, 255, (50, 3))
        self.zonaPartida = Perspective(self.regionDeInteres)
        self.imagenAuxiliarZonaPartida = self.zonaPartida.generarRegionNormalizada(
            imagenDeInicializacion)
        self.flujoEnLaPartida = FlowDetector(180)
        self.flujoEnLaPartida.inicializarClase(self.imagenAuxiliarZonaPartida)
Example 11
    def __init__(self, camera_matrix, distortion_coefficients,
                 buffer_size=50, img_size=(1280, 720)):
        self.camera_matrix = camera_matrix
        self.distortion_coefficients = distortion_coefficients

        src = np.float32(
            [[(img_size[0] / 2) - 75, img_size[1] / 2 + 100],
            [((img_size[0] / 6) - 10), img_size[1]],
            [(img_size[0] * 5 / 6) + 60, img_size[1]],
            [(img_size[0] / 2 + 75), img_size[1] / 2 + 100]])
        dst = np.float32(
            [[(img_size[0] / 4), 0],
            [(img_size[0] / 4), img_size[1]],
            [(img_size[0] * 3 / 4), img_size[1]],
            [(img_size[0] * 3 / 4), 0]])

        self.perspective = Perspective(src, dst)

        self.search_engine = HistogramSearch(window_width=200,
                                             window_height=80,
                                             min_pixels_to_recenter=50,
                                             visualisation=None)
        self.left = Lane(buffer_size)
        self.right = Lane(buffer_size)
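The src and dst arrays above map a road-surface trapezoid onto a rectangle. The Perspective class itself is not shown in this example, but the equivalent warp in plain OpenCV looks roughly like the sketch below (the function name and the wrapper's behavior are assumptions; src and dst are the float32 arrays from the example):

import cv2

def warp_to_birdseye(img, src, dst):
    # Homographies for the forward (camera -> birdseye) and inverse warps.
    M = cv2.getPerspectiveTransform(src, dst)
    M_inv = cv2.getPerspectiveTransform(dst, src)
    size = (img.shape[1], img.shape[0])  # OpenCV expects (width, height)
    warped = cv2.warpPerspective(img, M, size, flags=cv2.INTER_LINEAR)
    return warped, M_inv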
Example 12
        result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        plt.imshow(result)
        plt.plot(left_fit_x, plot_y, color='yellow')
        plt.plot(right_fit_x, plot_y, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        plt.axis("off")
        # Save visualization
        if save_image:
            plt.savefig('./output_images/vis_area.png', bbox_inches='tight')
        plt.show()


if __name__ == '__main__':
    polyfit = Polyfit()
    perspective = Perspective()

    image_file = 'test_images/test2.jpg'
    image = mpimg.imread(image_file)
    undistorted = undistort(image)
    # Create binary outputs
    abs_thresh, mag_thresh, dir_thresh, hls_thresh, hsv_thresh, combined_output = combined_threshold(
        undistorted)
    plt.imshow(combined_output, cmap='gray')
    warped = perspective.warp(combined_output)
    plt.imshow(warped, cmap='gray')
    # Find lanes
    left_fit, right_fit, fit_vars = polyfit.poly_fit_skip(warped)
    left_curve_rad, right_curve_rad = polyfit.curvature()
    position = polyfit.vehicle_position(warped)
    print('curvature: {:.2f}m'.format((left_curve_rad + right_curve_rad) / 2))
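The undistort helper called above is not shown in this example. Given the camera_matrix and distortion_coefficients from Example 11, a minimal version might look like this sketch, with the calibration passed explicitly rather than baked in (names assumed from that example):

import cv2

def undistort(img, camera_matrix, distortion_coefficients):
    # Undo the lens distortion measured during calibration (cv2.calibrateCamera).
    return cv2.undistort(img, camera_matrix, distortion_coefficients,
                         None, camera_matrix)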
Example 13
def process_frame(frame):
    """ Process a single image or a frame from a video to find the lane lines

    This is the main entry point for this code.  It can be used in debug mode (see gEnv above) to process a
    single image, but is designed to operate with moviepy to process videos.

    Args:
        frame (image): The input image

    Returns:
        result (image): The output image with the lane and lines overlaid, along with curvature and car position.
    """
    global gEnv

    gEnv['frame_count'] += 1  # keep track of frames to get ratio of good ones
    # Correct for camera distortion
    corrected = correct_image(frame)

    # normalize brightness and contrast
    image = apply_CLAHE(corrected)

    # Transform it to birdseye
    image = transform2birdseye(image)
    if gEnv['debug']:
        cv2.imshow('Birdseye', image)

    # Enhance the image, and if necessary find the starts of the lines
    enhanced, gEnv['prev_line_ends'] = find_line_starts(image, gEnv)
    if gEnv['debug']:
        cv2.imshow('Enhanced', enhanced)

    # walk up the lines to find a series of points
    left_Xpts, right_Xpts, Ypts, data_good = walk_lines(enhanced, gEnv)

    # Fit a 2nd order function for each line
    left_coef = np.polyfit(Ypts, left_Xpts, 2)
    right_coef = np.polyfit(Ypts, right_Xpts, 2)

    # Get new lines from the fitted curve
    Yarr = np.array(Ypts)
    left_newXpts = (left_coef[0] * Yarr**2) + (left_coef[1] *
                                               Yarr) + left_coef[2]
    right_newXpts = (right_coef[0] * Yarr**2) + (right_coef[1] *
                                                 Yarr) + right_coef[2]

    # was the walk data good enough to keep?
    max_bad_frames = 25  # don't reset on the first bad frame, wait a second
    if data_good:
        gEnv['bad_frame_count'] = 0
        gEnv['left_prev_coef'] = left_coef  # only save coefs if the data was actually good
        gEnv['right_prev_coef'] = right_coef
        gEnv['prev_line_ends'] = [left_newXpts[-1], right_newXpts[-1]]
        gEnv['good_frames'] += 1
    else:
        gEnv['bad_frame_count'] += 1

    gEnv['prev_data_good'] = gEnv['bad_frame_count'] <= max_bad_frames

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image).astype(np.uint8)

    # Form polylines and polygon
    left_newpts = [np.array(list(zip(left_newXpts, Ypts)), dtype=np.int32)]
    right_newpts = [np.array(list(zip(right_newXpts, Ypts)), dtype=np.int32)]
    poly_pts = np.vstack((left_newpts[0], right_newpts[0][::-1]))

    # Draw the polylines and polygon
    cv2.fillPoly(warp_zero, np.int_([poly_pts]), (0, 255, 0))
    cv2.polylines(warp_zero,
                  np.int_([left_newpts]),
                  False, (255, 0, 0),
                  thickness=30)
    cv2.polylines(warp_zero,
                  np.int_([right_newpts]),
                  False, (0, 0, 255),
                  thickness=30)

    # Where are we in the lane?
    car_center = frame.shape[1] / 2  # camera is centered in the car
    left_lineX = left_newXpts[0]
    right_lineX = right_newXpts[0]
    lane_center = np.mean([left_lineX, right_lineX])

    birdseye_laneWpx = 906  # pixel width of lane in birdseye (points are in birdseye space)
    meters_px = 3.6576 / birdseye_laneWpx  # Assuming that the lane is 12 feet wide (3.6576 meters)
    offcenter_Px = car_center - lane_center
    offcenter_Meters = offcenter_Px * meters_px
    if offcenter_Meters > 0:
        position_msg = 'Car is {:.2f} meters to the right of center'.format(
            abs(offcenter_Meters))
    else:
        position_msg = 'Car is {:.2f} meters to the left of center'.format(
            abs(offcenter_Meters))

    # Define conversions in x and y from pixels space to meters
    y_eval = np.max(Ypts)
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = meters_px  # meters per pixel in x dimension
    left_fit_cr = np.polyfit(Yarr * ym_per_pix,
                             np.asarray(left_Xpts) * xm_per_pix, 2)
    right_fit_cr = np.polyfit(Yarr * ym_per_pix,
                              np.asarray(right_Xpts) * xm_per_pix, 2)
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval + left_fit_cr[1]) ** 2) ** 1.5
                     / np.absolute(2 * left_fit_cr[0]))
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval + right_fit_cr[1]) ** 2) ** 1.5
                      / np.absolute(2 * right_fit_cr[0]))
    # Now our radius of curvature is in meters
    curvature_msg = 'Curvatures: {:.2f} meters left line, {:.2f} meters right line'.format(
        left_curverad, right_curverad)

    # Transform the drawn image back to normal from birdseye
    P = Perspective()
    normal = P.back2normal(warp_zero)

    # Overlay the drawn image on the corrected camera image
    result = cv2.addWeighted(corrected, 1, normal, 0.7, 0)
    cv2.putText(result, position_msg, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                (0, 0, 0), 2)
    cv2.putText(result, curvature_msg, (10, 50), cv2.FONT_HERSHEY_PLAIN, 1.5,
                (0, 0, 0), 2)
    if data_good:
        cv2.putText(result, 'Good Data', (10, 80), cv2.FONT_HERSHEY_PLAIN, 1.5,
                    (0, 255, 0), 2)
    else:
        cv2.putText(result, 'Bad Data', (10, 80), cv2.FONT_HERSHEY_PLAIN, 1.5,
                    (255, 0, 0), 2)

    return result
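The curvature lines above implement the standard radius-of-curvature formula for a parabola x = A*y**2 + B*y + C, namely R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated at the bottom of the image after rescaling the fit into meters. A small self-contained check of that computation:

import numpy as np

def radius_of_curvature(coeffs, y_eval):
    # coeffs = [A, B, C] as returned by np.polyfit(y, x, 2) for x = A*y**2 + B*y + C
    A, B, _ = coeffs
    return (1 + (2 * A * y_eval + B) ** 2) ** 1.5 / np.absolute(2 * A)

# A nearly straight lane fitted in meter space gives a large radius (about 5000 m here).
print(radius_of_curvature([1e-4, 0.01, 1.0], 30.0))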
Example 14
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table_experiments as dt
from flask_caching import Cache
import numpy as np
import os
import pandas as pd
import plotly.graph_objs as go
import time
import json

from perspective import Perspective
from twitter import Twitter

perspective_key = os.environ.get('PERSPECTIVE_KEY')
perspective_client = Perspective(perspective_key)

twitter_consumer_key = os.environ.get('TWITTER_KEY')
twitter_consumer_secret = os.environ.get('TWITTER_SECRET')
twitter_client = Twitter(twitter_consumer_key, twitter_consumer_secret)

app = dash.Dash('harassment dashboard')
server = app.server
CACHE_CONFIG = {
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': os.environ.get('REDIS_URL', 'localhost:6379')
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)

app.css.append_css(
Example 15
from camera_calibration import Camera
from perspective import RegionOfInterest, Perspective
from util import images_in_directory, write_images_to_directory
from binary_image import Lane, identify_lane_pixels
import cv2
import numpy as np
from moviepy.editor import VideoFileClip

camera = Camera(images_in_directory('camera_cal'))
region_of_interest = RegionOfInterest((1280, 720))
perspective = Perspective(region_of_interest)


class Pipeline:
    def __init__(self):
        self.previous_polynomial_left = []
        self.previous_polynomial_right = []
        self.previous_curves = []

    def pipeline(self, image):
        undistorted_image = camera.undistort(image)
        bird_view_image = perspective.bird_view(undistorted_image)
        left_lane, right_lane = self.find_lanes(bird_view_image)

        bird_view_lane_image = np.zeros_like(bird_view_image).astype(np.uint8)
        left_lane.draw_fill_on_image(bird_view_lane_image,
                                     other_lane=right_lane)
        left_lane.draw_identified_pixels_on_image(bird_view_lane_image,
                                                  [255, 0, 0])
        right_lane.draw_identified_pixels_on_image(bird_view_lane_image,
                                                   [0, 0, 255])