Example #1
def run():
    global is_white
    global zumi

    try:
        camera = Camera()
        knn = ColorClassifier(path='/home/pi/Zumi_Content/Data')
        knn.load_model("demo_BW_light")
        camera.start_camera()

        while True:
            if input("Press Enter to read a card, or type q first to exit. "
                     ) == "q":
                break
            image = camera.capture()
            predict = knn.predict(image, 'v')
            print(predict)

            if predict == "white":
                is_white = True
                drive_thread = Thread(target=continue_straight)
                drive_thread.start()
            if predict == "black":
                is_white = False
                zumi.stop()
    finally:
        is_white = False
        zumi.stop()
        camera.close()
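The example starts a `continue_straight` thread that is not shown above. Below is a minimal sketch of what such a helper might look like, assuming the shared `is_white` flag and `zumi` object from the example; the `forward_step` drive call and its arguments are assumptions, so substitute whatever drive method your Zumi library version provides.

import time

def continue_straight():
    # keep nudging forward while the last card seen was white;
    # stop as soon as the main loop clears the flag
    while is_white:
        zumi.forward_step(40, 0)  # assumed signature: (speed, heading)
        time.sleep(0.05)
    zumi.stop()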
Example #2
    def __init__(self):
        #TODO load classifier
        self.object_classifier = ObjectClassifier()

        self.is_site = False

        self.color_classifier = ColorClassifier(self.is_site)
Example #3
def default(train_path):
    color_training = get_color_training_data(train_path)
    color_classifier = ColorClassifier(color_training)
    shape_segmenter = Segmenter()
    classifier = ShapeAlphaColorClassifier(color_classifier,
                                           shape_segmenter)
    return classifier
Example #4
class TLClassifier(object):
    def __init__(self):
        # init object classifier (step 1)
        self.object_classifier = ObjectClassifier()

        # init traffic light color classifier (step 2)
        self.color_classifier = ColorClassifier()

    def get_classification(self, image, frame_id):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light
            frame_id: frame identifier used to name the saved cropped images

        Returns:
            tuple: (traffic light color ID as specified in styx_msgs/TrafficLight,
                    list of cropped traffic light images)

        """

        # step 1
        traffic_light_images = self.object_classifier.get_traffic_light_images(image)

        # step 2
        traffic_light_color = self.color_classifier.predict_images(traffic_light_images)

        for idx, image in enumerate(traffic_light_images):
            image.save('cropped/image{}-{}-{}.jpg'.format(frame_id, idx, tf_colors[traffic_light_color]))


        return traffic_light_color, traffic_light_images
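For context, a hypothetical call site for the class above. It assumes the frame has already been loaded (for example with `cv2.imread`) and that the module-level `tf_colors` ID-to-name list used inside `get_classification` is defined; neither is part of this snippet.

import cv2

classifier = TLClassifier()
frame = cv2.imread('camera_frame.jpg')  # assumed input image path
light_id, crops = classifier.get_classification(frame, frame_id=0)
print('traffic light color id:', light_id, 'from', len(crops), 'cropped lights')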
Example #5
    def __init__(self, is_site):
        # init object classifier (step 1)
        rospy.loginfo('TLClassifier start initialization')
        self.object_classifier = ObjectClassifier()

        # init traffic light color classifier (step 2)
        #john added is_site log output
        if is_site:
            rospy.loginfo('is_site TRUE using site classifier')
        else:
            rospy.loginfo('is_site FALSE using sim classifier')

        self.color_classifier = ColorClassifier(
            is_site)  #john ...now sending boolean to color classifier

        rospy.loginfo('TLClassifier initialized')
Example #6
class TLClassifier(object):
    #john added boolean is_site to indicate site or sim classifier to load
    def __init__(self, is_site):
        # init object classifier (step 1)
        rospy.loginfo('TLClassifier start initialization')
        self.object_classifier = ObjectClassifier()

        # init traffic light color classifier (step 2)
        #john added is_site log output
        if is_site:
            rospy.loginfo('is_site TRUE using site classifier')
        else:
            rospy.loginfo('is_site FALSE using sim classifier')

        self.color_classifier = ColorClassifier(
            is_site)  #john ...now sending boolean to color classifier

        rospy.loginfo('TLClassifier initialized')

        self.RECORD_CROPPED_IMAGES = False

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        rospy.loginfo('TLClassifier received image')

        # convert from bgr 2 rgb
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # step 1
        traffic_light_images = self.object_classifier.get_traffic_light_images(
            image)

        traffic_light_color = self.color_classifier.predict_images(
            traffic_light_images)

        tf_color = ['RED', 'YELLOW', 'GREEN', 'UNDEFINED',
                    'UNKNOWN']  # not elegant, but lets the log show a name instead of an ID
        rospy.loginfo('Traffic light detected {}'.format(
            tf_color[traffic_light_color]))

        if self.RECORD_CROPPED_IMAGES:
            dir = './data/cropped/'
            if not os.path.exists(dir):
                os.makedirs(dir)

            for idx, image in enumerate(traffic_light_images):
                f_name = "sim_tl_{}_{}_{}.jpg".format(
                    calendar.timegm(time.gmtime()),
                    tf_color[traffic_light_color], idx)
                image.save(dir + f_name)

        return traffic_light_color
Example #7
class TLClassifier(object):
    def __init__(self):
        #TODO load classifier
        self.object_classifier = ObjectClassifier()

        self.is_site = False

        self.color_classifier = ColorClassifier(self.is_site)

        # flag checked in get_classification(); without it the attribute lookup below fails
        self.RECORD_CROPPED_IMAGES = False

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # implement light color prediction
        # convert from bgr 2 rgb
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # step 1
        traffic_light_images = self.object_classifier.get_traffic_light_images(
            image)

        traffic_light_color = self.color_classifier.predict_images(
            traffic_light_images)

        tf_color = ['RED', 'YELLOW', 'GREEN', 'UNDEFINED', 'UNKNOWN']

        if self.RECORD_CROPPED_IMAGES:
            dir = './data/cropped/'
            if not os.path.exists(dir):
                os.makedirs(dir)

            for idx, image in enumerate(traffic_light_images):
                f_name = "sim_tl_{}_{}_{}.jpg".format(
                    calendar.timegm(time.gmtime()),
                    tf_color[traffic_light_color], idx)
                image.save(dir + f_name)

        return traffic_light_color
Example #8
class TLClassifier(object):
    #john added boolean is_site to indicate site or sim classifier to load
    def __init__(self, is_site):
        # init object classifier (step 1)
        rospy.loginfo('TLClassifier start initialization')
        self.object_classifier = ObjectClassifier()

        # init traffic light color classifier (step 2)
        #john added is_site log output
        if is_site:
            rospy.loginfo('is_site TRUE using site classifier')
        else:
            rospy.loginfo('is_site FALSE using sim classifier')

        self.color_classifier = ColorClassifier(
            is_site)  #john ...now sending boolean to color classifier

        rospy.loginfo('TLClassifier initialized')

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        rospy.loginfo('TLClassifier received image')

        # step 1
        traffic_light_images = self.object_classifier.get_traffic_light_images(
            image)

        #john there might be a problem here: what if there are 1 or 3 predictions, how does that affect the result below?
        # eventually this gets passed back to tl_detector as a single state; maybe this is a problem?
        traffic_light_color = self.color_classifier.predict_images(
            traffic_light_images)

        tf_color = ['RED', 'YELLOW', 'GREEN', 'UNDEFINED',
                    'UNKNOWN']  # not elegant, but lets the log show a name instead of an ID
        rospy.loginfo('Traffic light detected {}'.format(
            tf_color[traffic_light_color]))
        #changed because now using styx msg #TrafficLight(traffic_light_color)))

        return traffic_light_color
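The #john comment above asks how several per-crop predictions should collapse into the single state handed back to tl_detector. The snippet leaves that open; one possible approach (not from the source) would be to have the color classifier return one ID per crop and then apply a safety-biased vote, assuming the usual styx_msgs/TrafficLight constants.

from collections import Counter

RED, YELLOW, GREEN, UNKNOWN = 0, 1, 2, 4  # assumed styx_msgs/TrafficLight values

def aggregate_colors(color_ids):
    """Reduce a list of per-crop color IDs to one state, preferring RED."""
    if not color_ids:
        return UNKNOWN
    if RED in color_ids:
        return RED  # any red crop wins: err on the side of stopping
    return Counter(color_ids).most_common(1)[0][0]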
Example #9
def detect_video(yolo, video_path, output_path="", query="Q1"):
    import cv2
    print('query', query)
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC    = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps       = vid.get(cv2.CAP_PROP_FPS)
    video_size      = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                        int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    color_classifier = ColorClassifier()
    cartype_classifier = CarTypeClassifier()
    out = []  # note: re-binds 'out', discarding the VideoWriter created above
    out_exetime = []
    frame_number = 0
    while frame_number < 5:  # alternate conditions from the source: True, frame_number < 10
        data = []
        time = []
        exe_time = []
        curr_time = timer()
        return_value, frame = vid.read()
        if return_value:
            frame_number += 1
            image = Image.fromarray(frame)
            image, detect_imagepoints = yolo.detect_image(image)
            data.append(str(frame_number))
            exe_time.append(str(frame_number))
            detectimage_time = timer()
            exe_time.append(str(detectimage_time - curr_time))
            # print(detect_imagepoints)
            result = np.asarray(image)
            # timers default to curr_time so the deltas below are defined even
            # when no vehicles were detected in this frame
            cartype_time = color_time = curr_time
            for detect_image in detect_imagepoints:
                box_image = image.crop((detect_image['left'], detect_image['top'],
                                        detect_image['right'], detect_image['bottom']))
                cartype = predict_class[np.argmax(
                    cartype_classifier.detect_cartype(np.array(box_image, dtype='float32')))]
                cartype_time = timer()
                color = color_classifier.detect_color(np.array(box_image, dtype='float32'))
                color_time = timer()
                data.append(cartype)
                data.append(color)
            exe_time.append(str(cartype_time - curr_time))
            exe_time.append(str(color_time - curr_time))
            data.append(str(len(detect_imagepoints)))
            # curr_time = timer()
            # exec_time = curr_time - prev_time
            # prev_time = curr_time
            # accum_time = accum_time + exec_time
            # curr_fps = curr_fps + 1
            # if accum_time > 1:
            #     accum_time = accum_time - 1
            #     fps = "FPS: " + str(curr_fps)
            #     curr_fps = 0
            print(','.join(data))
            out.append(data)
            out_exetime.append(exe_time)
            cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50, color=(255, 0, 0), thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", result)
            # if isOutput:
            #     out.write(result)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    
   
    yolo.close_session()
    f = open("out.csv", "w+")
    for data in out:
        print(data)
        out_data = data[0] + ','
        pattern = np.zeros(10, dtype=int)
        print(data[1], data[2])
        # "substring not found" was raised intermittently here: `color` is the last
        # predicted color string from the detection loop, not a list of class names,
        # so color.index(data[2]) is a substring search that fails for most values
        index = predict_class.index(data[1])*5 + (color.index(data[2]))
        pattern[index] +=1
        if len(data) > 4:
            index = predict_class.index(data[3])*5 + (color.index(data[4]))
            pattern[index] +=1
        out_data += ','.join([str(num) for num in pattern])
        if len(data) > 4:
            out_data += ',' + data[5]
        else:
            out_data += ',' + data[3]
        f.write(out_data + '\n')
    f.close()
    f = open('out_time.csv', 'w+')
    for exe_time in out_exetime:
        f.write(','.join(exe_time) + '\n')
    f.close()
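The intermittent "substring not found" error noted in the last loop happens because `color` still holds the last predicted color string from the detection loop, so `color.index(data[2])` is a substring search rather than a list lookup. A sketch of the pattern-encoding step with an explicit class-name list; the list contents and the helper name are assumptions, and `predict_class` and `numpy` (as `np`) come from the surrounding script.

# assumed color class names; the real list lives next to predict_class in the original project
color_classes = ['black', 'blue', 'red', 'silver', 'white']

def encode_pattern(data, predict_class, color_classes):
    # data = [frame, type1, color1, (type2, color2, ...), detection_count]
    pattern = np.zeros(len(predict_class) * len(color_classes), dtype=int)
    for type_name, color_name in zip(data[1::2], data[2::2]):
        if type_name in predict_class and color_name in color_classes:
            idx = (predict_class.index(type_name) * len(color_classes)
                   + color_classes.index(color_name))
            pattern[idx] += 1
    return pattern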
Example #10
    def __init__(self):
        # init object classifier (step 1)
        self.object_classifier = ObjectClassifier()

        # init traffic light color classifier (step 2)
        self.color_classifier = ColorClassifier()
Example #11
import cv2
from detect_car_yolo import detect_car
from color_classifier import ColorClassifier
from vehicle_classifier import VehicleClassifier
from plate_recognition_alpr import get_plate_alpr
import json
import time
# declare the image path to process
img_path = "img_test/agya2.jpg"

# ========= Load model ===================
# declare object for VehicleClassifier
vehicle_classifier = VehicleClassifier()

# declare object for color_classifier
color_classifier = ColorClassifier()
# ========= End Load model =================

start = time.time()
# get Car object using Yolo
cars = detect_car(img_path)

# declare lists to hold the color and vehicle predictions
colors = []
car_models = []
for ind, car in enumerate(cars):
    # Predict Color
    color = color_classifier.predict(car)

    # Predict Vehicle make and model
    pred = vehicle_classifier.predict(car)
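The excerpt ends before the declared `colors` and `car_models` lists are filled or the imported `json` module is used. A sketch of how that tail end might look, with result field names that are assumptions rather than from the source.

for car in cars:
    colors.append(color_classifier.predict(car))
    car_models.append(vehicle_classifier.predict(car))

# serialize the per-car predictions together with the elapsed time
result = {"cars": [{"color": c, "model": m} for c, m in zip(colors, car_models)],
          "elapsed_seconds": time.time() - start}
print(json.dumps(result, indent=2))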
Example #12
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import os
from vehicle_classifier import VehicleClassifier
from color_classifier import ColorClassifier
from main_2 import get_car_info
import time

# declare object for VehicleClassifier
vehicle_classifier = VehicleClassifier()

# declare object for color_classifier
color_classifier = ColorClassifier()

UPLOAD_FOLDER = 'uploads/'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


@app.route("/getCarInfo", methods=["POST"])
def homePage():
    try:
        data = {}
        start = time.time()
        file = request.files['images']
        file_name = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
        file.save(file_path)

        result = get_car_info(img_path=file_path,
                              vehicle_classifier=vehicle_classifier,