Example #1
def run_main():
	# create a "threaded" video stream and allow the camera sensor to warm up
	vs = PiVideoStream((640,480)).start()
	time.sleep(2.0)
	
	# read the first frame of the video
	frame = vs.read()
	
	# Set the ROI
	c, r, w, h = 200, 100, 70, 70
	track_window=(c, r, w, h)
	
	# Create the mask and normalized histogram
	roi = frame[r:r+h, c:c+w]
	hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
	mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180.,255.,255.)))
	roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
	cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
	
	# Setup the termination criteria: either 80 iterations or a move of at least 1 pt
	term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
	
	while True:
		frame = vs.read()
	
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
		
		# apply meanshift to get the new location
		ret, track_window = cv2.meanShift(dst, track_window, term_crit)
		
		# apply camshift to get the new location
		#ret, track_window = cv2.CamShift(dst, track_window, term_crit)

		# draw it on image
		x, y, w, h = track_window
		cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
		cv2.putText(frame, 'Tracked M', (x-25, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
		
		cv2.imshow('Tracking', frame)
		
		# if the 'q' key is pressed, break from the loop
		key = cv2.waitKey(1) & 0xFF
		if key == ord("q"):
			break
			
	vs.stop()
	cv2.destroyAllWindows()
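
This example keeps the cv2.CamShift call commented out. Unlike meanShift, CamShift returns a rotated rectangle, so drawing it takes an extra step through cv2.boxPoints. A minimal sketch (reusing the dst, track_window and term_crit defined above) might be:

ret, track_window = cv2.CamShift(dst, track_window, term_crit)
pts = cv2.boxPoints(ret)        # four corners of the rotated box
pts = pts.astype(np.int32)      # integer coordinates for drawing
cv2.polylines(frame, [pts], True, 255, 2)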
Example #2
# do a bit of cleanup after the earlier, non-threaded benchmark; stream,
# rawCapture and camera come from the non-threaded capture set up earlier
# in the script (not shown here)
stream.close()
rawCapture.close()
camera.close()

# create a *threaded* video stream, allow the camera sensor to warm up,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()

# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

    # update the FPS counter
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
Example #3
class emoElmo:
    def __init__(self):
        print("constructing emo elmo")
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        
        self.os = os

        # Create the model
        model = Sequential()

        model.add(Conv2D(32, kernel_size=(3, 3),
                         activation='relu', input_shape=(48, 48, 1)))
        model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(7, activation='softmax'))

        model.load_weights('model.h5')

        self.model = model

        self.cv2 = cv2

        # prevents openCL usage and unnecessary logging messages
        self.cv2.ocl.setUseOpenCL(False)

        # dictionary which assigns each label an emotion (alphabetical order)
        self.emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful",
                             3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

        # dictionary to calculate total emotion score at the end of the run
        self.emotion_score_dict = {"Angry": 0, "Disgusted": 0, "Fearful": 0,
                                   "Happy": 0, "Neutral": 0, "Sad": 0, "Surprised": 0}

        # get start datetime
        self.startDateTime = datetime.now()
        self.dateTimeString = self.startDateTime.strftime("%Y-%m-%d_%H:%M")
        self.videoFileName = self.dateTimeString + '.avi'
        self.audioFileName = self.dateTimeString + '.wav'

        # initialize audio recording
        self.form_1 = pyaudio.paInt16  # 16-bit resolution
        self.chans = 1  # 1 channel
        self.samp_rate = 44100  # 44.1kHz sampling rate
        self.chunk = 4096  # 2^12 samples for buffer
        # device index found by p.get_device_info_by_index(ii)
        self.dev_index = 2
        self.wav_output_filename = self.audioFileName  # name of .wav file

        self.audio = pyaudio.PyAudio()  # create pyaudio instantiation

        # create pyaudio stream
        self.stream = self.audio.open(format=self.form_1, rate=self.samp_rate, channels=self.chans,
                                 input_device_index=self.dev_index, input=True,
                                 frames_per_buffer=self.chunk)

        self.audioFrames = []

        # start the webcam feed
        self.vs = PiVideoStream().start()
        time.sleep(2.0)
        self.fps = FPS().start()

        # define codec and create VideoWriter Object
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter(self.videoFileName, self.fourcc, 30.0, (320, 240))

        self.facecasc = cv2.CascadeClassifier(
            '/home/pi/opencv/data/haarcascades/haarcascade_frontalface_default.xml')

        self.wave = wave

        # array to store all frames capture for post-processing
        self.videoFrameStore = []

        # boolean flag for recording
        self.recording = 1

        # tkinter settings
        win = Tk()

        win.title("Litmus Box")
        win.geometry('800x480')

        self.win = win

        myFont = tkinter.font.Font(family='Helvetica', size=36, weight='bold')
        
        self.stopRecordButton = Button(self.win, text="STOP RECORD", font=myFont,
                                       command=self.stop_recording, height=2, width=15)
        self.stopRecordButton.pack(side=BOTTOM)

        self.recordButton = Button(self.win, text="RECORD", font=myFont,
                                   command=self.start_recording, height=2, width=15)
        self.recordButton.pack()

        mainloop()

    def start_recording(self):
        print("call start recording")
        self.recording = 1
        while self.recording == 1:
            # Find haar cascade to draw bounding box around face
            print("inside whle loop")
            frame = self.vs.read()

            self.videoFrameStore.append(frame)

            # fps stuff
            self.out.write(frame)
            self.fps.update()

            # audio stuff
            data = self.stream.read(self.chunk, exception_on_overflow=False)
            self.audioFrames.append(data)
            print("end of one iteration")
            self.win.update()

        print("exit while loop")

        for currentVideoFrame in self.videoFrameStore:
            # convert to gray and get face position
            gray = self.cv2.cvtColor(currentVideoFrame, cv2.COLOR_BGR2GRAY)
            faces = self.facecasc.detectMultiScale(
                gray, scaleFactor=1.3, minNeighbors=5)

            for (x, y, w, h) in faces:
                roi_gray = gray[y:y + h, x:x + w]
                cropped_img = np.expand_dims(np.expand_dims(
                    cv2.resize(roi_gray, (48, 48)), -1), 0)

                prediction = self.model.predict(cropped_img)
                maxindex = int(np.argmax(prediction))
                # increment score
                emotion = self.emotion_dict[maxindex]
                self.emotion_score_dict[emotion] += 1

        # write to audio file
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()
        self.fps.stop()

        # save the audio frames as .wav file
        wavefile = self.wave.open(self.wav_output_filename, 'wb')
        wavefile.setnchannels(self.chans)
        wavefile.setsampwidth(self.audio.get_sample_size(self.form_1))
        wavefile.setframerate(self.samp_rate)
        wavefile.writeframes(b''.join(self.audioFrames))
        wavefile.close()

        # write final emotion score to txt file
        print(self.emotion_score_dict)
        with open(self.dateTimeString + '.txt', 'w') as file:
            file.write(json.dumps(self.emotion_score_dict))

        print("[INFO] elasped time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

        # cap.release()
        self.out.release()
        self.cv2.destroyAllWindows()
        self.vs.stop()            

    def stop_recording(self):
        print("call stop record button")
        self.recording = 0
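
start_recording leaves each session as a separate .avi and .wav pair. If a single file is wanted afterwards, one option (an assumption on my part, not part of the original class; it requires ffmpeg to be installed on the Pi) is to mux the two with subprocess:

import subprocess

def mux_audio_video(video_path, audio_path, out_path):
    # copy the video stream untouched and encode the audio as AAC; -y overwrites
    subprocess.run(['ffmpeg', '-y', '-i', video_path, '-i', audio_path,
                    '-c:v', 'copy', '-c:a', 'aac', out_path], check=True)

# e.g. mux_audio_video(self.videoFileName, self.audioFileName, self.dateTimeString + '.mp4')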
Example #4
def return_book():
    Node1, Node2, Node3 = True, True, True
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name(
        'detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    def node_status():
        if Node1:
            cv2.putText(frame, "NOT AVAILABLE", (LEFT_R_outside),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        if Node2:
            cv2.putText(frame, "NOT AVAILABLE", (CENTER_R_outside),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        if Node3:
            cv2.putText(frame, "NOT AVAILABLE", (RIGHT_R_outside),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    frame_rate_calc = 1
    freq = cv2.getTickFrequency()
    global frame

    vs = PiVideoStream().start()
    time.sleep(2.0)
    with detection_graph.as_default():
        with tf.Session() as sess:
            while True:

                frame = vs.read()
                frame = imutils.resize(frame, width=400)
                t1 = cv2.getTickCount()

                cv2.putText(frame, "FPS: {0:.2f}".format(frame_rate_calc),
                            (30, 50), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
                t2 = cv2.getTickCount()
                time1 = (t2 - t1) / freq
                frame_rate_calc = 1 / time1

                frame_expanded = np.expand_dims(frame, axis=0)

                (boxes, scores, classes,
                 num) = sess.run([
                     detection_boxes, detection_scores, detection_classes,
                     num_detections
                 ],
                                 feed_dict={image_tensor: frame_expanded})
                vis_util.visualize_boxes_and_labels_on_image_array(
                    frame,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=5,
                    min_score_thresh=min_t_hold
                    #min_score_thresh=0.38
                )
                cv2.rectangle(frame, RIGHT_L_outside, RIGHT_R_outside,
                              (255, 255, 255), 1)
                cv2.rectangle(frame, CENTER_L_outside, CENTER_R_outside,
                              (255, 255, 255), 1)
                cv2.rectangle(frame, LEFT_L_outside, LEFT_R_outside,
                              (255, 255, 255), 1)

                try:
                    for i, b in enumerate(boxes[0]):
                        if classes[0][i] == 1:  # if book
                            # if scores[0][i] > 0.5:
                            if scores[0][i] > min_t_hold:
                                ymin = boxes[0][i][0]
                                xmin = boxes[0][i][1]
                                ymax = boxes[0][i][2]
                                xmax = boxes[0][i][3]

                                width = int(((xmin + xmax) / 2) * IM_WIDTH)
                                height = int(((ymin + ymax) / 2) * IM_HEIGHT)
                                mid_x = (xmax + xmin) / 2  # in percentage
                                mid_y = (ymax + ymin) / 2  # in percentage
                                mid_x_pixel = int(mid_x * IM_WIDTH)
                                mid_y_pixel = int(mid_y * IM_HEIGHT)
                                apx_distance = round((1 - (xmax - xmin))**4, 1)
                                centers.append((mid_x_pixel, mid_y_pixel))
                                # cv2.putText(frame, '{}'.format(apx_distance), (mid_x_pixel,mid_y_pixel), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2)
                                cv2.circle(frame, (mid_x_pixel, mid_y_pixel),
                                           2, (255, 0, 0))
                                # if apx_distance <= 0.5:
                                if apx_distance <= 0.9:  # change to 0.5 ^
                                    cv2.putText(
                                        frame, "CLOSE",
                                        (mid_x_pixel - 50, mid_y_pixel - 50),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (244, 66, 137), 2)
                                    if mid_x > 0.6 and mid_x < 0.95:
                                        cv2.putText(frame, "RIGHT",
                                                    (mid_x_pixel - 50, mid_y_pixel),
                                                    cv2.FONT_HERSHEY_SIMPLEX,
                                                    0.7, (66, 244, 128), 2)
                                        Node3_Frame[1] += 1  # index 1 counts True frames, index 0 counts False
                                        if Node3_Frame[1] >= Node_Frame_Wait_Time:
                                            Node3 = True
                                            N3 = 1
                                            Node3_Frame[1] = 0
                                    elif not (mid_x > 0.6 and mid_x < 0.95):
                                        Node3_Frame[0] += 1
                                        if Node3_Frame[0] >= Node_Frame_Wait_Time:
                                            Node3 = False
                                            N3 = 0
                                            Node3_Frame[0] = 0
                                    else:
                                        Node3 = None
                                    if mid_x > 0.32 and mid_x < 0.58:
                                        cv2.putText(frame, "CENTER",
                                                    (mid_x_pixel - 50, mid_y_pixel),
                                                    cv2.FONT_HERSHEY_SIMPLEX,
                                                    0.7, (66, 244, 128), 2)
                                        Node2_Frame[1] += 1
                                        if Node2_Frame[1] >= Node_Frame_Wait_Time:
                                            Node2 = True
                                            N2 = 1
                                            Node2_Frame[1] = 0
                                    elif not (mid_x > 0.32 and mid_x < 0.58):
                                        Node2_Frame[0] += 1
                                        if Node2_Frame[0] >= Node_Frame_Wait_Time:
                                            Node2 = False
                                            N2 = 0
                                            Node2_Frame[0] = 0
                                    else:
                                        Node2 = None
                                    if mid_x > 0.05 and mid_x < 0.3:
                                        cv2.putText(frame, "LEFT",
                                                    (mid_x_pixel - 50, mid_y_pixel),
                                                    cv2.FONT_HERSHEY_SIMPLEX,
                                                    0.7, (66, 244, 128), 2)
                                        Node1_Frame[1] += 1
                                        if Node1_Frame[1] >= Node_Frame_Wait_Time:
                                            Node1 = True
                                            N1 = 1
                                            Node1_Frame[1] = 0
                                    elif not (mid_x > 0.05 and mid_x < 0.3):
                                        Node1_Frame[0] += 1
                                        if Node1_Frame[0] >= Node_Frame_Wait_Time:
                                            Node1 = False
                                            N1 = 0
                                            Node1_Frame[0] = 0
                                    else:
                                        Node1 = None
                                    # led_test()
                            node_status()

                            if (len(centers)) >= 2:
                                recentX = 0
                                recentY = 1
                                center_size = len(centers)
                                for i in range(0, center_size - 1):
                                    # cv2.line(frame, (centers[i]), (centers[i+1]), (0, 255, 0), 2) # for connecting books
                                    distance = np.linalg.norm(
                                        int(mid_x_pixel +
                                            (mid_x_pixel + IM_HEIGHT)) / 2 -
                                        int(mid_y_pixel +
                                            (mid_y_pixel + IM_WIDTH)) / 2)
                                    distance_cm = distance / 12

                                    # length, point A and point B
                                    lengths.append((distance, centers[i], centers[i + 1]))
                                    # find the biggest distance and draw a line between those points
                                    if len(lengths) >= 2:
                                        # e.g. 321 < 400, 305 < 359
                                        if lengths[i][0] < lengths[i + 1][0]:
                                            CURRENT_SPACE.append(lengths[i + 1])
                                        else:
                                            CURRENT_SPACE.append(lengths[i])
                                        recentPointMax = 0

                                        cv2.line(
                                            frame,
                                            (CURRENT_SPACE[recentPointMax][1]),
                                            (CURRENT_SPACE[recentPointMax][2]),
                                            (255, 255, 255), 2)
                                        cv2.putText(
                                            frame,
                                            str(CURRENT_SPACE[recentPointMax]
                                                [0]),
                                            (mid_x_pixel, mid_y_pixel),
                                            cv2.FONT_HERSHEY_SIMPLEX, 2,
                                            (255, 255, 255), 3)
                                        MAX_SPACE.append(
                                            CURRENT_SPACE[recentPointMax])
                                        print(
                                            " max distance " +
                                            str(MAX_SPACE[recentPointMax][0]) +
                                            " wot " +
                                            str(MAX_SPACE[recentPointMax][1]) +
                                            " wot " +
                                            str(MAX_SPACE[recentPointMax][2]))
                                        print(MAX_SPACE[recentPointMax][1][0])
                                        CURRENT_SPACE.clear()
                                        lengths.clear()

                                # centers.clear()
                                # print(lengths)
                                recentX += 1
                                recentY += 1
                                centers.clear()

                    # node_status()
                    # print("break")
                except Exception as e:
                    print(e)
                    traceback.print_exc()

                # cv2.imshow('object detection', cv2.resize(frame, (800, 480)))

                cv2.imshow('object detection', frame)
                # cv2.waitKey(250)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    vs.stop()
                    break
Example #5
class Camera():
    def __init__(self, config=None):
        # avoid a mutable default argument; fall back to an empty config dict
        self.config = config if config is not None else {}
        self.flip = self.config['flip_cam']
        self.frame_width = self.config['res_x']
        self.frame_height = self.config['res_y']
        if not isLoadPiCam:
            self.cap = cv2.VideoCapture(0)
            # set the capture device's width and height
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.frame_width)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.frame_height)
        else:
            self.cap = PiVideoStream(resolution=(
                self.frame_width, self.frame_height)).start()
        self.canRecord = True
        self.timeWithoutBody = 0
        self.startRecordingTime = 0
        self.pauseRecording = 0
        # self.fourcc = 0x00000021
        self.fourcc = cv2.VideoWriter_fourcc(*'avc1')

        self.out = cv2.VideoWriter()
        self.maxRecordingTime = self.config['max_record_time']
        self.classifier = cv2.CascadeClassifier(self.config['classifier'])
        self.isRecording = False
        self.currentFrame = None

    def recording(self):
        if isLoadPiCam is False:
            _, img = self.cap.read()
        else:
            img = self.cap.read()
        if self.flip:
            img = cv2.flip(img, 0)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = resize(gray, int(self.frame_width / 2),
                      int(self.frame_height / 2))

        self.isRecording = self.out.isOpened()

        objects = self.classifier.detectMultiScale(gray,
                                                   scaleFactor=1.3,
                                                   minNeighbors=5,
                                                   minSize=(30, 30))

        rects = np.array([[x, y, x + w, y + h]
                          for (x, y, w, h) in objects])
        filtered = non_max_suppression(rects, overlapThresh=0.65)
        for (x1, y1, x2, y2) in filtered:
            cv2.rectangle(img, (x1 * 2, y1 * 2), (x2 * 2, y2 * 2),
                          (0, 255, 0), 1)

            if not self.out.isOpened() and self.canRecord:
                print('starting recording')
                self.startRecordingTime = GetMilliSecs(time.time())
                self.pauseRecording = 0
                filename = self.config['video_folder'] + \
                    GetTimeStamp(time) + '.mp4'

                res = self.out.open(filename,
                                    self.fourcc,
                                    20,
                                    (self.frame_width, self.frame_height))
        if self.out.isOpened():
            self.out.write(img)
            if (self.startRecordingTime +
                    self.maxRecordingTime) < GetMilliSecs(time.time()):
                self.out.release()
                self.canRecord = False
                self.pauseRecording = GetMilliSecs(time.time())
                print('Stopped recording')

        if not self.canRecord and (self.pauseRecording +
                                   self.config['time_in_between'] <
                                   GetMilliSecs(time.time())):
            self.canRecord = True
        self.currentFrame = img

    def get_frame(self):
        if self.currentFrame is None:
            if isLoadPiCam is False:
                _, self.currentFrame = self.cap.read()
            else:
                self.currentFrame = self.cap.read()
        _, jpeg = cv2.imencode('.jpg', self.currentFrame)
        return jpeg.tobytes()

    def __del__(self):
        try:
            self.cap.release()
            cv2.destroyAllWindows()
        except:
            pass
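
This class leans on helpers that are not shown here: resize and non_max_suppression come from imutils (imutils.resize and imutils.object_detection.non_max_suppression), while GetMilliSecs and GetTimeStamp are project-specific. Hypothetical sketches of the two missing time helpers, matching only how they are called above, might be:

import time

def GetMilliSecs(t):
    # hypothetical: convert a time.time() float (seconds) to integer milliseconds
    return int(t * 1000)

def GetTimeStamp(time_module):
    # hypothetical: the class passes in the time module itself;
    # build a filename-safe timestamp from the current local time
    return time_module.strftime('%Y-%m-%d_%H-%M-%S')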
Example #6
net = cv.dnn.readNet('human-pose-estimation-0001.xml',
                     'human-pose-estimation-0001.bin')
# Specify target device.
net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)

# starting video streaming
cv.namedWindow('pose')
vs = PiVideoStream().start()
time.sleep(2.0)

joint = 0
chestY = 0

while True:
    startTime = time.time()
    frameArray = vs.read()
    captureTime = time.time()
    frameClone = imutils.resize(frameArray, width=456)

    blob = cv.dnn.blobFromImage(frameArray, size=(456, 256), ddepth=cv.CV_8U)
    prepareTime = time.time()
    net.setInput(blob)
    heatmap = net.forward()
    processTime = time.time()
    rightShoulder = False
    leftShoulder = False
    chest = False
    rightShoulderX = 0
    rightShoulderY = 0
    leftShoulderX = 0
    leftShoulderY = 0
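
The snippet ends before the network output is decoded. Assuming heatmap holds per-keypoint heatmaps of shape (1, C, H, W), a minimal sketch of recovering one keypoint and flagging the shoulder variables declared above could be:

# sketch: take one keypoint channel (e.g. 2 = right shoulder in the OpenPose ordering)
probMap = heatmap[0, 2, :, :]
_, confidence, _, point = cv.minMaxLoc(probMap)
if confidence > 0.1:
    # scale heatmap coordinates back up to the displayed frame
    rightShoulderX = int(point[0] * frameClone.shape[1] / probMap.shape[1])
    rightShoulderY = int(point[1] * frameClone.shape[0] / probMap.shape[0])
    rightShoulder = True
    cv.circle(frameClone, (rightShoulderX, rightShoulderY), 4, (0, 255, 0), -1)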
Example #7
def cameraOptim():
    time.sleep(2)
    serialHandler = SerialHandler.SerialHandler("/dev/ttyACM0")
    serialHandler.startReadThread()
    serialHandler.sendPidActivation(True)

    def moveForward():
        serialHandler.sendMove(SPEED, -1.8)
        print("Forward")

    def moveLeft():
        serialHandler.sendMove(SPEED, -23.0)
        print("Left")

    def moveRight():
        serialHandler.sendMove(SPEED, 23.0)
        print("Right")

    def moveBackward():
        serialHandler.sendMove(-SPEED, -1.8)
        print("Back")

    def dontMove():
        serialHandler.sendMove(0, -1.8)
        print("Break")

    def avoidObstacle():
        dontMove()
        time.sleep(0.1)
        moveBackward()
        time.sleep(1)
        moveLeft()
        time.sleep(1)
        moveForward()
        time.sleep(0.2)
        moveRight()
        time.sleep(1)
        moveForward()

    def region_of_interest(img, vertices):

        # Define a blank matrix that matches the image height/width.
        mask = np.zeros_like(img)
        # Retrieve the number of color channels of the image.
        #channel_count = img.shape[2]
        # Create a match color with the same color channel counts.
        match_mask_color = (255)
        # Fill inside the polygon
        cv2.fillPoly(mask, vertices, match_mask_color)
        # Returning the image only where mask pixels match
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image

    def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
        # if this fails, go with some default line
        try:

            # finds the maximum y value for a lane marker
            # (since we cannot assume the horizon will always be at the same point.)

            ys = []
            for i in lines:
                for ii in i:
                    ys += [ii[1], ii[3]]
            min_y = min(ys)
            max_y = 600
            new_lines = []
            line_dict = {}

            for idx, i in enumerate(lines):
                for xyxy in i:
                    # These four lines:
                    # modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
                    # Used to calculate the definition of a line, given two sets of coords.
                    x_coords = (xyxy[0], xyxy[2])
                    y_coords = (xyxy[1], xyxy[3])
                    A = vstack([x_coords, ones(len(x_coords))]).T
                    m, b = lstsq(A, y_coords)[0]

                    # Calculating our new, and improved, xs
                    x1 = (min_y - b) / m
                    x2 = (max_y - b) / m

                    line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
                    new_lines.append([int(x1), min_y, int(x2), max_y])

            final_lanes = {}

            for idx in line_dict:
                final_lanes_copy = final_lanes.copy()
                m = line_dict[idx][0]
                b = line_dict[idx][1]
                line = line_dict[idx][2]

                if len(final_lanes) == 0:
                    final_lanes[m] = [[m, b, line]]

                else:
                    found_copy = False

                    for other_ms in final_lanes_copy:

                        if not found_copy:
                            if abs(other_ms * 1.2) > abs(m) > abs(
                                    other_ms * 0.8):
                                if abs(final_lanes_copy[other_ms][0][1] *
                                       1.2) > abs(b) > abs(
                                           final_lanes_copy[other_ms][0][1] *
                                           0.8):
                                    final_lanes[other_ms].append([m, b, line])
                                    found_copy = True
                                    break
                            else:
                                final_lanes[m] = [[m, b, line]]

            line_counter = {}

            for lanes in final_lanes:
                line_counter[lanes] = len(final_lanes[lanes])

            top_lanes = sorted(line_counter.items(),
                               key=lambda item: item[1])[::-1][:2]

            lane1_id = top_lanes[0][0]
            lane2_id = top_lanes[1][0]

            def average_lane(lane_data):
                x1s = []
                y1s = []
                x2s = []
                y2s = []
                for data in lane_data:
                    x1s.append(data[2][0])
                    y1s.append(data[2][1])
                    x2s.append(data[2][2])
                    y2s.append(data[2][3])
                return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(
                    mean(y2s))

            l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
            l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

            return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2,
                                                  l2_y2], lane1_id, lane2_id
        except Exception as e:
            print(str(e))

    def process_img(image):
        original_image = image
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        processed_img = cv2.Canny(gray_image, 200, 300)
        processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)
        #processed_img = gray_image

        vertices = np.array([
            [10, 500],
            [10, 400],
            [300, 250],
            [500, 250],
            [800, 500],
            [800, 500],
        ], np.int32)
        cropped_image = region_of_interest(processed_img, [vertices])
        lines = cv2.HoughLinesP(cropped_image,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=180,
                                lines=np.array([]),
                                minLineLength=20,
                                maxLineGap=15)
        m1 = 0
        m2 = 0
        try:
            l1, l2, m1, m2 = draw_lanes(original_image, lines)
            cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]),
                     [0, 255, 0], 30)
            cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]),
                     [0, 255, 0], 30)
        except Exception as e:
            print(str(e))
            pass
        try:
            for coords in lines:
                coords = coords[0]
                try:
                    cv2.line(processed_img, (coords[0], coords[1]),
                             (coords[2], coords[3]), [255, 0, 0], 3)

                except Exception as e:
                    print(str(e))
        except Exception as e:
            pass
        #line_image = draw_lines(image, lines, thickness=1)
        #line_image = image
        #return line_image
        return processed_img, original_image, m1, m2

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-n",
                    "--num-frames",
                    type=int,
                    default=100,
                    help="# of frames to loop over for FPS test")
    ap.add_argument("-d",
                    "--display",
                    type=int,
                    default=1,
                    help="Whether or not frames should be displayed")
    args = vars(ap.parse_args())

    # initialize the camera and stream
    #camera = PiCamera()
    #camera.resolution = (800, 600)
    #camera.framerate = 32

    #last_time = time.time()

    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from `picamera` module...")
    vs = PiVideoStream().start()
    time.sleep(2.0)
    #fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    #while fps._numFrames < args["num_frames"]:

    # do a bit of cleanup
    last_time = time.time()
    while (True):
        #rawCapture=PiRGBArray(camera, size=(800,600))
        #cap.framerate = 10
        #camera.capture(rawCapture, format="bgr")
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        last_time = time.time()
        new_screen, original_image, m1, m2 = process_img(frame)
        #rawCapture = PiRGBArray(camera, size=(320, 240))
        #stream = camera.capture_continuous(rawCapture, format="bgr",
        #use_video_port=True)
        #frame = stream.array
        #screen= process_img(frame)
        print('Frame took {} seconds'.format(time.time() - last_time))

        cv2.imshow('frame', original_image)  # the cv2.COLOR_BGR2RGB argument could also be applied here

        if (waitline != ser.readline()):  # 'ser' and 'waitline' are assumed to be defined elsewhere

            if m1 < 0 and m2 < 0:
                moveRight()
                time.sleep(0.1)
                dontMove()
            elif m1 > 0 and m2 > 0:
                moveLeft()
                time.sleep(0.1)
                dontMove()
            else:
                moveForward()
                time.sleep(0.1)
                dontMove()
        else:
            print("OBSTACLE AHEAD")
            avoidObstacle()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    vs.stop()
Example #8
def cameraOptim():
    def region_of_interest(img, vertices):

        # Define a blank matrix that matches the image height/width.
        mask = np.zeros_like(img)
        # Retrieve the number of color channels of the image.
        #channel_count = img.shape[2]
        # Create a match color with the same color channel counts.
        match_mask_color = (255)
        # Fill inside the polygon
        cv2.fillPoly(mask, vertices, match_mask_color)
        # Returning the image only where mask pixels match
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image

    def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
        if lines is None:
            return img
        line_img = np.zeros(
            (img.shape[0], img.shape[1], 3),
            dtype=np.uint8,
        )
        # Loop over all lines and draw them on the blank image.
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)
        # Merge the image with the lines onto the original.
        img = cv2.addWeighted(img, 0.8, line_img, 1.0, 0.0)
        # Return the modified image.
        return img

    def process_img(image):
        gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        processed_img = cv2.Canny(gray_image, 200, 300)
        #processed_img = gray_image

        vertices = np.array([
            [10, 500],
            [10, 400],
            [300, 250],
            [500, 250],
            [800, 500],
            [800, 500],
        ], np.int32)
        cropped_image = region_of_interest(processed_img,
                                           np.array([vertices], np.int32))
        lines = cv2.HoughLinesP(cropped_image,
                                rho=2,
                                theta=np.pi / 180,
                                threshold=40,
                                lines=np.array([]),
                                minLineLength=10,
                                maxLineGap=110)
        line_image = draw_lines(image, lines, thickness=1)
        #line_image = image
        return line_image

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-n",
                    "--num-frames",
                    type=int,
                    default=100,
                    help="# of frames to loop over for FPS test")
    ap.add_argument("-d",
                    "--display",
                    type=int,
                    default=1,
                    help="Whether or not frames should be displayed")
    args = vars(ap.parse_args())

    # initialize the camera and stream
    #camera = PiCamera()
    #camera.resolution = (800, 600)
    #camera.framerate = 32

    last_time = time.time()
    '''
        while (True):
                rawCapture=PiRGBArray(camera, size=(800,600))
                #cap.framerate = 10
                camera.capture(rawCapture, format="bgr")
                frame=rawCapture.array
                #rawCapture = PiRGBArray(camera, size=(320, 240))
                #stream = camera.capture_continuous(rawCapture, format="bgr",
                #use_video_port=True)
                #frame = stream.array
                screen= process_img(frame)
                print('Frame took {} seconds'.format(time.time()-last_time))
                last_time = time.time()

                cv2.imshow('frame', screen)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        #cap.release()
        cv2.destroyAllWindows()
        
        '''
    '''
        # allow the camera to warmup and start the FPS counter
        print("[INFO] sampling frames from `picamera` module...")
        time.sleep(2.0)
        fps = FPS().start()
         
        # loop over some frames
        for (i, f) in enumerate(stream):
                # grab the frame from the stream and resize it to have a maximum
                # width of 400 pixels
                frame = f.array
                frame = imutils.resize(frame, width=400)
         
                # check to see if the frame should be displayed to our screen
                if args["display"] > 0:
                        cv2.imshow("Frame", frame)
                        key = cv2.waitKey(1) & 0xFF
         
                # clear the stream in preparation for the next frame and update
                # the FPS counter
                rawCapture.truncate(0)
                fps.update()
         
                # check to see if the desired number of frames have been reached
                if i == args["num_frames"]:
                        break
         
        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
         
        # do a bit of cleanup
        cv2.destroyAllWindows()
        stream.close()
        rawCapture.close()
        camera.close()
        '''

    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from `picamera` module...")
    vs = PiVideoStream().start()
    time.sleep(2.0)
    #fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    #while fps._numFrames < args["num_frames"]:
    '''
        while(True)
                # grab the frame from the threaded video stream and resize it
                # to have a maximum width of 800 pixels
                frame = vs.read()
                frame = imutils.resize(frame, width=800)
         
                # check to see if the frame should be displayed to our screen
                if args["display"] > 0:
                        cv2.imshow("Frame", frame)
                        key = cv2.waitKey(1) & 0xFF
         
                # update the FPS counter
                fps.update()
         
        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        '''

    # do a bit of cleanup

    while (True):
        #rawCapture=PiRGBArray(camera, size=(800,600))
        #cap.framerate = 10
        #camera.capture(rawCapture, format="bgr")
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        #rawCapture = PiRGBArray(camera, size=(320, 240))
        #stream = camera.capture_continuous(rawCapture, format="bgr",
        #use_video_port=True)
        #frame = stream.array
        screen = process_img(frame)
        print('Frame took {} seconds'.format(time.time() - last_time))
        last_time = time.time()

        cv2.imshow('frame', screen)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    vs.stop()
Example #9
class PeopleCounter(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        self.firstFrame = None

        self.move_time = time.time()
        self.movelist = []
        self.enter = 0
        self.leave = 0

    def __del__(self):
        self.vs.stop()


    def crossed_y_centerline(self, enter, leave, movelist):
        # Check if over center line then count
        if len(movelist) > 1:  # Are there two entries
            if ( movelist[0] <= y_center
                   and  movelist[-1] > y_center + y_buf ):
                leave += 1
                movelist = []
            elif ( movelist[0] > y_center
                   and  movelist[-1] < y_center - y_buf ):
                enter += 1
                movelist = []
        return enter, leave, movelist


    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        motion_found = False
        biggest_area = MIN_AREA
        frame = imutils.resize(frame, width=min(800, frame.shape[1]))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if self.firstFrame is None:
            self.firstFrame = gray
            return frame

        cv2.line(frame, (0, y_center), (x_max, y_center), (255, 0, 0), 2)

        frameDelta = cv2.absdiff(self.firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 70, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)

        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

        if cnts:
            for c in cnts:
                if cv2.contourArea(c) < 700:
                    continue
                found_area = cv2.contourArea(c)
                if found_area > biggest_area:
                    motion_found = True
                    biggest_area = found_area
                    (x, y, w, h) = cv2.boundingRect(c)
                    cx = int(x + w / 2)
                    cy = int(y + h / 2)
                    cw, ch = w, h

        if motion_found:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.circle(frame, (cx, cy), 5, (0, 0, 255), 2)
            move_timer = time.time() - self.move_time
            if move_timer >= movelist_timeout:
                self.movelist = []
            self.move_time = time.time()

            old_enter = self.enter
            old_leave = self.leave
            self.movelist.append(cy)
            self.enter, self.leave, self.movelist = self.crossed_y_centerline(
                self.enter, self.leave, self.movelist)

            if not self.movelist:
                if self.enter > old_enter:
                    prefix = 'enter'
                elif self.leave > old_leave:
                    prefix = 'leave'
                else:
                    prefix = 'error'

        img_text = ("LEAVE %i : ENTER %i" % (self.enter, self.leave))
        cv2.putText(frame, img_text, (45, 25), font, 1.0, (0, 0, 255), 2)

        return frame
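
One caveat with this design: firstFrame is captured once and never refreshed, so gradual lighting changes eventually register as motion. A common refinement (an addition on my part, not in the original class, and assuming self.avg = None is added in __init__) is to keep a running-average background with OpenCV:

# sketch: blend each new frame into a floating-point background model
# instead of diffing against a single static firstFrame
if self.avg is None:
    self.avg = gray.astype("float")
cv2.accumulateWeighted(gray, self.avg, 0.05)  # 0.05 controls how fast it adapts
frameDelta = cv2.absdiff(cv2.convertScaleAbs(self.avg), gray)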
Example #10
src = np.array([[15, 226], [303, 221], [262, 2], [64, 5]], dtype="float32")
dst = np.array([[50, 240], [270, 240], [270, 0], [50, 0]], dtype="float32")
M = cv2.getPerspectiveTransform(src, dst)
image_hsv = None
pixel = (0, 0, 0)  #RANDOM DEFAULT VALUE
try:
    arduino = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=3.0)
except:
    arduino = serial.Serial('/dev/ttyUSB1', baudrate=9600, timeout=3.0)

#video = cv2.VideoCapture(0)
video = PiVideoStream(resolution=(640, 480), framerate=60).start()
time.sleep(2.0)
timeCheck = time.time()
while True:
    image_src = video.read()
    image_src = cv2.flip(image_src, -1)
    ################# color transformation and filter -------> binary image
    image_hsv = cv2.cvtColor(image_src, cv2.COLOR_BGR2HSV)
    # lower1/upper1 and lower2/upper2 are HSV bounds defined elsewhere in the script
    image_mask = cv2.inRange(image_hsv, lower1, upper1) + cv2.inRange(
        image_hsv, lower2, upper2)

    ############################ warp transform ------------> bird's-eye view
    warped = cv2.warpPerspective(image_mask, M, (320, 240))

    #cv2.imshow("aaa", warped)
    #cv2.waitKey(1)
    ############ for line detection
    edges = cv2.Canny(warped, 30, 50)
    section = 100
    roi = edges[section:, :]
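
The snippet stops after slicing roi from the edge image. One simple way to turn that strip into a steering offset (a sketch of a common approach, not the original's logic) is to average the column indices of the edge pixels and compare them with the image centre:

# find the columns where Canny fired in the bottom strip of the warped view
ys, xs = np.nonzero(roi)
if xs.size > 0:
    line_center = int(xs.mean())
    offset = line_center - warped.shape[1] // 2  # negative = line left of centre
    # the offset could then be scaled into a steering command for the Arduino
    arduino.write(('%d\n' % offset).encode())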
Example #11
    def process(self):
        self.image = tf.placeholder(tf.float32,
                                    shape=[1, self.width, self.height, 3],
                                    name='image')
        x = self.image
        rate = [1, 1]
        buff = []
        with tf.variable_scope(None, 'MobilenetV1'):
            for m in self.layers:
                stride = [1, m['stride'], m['stride'], 1]
                rate = [m['rate'], m['rate']]
                if m['convType'] == "conv2d":
                    x = self.conv(x, stride, m['blockId'])
                    buff.append(x)
                elif m['convType'] == "separableConv":
                    x = self.separableConv(x, stride, m['blockId'], rate)
                    buff.append(x)
        self.heatmaps = self.convToOutput(x, 'heatmap_2')
        self.offsets = self.convToOutput(x, 'offset_2')
        self.displacementFwd = self.convToOutput(x, 'displacement_fwd_2')
        self.displacementBwd = self.convToOutput(x, 'displacement_bwd_2')
        self.heatmaps = tf.sigmoid(self.heatmaps, 'heatmap')

        cap = PiVideoStream().start()
        time.sleep(2.0)
        cap_width = 320
        cap_height = 240
        width_factor = cap_width / self.width
        height_factor = cap_height / self.height

        with tf.Session() as sess:
            ######################
            #    Setup GCloud    #
            ######################
            
            project_id = "ai-dj-36"
            topic_name = "pose"
            
            publisher = pubsub_v1.PublisherClient()
            topic_path = publisher.topic_path(project_id, topic_name)
            
            # create the topic if it does not exist
            project_path = publisher.project_path(project_id)
            topics = publisher.list_topics(project_path)
            topic_names = [topic.name for topic in topics]
            if topic_path not in topic_names:
                topic = publisher.create_topic(topic_path)
                print('Topic created: {}'.format(topic))

            # continue with model
            init = tf.global_variables_initializer()
            sess.run(init)

            saver = tf.train.Saver()
            save_dir = './checkpoints'
            save_path = os.path.join(save_dir, 'model_ckpt')
            saver.save(sess, save_path)
            while True:
                total_point_var = 0.0
                ave_counter = 0
                frame = cap.read()
                init_pose = self.process_frame(sess, cap, frame, width_factor, height_factor)
                while(len(init_pose) <= 0 or init_pose[0]['score'] <= 0.2):
                    frame = cap.read()
                    init_pose = self.process_frame(sess, cap, frame, width_factor, height_factor)
                    cv2.imshow("1", frame)
                    cv2.waitKey(1)

                while True:
                    frame = cap.read()
                    orig_image = frame
                    startime = time.time()
                    curr_pose = self.process_frame(sess, cap, frame, width_factor, height_factor)
                    if len(curr_pose) == 0:
                        continue
                    if ave_counter < NUM_FRAMES_TO_AVERAGE:
                        total_point_var += measure_keypoint_var(init_pose[0], curr_pose[0])
                        ave_counter += 1
                    else:
                        print(total_point_var / NUM_FRAMES_TO_AVERAGE)
                        # send score to appengine
                        curtime = time.time()
                        payload_contents = {"device_id":"jeffsrpi", "published_at":curtime, "pose": total_point_var / NUM_FRAMES_TO_AVERAGE}
                        payload = json.dumps(payload_contents)
                        payload = payload.encode("utf-8")
                        print(payload_contents)
                        future = publisher.publish(topic_path, data=payload)

                        ave_counter = 0 
                        total_point_var = 0
                        
                    init_pose = curr_pose
                    
                    for idx in range(len(curr_pose)):
                        if curr_pose[idx]['score'] > 0.2:
                            color = color_table[idx]
                            drawKeypoints(curr_pose[idx], orig_image, color)
                            drawSkeleton(curr_pose[idx], orig_image)
                    
                    endtime = time.time()
                    print('Time cost per frame : %f' % (endtime - startime))
                    cv2.imshow("1", orig_image)
                    cv2.waitKey(1)
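
On the receiving end, a matching Pub/Sub consumer for the pose scores published above could look like the sketch below, using the same google-cloud-pubsub client style and project id; the subscription name 'pose-sub' is an assumption and would need to exist against the 'pose' topic:

from google.cloud import pubsub_v1
import json

subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path("ai-dj-36", "pose-sub")

def callback(message):
    # payload matches what process() publishes: device_id, published_at, pose
    payload = json.loads(message.data.decode("utf-8"))
    print(payload["device_id"], payload["pose"])
    message.ack()

future = subscriber.subscribe(subscription_path, callback=callback)
future.result()  # block and keep pulling messages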
Example #12
class VideoCamera(object):
    def __init__(self):
        self.cap = PiVideoStream().start()
        time.sleep(0.1)
        print("Initialising Raspberry Pi...")
        GPIO.output(11, 1)
        print("LED Red Testing")
        time.sleep(1.0)
        GPIO.output(11, 0)
        GPIO.output(13, 1)
        print("LED Green Testing")
        time.sleep(1.0)
        GPIO.output(13, 0)
        GPIO.output(15, 1)
        print("LED Blue Testing")
        time.sleep(1.0)
        GPIO.output(15, 0)
        wiringpi.wiringPiSetup()
        wiringpi.pinMode(26, 2)
        wiringpi.softPwmCreate(26, 0, 25)

    def __del__(self):
        self.cap.stop()

    def get_frame(self):
        frame = self.cap.read()
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        frame = self.cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 140, 125), 2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_mask(self):
        frame = self.cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        dutycycle = 0

        model = Model()
        interpreter = model.load_interpreter()
        input_details = model.input_details()
        output_details = model.output_details()
        input_shape = input_details[0]['shape']
        label_dict = {0: 'MASK', 1: "NO MASK"}
        eye_img = gray
        resized = cv2.resize(eye_img, (100, 100))
        normalized = resized / 255.0
        reshaped = np.reshape(normalized, input_shape)
        reshaped = np.float32(reshaped)
        interpreter.set_tensor(input_details[0]['index'], reshaped)
        interpreter.invoke()
        result = interpreter.get_tensor(output_details[0]['index'])
        result = 1 - result
        label = np.argmax(result, axis=1)[0]
        if label == 0:
            cv2.putText(frame, label_dict[label], (75, 150),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 240, 255), 2)
            GPIO.output(15, 1)
            print(label_dict[label])
            wiringpi.softPwmWrite(26, 25)
            time.sleep(0.1)
            position = dutycycle
            GPIO.output(15, 0)
        elif label == 1:
            cv2.putText(frame, label_dict[label], (75, 150),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 240, 255), 2)
            GPIO.output(13, 1)
            print(label_dict[label])
            wiringpi.softPwmWrite(26, 0)
            time.sleep(0.1)
            position = dutycycle
            GPIO.output(13, 0)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
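
Model() above is an undefined project wrapper. A hypothetical minimal version around the TFLite interpreter, matching only the methods get_mask calls (load_interpreter, input_details, output_details), might be; the model path is an assumption:

import tflite_runtime.interpreter as tflite

class Model:
    # hypothetical wrapper; 'mask_detector.tflite' is an assumed model path
    def __init__(self, model_path='mask_detector.tflite'):
        self.interpreter = tflite.Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()

    def load_interpreter(self):
        return self.interpreter

    def input_details(self):
        return self.interpreter.get_input_details()

    def output_details(self):
        return self.interpreter.get_output_details()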
Example #13
    def run(self):
        while True:
            start_time = time.time()
            #time.sleep(60)  # because at startup nothing is connected yet
            # Try to find every Pico Zebro every 60 seconds.

            # First, send a global stop command to all Pico Zebros.

            # Wait until the connected Pico Zebros report back that they have stopped.
            # Also turn all LEDs off.
            # If not everyone has stopped within the time limit, try sending the
            # global command again.
            #Picture(1)

            time1 = 0
            t_start = time.time()  # start
            e1 = cv2.getTickCount()
            #First Take original Picture
            vs = PiVideoStream().start()
            #time.sleep(2.0)
            fps = FPS().start()
            while fps._numFrames < 10:
                # grab the frame from the threaded video stream and resize it
                # to have a maximum width of 400 pixels
                frame = vs.read()
             
                # check to see if the frame should be displayed to our screen
 #               cv2.imshow("Frame", frame)
                cv2.imwrite("image25.jpg",frame)
                key = cv2.waitKey(1) & 0xFF
             
                # update the FPS counter
                fps.update()
             
            # stop the timer and display FPS information
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
             
            # do a bit of cleanup
            cv2.destroyWindow("Frame")
            vs.stop()

            
            Original = cv2.imread("Image1.jpg")
            e2 = cv2.getTickCount()
            
            # with Image1 being the original image where nothing has moved

            
            time1 = (e2 - e1) / cv2.getTickFrequency() + time1
            elapsedTime = time.time()-t_start
            print (time1, elapsedTime)
            #for Devices in 16:
                #print(Devices)
                #if Devices == 1: # this has to be done for every Zebro
                # Also, if Devices is 1, assign a variable the address of Pico Zebro 1.
                # Tell Pico Zebro 1 to turn LED 1 on.
                # Wait until the register reports it has been on for a certain time.

            #Picture(2) # LED 1 on in picture 2

                # Tell Pico Zebro 1 to turn LED 1 off and LED 3 on.
                # Wait until the Pico Zebro confirms it has done so.

            #Picture(3) # LED 3 on in picture 3
                # Turn all LEDs off again. # Up to here, this needs to be coded for every possible Zebro
                
            Led_1 = cv2.imread("Image2.jpg")
            Led_3 = cv2.imread("Image3.jpg")
            picture_test = 0
            
            New_image_Led_1 = abs(Original - Led_1)    # subtracting improved timing by ~0.05 s, which for 20 Zebros is 1 second
            New_image_Led_3 = abs(Original - Led_3)
            
            
            Difference_led_1 = Image_Difference(New_image_Led_1)
            Difference_led_3 = Image_Difference(New_image_Led_3)
            
            Finding_Canny_Led_1 = cv2.Canny(Difference_led_1, 15, 200)
            Finding_Canny_Led_3 = cv2.Canny(Difference_led_3, 15, 200)
            
            Finding_Canny_Led_1 = cv2.morphologyEx(Finding_Canny_Led_1, cv2.MORPH_CLOSE, np.ones((8,8),np.uint8))
            Finding_Canny_Led_3 = cv2.morphologyEx(Finding_Canny_Led_3, cv2.MORPH_CLOSE, np.ones((8,8),np.uint8))

            Finding_Canny_Led_1 = cv2.Canny(Finding_Canny_Led_1, 100, 200)
            Finding_Canny_Led_3 = cv2.Canny(Finding_Canny_Led_3, 100, 200)

            (_, contours_Led_1, _) = cv2.findContours(Finding_Canny_Led_1.copy(), cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_NONE)
            (_, contours_Led_3, _) = cv2.findContours(Finding_Canny_Led_3.copy(), cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_NONE)
            x_Led_1 = 0
            y_Led_1 = 0
            w_Led_1 = 0
            h_Led_1 = 0
            for c in contours_Led_1:
                try:
                    [x_Led_1,y_Led_1,w_Led_1,h_Led_1] = cv2.boundingRect(c)
                    cv2.rectangle(Difference_led_1,(x_Led_1,y_Led_1),(x_Led_1+w_Led_1,y_Led_1+h_Led_1),(0,255,0),2)
                    cv2.putText(Difference_led_1,'Found led 1',(x_Led_1+w_Led_1+10,y_Led_1+h_Led_1),0,0.3,(0,255,0))
                except AttributeError:
                    print("Nothing Found")
                    pass
            x_Led_3 = 0
            y_Led_3 = 0
            w_Led_3 = 0
            h_Led_3 = 0    
            for c in contours_Led_3:
                try:
                    [x_Led_3,y_Led_3,w_Led_3,h_Led_3] = cv2.boundingRect(c)
                    cv2.rectangle(Difference_led_3,(x_Led_3,y_Led_3),(x_Led_3+w_Led_3,y_Led_3+h_Led_3),(0,255,0),2)
                    cv2.putText(Difference_led_3,'Found led 3',(x_Led_3+w_Led_3+10,y_Led_3+h_Led_3),0,0.3,(0,255,0))
                except AttributeError:
                    print("Nothing Found")
                    pass
            # For some reason this debug function raises errors that kill
            # everything, which is annoying
                
            #Add led 1 and 3 together for Testing purposes
            #LEDS_Image = cv2.addWeighted(Difference_led_1,1,Difference_led_3,1,0)
            #cv2.imwrite("Leds_image.jpg", LEDS_Image)
            #cv2.imshow("LEds together", LEDS_Image)
                
            try:
                (Zebro_Middle_x,Zebro_Middle_y,Direction) = Find_Orientation(x_Led_1,x_Led_3,y_Led_1,y_Led_3)
                #print(Zebro_Middle_x,Zebro_Middle_y,Direction)
            except TypeError as e:
                print(e)
                pass

           
            #if Devices == 0: here the data is acquired multiple times
                #set data from PZ 1
                #Zebro_1_Middle_x = Zebro_Middle_x
                #Zebro_1_Middle_y,
                #Direction_Zebro_1 = Direction
                #etc. for every value
                # Now store these 3 values per robot (1..number of robots) and send
                # them to the thread that controls the robots.
            # Once every value for every possible Zebro is determined,
            # check whether any of the x and y values are close to each other,
            # or whether any x or y value is too close to the edge, i.e.
            # if x == 0 or x == 1600 or y == 0 or y == 920.
            # So a gigantic chain of if statements, which becomes smaller and smaller.
            #
            
            #time.sleep(0.1)
            print ("My program took", time.time() - start_time, "to run")
Ejemplo n.º 14
0
def main():
    default_model_dir = 'all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--camera_idx',
                        type=int,
                        help='Index of which video source to use. ',
                        default=0)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()
    multiTracker = cv2.MultiTracker_create()

    #Initialize logging files
    logging.basicConfig(filename='storage/results.log',
                        format='%(asctime)s-%(message)s',
                        level=logging.DEBUG)

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    vs = PiVideoStream(resolution=(2048, 1536), framerate=32).start()
    #cap = cv2.VideoCapture(args.camera_idx)
    cap = vs.stream
    #cap.set(3, 1920)
    #cap.set(4, 1440)
    # 4:3 resolutions
    # 640×480, 800×600, 960×720, 1024×768, 1280×960, 1400×1050,
    # 1440×1080 , 1600×1200, 1856×1392, 1920×1440, 2048×1536
    # 5 MP
    #cap.set(3, 2048)
    #cap.set(4, 1536)

    bboxes = []
    colors = []
    visitation = []
    trackers = []
    started_tracking = None
    last_tracked = None
    visitation_id = None
    save_one_with_boxes = False
    recording = False
    out = None
    fps = FPS().start()
    is_stopped = False
    current_fps = 4.0

    while vs is not None:
        try:
            frame = vs.read()

            if frame is not None:
                if fps._numFrames < 500:
                    fps.update()
                else:
                    fps.stop()
                    current_fps = fps.fps()
                    logging.info("[INFO] elasped time: {:.2f}".format(
                        fps.elapsed()))
                    logging.info("[INFO] approx. FPS: {:.2f}".format(
                        fps.fps()))
                    fps = FPS().start()

                success, boxes = multiTracker.update(frame)

                if success:
                    last_tracked = time.time()

                if len(boxes) > 0:
                    logging.info("success {}".format(success))
                    logging.info("boxes {}".format(boxes))

                cv2_im = frame
                #cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
                pil_im = Image.fromarray(cv2_im)

                common.set_input(interpreter, pil_im)
                interpreter.invoke()
                objs = get_output(interpreter,
                                  score_threshold=args.threshold,
                                  top_k=args.top_k)
                height, width, channels = cv2_im.shape

                bird_detected = False
                boxes_to_draw = []
                for obj in objs:
                    x0, y0, x1, y1 = list(obj.bbox)
                    x0, y0, x1, y1 = int(x0 * width), int(y0 * height), int(
                        x1 * width), int(y1 * height)
                    percent = int(100 * obj.score)
                    object_label = labels.get(obj.id, obj.id)
                    label = '{}% {}'.format(percent, object_label)
                    hdd = psutil.disk_usage('/')

                    if object_label == 'bird' and percent > 40:
                        bird_detected = True
                        new_bird = True

                        for bbox in boxes:
                            if intersects(bbox, obj.bbox):
                                logging.info("intersected.. same bird")
                                new_bird = False

                        if new_bird and len(bboxes) == 0:
                            logging.info("found a new bird")
                            visitation_id = uuid.uuid4()
                            started_tracking = time.time()
                            recording = True
                            save_one_with_boxes = True
                            bboxes.append(obj.bbox)
                            colors.append((randint(64, 255), randint(64, 255),
                                           randint(64, 255)))
                            tracker = cv2.TrackerCSRT_create()
                            trackers.append(tracker)
                            multiTracker.add(tracker, cv2_im, obj.bbox)

                        if hdd.percent < 95:
                            boxed_image_path = "storage/detected/boxed_{}_{}_{}.png".format(
                                time.strftime("%Y-%m-%d_%H-%M-%S"), percent,
                                visitation_id)
                            full_image_path = "storage/detected/full_{}_{}_{}.png".format(
                                time.strftime("%Y-%m-%d_%H-%M-%S"), percent,
                                visitation_id)
                            cv2.imwrite(boxed_image_path, cv2_im[y0:y1, x0:x1])
                            if percent > 95:
                                cv2.imwrite(full_image_path, cv2_im)

                        else:
                            logging.info("Not enough disk space")

                    percent = int(100 * obj.score)
                    object_label = labels.get(obj.id, obj.id)
                    label = '{}% {}'.format(percent, object_label)

                    # postpone drawing so we don't get lines in the photos
                    boxes_to_draw.append({
                        "p1": (x0, y0),
                        "p2": (x1, y1),
                        "label": label,
                        "label_p": (x0, y0 + 30)
                    })

                for box in boxes_to_draw:
                    cv2_im = cv2.rectangle(cv2_im, box["p1"], box["p2"],
                                           (0, 255, 0), 2)
                    cv2_im = cv2.putText(cv2_im, box["label"], box["label_p"],
                                         cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                         (255, 0, 0), 2)

                if recording:
                    if out is None:
                        fourcc = cv2.VideoWriter_fourcc(*'X264')
                        out = cv2.VideoWriter(
                            "storage/video/{}.avi".format(visitation_id),
                            fourcc, 4.0, (2048, 1536))
                    out.write(cv2_im)

                if bird_detected and save_one_with_boxes:
                    with_boxes_image_path = "storage/with_boxes/full_{}_{}.png".format(
                        time.strftime("%Y-%m-%d_%H-%M-%S"), visitation_id)
                    cv2.imwrite(with_boxes_image_path, cv2_im)
                    save_one_with_boxes = False

                if not bird_detected and len(trackers) > 0:
                    now = time.time()
                    if now - last_tracked > 60:
                        logging.info("visitation {} lasted {} seconds".format(
                            visitation_id, now - started_tracking))
                        logging.info("clearing trackers")
                        for tracker in trackers:
                            tracker.clear()
                        multiTracker = cv2.MultiTracker_create()
                        boxes = []
                        colors = []
                        trackers = []
                        bboxes = []
                        recording = False
                        out.release()
                        out = None

                for i, newbox in enumerate(boxes):
                    x0, y0, x1, y1 = list(newbox)
                    x0, y0, x1, y1 = int(x0 * width), int(y0 * height), int(
                        x1 * width), int(y1 * height)
                    cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1),
                                           (0, 0, 255), 2)

                cv2.namedWindow('Leroy', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('Leroy', 800, 600)
                cv2.imshow('Leroy', cv2_im)

        except KeyboardInterrupt:
            print('Interrupted')
            try:
                sys.exit(0)
            except SystemExit:
                os._exit(0)
        except:
            logging.exception('Something happened.')
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    if out is not None:
        out.release()
    vs.stop()
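# intersects() is used above but not defined in this snippet. A plausible
# axis-aligned overlap test; the corner format (x0, y0, x1, y1) for both
# boxes is an assumption:
def intersects(box_a, box_b):
    ax0, ay0, ax1, ay1 = box_a
    bx0, by0, bx1, by1 = box_b
    # the boxes overlap unless one lies entirely beside or above the other
    return not (ax1 < bx0 or bx1 < ax0 or ay1 < by0 or by1 < ay0)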
def cameraOptim():
    time.sleep(2)
    serialHandler = SerialHandler.SerialHandler("/dev/ttyACM0")
    serialHandler.startReadThread()
    serialHandler.sendPidActivation(True)

    def moveForward():
        serialHandler.sendMove(SPEED, -1.8)
        print("Forward")

    def moveLeft():
        serialHandler.sendMove(SPEED, -23.0)
        print("Left")

    def moveRight():
        serialHandler.sendMove(SPEED, 23.0)
        print("Right")

    def moveBackward():
        serialHandler.sendMove(-SPEED, -1.8)
        print("Back")

    def dontMove():
        serialHandler.sendMove(0, -1.8)
        print("Break")

    def region_of_interest(img, vertices):
        mask = np.zeros_like(img)
        match_mask_color = (255)
        cv2.fillPoly(mask, vertices, match_mask_color)
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image

    def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
        # if this fails, go with some default line
        try:
            ys = []
            for i in lines:
                for ii in i:
                    ys += [ii[1], ii[3]]
            min_y = min(ys)
            max_y = 600
            new_lines = []
            line_dict = {}

            for idx, i in enumerate(lines):
                for xyxy in i:
                    x_coords = (xyxy[0], xyxy[2])
                    y_coords = (xyxy[1], xyxy[3])
                    A = vstack([x_coords, ones(len(x_coords))]).T
                    m, b = lstsq(A, y_coords)[0]
                    x1 = (min_y - b) / m
                    x2 = (max_y - b) / m

                    line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
                    new_lines.append([int(x1), min_y, int(x2), max_y])

            final_lanes = {}

            for idx in line_dict:
                final_lanes_copy = final_lanes.copy()
                m = line_dict[idx][0]
                b = line_dict[idx][1]
                line = line_dict[idx][2]

                if len(final_lanes) == 0:
                    final_lanes[m] = [[m, b, line]]

                else:
                    found_copy = False

                    for other_ms in final_lanes_copy:

                        if not found_copy:
                            if abs(other_ms * 1.2) > abs(m) > abs(
                                    other_ms * 0.8):
                                if abs(final_lanes_copy[other_ms][0][1] *
                                       1.2) > abs(b) > abs(
                                           final_lanes_copy[other_ms][0][1] *
                                           0.8):
                                    final_lanes[other_ms].append([m, b, line])
                                    found_copy = True
                                    break
                            else:
                                final_lanes[m] = [[m, b, line]]

            line_counter = {}

            for lanes in final_lanes:
                line_counter[lanes] = len(final_lanes[lanes])

            top_lanes = sorted(line_counter.items(),
                               key=lambda item: item[1])[::-1][:2]

            lane1_id = top_lanes[0][0]
            lane2_id = top_lanes[1][0]

            def average_lane(lane_data):
                x1s = []
                y1s = []
                x2s = []
                y2s = []
                for data in lane_data:
                    x1s.append(data[2][0])
                    y1s.append(data[2][1])
                    x2s.append(data[2][2])
                    y2s.append(data[2][3])
                return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(
                    mean(y2s))

            l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
            l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

            return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2,
                                                  l2_y2], lane1_id, lane2_id
        except Exception as e:
            print(str(e))

    def process_img(image):
        original_image = image
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        processed_img = cv2.Canny(gray_image, 200, 300)
        processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)

        vertices = np.array([
            [10, 500],
            [10, 400],
            [300, 250],
            [500, 250],
            [800, 500],
            [800, 500],
        ], np.int32)
        cropped_image = region_of_interest(processed_img, [vertices])
        lines = cv2.HoughLinesP(cropped_image,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=180,
                                lines=np.array([]),
                                minLineLength=20,
                                maxLineGap=15)
        m1 = 0
        m2 = 0
        try:
            l1, l2, m1, m2 = draw_lanes(original_image, lines)
            cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]),
                     [0, 255, 0], 30)
            cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]),
                     [0, 255, 0], 30)
        except Exception as e:
            print(str(e))
            pass
        try:
            for coords in lines:
                coords = coords[0]
                try:
                    cv2.line(processed_img, (coords[0], coords[1]),
                             (coords[2], coords[3]), [255, 0, 0], 3)

                except Exception as e:
                    print(str(e))
        except Exception as e:
            pass
        return processed_img, original_image, m1, m2

    ap = argparse.ArgumentParser()
    ap.add_argument("-n",
                    "--num-frames",
                    type=int,
                    default=100,
                    help="# of frames to loop over for FPS test")
    ap.add_argument("-d",
                    "--display",
                    type=int,
                    default=1,
                    help="Whether or not frames should be displayed")
    args = vars(ap.parse_args())

    print("[INFO] sampling THREADED frames from `picamera` module...")
    vs = PiVideoStream().start()

    last_time = time.time()
    while (True):
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        last_time = time.time()
        new_screen, original_image, m1, m2 = process_img(frame)
        print('Frame took {} seconds'.format(time.time() - last_time))

        cv2.imshow(
            'frame',
            original_image)  # the cv2.COLOR_BGR2RGB conversion could also be applied here

        if (waitline != ser.readline()):

            if m1 < 0 and m2 < 0:
                moveRight()
                time.sleep(0.1)
                #dontMove()
            elif m1 > 0 and m2 > 0:
                moveLeft()
                time.sleep(0.1)
                #dontMove()
            else:
                moveForward()
                time.sleep(0.1)
                #dontMove()
        else:
            dontMove()
            print("OBSTACLE AHEAD")

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    vs.stop()
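# Note: `ser` and `waitline` in the loop above are assumed to be defined at
# module level in the original script, e.g. (hypothetical):
#
#     import serial
#     ser = serial.Serial("/dev/ttyACM1", 9600, timeout=0.05)
#     waitline = b''  # any non-matching line from the sensor signals an obstacle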
 
class QRDetector(object):
    def __init__(self, flip = False):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
        
    def process_image(self, frame):
        pass

    def decode(self, frame):
        pass

    def draw(self, frame, decoded_objs):
        pass


# Module-level Flask streaming helpers. `app`, `camera`, `rawCapture`,
# `pyzbar`, and `datetime` are assumed to be imported/initialised elsewhere
# in the original module.
@app.route('/stream')
def stream():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


def gen():
    while True:
        frame = get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')


def get_frame():
    camera.capture(rawCapture, format="bgr", use_video_port=True)
    frame = rawCapture.array
    decoded_objs = decode(frame)
    frame = display(frame, decoded_objs)
    ret, jpeg = cv2.imencode('.jpg', frame)
    rawCapture.truncate(0)

    return jpeg.tobytes()


def decode(frame):
    decoded_objs = pyzbar.decode(frame, scan_locations=True)
    for obj in decoded_objs:
        print(datetime.now().strftime('%H:%M:%S.%f'))
        print('Type: ', obj.type)
        print('Data: ', obj.data)

    return decoded_objs


def display(frame, decoded_objs):
    # draw a bounding box around each decoded QR code
    for decoded_obj in decoded_objs:
        left, top, width, height = decoded_obj.rect
        frame = cv2.rectangle(frame,
                              (left, top),
                              (left + width, top + height),
                              (0, 255, 0), 2)

    return frame


if __name__ == '__main__':
    app.run(host="0.0.0.0", debug=False, threaded=True)
Ejemplo n.º 18
0
class DroidVisionThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME) # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE # 0 = stop, 0.5 = medium speed, 1 = fastest

    def run(self):
        debug("DroidVisionThread: Thread started")
        self.vision_processing()
        self.camera.stop()
        cv2.destroyAllWindows()
        debug("DroidVisionThread: Thread stopped")

    def stop(self):
        self.running = False
        debug("DroidVisionThread: Stopping thread")

    def vision_processing(self):
        while self.running:
            self.grab_frame()

            # colour mask
            blue_mask = cv2.inRange(self.frame_chroma, config.BLUE_CHROMA_LOW, config.BLUE_CHROMA_HIGH)
            yellow_mask = cv2.inRange(self.frame_chroma, config.YELLOW_CHROMA_LOW, config.YELLOW_CHROMA_HIGH)
            colour_mask = cv2.bitwise_or(blue_mask, yellow_mask)
            colour_mask = cv2.erode(colour_mask, config.ERODE_KERNEL)
            colour_mask = cv2.dilate(colour_mask, config.DILATE_KERNEL)
            
            # lines
            lines = cv2.HoughLinesP(colour_mask, config.HOUGH_LIN_RES, config.HOUGH_ROT_RES, config.HOUGH_VOTES, config.HOUGH_MIN_LEN, config.HOUGH_MAX_GAP)
            blue_lines = np.array([])
            yellow_lines = np.array([])
            if lines is not None:
                for line in lines:
                    x1,y1,x2,y2 = line[0]
                    angle = np.rad2deg(np.arctan2(y2-y1, x2-x1))
                    if config.MIN_LINE_ANGLE < abs(angle) < config.MAX_LINE_ANGLE:
                        if config.IMSHOW:
                            cv2.line(self.frame, (x1,y1), (x2,y2), (0,0,255), 1)
                        if angle > 0:
                            yellow_lines = np.append(yellow_lines, [x1, x2])
                        elif angle < 0:
                            blue_lines = np.append(blue_lines, [x1, x2])

            # find centre
            blue_mean = self.last_blue_mean
            yellow_mean = self.last_yellow_mean
            if len(blue_lines):
                blue_mean = int(np.mean(blue_lines))
            if len(yellow_lines):
                yellow_mean = int(np.mean(yellow_lines))
            self.centre = (blue_mean + yellow_mean) / 2.0
            self.last_blue_mean = blue_mean
            self.last_yellow_mean = yellow_mean

            self.can_see_lines = (len(blue_lines) or len(yellow_lines))

            # set steering and throttle
            self.desired_steering = (1.0 - (self.centre / config.WIDTH))
            if len(blue_lines) or len(yellow_lines):
                self.desired_throttle = 0.22
            else:
                self.desired_throttle = 0

            if config.IMSHOW:
                cv2.circle(self.frame, (int(self.centre), config.HEIGHT - 20), 10, (0,0,255), -1)
                cv2.imshow("colour_mask without noise", colour_mask)
                cv2.imshow("raw frame", self.frame)
                cv2.waitKey(1)

    def grab_frame(self):
        self.fps_counter.update()
        # grab latest frame and index the ROI
        self.frame = self.camera.read()
        self.frame = self.frame[config.ROI_YMIN:config.ROI_YMAX, config.ROI_XMIN:config.ROI_XMAX]
        # convert to chromaticity colourspace
        B = self.frame[:, :, 0].astype(np.uint16)
        G = self.frame[:, :, 1].astype(np.uint16)
        R = self.frame[:, :, 2].astype(np.uint16)    
        Y = 255.0 / (B + G + R)
        b = (B * Y).astype(np.uint8)
        g = (G * Y).astype(np.uint8)
        r = (R * Y).astype(np.uint8)
        self.frame_chroma = cv2.merge((b,g,r))

    def get_fps(self):
        self.fps_counter.stop()
        return self.fps_counter.fps()

    def get_error(self):
        return (config.CENTRE - self.centre)

    def get_steering_throttle(self):
        return self.desired_steering, self.desired_throttle
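# grab_frame() above converts each frame to rgb chromaticity: every channel
# is divided by the pixel's total intensity (B + G + R) and rescaled to
# 0-255, which suppresses brightness variation before the colour masks run.
# A standalone sketch of the same transform, with a guard against the
# division by zero that pure-black pixels would cause (the guard is an
# addition, not in the original):
import cv2
import numpy as np

def to_chromaticity(frame_bgr):
    b, g, r = [c.astype(np.uint16) for c in cv2.split(frame_bgr)]
    total = np.maximum(b + g + r, 1)  # avoid 0/0 on black pixels
    scale = 255.0 / total
    return cv2.merge([(c * scale).astype(np.uint8) for c in (b, g, r)])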
Ejemplo n.º 19
0
    io.output(7, 0)
    io.setup(11, io.OUT)
    io.setup(13, io.OUT)
    laser_h = io.PWM(11, 50)
    laser_h.start(7.5)
    laser_v = io.PWM(13, 50)
    laser_v.start(7.5)

    wd = 400
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    wCam = WebcamVideoStream(src=0).start()
    pCam = PiVideoStream().start()
    sleep(2.0)
    while True:
        pFrame = pCam.read()
        wFrame = wCam.read()
        pFrame = imutils.resize(pFrame, width=wd)
        wFrame = imutils.resize(wFrame, width=wd)
        pFound, _ = hog.detectMultiScale(pFrame,
                                         winStride=(8, 8),
                                         padding=(8, 8),
                                         scale=1.05)
        wFound, _ = hog.detectMultiScale(wFrame,
                                         winStride=(8, 8),
                                         padding=(8, 8),
                                         scale=1.05)
        pFound = np.array([[x, y, x + w, y + h] for (x, y, w, h) in pFound])
        wFound = np.array([[x, y, x + w, y + h] for (x, y, w, h) in wFound])
        pFine = non_max_suppression(pFound, probs=None, overlapThresh=1)
        wFine = non_max_suppression(wFound, probs=None, overlapThresh=1)
Ejemplo n.º 20
0
#init pygame timer
clock = pygame.time.Clock()

#global calibration array variables
imcalib = 0
imgain = 0

#frame variables
counter = 0
mode = 0
frame = 0

#Pi Camera interface from imutils
vs = PiVideoStream(resolution=(width*2, height*2),framerate=30).start()
vsimage = vs.read()

try:
    import pygame.surfarray as surfarray
except ImportError:
    raise ImportError("pygame failed to initialize")

#pygame font init
pygame.font.init()
myfont = pygame.font.SysFont('Nimbus Sans', 15)

#GUI variables
uiMenuItem = 0
uiSelector = 0
uiTimer = 0
uiVisible = 0
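# The snippet above reads OpenCV frames and imports pygame.surfarray; a
# common way to blit such a frame onto the pygame display (a sketch, not
# shown in the original) is:
import cv2
import numpy as np
import pygame.surfarray as surfarray

def frame_to_surface(frame_bgr):
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    # pygame surfaces index (x, y) while OpenCV arrays index (row, col),
    # so swap the first two axes before building the surface
    return surfarray.make_surface(np.transpose(rgb, (1, 0, 2)))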
Ejemplo n.º 21
0
	else:
		if(not IS_IR_FIRED):
			search_mode()
			
			
			
if __name__ == '__main__':
	vs = PiVideoStream().start()
	GPIO.setmode(GPIO.BOARD)
	time.sleep(0.5)

	while True:
	
		try:
			src = vs.read()
			src = cv2.flip(src, 1)
			#src = src[52:170, 0:320]

			find_box(src)

			
			
			if cv2.waitKey(1)==27:
				break
				
		except KeyboardInterrupt:
			break

	vs.stop()
	cv2.destroyAllWindows()
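# find_box() is called above but not defined in this snippet. A hypothetical
# implementation in the spirit of the other examples: threshold the frame and
# draw a bounding box around the largest contour (the threshold values and
# the OpenCV 4 two-value findContours signature are assumptions):
import cv2

def find_box(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        c = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("box", image)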
Ejemplo n.º 22
0
class CameraFeed:
    # frame dimension (calculated below in go)
    _frame_width = 0
    _frame_height = 0

    # how many frames processed
    _frame = 0

    def __init__(self,
                 source=0,
                 crop_x1=0,
                 crop_y1=0,
                 crop_x2=500,
                 crop_y2=500,
                 max_width=640,
                 b_and_w=False,
                 hog_win_stride=6,
                 hog_padding=8,
                 hog_scale=1.05,
                 mog_enabled=False,
                 people_options=None,
                 lines=None,
                 font=cv2.FONT_HERSHEY_SIMPLEX,
                 endpoint=None,
                 pi=False,
                 show_window=True,
                 to_stdout=False,
                 save_first_frame=False,
                 quit_after_first_frame=False,
                 camera_resolution=(1280, 720),
                 threaded=False):

        self.__dict__.update(locals())

    def go_config(self, config_path=None):

        # load config
        config = configparser.ConfigParser()
        config.read(config_path)

        # remote host settings
        self.endpoint = config.get('host', 'endpoint', fallback=None)

        # backend settings
        project_name = 'test'
        self.backend = FeedDB(project_name)

        # platform
        self.pi = config.getboolean('platform', 'pi')
        self.to_stdout = config.getboolean('platform', 'to_stdout')
        self.show_window = config.getboolean('platform', 'show_window')
        self.save_first_frame = config.getboolean('platform',
                                                  'save_first_frame')
        self.quit_after_first_frame = config.getboolean(
            'platform', 'quit_after_first_frame')
        self.threaded = config.getboolean('platform', 'threaded')

        # video source settings
        self.crop_x1 = config.getint('video_source', 'frame_x1')
        self.crop_y1 = config.getint('video_source', 'frame_y1')
        self.crop_x2 = config.getint('video_source', 'frame_x2')
        self.crop_y2 = config.getint('video_source', 'frame_y2')
        self.max_width = config.getint('video_source', 'max_width')
        self.b_and_w = config.getboolean('video_source', 'b_and_w')

        # hog settings
        self.hog_win_stride = config.getint('hog', 'win_stride')
        self.hog_padding = config.getint('hog', 'padding')
        self.hog_scale = config.getfloat('hog', 'scale')

        # mog settings
        self.mog_enabled = config.getboolean('mog', 'enabled')
        if self.mog_enabled:
            self.mogbg = cv2.createBackgroundSubtractorMOG2()

        # setup lines
        lines = []
        total_lines = config.getint('triplines', 'total_lines')

        for idx in range(total_lines):
            key = 'line%d' % (idx + 1)
            start = eval(config.get('triplines', '%s_start' % key))
            end = eval(config.get('triplines', '%s_end' % key))
            buffer = config.getint('triplines', '%s_buffer' % key, fallback=10)
            direction_1 = config.get('triplines',
                                     '%s_direction_1' % key,
                                     fallback='Up')
            direction_2 = config.get('triplines',
                                     '%s_direction_2' % key,
                                     fallback='Down')
            line = Tripline(point_1=start,
                            point_2=end,
                            buffer_size=buffer,
                            direction_1=direction_1,
                            direction_2=direction_2)
            lines.append(line)

        self.lines = lines
        self.source = config.get('video_source', 'source')
        if self.source == 'webcam':
            self.webcam = True
        else:
            self.webcam = False
        self.people_options = dict(config.items('person'))

        self.go()

    def go(self):

        # setup HUD
        self.last_time = time.time()

        # opencv 3.x bug??
        cv2.ocl.setUseOpenCL(False)

        # people tracking
        self.finder = PeopleTracker(people_options=self.people_options)

        # STARTS HERE
        # connect to camera
        if self.pi:

            if self.threaded == False:
                print('non threading picamera started')
                from picamera.array import PiRGBArray
                from picamera import PiCamera

                self.camera = PiCamera()
                self.camera.resolution = self.camera_resolution
                self.camera.framerate = 20

                self.rawCapture = PiRGBArray(self.camera,
                                             size=self.camera_resolution)
            # threaded video_stream
            else:
                print('threading picamera started')
                from imutils.video.pivideostream import PiVideoStream
                from imutils.video import FPS

                self.vs = PiVideoStream(resolution=self.camera_resolution,
                                        framerate=20).start()

            time.sleep(1)  # let camera warm up

        elif self.webcam:
            self.camera = cv2.VideoCapture(1)  #TODO check which webcam
            self.camera.set(3, self.camera_resolution[0])
            self.camera.set(4, self.camera_resolution[1])

        else:
            self.camera = cv2.VideoCapture(self.source)

        # connect to backend
        self.backend.connect()

        # setup detectors
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

        # feed in video
        if self.pi:

            if self.threaded == False:
                for frame in self.camera.capture_continuous(
                        self.rawCapture, format="bgr", use_video_port=True):

                    image = frame.array
                    self.process(image)
                    self.rawCapture.truncate(0)
                    if self.quit_after_first_frame or cv2.waitKey(
                            1) & 0xFF == ord('q'):
                        break
                cv2.destroyAllWindows()
            else:
                while True:
                    frame = self.vs.read()
                    self.process(frame)
                    if self.quit_after_first_frame or cv2.waitKey(
                            1) & 0xFF == ord('q'):
                        break
                cv2.destroyAllWindows()
                self.vs.stop()
        elif self.webcam:
            while self.camera.isOpened():
                rval, frame = self.camera.read()
                self.process(frame)
                if self.quit_after_first_frame or cv2.waitKey(1) & 0xFF == ord(
                        'q'):
                    break
            self.camera.release()
            cv2.destroyAllWindows()

        else:

            while self.camera.isOpened():

                rval, frame = self.camera.read()
                self.process(frame)

                if self.quit_after_first_frame or cv2.waitKey(1) & 0xFF == ord(
                        'q'):
                    break
            self.camera.release()
            cv2.destroyAllWindows()

    def process(self, frame):
        if self.b_and_w:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        frame = self.crop_and_resize(frame)

        print_frame_size = self._frame_height == 0

        self._frame_height = frame.shape[0]
        self._frame_width = frame.shape[1]

        if print_frame_size and not self.to_stdout:
            print('resized video to %dx%d' %
                  (self._frame_width, self._frame_height))

        frame = self.apply_mog(frame)
        frame = self.handle_the_people(frame)
        frame = self.render_hud(frame)

        if self.show_window:
            cv2.imshow('Camerafeed', frame)

        if self.to_stdout:
            sys.stdout.buffer.write(frame.tobytes())
            # string = "".join(map(chr, frame.tostring()))
            # sys.stdout.write(string)

        if self.save_first_frame and self._frame == 0:
            cv2.imwrite('first_frame.png', frame)

    # help us crop/resize frames as they come in
    def crop_and_resize(self, frame):

        frame = frame[self.crop_y1:self.crop_y2, self.crop_x1:self.crop_x2]
        frame = imutils.resize(frame,
                               width=min(self.max_width, frame.shape[1]))

        return frame

    # apply background subtraction if needed
    def apply_mog(self, frame):
        if self.mog_enabled:
            mask = self.mogbg.apply(frame)
            frame = cv2.bitwise_and(frame, frame, mask=mask)

        return frame

    # all the data that overlays the video
    def render_hud(self, frame):
        this_time = time.time()
        diff = this_time - self.last_time
        fps = 1 / diff
        message = 'FPS: %d' % fps
        # print(message)

        cv2.putText(frame, message, (10, self._frame_height - 20), self.font,
                    0.5, (255, 255, 255), 2)

        self.last_time = time.time()

        return frame

    def handle_the_people(self, frame):

        (rects, weight) = self.hog.detectMultiScale(
            frame,
            winStride=(self.hog_win_stride, self.hog_win_stride),
            padding=(self.hog_padding, self.hog_padding),
            scale=self.hog_scale)

        people = self.finder.people(rects)

        # draw triplines
        for line in self.lines:
            for person in people:
                if line.handle_collision(person) == 1:
                    self.new_collision(person)

            frame = line.draw(frame)

        for person in people:
            frame = person.draw(frame)
            person.colliding = False

        return frame

    def new_collision(self, person):
        post = {
            'name': person.name,
            'meta': json.dumps(person.meta),
            'time': time.time()
        }

        if self.endpoint is not None:

            request = grequests.post(self.endpoint, data=post)
            grequests.map([request])

        if not self.to_stdout:
            print("NEW COLLISION %s HEADING %s" %
                  (person.name, str(person.meta)))

        self.backend.insert(post)
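# go_config() above reads its settings from an INI file. A minimal sketch of
# a matching config file, written from Python; the section and key names are
# grounded in the getters used in go_config(), while every value here is a
# hypothetical placeholder:
minimal_config = """
[host]

[platform]
pi = yes
to_stdout = no
show_window = yes
save_first_frame = no
quit_after_first_frame = no
threaded = yes

[video_source]
source = webcam
frame_x1 = 0
frame_y1 = 0
frame_x2 = 500
frame_y2 = 500
max_width = 640
b_and_w = no

[hog]
win_stride = 6
padding = 8
scale = 1.05

[mog]
enabled = no

[triplines]
total_lines = 1
line1_start = (0, 200)
line1_end = (640, 200)

[person]
"""

with open("config.ini", "w") as f:
    f.write(minimal_config)
# CameraFeed().go_config("config.ini")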
Ejemplo n.º 23
0
    startup()

    vs = PiVideoStream(SIZE, FPS).start()
    time.sleep(3.0)
    print("Camera done")
    pygame.init()
    display = pygame.display.set_mode(SIZE, 0)

    possible_keys = [pygame.K_UP, pygame.K_DOWN, pygame.K_RIGHT, pygame.K_LEFT]

    SIZE = SIZE[::-1]
    frame_no = 1
    strt = time.time()
    while True:
        cont = True
        original_frame = vs.read()
        # frame = original_frame
        frame = cv2.flip(cv2.rotate(original_frame, cv2.ROTATE_90_CLOCKWISE), 1)
        if frame is None:
            print("Camera Error")
            break
        pygame_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        gray_frame = cv2.cvtColor(cv2.flip(original_frame, -1), cv2.COLOR_BGR2GRAY)
        arrw_src = (SIZE[0]/2, SIZE[1]/2)
        arrw_dst = (SIZE[0]/2, SIZE[1]/2)
        went_in = False
        pygame.event.get()
        key_input = pygame.key.get_pressed()
        arrw_dst = get_arrow(key_input)
        cont = do_action(key_input)
        if cont is None or cont is False:
net = cv2.dnn.readNet('caffemodel/MobileNetSSD/MobileNetSSD_deploy.caffemodel',
                      'caffemodel/MobileNetSSD/MobileNetSSD_deploy.prototxt')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)

try:

    vs = PiVideoStream((camera_width, camera_height)).start()

    time.sleep(2)

    while True:
        t1 = time.perf_counter()

        color_image = vs.read()
        height = color_image.shape[0]
        width = color_image.shape[1]

        blob = cv2.dnn.blobFromImage(color_image,
                                     size=(300, 300),
                                     ddepth=cv2.CV_8U,
                                     swapRB=False,
                                     crop=False)
        net.setInput(blob, scalefactor=1.0 / 127.5, mean=[127.5, 127.5, 127.5])
        out = net.forward()
        out = out.flatten()

        for box_index in range(100):
            if out[box_index + 1] == 0.0:
                break
                help="jffj")
args = vars(ap.parse_args())
cam = PiVideoStream()
cam = cam.start()
time.sleep(.5)
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
    i = 0  # placeholder: in the original this should read the motion-sensor GPIO input
    if i == 1:  # when output from motion sensor is HIGH
        print("Intruder Detected")
        GPIO.output(4, 1)  # Turn ON LED
        time.sleep(.5)
        GPIO.output(4, 0)
        lcd_string("Intruder Detect", LCD_LINE_1)
        lcd_string("    Room 1", LCD_LINE_2)
    img = cam.read()
    img = imutils.resize(img, width=450)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
        name = " "
        if (conf < 100):
            if (Id == 1):
                name = ""
            elif (Id == 2):
                name = ""
            elif (Id == 3):
                name = ""
Ejemplo n.º 26
0
def main(args):
    # created a *threaded *video stream, allow the camera sensor to warmup,
    # and start the FPS counter
    vs = PiVideoStream().start() if args["picamera"] else WebcamVideoStream().start()
    print("[INFO] warming up camera...")
    time.sleep(2.0)

    if not os.path.isdir(args["buffer"]):
        os.makedirs(args["buffer"])
    if not os.path.isdir(args["output"]):
        os.makedirs(args["output"])

    # initialize the FourCC, video writer, dimensions of the frame, and
    # zeros array
    fourcc = cv2.VideoWriter_fourcc(*args["codec"])
    (h, w) = (None, None)
    length = args["length"]
    temp = list()
    counter = 0
    timeframe = 0
    cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
    params = {
        "output": args["output"],
        "buffer": args["buffer"],
        "type": args["type"],
        "length": length,
        "fps": args["fps"],
        "counter": 0,
        "temp": None,
        "frame": None,
        "writer": None,
        "idx": 0,
        "res_code": 0
    }

    buffer_prefix = os.path.join(args["buffer"], buffer_name)
    # loop over some frames...this time using the threaded stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        if params["writer"] is None:
            # store the image dimensions and initialize the video writer
            (h, w) = frame.shape[:2]
            params["writer"] = cv2.VideoWriter(buffer_prefix + "_" +
                                               str(timeframe) + "." + args["type"],
                                               fourcc, args["fps"], (w, h), True)

        temp.append(frame)
        counter += 1
        params["counter"] = counter
        params["temp"] = temp
        params["frame"] = frame
        if counter >= args["fps"]:
            for img in temp:
                params["writer"].write(img)
            temp = list()
            counter = 0
            timeframe = (timeframe + 1) % length
            params["timeframe"] = timeframe
            params["writer"].release()
            params["writer"] = cv2.VideoWriter(buffer_prefix + "_" +
                                               str(timeframe) + "." + args["type"],
                                               fourcc, args["fps"], (w, h), True)

        if not q.empty():
            flag = q.get()
            if flag == 0:
                params["writer"].release()
                break
            click(params)

        # check to see if the frame should be displayed to our screen
        if args["display"]:
            cv2.imshow("Frame", frame)

    print("[INFO] cleaning up...")
    cv2.destroyAllWindows()
    vs.stop()
    print("[INFO] done")
Ejemplo n.º 27
0
class EntryLog:

    def __init__(self):

        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.recognizer.read('trainer/trainer.yml')
        self.cascadePath = "haarcascade/haarcascade_frontalface_default.xml"
        self.faceCascade = cv2.CascadeClassifier(self.cascadePath)
        self.ops = sys.platform
        self.font = cv2.FONT_HERSHEY_DUPLEX
        self.d = database.Database().getAll()
        self.updated = False
        if self.ops == 'win32':
            # from imutils.video import WebcamVideoStream
            # self.cam = WebcamVideoStream(src=0).start()
            self.cam = cv2.VideoCapture(0)
        else:
            from imutils.video.pivideostream import PiVideoStream
            self.cam = PiVideoStream().start()
        self.ls = {}
        for doc in self.d:
            self.ls[doc['id']] = doc['name']
        self.name = 'unknown'
        self.idarr = []
        self.i = 0
        self.q = False
        self.counts = 0
        # params for ShiTomasi corner detection
        self.feature_params = dict(maxCorners=100,
                                   qualityLevel=0.3,
                                   minDistance=7,
                                   blockSize=7)
        # Parameters for lucas kanade optical flow
        self.lk_params = dict(winSize=(15, 15),
                              maxLevel=5,
                              criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

        # define movement thresholds
        self.max_head_movement = 20
        self.movement_threshold = 50
        if self.ops == 'win32':
            self.gesture_threshold = 150
        else:
            self.gesture_threshold = 75
        self.x = 0
        self.y = 0
        # find the face in the image
        self.face_found = False
        self.frame_num = 0

    def get_coords(self, p1):
        try:
            return int(p1[0][0][0]), int(p1[0][0][1])
        except:
            return int(p1[0][0]), int(p1[0][1])

    def attn(self):

        # if self.ops == 'win32':
        #     # from imutils.video import WebcamVideoStream
        #     # self.cam = WebcamVideoStream(src=0).start()
        #     self.cam = cv2.VideoCapture(0)
        # else:
        #     from imutils.video.pivideostream import PiVideoStream
        #     self.cam = PiVideoStream().start()
        # self.cam = WebcamVideoStream(src=0).start()
        while True:
            if self.q:
                break
            while True:
                if self.ops == 'win32':
                    ret, im = self.cam.read()
                else:
                    im = self.cam.read()
                self.gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
                faces = self.faceCascade.detectMultiScale(self.gray, 1.2, 5)
                if self.updated:
                    if (datetime.datetime.now() - self.t).total_seconds() < 2:
                        cv2.putText(im, 'Welcome ' + self.name, (50, 50), self.font, 0.8, (0, 255, 0), 2)
                for (x, y, w, h) in faces:
                    cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
                    Id, conf = self.recognizer.predict(self.gray[y:y + h, x:x + w])
                    self.face_found = True
                    if cv2.waitKey(10) == ord('q'):
                        self.q = True
                        break
                    if (conf >= 50):
                        self.name = self.ls.get(Id)
                        self.id = Id
                        # print(self.name, Id, conf)
                        self.idarr.append(Id)

                    cv2.putText(im, str(self.name), (x, y + h), self.font, 1, (255, 255, 255))
                cv2.namedWindow("Entry")
                cv2.imshow("Entry", im)
                if cv2.waitKey(10) == ord('q'):
                    self.q = True
                    break
                if len(self.idarr) == 30:
                    self.counts = np.bincount(np.array(self.idarr))
                    break

            if self.q:
                break

            if self.face_found:
                face_center = x + w / 2, y + h / 3
                self.p0 = np.array([[face_center]], np.float32)

            # self.ls.get(np.argmax())
            if self.confirm():
                now = datetime.datetime.now()
                day = str(now.year) + '-' + str(now.month) + '-' + str(now.day)
                log = []
                d = database.Database().getlogbydate(day)
                # if not d:
                #     m = {'_id': day, 'logs': []}
                #     database.Database().pushEntryLog(m)
                #     log = []
                if d:
                    log = d['logs']
                # self.now = datetime.datetime.now()
                self.updated = True
                l = {'id': self.id, 'name': self.name, 'timestamp': now.strftime("%H:%M:%S.%f")}
                log.append(l)
                database.Database().updatelog(day, log, self.name)
                self.t = datetime.datetime.now()

        if self.ops == 'win32':
            self.cam.release()
        else:
            self.cam.stop()
        cv2.waitKey(1)
        cv2.destroyAllWindows()

    def confirm(self):
        self.idarr = []
        self.counts = 0
        if self.gesture():
            return True
        else:
            return False

    def gesture(self):
        gesture = False
        x_movement = 0
        y_movement = 0

        while True:

            if self.ops == 'win32':
                ret, frame = self.cam.read()

            else:
                frame = self.cam.read()
            old_gray = self.gray.copy()
            self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, self.gray, self.p0, None, **self.lk_params)
            cv2.circle(frame, self.get_coords(p1), 4, (0, 0, 255), -1)
            cv2.circle(frame, self.get_coords(self.p0), 4, (255, 0, 0))

            # get the xy coordinates for points p0 and p1
            a, b = self.get_coords(self.p0), self.get_coords(p1)
            x_movement += abs(a[0] - b[0])
            y_movement += abs(a[1] - b[1])

            if not gesture: cv2.putText(frame, 'detected:', (50, 50), self.font, 0.8, (0, 0, 0), 2)
            if not gesture: cv2.putText(frame, self.name, (180, 50), self.font, 0.8, (255, 0, 0), 2)
            # text = 'x_movement: ' + str(x_movement)
            text = 'nod to confirm'
            if not gesture: cv2.putText(frame, text, (50, 100), self.font, 0.8, (0, 255, 0), 2)
            # text = 'y_movement: ' + str(y_movement)
            text = 'shake to cancel'
            if not gesture: cv2.putText(frame, text, (50, 150), self.font, 0.8, (0, 0, 255), 2)

            if cv2.waitKey(10) == ord('q'):
                self.q = True
                break

            if x_movement > self.gesture_threshold:
                self.updated = False
                return False
            if y_movement > self.gesture_threshold:
                return True

            self.p0 = p1

            cv2.imshow("Entry", frame)
            cv2.waitKey(1)
Ejemplo n.º 28
0
lk_params = dict(winSize=(25, 25),
                 maxLevel=7,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                           0.03))
blur_params = (4, 4)
dilation_params = (5, 5)
movment_threshold = 180

active = False
frame_holder = None

# start capturing
vs = PiVideoStream().start()
time.sleep(2.0)
run_request = True
frame_holder = vs.read()
print("About to start.")


def FrameReader():
    global frame_holder
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        cv2.flip(frame, 1, frame)
        frame_holder = frame
        time.sleep(.03)


def Spell(spell):
    #clear all checks
Ejemplo n.º 29
0
def printMouseCoords(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDBLCLK:
        print "Mouse at ({},{})".format(x, y)


# repeated timer for logging each x seconds
rt = RepeatedTimer(logInterval, collect)

# prepare windows for opencv and mouse listener
cv2.namedWindow('original')
cv2.setMouseCallback('original', printMouseCoords)

# infinite loop
while 1:
    # 1) read the next frame
    frame = cap.read()

    # 2) convert current frame to gray and improve contrast with CLAHE (Contrast Limited Adaptive Histogram Equalization)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cl1 = clahe.apply(gray)

    # 3) do edge detection on grayscaled frame
    edges = cv2.Canny(gray, 50, 70)
    edges = ~edges

    # 4) blur detected edges
    blur = cv2.bilateralFilter(cv2.blur(edges, (21, 21), 100), 9, 200, 200)

    # 5) apply threshold to frame: depending on pixel value, an array of 1s and 0s is created
    _, threshold = cv2.threshold(blur, 230, 255, cv2.THRESH_BINARY)
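# RepeatedTimer and collect() are used above but not defined in this snippet.
# A common RepeatedTimer pattern built on threading.Timer (a hypothetical
# reconstruction, not the original class):
import threading

class RepeatedTimer:
    def __init__(self, interval, function, *args, **kwargs):
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()

    def _run(self):
        # re-arm first so the period stays stable even if function() is slow
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        if not self.is_running:
            self._timer = threading.Timer(self.interval, self._run)
            self._timer.daemon = True
            self._timer.start()
            self.is_running = True

    def stop(self):
        self._timer.cancel()
        self.is_running = False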
Ejemplo n.º 30
0
# If debug flag is invoked
if args["debug"]:
    # Start benchmark
    print( fullStamp() + " [INFO] Debug Mode: ON" )
    fps = FPS().start()


# ************************************************************************
# =========================> MAKE IT ALL HAPPEN <=========================
# ************************************************************************

# Infinite loop
while True:
    
    # Get image from stream
    frame = stream.read()[36:252, 48:336]
    output = frame

    # Add a 4th dimension (Alpha) to the captured frame
    (h, w) = frame.shape[:2]
    frame = numpy.dstack( [frame, numpy.ones( ( h, w ), dtype="uint8" ) * 255] )

    # Create an overlay layer
    overlay = numpy.zeros( ( h, w, 4 ), "uint8" )
    
    # Convert to grayscale because HoughCircles only accepts grayscale images
    # (the frame is now BGRA, so use COLOR_BGRA2GRAY)
    bgr2gray = cv2.cvtColor( frame, cv2.COLOR_BGRA2GRAY )

    # Start thread to process image and apply required filters to detect circles
    t_procFrame = Thread( target=procFrame, args=( bgr2gray, Q_procFrame ) )
    t_procFrame.start()
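`procFrame` and `Q_procFrame` are defined elsewhere in the original program; a minimal sketch of what such a worker might look like, assuming a standard `queue.Queue` and HoughCircles-based detection (every detail below is an assumption):

from queue import Queue
import cv2

Q_procFrame = Queue()

def procFrame(gray, out_queue):
    # smooth first so HoughCircles sees fewer spurious edges
    blurred = cv2.GaussianBlur(gray, (9, 9), 2)
    circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, dp=1.2, minDist=50,
                               param1=50, param2=30, minRadius=10, maxRadius=100)
    out_queue.put(circles)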
Ejemplo n.º 31
0
    def calibraPerclos(self):

        self.judite.scriptInicial()
        # Enable video capture
        cap = PiVideoStream().start()
        time.sleep(2.0)
        start_time = time.time()
        contImgCalibracao, contFPS = 0, 0
        # Aspect-ratio matrix
        self.matrizRA = np.zeros((5, self.framesCalibragem))
        displayFPS = 1  # Refresh the displayed FPS every this many second(s).

        b = True
        while b:
            frame = cap.read()
            frameRedimencionado = imutils.resize(frame, width=400)
            rgbImage = cv2.cvtColor(frameRedimencionado, cv2.COLOR_BGR2RGB)
            gray = cv2.cvtColor(frameRedimencionado, cv2.COLOR_BGR2GRAY)

            retangulos = self.detectorCv.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE)

            for (x, y, w, h) in retangulos:

                retangulo = dlib.rectangle(int(x), int(y), int(x + w),
                                           int(y + h))

                self.shape = self.predicao(gray, retangulo)
                self.shape = face_utils.shape_to_np(self.shape)

                # Acquire data for calibration
                if (contImgCalibracao < self.framesCalibragem):

                    if (contImgCalibracao == 0):
                        print(
                            str(time.localtime().tm_hour) + ":" +
                            str(time.localtime().tm_min) + ":" +
                            str(time.localtime().tm_sec) +
                            " - Acquiring data for calibration")

                    if (contImgCalibracao == (self.framesCalibragem // 2) - 1):
                        self.judite.scriptRegulagemOlhosAbertos()

                    self.matrizRA[0][
                        contImgCalibracao] = self.analisePERCLOS.razaoDeAspecto(
                            self.shape)
                    self.matrizRA[1][
                        contImgCalibracao] = self.analisePERCLOS.desvioPadrao(
                            contImgCalibracao, self.matrizRA)
                    self.matrizRA[2][
                        contImgCalibracao] = self.analisePERCLOS.erroPadrao(
                            contImgCalibracao, self.matrizRA)

                    self.matrizRA = self.preencherDadosDeTeste(
                        contImgCalibracao, self.matrizRA)

                # Calibration and plotting of the calibration data on the graph
                elif (contImgCalibracao == self.framesCalibragem):
                    self.judite.scriptFimCalibragem()
                    self.matrizRA = self.analisePERCLOS.calibragemAberturaOlhos(
                        self.matrizRA)

                    arquivoCalibracao = ArquivoCalibracao()
                    arquivoCalibracao.gerarArquivoCalibracao(
                        self.matrizRA[0], self.usuario)
                    b = False

            contImgCalibracao += 1

            contFPS = contFPS + 1
            if (time.time() - start_time) > displayFPS:
                fps = int(contFPS / (time.time() - start_time))
                print("FPS: {}".format(fps))
                contFPS = 0
                start_time = time.time()

            # show the frame
            if (self.display):
                cv2.imshow("Calibracao", rgbImage)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

        cv2.destroyAllWindows()
        cap.stop()
        time.sleep(5)
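`razaoDeAspecto` presumably computes the standard eye aspect ratio (EAR) over the dlib 68-point landmarks; a minimal sketch of that formula under this assumption (the index slices below are the conventional landmark positions, not taken from this code):

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # eye is a 6x2 array of landmark coordinates for one eye
    A = dist.euclidean(eye[1], eye[5])  # first vertical distance
    B = dist.euclidean(eye[2], eye[4])  # second vertical distance
    C = dist.euclidean(eye[0], eye[3])  # horizontal distance
    return (A + B) / (2.0 * C)

# with the 68-point model: right eye = shape[36:42], left eye = shape[42:48]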
Ejemplo n.º 32
0
def ai():
    cap = PiVideoStream(resolution=(208, 208))  #rpi camera
    cap.start()
    time.sleep(2.0)
    cap.camera.hflip = True
    cap.camera.vflip = True

    print("[INFO] video recording")
    ##    out = cv2.VideoWriter('avcnet.avi',cv2.VideoWriter_fourcc(*'XVID'), 10, (208,208))

    # load the trained convolutional neural network
    print("[INFO] loading network...")
    ##    model = load_model("./avcnet_v1.model")
    model = load_model("/home/pi/drone_exe/avcnet_best_5.hdf5",
                       custom_objects={"tf": tf})

    # default parameters
    orient = 0
    tim_old = 0.1
    state = 'fly'
    dist = 510
    global velocity_y
    velocity_y = 0
    fly_1 = 0.9
    k = 0.83  # LPF coefficient: k = (tau/T) / (1 + tau/T), tau = time constant, T = sample period

    while True:
        start = time.time()
        frame = cap.read()
        orig = frame.copy()
        frame = cv2.resize(frame, (64, 64))
        frame = frame.astype("float") / 255.0
        frame = img_to_array(frame)
        frame = np.expand_dims(frame, axis=0)

        # classify the input image
        (stop, left, right, fly) = model.predict(frame)[0]
        # build the label
        ##        my_dict = {'stop':stop, 'left':left, 'right':right,'fly':fly}
        my_dict = {'stop': stop, 'left': left, 'right': right}
        maxPair = max(my_dict.items(), key=itemgetter(1))
        fly_f = k * fly_1 + (1 - k) * fly
        fly_1 = fly_f

        if state == 'avoid':
            ##            label=maxPair[0]
            ##            proba=maxPair[1]
            if fly_f * 100 >= 60:
                dist = 510
                state = 'fly'
                print(state, file=f)

        else:
            label = 'forward'
            proba = fly
            if fly_f * 100 <= 50:
                dist = 180
                label = maxPair[0]
                proba = maxPair[1]
                print(my_dict, file=f)
                state = 'avoid'

        label_1 = "{} {:.1f}% {}".format(label, proba * 100, state)
        # draw the label on the image
        output = imutils.resize(orig, width=208)
        cv2.putText(output, label_1, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

        # Write the frame into the file 'output.avi'
        ##        out.write(output)

        if vehicle.channels['8'] > 1700:
            event.clear()
        if vehicle.channels['8'] > 1400 and vehicle.channels['8'] < 1700:
            if state == "fly":
                event.clear()

            if state == "avoid":
                event.set()

                if label == 'left':
                    velocity_y = -0.5
                if label == 'right':
                    velocity_y = 0.5
                if label == 'stop':
                    velocity_y = 0

        if vehicle.channels['8'] < 1400:
            event.clear()

        msg_sensor(dist, orient)

        # show the output frame
        cv2.imshow("Frame", output)
        key = cv2.waitKey(10) & 0xFF

        # if the `Esc` key was pressed, break from the loop
        if key == 27:
            break

    # do a bit of cleanup
    f.close()
    print('end')

    cv2.destroyAllWindows()
    cap.stop()
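The `fly_f = k * fly_1 + (1 - k) * fly` update in the loop above is a first-order exponential low-pass filter on the 'fly' probability; a tiny standalone sketch of the same idea:

def lpf(new_value, prev_filtered, k=0.83):
    # higher k -> smoother output, slower reaction to changes
    return k * prev_filtered + (1 - k) * new_value

filtered = 0.9  # initial estimate, matching fly_1 = 0.9 in the snippet
for raw in (0.95, 0.40, 0.35, 0.90):
    filtered = lpf(raw, filtered)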
Ejemplo n.º 33
0
frame_interval = StatValue()
last_frame_time = clock()
while True:
    while len(pending) > 0 and pending[0].ready():
        res, t0 = pending.popleft().get()
        latency.update(clock() - t0)
        draw_str(res, (20, 20), "Latency: %.1f ms" % (latency.value * 1000))
        draw_str(res, (100, 20), "Frame interval: %.1f ms" % (frame_interval.value * 1000))
        print('Interval: %.1f ms' % (frame_interval.value * 1000))
        #cv2.imshow('threaded video', res)
        frame = cv2.medianBlur(frame, 19)
    if len(pending) < threadn:
        #camera.capture(rawCap, format = "bgr")
        #frame = rawCap.array
        frame = cap.read()
        t = clock()
        frame_interval.update(t - last_frame_time)
        last_frame_time = t
        if threaded_mode:
            task = pool.apply_async(process_frame, (copy.copy(frame), t))
            #task = pool.apply_async(process_frame, (frame, t))
        else:
            task = DummyTask(process_frame(frame, t))
        #rawCap.truncate(0)
        pending.append(task)
    ch = 0xFF & cv2.waitKey(1)
    if ch == ord(' '):
        threaded_mode = not threaded_mode
    if ch == 27:
        break
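The loop above relies on `pool`, `pending`, `threadn`, and `process_frame` created earlier (this is the pattern of OpenCV's video_threaded.py sample); a minimal sketch of that setup, with the filter inside process_frame being a placeholder only:

from collections import deque
from multiprocessing.pool import ThreadPool
import cv2

threadn = cv2.getNumberOfCPUs()
pool = ThreadPool(processes=threadn)
pending = deque()
threaded_mode = True

def process_frame(frame, t0):
    # placeholder per-frame work; any filter chain could go here
    frame = cv2.medianBlur(frame, 19)
    return frame, t0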
Ejemplo n.º 34
0
    def start_detecting(self, serial_connection):
        print("ImageProcessor_start_detecting")

        vs = PiVideoStream(resolution=(640, 480)).start()
        time.sleep(2.0)
        fps = FPS().start()

        while True:
            if not self.detect_image:
                break
            image = vs.read()
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.blur(gray, (5, 5))

            # binarize the image with Otsu's threshold
            _, edged = cv2.threshold(gray, 0, 255,
                                     cv2.THRESH_BINARY + cv2.THRESH_OTSU)

            # find contours in the image (OpenCV 3 signature: image, contours, hierarchy)
            _, contours, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
            contours = sorted(contours, key=cv2.contourArea, reverse=True)[:9]
            centers = []
            last_dimensions = []
            for contour in contours:
                # approximate the contour
                peri = cv2.arcLength(contour, True)
                approx = cv2.approxPolyDP(contour, 0.02 * peri, True)

                # if the approximated contour has four points, it's a rectangle
                if len(approx) == 4:
                    (x, y, w, h) = cv2.boundingRect(approx)
                    if h == 0:
                        h = 0.01
                    aspect_ratio = w / float(h)

                    # check if the rectangle is a square
                    if 0.85 <= aspect_ratio <= 1.15:
                        moments = cv2.moments(approx)
                        m00 = moments['m00']
                        if m00 == 0:
                            m00 = 0.05

                        # get the center of the square
                        cx = int(moments['m10'] / m00)
                        cy = int(moments['m01'] / m00)
                        is_duplicate = self.is_a_duplicate(
                            last_dimensions, h, w)
                        if not is_duplicate:
                            if self.check_if_target_found(
                                    centers, cx, cy, serial_connection):
                                break
                            last_dimensions.append((w, h))
                            centers.append(np.array((cx, cy)))
            fps.update()
        fps.stop()
        print('approx. FPS: {:.2f}'.format(fps.fps()))
        vs.stop()
        cv2.destroyAllWindows()
        self.detect_image = True
        self.slower_already_sent = False
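`is_a_duplicate` is not included in the snippet; a purely hypothetical sketch of such a check, rejecting squares whose dimensions are within a few pixels of one already recorded (the tolerance value is an assumption):

    def is_a_duplicate(self, last_dimensions, h, w, tolerance=5):
        # duplicate = both dimensions close to a square we have already seen
        for (seen_w, seen_h) in last_dimensions:
            if abs(seen_w - w) <= tolerance and abs(seen_h - h) <= tolerance:
                return True
        return False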