Example #1
    def frames(self):
        raw_capture = PiYUVArray(self.camera, size=RESOLUTION)
        stream = self.camera.capture_continuous(raw_capture,
                                                format="yuv",
                                                use_video_port=True)

        try:
            for frame in stream:
                raw_capture.truncate(0)  # Reset the buffer for the next image
                yield frame
        except KeyboardInterrupt:
            raw_capture.close()
            stream.close()
            print('Closed streams')
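
A possible consumer of this generator (hypothetical: cam stands for an instance of the surrounding class, which is not shown here):

# Hypothetical usage: iterate over YUV frames and take the Y (luminance) plane.
for frame in cam.frames():
    y_plane = frame.array[:, :, 0]  # Y channel doubles as a grayscale image
    # ... process y_plane ...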
Example #2
class PiVideoStream:
    def __init__(self, resolution=(640, 480), framerate=120):
        # initialize the camera and stream
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.rawCapture = PiYUVArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
                                                     format="yuv",
                                                     use_video_port=True)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and resource camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
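
A minimal usage sketch for PiVideoStream (assuming time is imported alongside Thread and the picamera classes):

# Hypothetical usage: start the background capture thread, read the most
# recent frame, then signal the thread to stop and release the camera.
vs = PiVideoStream(resolution=(640, 480), framerate=60).start()
time.sleep(2.0)  # give the camera time to warm up and deliver frames
frame = vs.read()  # latest YUV frame as a numpy array (or None)
if frame is not None:
    gray = frame[:, :, 0]  # Y plane as a grayscale image
vs.stop()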
Example #3
    # grab the frame, then initialize the timestamp and occupied/unoccupied text
    gray = f.array[:, :, 0]  # access all rows and columns of the Y channel
    timestamp = datetime.datetime.now()
    text = "Unoccupied"

    # resize the frame, convert it to grayscale, and blur it
    # frame = imutils.resize(frame, width=500)
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
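
The fragment stops after findContours; a hedged sketch of how this kind of motion-detection loop typically continues (imutils.grab_contours is a real helper, but conf["min_area"] is an assumed config key mirroring delta_thresh above):

    # Hypothetical continuation: keep only sufficiently large contours
    # and mark the frame as occupied.
    cnts = imutils.grab_contours(cnts)  # normalize return value across OpenCV versions
    for c in cnts:
        if cv2.contourArea(c) < conf["min_area"]:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(gray, (x, y), (x + w, y + h), 255, 2)
        text = "Occupied"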
Example #4
from picamera import PiCamera
from picamera.array import PiYUVArray

import numpy as np

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiYUVArray(camera, size=(640, 480))

for frame in camera.capture_continuous(rawCapture, format="yuv", use_video_port=True):
    image = frame.array
    print(image)
    with open('test.txt', 'w') as outfile:
        outfile.write('#Array shape:{0}\n'.format(image.shape))
        for data_slice in image:
            np.savetxt(outfile, data_slice, fmt='%-7.2f')
            outfile.write('# New slice\n')
    rawCapture.truncate(0)
    print("rawCapture truncate")
    exit()
print("last exit")
exit()
# with picamera.PiCamera() as camera:
#     camera.resolution = (640, 480)
#     camera.resolution = (1920, 1080)
#     start = time.time()
#     camera.capture_sequence('image.csv', format="raw")
#     finish = time.time()
#     final = finish - start
#     print(final)
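
A hedged sketch of reading the dump back into an array (it assumes the '#Array shape:' header written above; np.loadtxt skips the '#' comment lines by default):

# Hypothetical reader: parse the shape header, then restore the 3-D array.
import numpy as np

with open('test.txt') as infile:
    header = infile.readline()  # e.g. '#Array shape:(480, 640, 3)'
    shape = tuple(int(n) for n in header.split(':')[1].strip(' ()\n').split(','))
data = np.loadtxt('test.txt').reshape(shape)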
Example #5
def main():
    pygame.mixer.init()

    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    camera.resolution = (640, 480)
    raw_capture = PiYUVArray(camera, size=camera.resolution)

    # allow the camera to warmup
    time.sleep(0.1)
    font = cv2.FONT_HERSHEY_SIMPLEX

    prev_time = time.time()
    frame_counter = 0
    frame_average = 10
    osd_text = ''

    cv2.namedWindow('Frame', flags=cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    border = np.zeros((1080, 1920), np.uint8)

    current_team = ''
    start_team = time.time()
    take_photo = False
    picture_time = time.time()
    start_time = time.time()
    ans = ''
    kleur = ''
    given_ans = ''

    color = {'a': 'Rood', 'b': 'Groen', 'c': 'Geel', 'd': 'Oranje'}  # answer letter -> expected colour (Dutch)

    # capture frames from the camera
    for frame in camera.capture_continuous(raw_capture,
                                           format="yuv",
                                           use_video_port=True):
        image = frame.array

        jambo_tags = get_jambo_tags(pyzbar.decode(image))

        if 'jambo:STOP' in jambo_tags:
            print('Stopping')
            break

        if 'jambo:shutdown' in jambo_tags:
            os.system('sudo shutdown -h now')
            break

        elif 'jambo:A' in jambo_tags:
            given_ans = 'A'
        elif 'jambo:B' in jambo_tags:
            given_ans = 'B'
        elif 'jambo:C' in jambo_tags:
            given_ans = 'C'
        elif 'jambo:D' in jambo_tags:
            given_ans = 'D'
        elif 'jambo:rood' in jambo_tags:
            kleur = 'rood'
        elif 'jambo:groen' in jambo_tags:
            kleur = 'groen'
        elif 'jambo:geel' in jambo_tags:
            kleur = 'geel'
        elif 'jambo:oranje' in jambo_tags:
            kleur = 'oranje'

        if len(jambo_tags) == 1:
            try:
                _, team, ans = jambo_tags[0].split(':')

                current_team = team
                start_team = time.time()
            except ValueError:
                print(jambo_tags[0])

        if time.time() - start_team > 20.0 and not take_photo:
            current_team = ''  # Timeout
            ans = ''
            kleur = ''
            given_ans = ''

        found_text = 'Presenteer QR'
        found_color = ''
        found_ans = ''
        if current_team:
            if take_photo:
                found_text = 'GOED! lach voor de foto in: ' + str(
                    int(picture_time - time.time()))
            else:
                if ans.upper() == given_ans.upper() and color[
                        given_ans.lower()].upper() == kleur.upper():
                    ans = ''
                    kleur = ''
                    given_ans = ''
                    found_text = 'Take picture'
                    picture_time = time.time() + 5.0
                    take_photo = True
                    pygame.mixer.music.load('cheer.wav')
                    pygame.mixer.music.play()
                else:
                    found_text = 'Team ' + team + ' geef het antwoord.'

                    if given_ans and kleur:
                        if color[given_ans.lower()].upper() == kleur.upper():
                            found_color = 'Kleur: ' + kleur + ' GOED!'
                        else:
                            found_color = 'Kleur: ' + kleur + ' FOUT!'

                        if ans.upper() == given_ans.upper():
                            found_ans = 'Antwoord: ' + given_ans.upper() + ' GOED!'
                        else:
                            found_ans = 'Antwoord: ' + given_ans.upper() + ' FOUT!'
                    else:
                        found_color = 'Kleur: ' + kleur
                        found_ans = 'Antwoord: ' + given_ans.upper()

                    print('ans: ' + ans + ' given: ' + given_ans)

        if take_photo and time.time() > picture_time:
            pygame.mixer.music.load('slow_camera_shutter.wav')
            pygame.mixer.music.play()

            save_image = image[:camera.resolution[1], :camera.resolution[0], 0]
            save_image = cv2.resize(save_image,
                                    None,
                                    fx=2,
                                    fy=2,
                                    interpolation=cv2.INTER_CUBIC)
            timestamp = datetime.strftime(datetime.now(), '%Y-%m-%d %H_%M_%S')

            cv2.imwrite('photos/' + current_team + '_' + timestamp + '.png',
                        save_image)
            current_team = ''
            take_photo = False
            time.sleep(3.0)

        cv2.putText(image, found_text, (10, 100), font, 1.0, (255, 255, 255),
                    2, cv2.LINE_AA)
        cv2.putText(image, found_color, (10, 150), font, 1.0, (255, 255, 255),
                    2, cv2.LINE_AA)
        cv2.putText(image, found_ans, (10, 200), font, 1.0, (255, 255, 255), 2,
                    cv2.LINE_AA)

        # On screen text
        frame_counter += 1
        if frame_counter % frame_average == 0:
            dt = time.time() - prev_time
            prev_time = time.time()
            osd_text = 'fps: ' + '{:.2f}'.format(float(frame_average) / dt)

        # cv2.putText(image, osd_text, (10, 50), font, 1.0, (255, 255, 255), 2, cv2.LINE_AA)

        image = image[:camera.resolution[1], :camera.resolution[0], 0]
        image = cv2.resize(image,
                           None,
                           fx=2,
                           fy=2,
                           interpolation=cv2.INTER_CUBIC)

        y_offset = int(0.5 * (border.shape[0] - image.shape[0]))
        x_offset = int(0.5 * (border.shape[1] - image.shape[1]))

        y1, y2 = y_offset, y_offset + image.shape[0]
        x1, x2 = x_offset, x_offset + image.shape[1]

        border[y1:y2, x1:x2] = image

        # show the frame
        cv2.imshow("Frame", border)
        key = cv2.waitKey(1) & 0xFF

        # clear the stream in preparation for the next frame
        raw_capture.truncate(0)

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            return

    cv2.destroyWindow('Frame')
    input()
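
get_jambo_tags is not shown in this example; a plausible sketch, assuming it keeps only decoded QR payloads carrying the 'jambo:' prefix the main loop switches on:

# Hypothetical helper: reduce pyzbar decode results to 'jambo:' tag strings.
def get_jambo_tags(decoded_objects):
    tags = [obj.data.decode('utf-8') for obj in decoded_objects]
    return [t for t in tags if t.startswith('jambo:')]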
Example #6
class MetalDetect:
    def __init__(self, parent=None):
        self.camera = PiCamera()
        try:
            self.resolution_width = 2592  # 2592
            self.resolution_height = 256  #256
            self.camera.resolution = (self.resolution_width,
                                      self.resolution_height)
            # self.camera.shutter_speed = 5000
            # self.camera.iso = 200
            # self.image = PiYUVArray(self.camera, self.camera.resolution)
            self.img = None

            time.sleep(3)

            self.ioinput = 29
            GPIO.setmode(GPIO.BCM)
            GPIO.setwarnings(False)
            GPIO.setup(self.ioinput, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
            GPIO.add_event_detect(self.ioinput, GPIO.BOTH, bouncetime=2000)
            # GPIO.add_event_callback(16, self.captureThree).

            self.capture_TimeCost = 0.0
            self.algorithm_TimeCost = 0.0
            self.Total_TimeCost = 0.0
        except Exception as e:
            print(e)

    def __del__(self):
        if hasattr(self, 'image'):  # image exists only after captureImages() ran
            self.image.seek(0)
            self.image.truncate(0)
        self.camera.close()
        cv2.destroyAllWindows()
        print('destroyAll')

    def captureThree(self):
        pass

    def captureImages(self):
        """
        捕获图像
        :return:
        """
        try:
            self.image = PiYUVArray(self.camera, self.camera.resolution)
            try:
                for i in range(50):

                    time_cap = time.time()
                    self.camera.capture(self.image,
                                        format="yuv",
                                        use_video_port=True)
                    print('capture timeCost:        ' +
                          str(round((time.time() - time_cap), 4)))

                    time_truncate = time.time()
                    '''Reset the stream for the next capture'''
                    self.image.truncate(0)
                    self.image.seek(0)

                    print('time_truncate timeCost:  ' +
                          str(round((time.time() - time_truncate), 4)))

                    time_array = time.time()
                    img = self.image.array
                    print(img.shape)
                    print('array timeCost:          ' +
                          str(round((time.time() - time_array), 4)))

                    time_resize = time.time()
                    img = imutils.resize(self.image.array,
                                         width=self.resolution_width,
                                         height=self.resolution_height)
                    print('resize timeCost:         ' +
                          str(round((time.time() - time_resize), 4)))
                    self.capture_TimeCost = round((time.time() - time_cap), 4)

                    timeStart = time.time()
                    # self.detectImage_binary(img)
                    print('TotalCost:***************' +
                          str(round((time.time() - time_cap), 4)))
                    self.algorithm_TimeCost = round((time.time() - timeStart),
                                                    4)
                    self.Total_TimeCost = round((time.time() - time_cap), 4)
                    self.timecalculate(i)
                self.camera.close()
                cv2.destroyAllWindows()
            except Exception as e:
                print(e)
                self.image.seek(0)
                self.image.truncate(0)
        except Exception as e:
            print(e)

    '''Compute capture time and processing time as shares of the total time'''

    def timecalculate(self, i):
        print('*****************************************')
        print('num: ' + str(i))
        print('Capture_TimeCost  :' +
              str(round((self.capture_TimeCost / self.Total_TimeCost *
                         100), 3)) + '%')
        print('Algorithm_TimeCost:' + str(
            round((self.algorithm_TimeCost / self.Total_TimeCost * 100), 3)) +
              '%')
        print('*****************************************')

    '''No detection: just display and save'''

    def detectImage_ShowSave(self, i, image):
        cv2.imshow('img', image)  # preview the captured image
        cv2.imwrite("test" + str(i) + ".jpg", image)  # save a test sample
        cv2.waitKey(30)  # short delay so the window can refresh

    '''Threshold-based detection'''

    def detectImage_binary(self, image):
        time1 = time.time()
        dest = cv2.cvtColor(image, cv2.COLOR_YUV420p2GRAY, 1)
        # dest = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        print('cvtColor timeCost:       ' +
              str(round((time.time() - time1), 4)))

        time2 = time.time()
        blur = cv2.GaussianBlur(dest, (5, 1), 0)
        print('GaussianBlur timeCost:   ' +
              str(round((time.time() - time2), 4)))

        time3 = time.time()
        ret, binary_frame = cv2.threshold(blur, 200, 255, cv2.THRESH_BINARY)
        print('threshold timeCost:      ' +
              str(round((time.time() - time3), 4)))

        time4 = time.time()
        cv2.imshow("binary_frame", binary_frame)
        cv2.waitKey(30)
        print('Show timeCost:           ' +
              str(round((time.time() - time4), 4)))

    '''Filtering + threshold detection'''

    def detectImage(self, image):
        dest = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        blur = cv2.GaussianBlur(dest, (5, 1), 0)
        g_kernel = cv2.getGaborKernel((41, 41),
                                      19.04,
                                      0,
                                      34,
                                      0,
                                      0,
                                      ktype=cv2.CV_32F)
        gabor = cv2.filter2D(blur, cv2.CV_8UC3, g_kernel)
        ret, threshold = cv2.threshold(gabor, 60, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(threshold, kernel, iterations=1)

        cv2.imshow("gabor", gabor)
        cv2.imshow("dilation", dilation)
        cv2.imshow("grayFrame", dest)

        cv2.waitKey(30)
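
A minimal driver sketch (hypothetical) for the class above; construction sets up the camera and GPIO, and captureImages runs the timed loop:

# Hypothetical usage: run the 50-frame capture loop with per-stage timings.
detector = MetalDetect()
detector.captureImages()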
Example #7
def main_loop():
    raw_capture = PiYUVArray(camera, size=res)
    stream = camera.capture_continuous(raw_capture, format="yuv", use_video_port=True)

    # pwm1 steers the car via its duty_cycle.
    # Example find_peaks() output:
    #   (array([482, 542], dtype=int32), {'peak_heights': array([186.831036  , 185.01286929])})

    num_frames = 0

    # if line_pos is 1 then the line is to the right of the car
    # if line_pos is -1 then the line is to the left of the car
    # if line_pos is 0 the position is unknown
    # if lost is true then the line is lost
    line_pos = STRAIGHT_
    lost = False
    prev_steer_dc = pwm1.duty_cycle
    prev_steer_diff_arr = []
    prev_peak_avg_arr = []

    for i in range(NUM_PREV_VAL_):
        prev_steer_diff_arr.append(-1)
        prev_peak_avg_arr.append(-1)
    
    for f in stream:
        t = time.time()

        # This was intended to change speeds as necessary,
        # but it proved too volatile; speeds can be updated manually instead.
        # speeds = update_speeds()
        # UCLA_ = speeds[0]
        # SONIC_ = speeds[1]

        # If killSwitch is toggled, breaks out of forever loop and kills the motor
        if killSwitch.value:
            break

        num_frames += 1
        # Get the intensity component of the image (a trick to get black and white images)
        I = f.array[:, :, 0]
        # Reset the buffer for the next image
        raw_capture.truncate(0)
        # Select a horizontal line in the middle of the image
        U = I[UPPER_LINE_, :]
        C = I[CENTER_LINE_, :]
        L = I[LOWER_LINE_, :]
        # Smooth the transitions so we can detect the peaks
        Lf = filtfilt(b, a, C)
        # Find peaks higher than 128 (half of the 8-bit range)
        p = find_peaks(Lf, height=128)

        total_peak_amt = 0
        avg_peak_loc = NO_PEAKS_
        num_peaks = 0

        for peak in p[0]:
            num_peaks += 1
            total_peak_amt += peak

        print("Num Peaks: " + num_peaks)

        if num_peaks != 0:
            avg_peak_loc = total_peak_amt / num_peaks
            if num_peaks == 3:
                print("THIS IS THE BEGINNING OF THE END")

        if avg_peak_loc != NO_PEAKS_:
            if avg_peak_loc < MEDIAN_:
                if lost and line_pos == RIGHT_:
                    # If this happens, the center line is to the right
                    # but we encountered the bordering track line to the left.
                    # Break so we do not drive across it.
                    # TODO: handle this case so a race run does not simply stop
                    break
                line_pos = LEFT_
            else:
                if lost and line_pos == LEFT_:
                    # If this happens, the center line is to the left
                    # but we encountered the bordering track line to the right.
                    # Break so we do not drive across it.
                    # TODO: handle this case so a race run does not simply stop
                    break
                line_pos = RIGHT_

            # Sets the steering position based on the peak of the Center line
            pwm1.duty_cycle = int(STRAIGHT_ - ((avg_peak_loc - MEDIAN_)/MEDIAN_)*500000)
            lost = False
        else:
            if line_pos == LEFT_:
                pwm1.duty_cycle = LEFT_
            elif line_pos == RIGHT_:
                pwm1.duty_cycle = RIGHT_
            else:
                pwm1.duty_cycle = STRAIGHT_

            lost = True
            # pwm0.duty_cycle = SONIC_

        if (abs(prev_steer_dc - STRAIGHT_) < abs(pwm1.duty_cycle - STRAIGHT_)
                and prev_steer_diff_arr[0] > 0 and prev_steer_diff_arr[1] > 0
                and abs(pwm1.duty_cycle - STRAIGHT_) > 400000):
            pwm0.duty_cycle = SONIC_
        elif abs(avg_peak_loc - MEDIAN_) <= CENTER_THRESHOLD_:
            i = 0
            while i < NUM_PREV_VAL_:
                if abs(prev_peak_avg_arr[i] - MEDIAN_) > CENTER_THRESHOLD_:
                    break
                i += 1
            # print("STRAIGHT")
            if i == NUM_PREV_VAL_:
                pwm0.duty_cycle = USC_
        else:
            pwm0.duty_cycle = UCLA_
            if abs(pwm1.duty_cycle - STRAIGHT_) > 300000:
                pwm0.duty_cycle = UCLA_ + 4000

        i = NUM_PREV_VAL_ - 1
        while i > 0:
            prev_steer_diff_arr[i] = prev_steer_diff_arr[i - 1]
            prev_peak_avg_arr[i] = prev_peak_avg_arr[i - 1]
            i -= 1
        
        prev_steer_diff_arr[0] = abs(pwm1.duty_cycle - STRAIGHT_) - abs(prev_steer_dc - STRAIGHT_)
        prev_peak_avg_arr[0] = avg_peak_loc

        prev_steer_dc = pwm1.duty_cycle

        time_elapsed = time.time() - t
        
        print(prev_peak_avg_arr)
        print(prev_steer_diff_arr)
        print(sum(prev_steer_diff_arr))
        print(sum(prev_steer_diff_arr) / len(prev_steer_diff_arr))
        print(pwm1.duty_cycle)
        print("Elapsed {:0.4f} seconds, estimated FPS {:0.2f}".format(time_elapsed, 1 / time_elapsed))
        time.sleep(INTERVAL_)
        
    pwm0.duty_cycle = 1000000

    # Release resources
    stream.close()
    raw_capture.close()
    camera.close()
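
main_loop assumes globals b and a plus the SciPy helpers; a sketch of a typical setup, with the filter order and cutoff as assumed values:

# Hypothetical setup for the b, a coefficients used by filtfilt() above:
# a low-order Butterworth low-pass that smooths each scanline before
# find_peaks() looks for the bright track line.
from scipy.signal import butter, filtfilt, find_peaks

b, a = butter(3, 0.1)  # order and normalized cutoff are assumed values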
Example #8
def streamVideo():
    resolution = (192, 192)
    fps = 90
    pub_socket = backend.get_msg_streamer()
    # Make sure to set up raspberry pi camera
    # More information here: https://www.raspberrypi.org/documentation/configuration/camera.md
    with picamera.PiCamera() as camera:
        # set camera parameters
        camera.resolution = resolution
        camera.framerate = Fraction(fps, 1)
        # camera.sensor_mode = 7
        # camera.exposure_mode = 'off'
        # camera.shutter_speed = 6000000
        # camera.iso = 1600
        # rawCapture = PiRGBArray(camera, size=resolution)
        rawCapture = PiYUVArray(camera, size=resolution)
        stream = camera.capture_continuous(rawCapture,
                                           format="yuv",
                                           use_video_port=True)
        frame_counter_per_sec = 0
        frame_index = 1
        streamimage = StartThreadToStream(pub_socket, device, resolution,
                                          backend)
        # payload = Payload(device, resolution[0], resolution[1], "gray")    # use this if sending without new thread StartThreadToStream
        fps = 0
        start_time = time()
        image_read_time = time()
        try:
            for f in stream:
                if backend.is_publishable():
                    # grab the frame from the stream and clear the stream in
                    # preparation for the next frame
                    frame = f.array
                    capture_time = monotonic()
                    latency = time() - image_read_time
                    streamimage.dataready(
                        frame[:, :, 0], frame_index, capture_time
                    )  #   give it to StartThreadToStream to publish
                    """ 
                    # use this also if sending without new thread StartThreadToStream but deactivate above line
                    payload.setPayloadParam(capture_time, numpy.ascontiguousarray(frame[:,:,0]), frame_index)
                    pub_socket.send(payload.get())           #   publish here
                    """
                    seconds = time() - start_time
                    if seconds > 1:
                        fps = frame_counter_per_sec
                        frame_counter_per_sec = 0
                        start_time = time()
                    outstr = "Frames: {}, FPS: {}, Frame Read latency: {}".format(
                        frame_index, fps, latency)
                    sys.stdout.write("\r" + outstr)
                    frame_counter_per_sec = frame_counter_per_sec + 1
                    frame_index = frame_index + 1
                    rawCapture.truncate(0)
                    image_read_time = time()
                else:
                    break
        except (KeyboardInterrupt, SystemExit):
            logging.info("Exit due to keyboard interrupt")
        except Exception:
            exp = traceback.format_exc()
            logging.error(exp)
        finally:
            streamimage.close()
            del stream
            del streamimage
            del rawCapture
            logging.info("Total Published frames: {}, FPS:{}.".format(
                frame_index, fps))
            logging.info(
                "Streaming stopped for the device: {}.".format(device))
Example #9
rawCapture = PiYUVArray(camera, size=res)  # Initialize the buffer and start capturing
stream = camera.capture_continuous(rawCapture,
                                   format="yuv",
                                   use_video_port=True)
pid_object = PID()
i = 0
j = 0
temp = time.time()
time_start = temp

try:  # Beginning to get camera stream
    for f in stream:
        I = f.array[:, :, 0]  # Get the intensity component of the image (a trick to get black and white images)
        j = j + 1
        rawCapture.truncate(0)  # Reset the buffer for the next image
        diff = time.time() - temp
        if diff < time_th:
            continue
        print("Time difference between frames = ", diff)
        temp = time.time()

        #appending
        #image_raw.append(I)
        #plt.imshow(I)
        #plt.show()
        #file_name = 'a/Apr3_raw_' + str(i) + '.jpg'
        #plt.savefig(file_name)

        peak_pos = get_midpoint_peak(I, row, b, a)
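
get_midpoint_peak is not shown; a hedged sketch consistent with its call signature and with the scanline/peak pattern of Example #7:

# Hypothetical implementation: smooth one image row and return the mean
# position of the detected peaks (None when no peak is found).
def get_midpoint_peak(I, row, b, a):
    line = filtfilt(b, a, I[row, :])         # smooth the chosen scanline
    peaks, _ = find_peaks(line, height=128)  # bright-line candidates (assumed threshold)
    if len(peaks) == 0:
        return None
    return float(peaks.mean())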