Example #1
# Helper: empty the temporary capture directory, creating it if it is missing;
def clearDirectory(directory):
	if os.path.exists(directory):
		for name in os.listdir(directory):
			os.remove(os.path.join(directory, name))
	else:
		os.makedirs(directory)

# Empty temporary directory;
clearDirectory(config["capture"]["files_directory"])

# Capture frames from the camera and grab the raw NumPy array;
if config["debug"]["print_messages"]:
	print "[MESSAGE] Recording and calculating motion, please wait..."
#for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# Loop until the frame limit is reached or the input runs out (see the break below);
while True:

	# Differentiate file or raw capture;
	if config["debug"]["use_file"]:
		ret, frame = rawCapture.read()
	else:
		# grab the next frame from the Pi camera as a raw NumPy array;
		camera.capture(rawCapture, format="bgr", use_video_port=True)
		frame = rawCapture.array
		rawCapture.truncate(0)
	status = 0

	# Stop loop when limit is reached;
	if (not config["debug"]["file_loop"] and frameCounter > config["camera"]["runtime_frames"]) or (frame is None):
		break

	# Write frame (only if not analysing);
	if not config["debug"]["file_loop"]:
		cv2.imwrite(config["capture"]["files_directory"] + str(frameCounter) + ".png", frame)

	# Resize the frame, convert it to grayscale, and blur it;
	frame = imutils.resize(frame, width=600)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
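The excerpt cuts off after the grayscale conversion; a minimal sketch of the blur and frame-differencing step it appears to be leading into might look like the following (firstFrame, the 21x21 kernel, the threshold of 25, and the frameCounter update are assumptions, not part of the original):

	gray = cv2.GaussianBlur(gray, (21, 21), 0)

	# assumed continuation: difference against the first captured frame to measure motion;
	if firstFrame is None:
		firstFrame = gray
		frameCounter += 1
		continue

	frameDelta = cv2.absdiff(firstFrame, gray)
	thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
	frameCounter += 1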
Example #2
import time

import cv2


class Camera:
    '''
    Camera module to abstract between the Raspberry Pi camera or a standard
    USB camera. Useful for testing and debugging
    '''
    
    def __init__(self, rpiCam=False, cameraNum=0, width=640, height=480, fps=30):
        self.rpiCam = rpiCam
        self.width = width
        self.height = height
        self.fps = fps
        self.cameraNum = cameraNum
        
        if self.rpiCam:
            self.setupRpiCam()
        else:
            self.setupUsbCam()
        
    def setupRpiCam(self):
        # import the necessary packages
        from picamera.array import PiRGBArray
        from picamera import PiCamera
        
        # initialize the camera and grab a reference to the raw camera capture
        camera = PiCamera()
        camera.resolution = (self.width, self.height)
        camera.framerate = self.fps
        camera.crop = (0.0, 0.0, 1.0, 1.0)
        self.camera = camera
        self.rawCapture = PiRGBArray(camera, size=(self.width, self.height))
        self.rgb = bytearray(self.width * self.height * 3)
        self.yuv = bytearray(self.width * self.height * 3 // 2)  # integer size for the YUV420 buffer
        
        # wait for camera to warm up
        time.sleep(0.1)

    def setupUsbCam(self):
        # initialize the camera and grab a reference to the raw camera capture
        self.rawCapture = cv2.VideoCapture(self.cameraNum)

        # wait for camera to warm up
        time.sleep(0.1)

    def grabFrame(self):
        '''
        Grab a single frame from the camera
        '''
        
        if self.rpiCam:
            #import yuv2rgb
            #stream = io.BytesIO()
            #self.camera.capture(stream, use_video_port=True, format='raw')
            #stream.seek(0)
            #stream.readinto(self.yuv)
            #yuv2rgb.convert(self.yuv, self.rgb, self.width, self.height)
            #return pygame.image.frombuffer(self.rgb[0:(self.width*self.height*3)], (self.width, self.height), 'RGB')

            self.camera.capture(self.rawCapture, use_video_port=True, format='bgr')
            image = self.rawCapture.array
            self.rawCapture.truncate(0)
            return image
        else:
            _, image = self.rawCapture.read()
            return image
        
    def streamFrames(self):
        '''
        Generator to stream images from the camera as numpy arrays. The pixels
        are stored as BGR (blue, green, red)
        '''
        
        if self.rpiCam:
            # capture frames from the camera; clean up only once the consumer
            # stops iterating or an error occurs
            try:
                for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
                    yield frame.array
                    self.rawCapture.truncate(0)
            finally:
                self.cleanup()
        else:
            try:
                while True:
                    yield self.grabFrame()
            finally:
                self.cleanup()
                    
    def cleanup(self):
        if self.rpiCam:
            self.camera.close()
        else:
            self.rawCapture.release()
    
    def startPreview(self):
        if self.rpiCam:
            self.camera.start_preview()

    def stopPreview(self):
        if self.rpiCam:
            self.camera.stop_preview()
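A short usage sketch for this class; the USB-camera choice and the cv2.imshow preview loop are illustrative assumptions, not part of the original:

if __name__ == '__main__':
    # stream frames from a USB camera and show them until 'q' is pressed (illustrative only)
    cam = Camera(rpiCam=False, width=640, height=480, fps=30)
    for frame in cam.streamFrames():
        if frame is None:
            break
        cv2.imshow('preview', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()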
Example #3
import time

import cv2


class Camera:
    '''
    Camera module to abstract between the Raspberry Pi camera or a standard
    USB camera. Useful for testing and debugging
    '''
    
    def __init__(self, rpiCam=False, cameraNum=0, width=640, height=480, fps=30):
        self.rpiCam = rpiCam
        self.width = width
        self.height = height
        self.fps = fps
        self.cameraNum = cameraNum
        
        if self.rpiCam:
            self.setupRpiCam()
        else:
            self.setupUsbCam()
        
    def setupRpiCam(self):
        # import the necessary packages
        from picamera.array import PiRGBArray
        from picamera import PiCamera
        
        # initialize the camera and grab a reference to the raw camera capture
        camera = PiCamera()
        camera.resolution = (self.width, self.height)
        camera.framerate = self.fps
        self.camera = camera
        self.rawCapture = PiRGBArray(camera, size=(self.width, self.height))
        
        # wait for camera to warm up
        time.sleep(0.1)

    def setupUsbCam(self):
        # initialize the camera and grab a reference to the raw camera capture
        self.rawCapture = cv2.VideoCapture(self.cameraNum)

        # wait for camera to warm up
        time.sleep(0.1)

    def grabFrame(self):
        '''
        Grab a single frame from the camera
        '''
        
        if self.rpiCam:
            self.camera.capture(self.rawCapture, format="bgr")
            image = self.rawCapture.array
            self.rawCapture.truncate(0)
            return image
        else:
            _, image = self.rawCapture.read()
            return image
        
    def streamFrames(self):
        '''
        Generator to stream images from the camera as numpy arrays. The pixels
        are stored as BGR (blue, green, red)
        '''
        
        if self.rpiCam:
            # capture frames from the camera; clean up only once the consumer
            # stops iterating or an error occurs
            try:
                for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
                    yield frame.array
                    self.rawCapture.truncate(0)
            finally:
                self.cleanup()
        else:
            try:
                while True:
                    yield self.grabFrame()
            finally:
                self.cleanup()
                    
    def cleanup(self):
        if self.rpiCam:
            self.camera.close()
        else:
            self.rawCapture.release()
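A quick single-frame sketch for this variant; the USB default and the snapshot file name are assumptions:

if __name__ == '__main__':
    cam = Camera(rpiCam=False)
    frame = cam.grabFrame()                 # one BGR frame as a numpy array
    if frame is not None:
        cv2.imwrite('snapshot.png', frame)  # hypothetical output path
    cam.cleanup()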
Example #4
import time

import cv2


class Camera:
    '''
    Camera module to abstract between the Raspberry Pi camera or a standard
    USB camera. Useful for testing and debugging
    '''
    def __init__(self,
                 rpiCam=False,
                 cameraNum=0,
                 width=640,
                 height=480,
                 fps=30):
        self.rpiCam = rpiCam
        self.width = width
        self.height = height
        self.fps = fps
        self.cameraNum = cameraNum

        if self.rpiCam:
            self.setupRpiCam()
        else:
            self.setupUsbCam()

    def setupRpiCam(self):
        # import the necessary packages
        from picamera.array import PiRGBArray
        from picamera import PiCamera

        # initialize the camera and grab a reference to the raw camera capture
        camera = PiCamera()
        camera.resolution = (self.width, self.height)
        camera.framerate = self.fps
        camera.crop = (0.0, 0.0, 1.0, 1.0)
        self.camera = camera
        self.rawCapture = PiRGBArray(camera, size=(self.width, self.height))
        self.rgb = bytearray(self.width * self.height * 3)
        self.yuv = bytearray(self.width * self.height * 3 // 2)  # integer size for the YUV420 buffer

        # wait for camera to warm up
        time.sleep(0.1)

    def setupUsbCam(self):
        # initialize the camera and grab a reference to the raw camera capture
        self.rawCapture = cv2.VideoCapture(self.cameraNum)

        # wait for camera to warm up
        time.sleep(0.1)

    def grabFrame(self):
        '''
        Grab a single frame from the camera
        '''

        if self.rpiCam:
            #import yuv2rgb
            #stream = io.BytesIO()
            #self.camera.capture(stream, use_video_port=True, format='raw')
            #stream.seek(0)
            #stream.readinto(self.yuv)
            #yuv2rgb.convert(self.yuv, self.rgb, self.width, self.height)
            #return pygame.image.frombuffer(self.rgb[0:(self.width*self.height*3)], (self.width, self.height), 'RGB')

            self.camera.capture(self.rawCapture,
                                use_video_port=True,
                                format='bgr')
            image = self.rawCapture.array
            self.rawCapture.truncate(0)
            return image
        else:
            _, image = self.rawCapture.read()
            return image

    def streamFrames(self):
        '''
        Generator to stream images from the camera as numpy arrays. The pixels
        are stored as BGR (blue, green, red)
        '''

        if self.rpiCam:
            # capture frames from the camera; clean up only once the consumer
            # stops iterating or an error occurs
            try:
                for frame in self.camera.capture_continuous(self.rawCapture,
                                                            format="bgr",
                                                            use_video_port=True):
                    yield frame.array
                    self.rawCapture.truncate(0)
            finally:
                self.cleanup()
        else:
            try:
                while True:
                    yield self.grabFrame()
            finally:
                self.cleanup()

    def cleanup(self):
        if self.rpiCam:
            self.camera.close()
        else:
            self.rawCapture.release()

    def startPreview(self):
        if self.rpiCam:
            self.camera.start_preview()

    def stopPreview(self):
        if self.rpiCam:
            self.camera.stop_preview()
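On a Raspberry Pi the preview helpers can bracket a capture; a minimal sketch, assuming a Pi camera is attached and a five-second preview is acceptable:

if __name__ == '__main__':
    cam = Camera(rpiCam=True, width=640, height=480, fps=30)
    cam.startPreview()            # on-screen preview (Pi camera only)
    time.sleep(5)                 # assumed preview duration
    frame = cam.grabFrame()       # grab a single BGR frame as a numpy array
    cam.stopPreview()
    cam.cleanup()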
Example #5
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2

camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 30
cap = PiRGBArray(camera, size=(320, 240))

# PiRGBArray has no read(); pull frames with capture_continuous instead;
for raw in camera.capture_continuous(cap, format="bgr", use_video_port=True):

	frame = raw.array

	gray_img 	   = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	ret, threshold_img = cv2.threshold(gray_img, 15, 255, cv2.THRESH_BINARY)

	cv2.imshow('Gray', gray_img)
	threshold2BGR_img  = cv2.cvtColor(threshold_img, cv2.COLOR_GRAY2BGR)

	height, width = threshold_img.shape
	roi_img   = threshold_img[0:height//2, 0:width//2]
	roi_left  = threshold_img[0:height//2, 0:width//4]
	roi_right = threshold_img[0:height//2, width//4:width//2]

	left_pxls  = cv2.countNonZero(roi_left)
	right_pxls = cv2.countNonZero(roi_right)

	# reset the stream for the next frame;
	cap.truncate(0)

	# guard against dividing by zero when the right half is all black;
	if right_pxls:
		print(left_pxls / right_pxls)
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

camera.close()
cv2.destroyAllWindows()
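The printed left/right pixel ratio is the raw signal a line-follower would steer on; a hedged sketch of one way to interpret it (the 1.2 and 0.8 thresholds and the function name are assumptions):

def steer_from_ratio(left_pxls, right_pxls):
	# more white pixels on the left than the right suggests the line is drifting left;
	ratio = left_pxls / max(right_pxls, 1)
	if ratio > 1.2:
		return 'left'
	elif ratio < 0.8:
		return 'right'
	return 'straight'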
Example #6
import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray


class DataLoader(object):

    DEVICE_CAM, DEVICE_CAM_RPI = range(0, 2)

    def __init__(self, dev_type, config):

        self.dev_type = dev_type
        self.config = config

        self.cam = None
        self.stop_cap = None
        self.cap = None

        self.__init_device()

    def __init_device(self):

        if self.dev_type == self.DEVICE_CAM_RPI:
            self.cam = PiCamera()

            try:
                self.cam.resolution = (
                    int(self.config.resolution.width),
                    int(self.config.resolution.height)
                )
            except AttributeError as e:
                print(e)

            try:
                self.cam.framerate = int(self.config.frame_rate)
            except AttributeError as e:
                print(e)

            try:
                self.cam.brightness = int(self.config.brightness)
            except AttributeError as e:
                print(e)

            try:
                self.cam.contrast = int(self.config.contrast)
            except AttributeError as e:
                print(e)

            self.cap = PiRGBArray(self.cam)
            self.stop_cap = self.cam.stop_preview

        elif self.dev_type == self.DEVICE_CAM:
            self.cap = cv2.VideoCapture(0)
            self.stop_cap = self.cap.release

        else:
            raise ValueError("Incorrect device type provided.")

    def __get_files(self):

        if self.dev_type == self.DEVICE_CAM_RPI:
            # capture_continuous yields one PiRGBArray per frame;
            for frame in self.cam.capture_continuous(self.cap, format="bgr"):
                yield frame.array
                self.cap.truncate(0)

        elif self.dev_type == self.DEVICE_CAM:
            while True:
                ret, frame = self.cap.read()
                yield frame

    def get_data_frame(self):
        try:
            for dframe in self.__get_files():
                yield dframe

        finally:
            self.stop_cap()
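A minimal usage sketch, assuming a simple namespace-style config object (the attribute names follow the class above; everything else here is illustrative):

from types import SimpleNamespace

config = SimpleNamespace(
    resolution=SimpleNamespace(width=640, height=480),
    frame_rate=30,
    brightness=50,
    contrast=0,
)

loader = DataLoader(DataLoader.DEVICE_CAM, config)
for frame in loader.get_data_frame():
    if frame is None:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()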
Example #7
def main():
    import cv2
    import numpy as np
    import time
    import RPi.GPIO as GPIO
    from picamera.array import PiRGBArray
    from picamera import PiCamera

    #set the GPIO pins of raspberry pi.
    GPIO.setmode (GPIO.BCM)
    GPIO.setwarnings (False)
    #enable
    GPIO.setup(16, GPIO.OUT)
    GPIO.setup(20, GPIO.OUT)
    #setting the GPIO pin as Output
    GPIO.setup (24, GPIO.OUT)
    GPIO.setup (23, GPIO.OUT)
    GPIO.setup (27, GPIO.OUT)
    GPIO.setup (22, GPIO.OUT)
    #GPIO.PWM( pin, frequency ) it generates software PWM
    PWMR = GPIO.PWM (24, 100)
    PWMR1 = GPIO.PWM (23, 100)
    PWML = GPIO.PWM (27, 100)
    PWML1 = GPIO.PWM (22, 100)
    #Starts PWM at 0% dutycycle
    PWMR.start (0)
    PWMR1.start (0)
    PWML.start (0)
    PWML1.start (0)
    #enable pins of the motor
    GPIO.output(16, GPIO.HIGH)
    GPIO.output(20, GPIO.HIGH)

    
    # set up the Pi camera and a raw capture buffer;
    camera = PiCamera()
    # set the resolution of the video to be captured
    camera.resolution = (320, 240)
    # set the framerate
    camera.framerate = 30
    capture = PiRGBArray(camera, size=(320, 240))

    # grab one initial frame;
    camera.capture(capture, format="bgr", use_video_port=True)
    image = capture.array
    capture.truncate(0)

    # discard a couple of frames while the sensor settles;
    for i in range(0, 2):
        camera.capture(capture, format="bgr", use_video_port=True)
        capture.truncate(0)


    while cv2.waitKey(1) != 27:
        # read one BGR frame from the Pi camera (replaces VideoCapture.read);
        camera.capture(capture, format="bgr", use_video_port=True)
        frame = capture.array
        capture.truncate(0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert each frame to grayscale
        blur = cv2.GaussianBlur(gray, (5, 5), 0)  # blur the grayscale image
        ret, th1 = cv2.threshold(blur, 35, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # threshold to remove noise
        ret1, th2 = cv2.threshold(th1, 127, 255, cv2.THRESH_BINARY_INV)  # invert the pixels of the image frame
        # find the contours (the slice keeps this compatible with both OpenCV 3 and 4 return signatures)
        contours, hierarchy = cv2.findContours(th2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    #     cv2.drawContours(frame,contours,-1,(0,255,0),3)
    #     cv2.imshow('frame',frame) #show video 
        for cnt in contours:
            if cnt is None:
                continue
            area = cv2.contourArea(cnt)  # find the area of the contour
            # find the moment and centroid of the detected contour;
            M = cv2.moments(cnt)
            if M['m00'] == 0:
                continue
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            # If the detected contour area is at least 500 it indicates a Zone Indicator,
            # so search further for the contour's colour and shape and pick the image to overlay;
            if area >= 500:
                rect = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
                if cv2.contourArea(box) >= 5000:
                    # motor stop code;
                    PWMR1.ChangeDutyCycle(0)
                    PWML1.ChangeDutyCycle(0)
                pixel = frame[cy, cx]
                # defaults so the checks below never hit an undefined name;
                mycolor = None
                myshape = None
                overlay_image = None
                if pixel[0] != 255 and pixel[1] != 255 and pixel[2] != 255:

                    x, y, w, h = cv2.boundingRect(cnt)  # compute the bounding rectangle of the contour

                    if pixel[0] == 254:
                        mycolor = 'BLUE'   # .. I have a BLUE contour
                    if pixel[1] == 127:
                        mycolor = 'GREEN'  # .. I have a GREEN contour
                        overlay_image = cv2.resize(image_green, (h, w))  # image_green is loaded elsewhere in the script
                    if pixel[2] == 254:
                        mycolor = 'RED'    # .. I have a RED contour
                        overlay_image = cv2.resize(image_red, (h, w))    # image_red is loaded elsewhere in the script

                    # approxPolyDP removes shadow (noise) and lets us detect the pure shape to follow;
                    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    try:
                        if len(approx) == 3:
                            myshape = "Triangle"
                        elif len(approx) == 4:
                            myshape = "Quadrilateral"
                        elif len(approx) == 5:
                            myshape = "Pentagon"
                        elif len(approx) == 6:
                            myshape = "Hexagon"
                        elif len(approx) > 7:
                            myshape = "CIRCLE"

                        if overlay_image is not None:
                            # call the blend function to overlay the chosen image (blend_transparent is defined elsewhere);
                            frame[y:y + w, x:x + h, :] = blend_transparent(frame[y:y + w, x:x + h, :], overlay_image)
                    except:
                        print('ERROR IN PROCESSING')

                time.sleep(5)  # the motor stops for 5 seconds while the overlay is applied
                # Detect colour and shape and choose the proper image to overlay;
                if mycolor == 'BLUE':
                    if myshape == "Triangle":
                        overlay_image = cv2.resize(cv2.imread('tulipblue.png'), (h, w))
                    elif myshape == "Quadrilateral":
                        overlay_image = cv2.resize(cv2.imread('hydrangeablue.png'), (h, w))
                    elif myshape == "CIRCLE":
                        overlay_image = cv2.resize(cv2.imread('orchid.png'), (h, w))
                if mycolor == 'GREEN':
                    if myshape == "Triangle":
                        overlay_image = cv2.resize(cv2.imread('hydrangeayellow.png'), (h, w))
                    elif myshape == "Quadrilateral":
                        overlay_image = cv2.resize(cv2.imread('sunflower.png'), (h, w))
                    elif myshape == "CIRCLE":
                        overlay_image = cv2.resize(cv2.imread('lily-double.png'), (h, w))
                if mycolor == 'RED':
                    if myshape == "Triangle":
                        overlay_image = cv2.resize(cv2.imread('tulipred.png'), (h, w))
                    elif myshape == "Quadrilateral":
                        overlay_image = cv2.resize(cv2.imread('gerber.png'), (h, w))
                    elif myshape == "CIRCLE":
                        overlay_image = cv2.resize(cv2.imread('carnation.png'), (h, w))

            # If the Zone Indicator is not detected, check the contour's position and steer accordingly;
            # if cx is less than 150 the centroid is to the left, so take a LEFT turn;
            elif cx <= 150:
                    l=(cx*100/160)
                    PWMR.start (0)
                    PWML.start (0)
                    PWMR1.ChangeDutyCycle (100)
                    PWML1.ChangeDutyCycle (abs(l-25))
                    time.sleep(.08)
            # If cx is greater than 170 the centroid is detected to the right, so take a RIGHT turn;
            elif cx >= 170:
                    r=((320-cx)*100/160)
                    PWMR.start (0)
                    PWML.start (0)
                    PWMR1.ChangeDutyCycle (abs(r-25))
                    PWML1.ChangeDutyCycle (100)
                    time.sleep(.08)
            # If cx is in between this range, take a slight LEFT;
            elif cx > 151 and cx < 169:
                    PWMR.start (0)
                    PWML.start (0)
                    PWMR1.ChangeDutyCycle (96)
                    PWML1.ChangeDutyCycle (100)
                    time.sleep(.3)
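The excerpt stops inside the control loop; a hedged sketch of the shutdown steps such a script usually needs once the loop exits (every line below is an assumption, not part of the original):

    # assumed shutdown sequence once the loop exits;
    PWMR.stop()
    PWMR1.stop()
    PWML.stop()
    PWML1.stop()
    GPIO.cleanup()
    camera.close()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()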