Example #1
    def main(self):
        jc = JpegStreamCamera(
            "http://192.168.1.101/videostream.cgi?user=pi&pwd=pi")
        vs = VideoStream("MyVideo2.avi", 25, True)
        for i in range(0, 35):
            print "loop"
            img = jc.getImage()
            print "gotimg"
            imgName = "img/img_%s.jpg" % (i)
            self.makeSaveImg = Process(target=self.saveImgToDisk,
                                       args=(img, imgName))
            self.makeSaveImg.start()
            #self.saveImgToDisk(img,imgName)
            vs.writeFrame(img)
            print i
            print "----------"

        #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=("MyVideo2.avi", "MyoutputFile.mpeg"))
        #self.makefilmProcess.start()

        params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(
            "MyVideo2.avi", "MyoutputFile.avi")

        # run avconv to compress the video, since ffmpeg is (about to be) deprecated.
        call('avconv' + params, shell=True)
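For reference, a minimal sketch of the same compression step using ffmpeg directly (assuming an ffmpeg binary is on PATH; most current distributions ship ffmpeg rather than avconv):

from subprocess import call

params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format("MyVideo2.avi", "MyoutputFile.avi")
call('ffmpeg' + params, shell=True)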
Example #2
class ThreadingObject(object):
    """ Threading object class

    The run() method will be started and it will run in the background
    until the application exits.
    """
 
    def __init__(self, interval=1, video=False):
        """ Constructor

        :type interval: int
        :param interval: Check interval, in seconds
        """
        self.interval = interval
        self.url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
        self.ipCam = JpegStreamCamera(self.url)
        self.display = Display()

        thread = threading.Thread(target=self.run, args=(video,))
        thread.daemon = True                            # Daemonize thread
        thread.start()                                  # Start the execution
 
    def run(self, video):
        """ Method that runs forever """
        while not self.display.isDone():
            if video:
                imagen = self.ipCam.live()
            else:
                imagen = self.ipCam.getImage().show()
            time.sleep(self.interval)
        self.display.quit()  # close the display; 'imagen' may be None when video=True (live() returns nothing)
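A minimal usage sketch for the class above (a hypothetical driver; it assumes the SimpleCV, threading and time imports that this excerpt relies on):

monitor = ThreadingObject(interval=1, video=False)  # starts polling in a daemon thread
while True:
    time.sleep(1)  # keep the main thread alive; the daemon thread exits with it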
Example #3
def getImage(video):
    url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
    ipCam = JpegStreamCamera(url)
    display = Display()
    if not video:
        imagen = ipCam.getImage().show()
        while not display.isDone():
            pass

    else:
        while not display.isDone():

            imagen = ipCam.getImage()
            #faces = imagen.findHaarFeatures('face')
            #if faces is not None:
            #    faces = faces.sortArea()
            #    bigFace = faces[-1]
                # Draw a green box around the face
            #    bigFace.draw()
            #imagen = ipCam.live()
            imagen.save(display)

    display.quit()  # close the display; 'imagen' is a plain Image in the video branch
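A minimal usage sketch for the helper above (the camera URL and credentials are the ones hard-coded inside the function):

if __name__ == '__main__':
    getImage(video=True)  # stream frames until the display window is closed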
Example #4
    def main(self):
        jc = JpegStreamCamera(
            "http://192.168.1.101/videostream.cgi?user=pi&pwd=pi")
        vs = VideoStream("MyVideo2.avi", 25, True)
        for i in range(0, 35):
            print "loop"
            img = jc.getImage()
            print "gotimg"
            imgName = "img/img_%s.jpg" % (i)
            self.makeSaveImg = Process(target=self.saveImgToDisk,
                                       args=(img, imgName))
            self.makeSaveImg.start()
            #self.saveImgToDisk(img,imgName)
            vs.writeFrame(img)
            print i
            print "----------"

        #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=("MyVideo2.avi", "MyoutputFile.mpeg"))
        #self.makefilmProcess.start()

        params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(
            "MyVideo2.avi", "MyoutputFile.avi")

        # run avconv to compress the video, since ffmpeg is (about to be) deprecated.
        call('avconv' + params, shell=True)
Example #5
def scan(camera_base_url='http://octopi.local'):
    jc = JpegStreamCamera("{}/webcam/?action=stream".format(camera_base_url))
    qrcode = []

    horizontal_flip = True

    while (not qrcode):
        img_og = jc.getImage()  #gets image from the camera

        img = img_og

        if horizontal_flip:
            img = img_og.flipHorizontal()
            horizontal_flip = False
        else:
            horizontal_flip = True

        qrcode = img.findBarcode()  #finds qr data from image

        if (qrcode is not None):  #if there is some data processed
            qrcode = qrcode[0]
            result = str(qrcode.data)
            return result  #returns result of qr in python shell
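A minimal usage sketch for the scanner above (the default base URL points at an OctoPi-style /webcam/?action=stream endpoint; adjust it for other cameras):

if __name__ == '__main__':
    data = scan()  # blocks until a QR code is decoded from the stream
    print data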
Example #6
def inImage(x, y, image):  # def line reconstructed here; it was truncated in the original excerpt
    return (x > 0 and x < image.width and y > 0 and y < image.height)

width = 640
lon_0 = 270
lat_0 = 80
pixelPerRadians = 640
height = 480
radius = pixelPerRadians

max_length = 0

cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  # 640 * 480
mobile = mobileState.mobileState()
while True:
    mobile.checkUpdate()
    if mobile.isToUpdate:
        mobile.computeRPY()
    image = cam.getImage().rotate(-sp.rad2deg(mobile.roll), fixed=False)
    m = Basemap(width=image.width, height=image.height, projection='aeqd',
                lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw),
                rsphere=radius)
    # fill background.
    #m.drawmapboundary(fill_color='aqua')
    # draw coasts and fill continents.
    #m.drawcoastlines(linewidth=0.5)
    #m.fillcontinents(color='coral',lake_color='aqua')
    # 20 degree graticule.
    # m.drawparallels(np.arange(-90,90,30))
    #m.drawmeridians(np.arange(-180,180,30))
    # draw a black dot at the center.
    #xpt, ypt = m(heading_deg, elevation_deg)
    #m.plot([xpt],[ypt],'ko')
    # draw the title.
    #plt.title('Azimuthal Equidistant Projection')
Example #7
from SimpleCV import JpegStreamCamera
# Initialize the webcam by providing URL to the camera
cam = JpegStreamCamera("http://142.157.159.116:8080/video.jpeg?sessionId=1378236515.816177")
cam.getImage().show()
Example #8
from SimpleCV import JpegStreamCamera
from SimpleCV import Camera, Display
from time import sleep
cam = JpegStreamCamera("http://192.168.1.4:8083/videofeed")
cam1 = JpegStreamCamera("http://192.168.1.7:8070/videofeed")
myDisplay = Display(resolution=(640, 960))
#myDisplay1 = Display(resolution=(640, 480))
while not myDisplay.isDone():
    img = cam.getImage()
    img1 = cam1.getImage()
    img.sideBySide(img1).show()
    sleep(.1)
Example #9
from SimpleCV import JpegStreamCamera
from SimpleCV import Color, Display, RunningSegmentation, Circle
import os, time

cam = JpegStreamCamera("https://192.168.1.102:8080/videofeed")
frame = cam.getImage()
display = Display((frame.width, frame.height))

diff_seg = RunningSegmentation()
default_area = 1000

while display.isNotDone(): 
    original = cam.getImage().flipHorizontal()
    diff_seg.addImage(original)
    diff_image = diff_seg.getSegmentedImage(False)
    if diff_image is None:  # the segmentation needs a couple of frames before it is ready
        continue
    active = original - diff_image.invert()
    
    if active:
        hue = active.hueDistance(Color.RED).binarize(50)
        blobs = hue.findBlobs()
        if blobs:
            original.dl().polygon(blobs[-1].mConvexHull, color=Color.RED, width=3)
    original.save(display)
    time.sleep(0.1)
    
    
Example #10
lon_0 = 270
lat_0 = 80
pixelPerRadians = 640
height = 480
radius = pixelPerRadians

max_length = 0

cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  #640 * 480
mobile = mobileState.mobileState()
mobile.open()
while True:
    mobile.checkUpdate()
    if mobile.isToUpdate:
        mobile.computeRPY()
    image = cam.getImage().rotate(-sp.rad2deg(mobile.roll), fixed=False)
    m = Basemap(width=image.width,
                height=image.height,
                projection='aeqd',
                lat_0=sp.rad2deg(mobile.pitch),
                lon_0=sp.rad2deg(mobile.yaw),
                rsphere=radius)
    # fill background.
    #m.drawmapboundary(fill_color='aqua')
    # draw coasts and fill continents.
    #m.drawcoastlines(linewidth=0.5)
    #m.fillcontinents(color='coral',lake_color='aqua')
    # 20 degree graticule.
    # m.drawparallels(np.arange(-90,90,30))
    #m.drawmeridians(np.arange(-180,180,30))
    # draw a black dot at the center.
Example #11
def carMov(flag, faceX, faceY, update='', movSet=0.5):
    ''' The function takes 5 arguments:
        flag: 'u','r','d','l','s','t','web'
        faceX, faceY: x,y coordinates of the detected face
        update: URL used to update the database; empty string by default
        movSet: motor step value, 0.5 by default; for the car it is used as the run time in seconds

        Returns:
        the update string with the GET parameters appended
        '''
    # handle the Bluetooth button presses ---start---

    if flag == 'u' or flag == 'r':  # if up or right was pressed
        if flag == 'u':
            GPIO.output(17, False)
            GPIO.output(18, True)  # current direction: 18->17, 23->22
            GPIO.output(22, False)
            GPIO.output(23, True)
            #movSet=1;
            update += 'move=up'
            print 'Forward..... ' + update
        elif flag == 'r':
            GPIO.output(17, False)
            GPIO.output(18, True)
            GPIO.output(22, False)
            GPIO.output(23, False)
            #movSet=1;
            update += 'move=right'
            print 'Turning right..... ' + update
    elif flag == 'd' or flag == 'l':  # down or left was pressed
        if flag == 'd':
            GPIO.output(17, True)
            GPIO.output(18, False)
            GPIO.output(22, True)
            GPIO.output(23, False)
            update += 'move=down'
            print 'Backward..... ' + update
        elif flag == 'l':
            GPIO.output(17, False)
            GPIO.output(18, False)
            GPIO.output(22, False)
            GPIO.output(23, True)
            update += 'move=left'
            print 'Turning left..... ' + update
    elif flag == 's':
        GPIO.output(17, False)
        GPIO.output(18, False)
        GPIO.output(22, False)
        GPIO.output(23, False)
        update += 'move=stop'
        print 'Stopping..... ' + update

    elif flag == 't':  #trace face
        cam = JpegStreamCamera("http://114.32.209.33:1234/?action=stream")
        #cam = JpegStreamCamera("http://172.20.10.13:8080/?action=stream")
        while True:
            img = cam.getImage()
            faces = img.findHaarFeatures('face.xml')
            if faces:
                bigFaces = faces.sortArea()[-1]
                bigFaces.draw()  #draw green line
                img.save(js)
                time.sleep(0.01)
                location = bigFaces.coordinates()
                print "偵測到臉部....x位置為:", location[0]
                print "偵測到臉部....y位置為:", location[1]
                faceX = location[0]
                faceY = location[0]
                x = location[0] - 160
                y = location[1] - 120
                print "距離x座標中心(160,120):", x
                print "距離y座標中心(160,120):", y
                break  # break out of the while loop once a face has been found
            else:
                print "沒有抓到臉部"
            # 判斷藍芽案的按鈕---end---
        update += 'move=trace&X=' + str(faceX) + "&Y=" + str(faceY)

    time.sleep(0.1)
    print 'Request URL: ' + update + '&WEB=0'
    return update
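A hypothetical driver for carMov (a sketch only: it assumes RPi.GPIO in BCM mode with pins 17/18/22/23 wired as in the function, and the update URL is illustrative):

import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BCM)
for pin in (17, 18, 22, 23):
    GPIO.setup(pin, GPIO.OUT)  # motor-driver inputs used by carMov

base = 'http://example.local/update.php?'
print carMov('u', 0, 0, update=base)  # drive forward
time.sleep(1)
print carMov('s', 0, 0, update=base)  # stop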
Example #12
from SimpleCV import Image, JpegStreamCamera 
import time

#cam = JpegStreamCamera('http://192.168.1.102:8080/videofeed')
cam = JpegStreamCamera('http://172.17.200.241:8080/videofeed')
img = cam.getImage().flipHorizontal().flipVertical()
#img = Image('/home/jinyuan/Downloads/handmagician/finger.jpg')
ts = []
bb = (300,400,20,20) # get Bounding Box from some method

while True:
    img1 = cam.getImage().flipHorizontal().flipVertical()
    ts = img1.track(method="camshift", ts=ts, img=img, bb=bb)
    #ts = img1.track(method="camshift", ts=ts, img=img, bb=bb)
    ts = img1.track(method="mftrack", ts=ts, img=img, bb=bb)
    ts.drawBB()
    img1.show()
Example #13
#!/usr/bin/python
# coding=utf-8
# vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}'

from SimpleCV import JpegStreamCamera, Display, Image
cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed')
disp = Display()
while disp.isNotDone():
    img = cam.getImage()
    img.save(disp)
Example #14
class Hover:
    """Uses vision tracking and PID-control to hover at a given altitude

    1. Run camera capture in its own thread; update altitude
    2. Run hover loop in its own thread; output altitude
    """
    def __init__(self):
        logger.info("{name} init()".format(name=self.__class__.__name__))
        self.initControl()
        self.initCamera()
        self.initTracking()
        self.initHover()

    def initControl(self):
        """Setup control-flow variables"""
        self.exit = False

    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG
        """
        # TODO: Track last camera mode; use 'Enter' to repeat that
        camIp = raw_input("Specify camera; enter for webcam, " +
                          "or ip of network camera:\n")
        logger.info("Camera specified as '{camIp}'".format(camIp=camIp))

        if camIp == '':
            self.cam = Camera()
        elif '.' not in camIp:
            self.cam = JpegStreamCamera(
                "http://192.168.1.{ip}:8080/video".format(ip=camIp))
        else:
            self.cam = JpegStreamCamera(
                "http://{ip}:8080/video".format(ip=camIp))

        self.camRes = (800, 600)
        logger.info("Camera resolution={res}".format(res=self.camRes))

    """ setup tracking """

    def initTracking(self):
        self.trackingColor = Color.RED
        self.trackingBlobMin = 10
        self.trackingBlobMax = 5000
        self.x = -1
        self.y = -1
        self.trackingFrameQ = Queue()
        logger.info(
            "Tracking color={color}; blobMin={min}; blobMax={max}".format(
                color=self.trackingColor,
                min=self.trackingBlobMin,
                max=self.trackingBlobMax))

    def initHover(self):
        self.hoverFrameQ = Queue()

    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()

            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin,
                                        self.trackingBlobMax)

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))

                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)

                # draw a circle around the main blob
                roiLayer.circle((self.x, self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()

            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))

    def hoverLoop(self):
        while not self.exit:
            sleep(0.01)

            # fps
            now = datetime.utcnow()
            self.hoverFrameQ.put(now)
            if self.hoverFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.hoverFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))

    def start(self):
        threading.Thread(target=self.visionLoop).start()
        threading.Thread(target=self.hoverLoop).start()
        raw_input("Press any key to stop")
        self.exit = True
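A minimal launch sketch for the class above (assuming the module-level imports and logger that this excerpt relies on are already in place):

if __name__ == '__main__':
    hover = Hover()   # prompts for the camera to use
    hover.start()     # runs visionLoop and hoverLoop in threads until enter is pressed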
Example #15
class Hover:
    """Uses vision tracking and PID-control to hover at a given altitude

    1. Run camera capture in its own thread; update altitude
    2. Run hover loop in its own thread; output altitude
    """
    def __init__(self):
        logger.info("{name} init()".format(name=self.__class__.__name__))
        self.settings = shelve.open('/tmp/hover.settings')
        self.initControl()
        self.initCamera()
        self.initTracking()
        self.initHover()
        self.initCF()
        self.settings.close()

    def initControl(self):
        """Setup control-flow variables"""
        self.exit = False

    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG.  Uses shelve to remember last entered
        value
        """

        # get settings
        lastCamUri = self.settings.get('camUri')
        lastCamRotate = self.settings.get('camRotate')

        # prompt for camUri, camRotate
        if lastCamUri:
            _input = raw_input("Specify camera: 'cam' for webcam, " +
                               "an ip of network camera or <ENTER> for the " +
                               "previous value of '{lastCamUri}':\n".format(
                                   lastCamUri=lastCamUri))
            if not _input:
                self.camUri = lastCamUri
            else:
                self.camUri = _input
        else:
            _input = raw_input("Specify camera: 'cam'/<ENTER> for webcam or " +
                               "an ip of network camera:\n")
            if not _input:
                self.camUri = 'cam'
            else:
                self.camUri = _input

        logger.info("CamUri = '{camUri}'".format(camUri=self.camUri))

        if lastCamRotate:
            _input = raw_input("Specify camera rotation or <ENTER> for the " +
                               "previous value of '{lastCamRotate}':\n".format(
                                   lastCamRotate=lastCamRotate))
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = lastCamRotate
        else:
            _input = raw_input("Specify camera rotation or <ENTER> for " +
                               "no rotation':\n")
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = 0

        logger.info("CamRotate = '{camRotate}'"\
                    .format(camRotate=self.camRotate))

        # save settings
        self.settings['camUri'] = self.camUri
        self.settings['camRotate'] = self.camRotate

        # setup camera
        if self.camUri == "cam":
            self.cam = Camera()
        elif '.' not in self.camUri:
            self.cam = JpegStreamCamera(
                "http://192.168.1.{ip}:8080/video".format(ip=self.camUri))
        else:
            self.cam = JpegStreamCamera(
                "http://{ip}:8080/video".format(ip=self.camUri))

        # additional values -- for now, just hard coded
        self.camRes = (800, 600)
        logger.info("Camera resolution={res}".format(res=self.camRes))

    def initTracking(self):
        """Setup tracking variables
        
        Will prompt the user for tracking variables.  Uses shelve to remember
        last entered value
        """

        # get last values
        lastTrackingColor = self.settings.get('trackingColor')

        # prompt for override
        if lastTrackingColor:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for previous value " +
                               "{lastTrackingColor}:\n".format(
                                   lastTrackingColor=lastTrackingColor))
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = lastTrackingColor
        else:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for default of BLUE:\n")
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = Color.BLUE

        # save settings
        self.settings['trackingColor'] = self.trackingColor

        # additional values
        self.trackingBlobMin = 5
        self.trackingBlobMax = 1000
        self.x = -1
        self.y = -1
        self.target = (300, 400)
        logger.info(("Tracking color={color}; min={min}; max={max}; " +
                     "target={target}").format(color=self.trackingColor,
                                               min=self.trackingBlobMin,
                                               max=self.trackingBlobMax,
                                               target=self.target))
        self.trackingFrameQ = Queue()

    def initHover(self):
        self.hoverFrames = []
        self.eSum = 0

    def initCF(self):
        self.cfConnected = False
        self.cf = None
        self.cfUri = None

        logger.debug("Initializing Drivers; Debug=FALSE")
        InitDrivers(enable_debug_driver=False)

        logger.info("Scanning")
        available = Scan()

        logger.info("Found:")
        for cf in available:
            logger.debug(cf[0])

        if len(available) > 0:
            self.cfUri = available[0][0]
            self.cf = Crazyflie()
            self.cf.connected.add_callback(self.cfOnConnected)
            self.cf.disconnected.add_callback(self.cfOnDisconnected)
            self.cf.connection_failed.add_callback(self.cfOnFailed)
            self.cf.connection_lost.add_callback(self.cfOnLost)
            logger.info("Crazyflie @{uri} Initialized".format(uri=self.cfUri))
        else:
            logger.info("No Crazyflies found")

    def cfOnConnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = True

    def cfOnDisconnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = False

    def cfOnFailed(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))

    def cfOnLost(self, uri, msg):
        logger.info("{func} {uri} {msg}".format(func=inspect.stack()[0][3],
                                                uri=uri,
                                                msg=msg))

    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()

            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            if self.camRotate != 0:
                img = img.rotate(self.camRotate)
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin,
                                        self.trackingBlobMax)
            '''
            blobs = img.findBlobs((255,60,60), self.trackingBlobMin,
                                  self.trackingBlobMax)
            '''

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))

                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)

                # draw a circle around the main blob
                roiLayer.circle((self.x, self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()

            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def hoverLoop(self):
        while not self.exit:
            # hover throttled to camera frame rate
            sleep(1.0 / 30.0)

            # fps
            now = datetime.utcnow()
            self.hoverFrames.append(now)
            if len(self.hoverFrames) < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.hoverFrames[-30]).total_seconds()

            # pid variables
            kp = (3000.0 / 400.0)
            ki = 0.18

            # determine immediate error e
            e = self.y - self.target[1]

            # determine accumulated errors eSum
            if len(self.hoverFrames) > 1:
                dt = (now - self.hoverFrames[-2]).total_seconds()
            else:
                dt = 0
            self.eSum = self.eSum + e * dt

            # calculate thrust
            hoverThrust = 37000
            thrust = hoverThrust + (kp * e) + (ki * self.eSum)

            # set the throttle
            self.setThrust(thrust)

            # logging
            logger.debug(("{func} e={e}, dt={dt:0.4f}, eSum={eSum:0.4f}, " +
                          "thrust={thrust},  [{fps:5.2f}]").format(
                              func=inspect.stack()[0][3],
                              e=e,
                              dt=dt,
                              eSum=self.eSum,
                              thrust=thrust,
                              fps=fps))

        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def setThrust(self, thrust):
        """ sets thrust - but if control has exited, will kill thrust """
        if self.exit:
            thrust = 0

        if self.cf is not None:
            self.cf.commander.send_setpoint(0, 0, 0, thrust)

    def cfOpenLink(self):
        if self.cf is not None:
            self.cf.open_link(self.cfUri)
            logger.info("Linked opened to {uri}".format(uri=self.cfUri))

    def start(self):
        logger.info("Starting VisionLoop")
        threading.Thread(target=self.visionLoop).start()

        logger.info("Starting HoverLoop")
        threading.Thread(target=self.hoverLoop).start()

        logger.info("Opening Crazyflie link")
        self.cfOpenLink()

        raw_input("Press enter to stop")
        self.stop()

    def stop(self):
        # kill our loops
        self.exit = True

        # explicitly kill thrust
        self.setThrust(0)

        # kill crazyflie
        if self.cf and self.cfConnected:
            self.cf.close_link()
Example #16
from SimpleCV import Display, JpegStreamCamera, Camera, Color
cam = JpegStreamCamera('http://192.168.1.50:8080/videofeed')
#cam = Camera(0)
disp = Display((1000, 1000))
colorToDisplay = Color.RED
while True:
    img = cam.getImage()
    img.drawRectangle(0, 0, 999, 999, color=colorToDisplay, width=0, alpha=255)
    img.save(disp)
    red_dist = img.colorDistance(color=Color.RED)
    col = red_dist.getPixel(0, 0)
    #red_dist.save(disp)
    summation = col[0] + col[1] + col[2]
    print summation
    if summation < 400:
        colorToDisplay = Color.BLUE
    else:
        colorToDisplay = Color.RED
Example #17
class Hover:
    """Uses vision tracking and PID-control to hover at a given altitude

    1. Run camera capture in its own thread; update altitude
    2. Run hover loop in its own thread; output altitude
    """

    def __init__(self):
        logger.info("{name} init()".format(name=self.__class__.__name__))
        self.settings = shelve.open('/tmp/hover.settings')
        self.initControl()
        self.initCamera()
        self.initTracking()
        self.initHover()
        self.initCF()
        self.settings.close()

    def initControl(self):
        """Setup control-flow variables"""
        self.exit = False

    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG.  Uses shelve to remember last entered
        value
        """
        
        # get settings
        lastCamUri = self.settings.get('camUri')
        lastCamRotate = self.settings.get('camRotate')

        # prompt for camUri, camRotate
        if lastCamUri:
            _input = raw_input("Specify camera: 'cam' for webcam, " +
                              "an ip of network camera or <ENTER> for the " +
                              "previous value of '{lastCamUri}':\n"
                              .format(lastCamUri=lastCamUri))
            if not _input:
                self.camUri = lastCamUri
            else:
                self.camUri = _input
        else:
            _input = raw_input("Specify camera: 'cam'/<ENTER> for webcam or " +
                                "an ip of network camera:\n")
            if not _input:
                self.camUri = 'cam'
            else:
                self.camUri = _input
        
        logger.info("CamUri = '{camUri}'".format(camUri=self.camUri))
        
        if lastCamRotate:
            _input = raw_input("Specify camera rotation or <ENTER> for the " +
                               "previous value of '{lastCamRotate}':\n"
                               .format(lastCamRotate=lastCamRotate))
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = lastCamRotate
        else:
            _input = raw_input("Specify camera rotation or <ENTER> for " +
                               "no rotation':\n")
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = 0

        logger.info("CamRotate = '{camRotate}'"\
                    .format(camRotate=self.camRotate))
        
        # save settings
        self.settings['camUri'] = self.camUri
        self.settings['camRotate'] = self.camRotate

        # setup camera
        if self.camUri == "cam":
            self.cam = Camera()
        elif '.' not in self.camUri:
            self.cam = JpegStreamCamera("http://192.168.1.{ip}:8080/video"
                                        .format(ip=self.camUri))
        else:
            self.cam = JpegStreamCamera("http://{ip}:8080/video"
                                        .format(ip=self.camUri))

        # additional values -- for now, just hard coded
        self.camRes = (800,600)
        logger.info("Camera resolution={res}".format(res=self.camRes))

    def initTracking(self):
        """Setup tracking variables
        
        Will prompt the user for tracking variables.  Uses shelve to remember
        last entered value
        """

        # get last values
        lastTrackingColor = self.settings.get('trackingColor')

        # prompt for override
        if lastTrackingColor:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for previous value " +
                               "{lastTrackingColor}:\n"
                               .format(lastTrackingColor=lastTrackingColor))
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = lastTrackingColor
        else:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for default of BLUE:\n")
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = Color.BLUE

        # save settings
        self.settings['trackingColor'] = self.trackingColor

        # additional values
        self.trackingBlobMin = 5
        self.trackingBlobMax = 1000
        self.x = -1
        self.y = -1
        self.target = (300,400)
        logger.info(("Tracking color={color}; min={min}; max={max}; " +
                     "target={target}")
                    .format(color=self.trackingColor,
                            min=self.trackingBlobMin,
                            max=self.trackingBlobMax,
                            target=self.target))
        self.trackingFrameQ = Queue()

    def initHover(self):
        self.hoverFrames = []
        self.eSum = 0

    def initCF(self):
        self.cfConnected = False
        self.cf = None
        self.cfUri = None

        logger.debug("Initializing Drivers; Debug=FALSE")
        InitDrivers(enable_debug_driver=False)

        logger.info("Scanning")
        available = Scan()

        logger.info("Found:")
        for cf in available:
            logger.debug(cf[0])
        
        if len(available) > 0:
            self.cfUri = available[0][0]
            self.cf = Crazyflie()
            self.cf.connected.add_callback(self.cfOnConnected)
            self.cf.disconnected.add_callback(self.cfOnDisconnected)
            self.cf.connection_failed.add_callback(self.cfOnFailed)
            self.cf.connection_lost.add_callback(self.cfOnLost)
            logger.info("Crazyflie @{uri} Initialized".format(uri=self.cfUri)) 
        else:
            logger.info("No Crazyflies found")
    
    def cfOnConnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = True

    def cfOnDisconnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = False

    def cfOnFailed(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))

    def cfOnLost(self, uri, msg):
        logger.info("{func} {uri} {msg}".format(func=inspect.stack()[0][3], 
                                                uri=uri, msg=msg))

    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()
            
            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            if self.camRotate != 0:
                img = img.rotate(self.camRotate)
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin, 
                                        self.trackingBlobMax)
            '''
            blobs = img.findBlobs((255,60,60), self.trackingBlobMin,
                                  self.trackingBlobMax)
            '''

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))
                
                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)
                
                # draw a circle around the main blob
                roiLayer.circle((self.x,self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()
            
            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0/(now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}"
                         .format(func=inspect.stack()[0][3],
                                 x=self.x, y=self.y, fps=fps))
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def hoverLoop(self):
        while not self.exit:
            # hover throttled to camera frame rate
            sleep(1.0/30.0)

            # fps
            now = datetime.utcnow()
            self.hoverFrames.append(now)
            if len(self.hoverFrames) < 30:
                fps = 0.0
            else:
                fps = 30.0/(now - self.hoverFrames[-30]).total_seconds()
            
            # pid variables
            kp = (3000.0/400.0)
            ki = 0.18

            # determine immediate error e
            e = self.y - self.target[1]
            
            # determine accumulated errors eSum
            if len(self.hoverFrames) > 1:
                dt = (now - self.hoverFrames[-2]).total_seconds()
            else:
                dt = 0
            self.eSum = self.eSum + e * dt

            # calculate thrust
            hoverThrust = 37000
            thrust = hoverThrust + (kp * e) + (ki * self.eSum)

            # set the throttle
            self.setThrust(thrust)

            # logging
            logger.debug(("{func} e={e}, dt={dt:0.4f}, eSum={eSum:0.4f}, " +
                          "thrust={thrust},  [{fps:5.2f}]")
                         .format(func=inspect.stack()[0][3],
                                 e=e, dt=dt, eSum=self.eSum, thrust=thrust,
                                 fps=fps)) 
        
        
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def setThrust(self, thrust):
        """ sets thrust - but if control has exited, will kill thrust """
        if self.exit:
            thrust = 0
        
        if self.cf is not None:
            self.cf.commander.send_setpoint(0,0,0, thrust)

    def cfOpenLink(self):
        if self.cf is not None:
            self.cf.open_link(self.cfUri)
            logger.info("Linked opened to {uri}".format(uri=self.cfUri))

    def start(self):
        logger.info("Starting VisionLoop")
        threading.Thread(target=self.visionLoop).start()

        logger.info("Starting HoverLoop")
        threading.Thread(target=self.hoverLoop).start()

        logger.info("Opening Crazyflie link")
        self.cfOpenLink()

        raw_input("Press enter to stop")
        self.stop()
        
    def stop(self):
        # kill our loops
        self.exit = True

        # explicitly kill thrust
        self.setThrust(0)

        # kill crazyflie
        if self.cf and self.cfConnected:
            self.cf.close_link()
Example #18
def motorMov(flag, argX, argY, update='', movSet=0.5):
    ''' The function takes 5 arguments:
            flag: 'u','r','d','l','web'
            argX, argY: x,y coordinates stored in the database
            update: URL used to update the database; empty string by default
            movSet: motor step value, 0.5 by default

        Returns:
            the update string with the GET parameters appended
    '''
    # handle the Bluetooth button presses ---start---
    if flag == 'u' or flag == 'r':  # if up or right was pressed
        if flag == 'u':
            argY += movSet
            update += 'move=up'
        elif flag == 'r':
            argX += movSet
            update += 'move=right'

    elif flag == 'd' or flag == 'l':  # down or left was pressed
        if flag == 'd':
            argY += -movSet
            update += 'move=down'
        elif flag == 'l':
            argX += -movSet
            update += 'move=left'
    # handle the Bluetooth button presses ---end---

    elif flag == 'web':  # if Bluetooth is idle, drive via the Web control
        global isYMov
        # handle the Web buttons and drive the motors ---start---
        if isYMov:
            dutyResultY = round(transDuty(argY), 2)  # round to two decimal places
            print 'Web Y-axis dutyCycle: ' + str(dutyResultY)
            pwm_motorY.ChangeDutyCycle(dutyResultY)  # drive the Y-axis motor
        else:
            dutyResultX = round(transDuty(argX), 2)
            print 'Web X-axis dutyCycle: ' + str(dutyResultX)
            pwm_motorX.ChangeDutyCycle(dutyResultX)  # drive the X-axis motor

        update += 'move=web'
        print 'Request URL: ' + update
        time.sleep(0.1)
        return update
        # handle the Web buttons and drive the motors ---end---

    elif flag == 'trace':
        cam = JpegStreamCamera("http://172.20.10.4:8080/?action=stream")
        while True:
            img = cam.getImage()
            eyes = img.findHaarFeatures('right_eye.xml')
            if eyes:
                bigEyes = eyes.sortArea()[-1]
                location = bigEyes.coordinates()
                print "偵測到臉部....x位置為:", location[0]
                print "偵測到臉部....y位置為:", location[1]
                x = location[0] - 160
                y = location[1] - 120
                print "距離x座標中心(160,120):", x
                print "距離y座標中心(160,120):", y

                movX = round(transDuty(argX), 2)
                movY = round(transDuty(argY), 2)

                if y >= 60:
                    movY += 1
                elif y <= -60:
                    movY -= 1
                elif 60 > y >= 35:
                    movY += 0.5
                elif -60 < y <= -35:
                    movY -= 0.5

                if movY > 11.5:
                    movY = 11.5
                elif movY < 3:
                    movY = 3

                if x >= 80:
                    movX -= 1
                elif x <= -80:
                    movX += 1
                elif 80 > x >= 50:
                    movX -= 0.5
                elif -80 < x <= -50:
                    movX += 0.5

                if movX > 11.5:
                    movX = 11.5
                elif movX < 3:
                    movX = 3

                backX = round((7.25 - movX) / 17 * 40, 2)
                backY = round((7.25 - movY) / 17 * 40, 2)
                break

        pwm_motorX.start(movX)
        pwm_motorY.start(movY)
        update += 'move=trace&X=' + str(backX) + '&Y=' + str(backY)
        time.sleep(0.1)
        print 'Request URL: ' + update
        return update

    if (argY - resultY) != 0:
        dutyResultY = round(transDuty(argY), 2)  # convert the Y coordinate to a duty cycle
        print 'BT Y-axis dutyCycle: ' + str(dutyResultY)
        pwm_motorY.ChangeDutyCycle(dutyResultY)
    else:
        dutyResultX = round(transDuty(argX), 2)  # convert the X coordinate to a duty cycle
        print 'BT X-axis dutyCycle: ' + str(dutyResultX)
        pwm_motorX.ChangeDutyCycle(dutyResultX)
    time.sleep(0.1)

    print 'Request URL: ' + update
    return update