Example #1
    def main(self):
        jc = JpegStreamCamera(
            "http://192.168.1.101/videostream.cgi?user=pi&pwd=pi")
        vs = VideoStream("MyVideo2.avi", 25, True)
        for i in range(0, 35):
            print "loop"
            img = jc.getImage()
            print "gotimg"
            imgName = "img/img_%s.jpg" % (i)
            self.makeSaveImg = Process(target=self.saveImgToDisk,
                                       args=(img, imgName))
            self.makeSaveImg.start()
            #self.saveImgToDisk(img,imgName)
            vs.writeFrame(img)
            print i
            print "----------"

        #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=("MyVideo2.avi", "MyoutputFile.mpeg"))
        #self.makefilmProcess.start()

        params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(
            "MyVideo2.avi", "MyoutputFile.avi")

        # run avconv to compress the video (ffmpeg was being deprecated in favour of avconv at the time)
        call('avconv' + params, shell=True)
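
Example #1 calls a saveImgToDisk helper that is not shown above; a minimal sketch of what such a method might look like (the name and signature come from the call above, the body is an assumption):

    def saveImgToDisk(self, img, imgName):
        # hypothetical helper: persist the captured frame as a JPEG;
        # SimpleCV's Image.save() accepts a plain file path
        img.save(imgName)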
Example #2
class ThreadingObject(object):
    """ Threading object class

    The run() method will be started and it will run in the background
    until the application exits.
    """
 
    def __init__(self, interval=1, video=False):
        """ Constructor

        :type interval: int
        :param interval: Check interval, in seconds
        """
        self.interval = interval
        self.url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
        self.ipCam = JpegStreamCamera(self.url)
        self.display = Display()

        thread = threading.Thread(target=self.run, args=(video,))
        thread.daemon = True                            # Daemonize thread
        thread.start()                                  # Start the execution
 
    def run(self, video):
        """ Method that runs forever """
        while not self.display.isDone():
            if video:
                imagen = self.ipCam.live()
            else:
                imagen = self.ipCam.getImage().show()
            time.sleep(self.interval)
        self.display.quit()
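
A brief usage sketch for Example #2 (the two-second interval and the keep-alive loop are assumptions, not part of the original project):

import time

if __name__ == '__main__':
    monitor = ThreadingObject(interval=2, video=False)
    # run() executes in a daemon thread, so keep the main thread alive
    while True:
        time.sleep(1)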
Example #3
    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG
        """
        # TODO: Track last camera mode; use 'Enter' to repeat that
        camIp = raw_input("Specify camera; enter for webcam, " +
                          "or ip of network camera:\n")
        logger.info("Camera specified as '{camIp}'".format(camIp=camIp))

        if camIp == '':
            self.cam = Camera()
        elif '.' not in camIp:
            self.cam = JpegStreamCamera(
                "http://192.168.1.{ip}:8080/video".format(ip=camIp))
        else:
            self.cam = JpegStreamCamera(
                "http://{ip}:8080/video".format(ip=camIp))

        self.camRes = (800, 600)
        logger.info("Camera resolution={res}".format(res=self.camRes))
Example #5
def scan(camera_base_url='http://octopi.local'):
    jc = JpegStreamCamera("{}/webcam/?action=stream".format(camera_base_url))
    qrcode = []

    horizontal_flip = True

    while (not qrcode):
        img_og = jc.getImage()  #gets image from the camera

        img = img_og

        if horizontal_flip:
            img = img_og.flipHorizontal()
            horizontal_flip = False
        else:
            horizontal_flip = True

        qrcode = img.findBarcode()  #finds qr data from image

        if (qrcode is not None):  #if there is some data processed
            qrcode = qrcode[0]
            result = str(qrcode.data)
            return result  #returns result of qr in python shell
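
A minimal driver for Example #5; note that findBarcode() relies on the python zbar bindings being installed. The base URL is the example's default:

if __name__ == '__main__':
    # poll the stream until a QR code is decoded, then print its payload
    print "decoded QR payload:", scan('http://octopi.local')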
Example #6
def getImage(video):
    url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
    ipCam = JpegStreamCamera(url)
    display = Display()
    if not video:
        imagen = ipCam.getImage().show()
        while not display.isDone():
            pass

    else:
        while not display.isDone():

            imagen = ipCam.getImage()
            #faces = imagen.findHaarFeatures('face')
            #if faces is not None:
            #    faces = faces.sortArea()
            #    bigFace = faces[-1]
                # Draw a green box around the face
            #    bigFace.draw()
            #imagen = ipCam.live()
            imagen.save(display)

    display.quit()
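
A one-line usage sketch for Example #6 (the argument choice is an assumption):

if __name__ == '__main__':
    getImage(True)  # True = continuously updated feed, False = single still frame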
Example #7
    def __init__(self, interval=1, video=False):
        """ Constructor

        :type interval: int
        :param interval: Check interval, in seconds
        """
        self.interval = interval
        self.url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
        self.ipCam = JpegStreamCamera(self.url)
        self.display = Display()

        thread = threading.Thread(target=self.run, args=(video,))
        thread.daemon = True                            # Daemonize thread
        thread.start()                                  # Start the execution
Example #8
from SimpleCV import JpegStreamCamera, Image, FaceRecognizer

cam = JpegStreamCamera("http://192.168.1.9:8080/video")
recognizer = FaceRecognizer()
recognizer.load("Saranyaraj_Others(Gates-Elon-Sathiya)_trainingdata.xml")

while True:
    img = cam.getImage()
    feat = img.findAndRecognizeFaces(recognizer, "face.xml")
    if feat is not None:
        for feature, label, confidence in feat:
            feature.draw(width=4)
            img.drawText(str("Saranyaraj" if label else "Unauthorised"), fontsize=30)
            img.show()


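
The snippet above loads a pre-trained recognizer from an XML file; a hedged sketch of how such a file could be produced with SimpleCV's FaceRecognizer (the image file names and labels below are placeholders):

from SimpleCV import Image, FaceRecognizer

# hypothetical training set: label 1 = Saranyaraj, label 0 = others;
# all images should share the same resolution for training to succeed
faces = [Image("face_%d.jpg" % i) for i in range(10)]
labels = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]

recognizer = FaceRecognizer()
recognizer.train(faces, labels)
recognizer.save("Saranyaraj_Others(Gates-Elon-Sathiya)_trainingdata.xml")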

Example #9
from SimpleCV import JpegStreamCamera
# Initialize the webcam by providing URL to the camera
cam = JpegStreamCamera("http://142.157.159.116:8080/video.jpeg?sessionId=1378236515.816177")
cam.getImage().show()
Example #10
from SimpleCV import JpegStreamCamera
from SimpleCV import Camera, Display
from time import sleep
cam = JpegStreamCamera("http://192.168.1.4:8083/videofeed")
cam1 = JpegStreamCamera("http://192.168.1.7:8070/videofeed")
myDisplay = Display(resolution=(640, 960))
#myDisplay1 = Display(resolution=(640, 480))
while not myDisplay.isDone():
    img = cam.getImage()
    img1 = cam1.getImage()
    img.sideBySide(img1).show()
    sleep(.1)
Example #11
class Hover:
    """Uses vision tracking and PID-control to hover at a given altitude

    1. Run camera capture in its own thread; update altitude
    2. Run hover loop in its own thread; output altitude
    """
    def __init__(self):
        logger.info("{name} init()".format(name=self.__class__.__name__))
        self.initControl()
        self.initCamera()
        self.initTracking()
        self.initHover()

    def initControl(self):
        """Setup control-flow variables"""
        self.exit = False

    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG
        """
        # TODO: Track last camera mode; use 'Enter' to repeat that
        camIp = raw_input("Specify camera; enter for webcam, " +
                          "or ip of network camera:\n")
        logger.info("Camera specified as '{camIp}'".format(camIp=camIp))

        if camIp == '':
            self.cam = Camera()
        elif '.' not in camIp:
            self.cam = JpegStreamCamera(
                "http://192.168.1.{ip}:8080/video".format(ip=camIp))
        else:
            self.cam = JpegStreamCamera(
                "http://{ip}:8080/video".format(ip=camIp))

        self.camRes = (800, 600)
        logger.info("Camera resolution={res}".format(res=self.camRes))

    """ setup tracking """

    def initTracking(self):
        self.trackingColor = Color.RED
        self.trackingBlobMin = 10
        self.trackingBlobMax = 5000
        self.x = -1
        self.y = -1
        self.trackingFrameQ = Queue()
        logger.info(
            "Tracking color={color}; blobMin={min}; blobMax={max}".format(
                color=self.trackingColor,
                min=self.trackingBlobMin,
                max=self.trackingBlobMax))

    def initHover(self):
        self.hoverFrameQ = Queue()

    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()

            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin,
                                        self.trackingBlobMax)

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))

                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)

                # draw a circle around the main blob
                roiLayer.circle((self.x, self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()

            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))

    def hoverLoop(self):
        while not self.exit:
            sleep(0.01)

            # fps
            now = datetime.utcnow()
            self.hoverFrameQ.put(now)
            if self.hoverFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.hoverFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))

    def start(self):
        threading.Thread(target=self.visionLoop).start()
        threading.Thread(target=self.hoverLoop).start()
        raw_input("Press any key to stop")
        self.exit = True
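
The core of visionLoop() above, a colour-distance subtraction followed by a blob search, can be exercised on its own; a minimal sketch (the camera URL and blob size limits are placeholders taken loosely from the example):

from SimpleCV import JpegStreamCamera, Color

cam = JpegStreamCamera("http://192.168.1.101:8080/video")  # placeholder URL
img = cam.getImage()
# pixels close to the tracking colour become bright after the subtraction
colorDiff = img - img.colorDistance(Color.RED)
blobs = colorDiff.findBlobs(-1, 10, 5000)  # (threshold, minsize, maxsize)
if blobs is not None:
    print blobs[-1].x, blobs[-1].y  # position of the largest blob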
Example #12
    def track(self):
        print "Press right mouse button to pause or play"
        print "Use left mouse button to select target"
        print "Target color must be different from background"
        print "Target must have width larger than height"
        print "Target can be upside down"

        #Parameters
        isUDPConnection = False  # Currently switched manually in the code
        display = True
        displayDebug = True
        useBasemap = False
        maxRelativeMotionPerFrame = 2  # How far the target can move between two successive frames
        pixelPerRadians = 320
        radius = pixelPerRadians
        referenceImage = '../ObjectTracking/kite_detail.jpg'
        scaleFactor = 0.5
        isVirtualCamera = True
        useHDF5 = False

        # Open reference image: this is used at initialisation
        target_detail = Image(referenceImage)

        # Get RGB color palette of target (was found to work better than using hue)
        pal = target_detail.getPalette(bins=2, hue=False)

        # Open video to analyse or live stream
        #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
        if isVirtualCamera:
            #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
            #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
            #cam = VirtualCamera('output.avi', 'video')
            cam = VirtualCamera(
                '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
                'video')
            virtualCameraFPS = 25
        else:
            cam = JpegStreamCamera(
                'http://192.168.43.1:8080/videofeed')  #640 * 480
            #cam = Camera()

        # Get a sample image to initialize the display at the same size
        img = cam.getImage().scale(scaleFactor)
        print img.width, img.height
        # Create a pygame display
        if display:
            if img.width > img.height:
                disp = Display(
                    (27 * 640 / 10, 25 * 400 / 10)
                )  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
            else:
                disp = Display((810, 1080))
        #js = JpegStreamer()

        # Initialize variables
        previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
        previous_coord_px = (
            0, 0)  # Initialized to top left corner, which always exists
        previous_dCoord = previous_coord_px
        previous_dAngle = previous_angle
        angles = []
        coords_px = []
        coord_px = [0, 0]
        angle = 0
        target_elevations = []
        target_bearings = []
        times = []
        wasTargetFoundInPreviousFrame = False
        i_frame = 0
        isPaused = False
        selectionInProgress = False
        th = [100, 100, 100]
        skycolor = Color.BLUE
        timeLastTarget = 0

        # Prepare recording
        recordFilename = datetime.datetime.utcnow().strftime(
            "%Y%m%d_%Hh%M_") + 'simpleTrack'
        if useHDF5:
            try:
                os.remove(recordFilename + '.hdf5')
            except:
                print('Creating file ' + recordFilename + '.hdf5')
            """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
            h5py._errors.silence_errors()
            recordFile = h5py.File(
                os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'),
                'a')
            hdfSize = 0
            dset = recordFile.create_dataset('kite', (2, 2),
                                             maxshape=(None, 7))
            imset = recordFile.create_dataset('image',
                                              (2, img.width, img.height, 3),
                                              maxshape=(None, img.width,
                                                        img.height, 3))
        else:
            try:
                os.remove(recordFilename + '.csv')
            except:
                print('Creating file ' + recordFilename + '.csv')
            recordFile = file(
                os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
            csv_writer = csv.writer(recordFile)
            csv_writer.writerow([
                'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'
            ])

        # Launch a thread to get UDP message with orientation of the camera
        mobile = mobileState.mobileState()
        if isUDPConnection:
            mobile.open()
        # Loop while not canceled by user
        t0 = time.time()
        previousTime = t0
        while not (display) or disp.isNotDone():
            t = time.time()
            deltaT = (t - previousTime)
            FPS = 1.0 / deltaT
            #print 'FPS =', FPS
            if isVirtualCamera:
                deltaT = 1.0 / virtualCameraFPS
            previousTime = t
            i_frame = i_frame + 1
            timestamp = datetime.datetime.utcnow()

            # Receive orientation of the camera
            if isUDPConnection:
                mobile.computeRPY([2, 0, 1], [-1, 1, 1])
            ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
                    [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

            if useBasemap:
                # Warning this really slows down the computation
                m = Basemap(width=img.width,
                            height=img.height,
                            projection='aeqd',
                            lat_0=sp.rad2deg(mobile.pitch),
                            lon_0=sp.rad2deg(mobile.yaw),
                            rsphere=radius)

            # Get an image from camera
            if not isPaused:
                img = cam.getImage()
                img = img.resize(int(scaleFactor * img.width),
                                 int(scaleFactor * img.height))

            if display:
                # Pause image when right button is pressed
                dwn = disp.rightButtonDownPosition()
                if dwn is not None:
                    isPaused = not (isPaused)
                    dwn = None

            if display:
                # Create a layer to enable user to make a selection of the target
                selectionLayer = DrawingLayer((img.width, img.height))

            if img:
                if display:
                    # Create a new layer to host information retrieved from video
                    layer = DrawingLayer((img.width, img.height))
                    # Selection is a rectangle drawn while holding mouse left button down
                    if disp.leftButtonDown:
                        corner1 = (disp.mouseX, disp.mouseY)
                        selectionInProgress = True
                    if selectionInProgress:
                        corner2 = (disp.mouseX, disp.mouseY)
                        bb = disp.pointsToBoundingBox(
                            corner1,
                            corner2)  # Display the temporary selection
                        if disp.leftButtonUp:  # User has finished his selection
                            selectionInProgress = False
                            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                            if selection != None:
                                # The 3 main colors in the area selected are considered.
                                # Note that the selection should be included in the target and not contain background
                                try:
                                    selection.save('../ObjectTracking/' +
                                                   'kite_detail_tmp.jpg')
                                    img0 = Image(
                                        "kite_detail_tmp.jpg"
                                    )  # For unknown reason I have to reload the image...
                                    pal = img0.getPalette(bins=2, hue=False)
                                except:  # getPalette sometimes raises a LinAlgError (matrix not positive definite)
                                    pal = pal
                                wasTargetFoundInPreviousFrame = False
                                previous_coord_px = (bb[0] + bb[2] / 2,
                                                     bb[1] + bb[3] / 2)
                        if corner1 != corner2:
                            selectionLayer.rectangle((bb[0], bb[1]),
                                                     (bb[2], bb[3]),
                                                     width=5,
                                                     color=Color.YELLOW)

                # If the target was already found, we can save computation time by
                # reducing the Region Of Interest around predicted position
                if wasTargetFoundInPreviousFrame:
                    ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                              max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
                    ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                                         maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                             centered = False)
                    if display:
                        # Draw the rectangle corresponding to the ROI on the complete image
                        layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                                 previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                              (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                               color = Color.GREEN, width = 2)
                else:
                    # Search the whole image if there is no clue where the target is
                    ROITopLeftCorner = (0, 0)
                    ROI = img
                    '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

                    # Find sky color
                sky = (img - img.binarize()).findBlobs(minsize=10000)
                if sky:
                    skycolor = sky[0].meanColor()
                # Option 3
                target_img = ROI - ROI  # Black image

                # Loop through palette of target colors
                if display and displayDebug:
                    decomposition = []
                i_col = 0
                for col in pal:
                    c = tuple([int(col[i]) for i in range(0, 3)])
                    # Search the target based on color
                    ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                    img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                    filter_img = img1.colorDistance(color=c)
                    h = filter_img.histogram(numbins=256)
                    cs = np.cumsum(h)
                    thmax = np.argmin(
                        abs(cs - 0.02 * img.width * img.height)
                    )  # threshold such that ~2% of the pixels match the expected color
                    thmin = np.argmin(
                        abs(cs - 0.005 * img.width * img.height)
                    )  # threshold such that ~0.5% of the pixels match the expected color
                    if thmin == thmax:
                        newth = thmin
                    else:
                        newth = np.argmin(h[thmin:thmax]) + thmin
                    alpha = 0.5
                    th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                    filter_img = filter_img.threshold(
                        max(40, min(200, th[i_col]))).invert()
                    target_img = target_img + filter_img
                    #print th
                    i_col = i_col + 1
                    if display and displayDebug:
                        [R, G, B] = filter_img.splitChannels()
                        white = (R - R).invert()
                        r = R * 1.0 / 255 * c[0]
                        g = G * 1.0 / 255 * c[1]
                        b = B * 1.0 / 255 * c[2]
                        tmp = white.mergeChannels(r, g, b)
                        decomposition.append(tmp)

                # Get a black background with a white target foreground
                target_img = target_img.threshold(150)

                target_img = target_img - ROI.colorDistance(
                    color=skycolor).threshold(80).invert()

                if display and displayDebug:
                    small_ini = target_img.resize(
                        int(img.width / (len(pal) + 1)),
                        int(img.height / (len(pal) + 1)))
                    for tmp in decomposition:
                        small_ini = small_ini.sideBySide(tmp.resize(
                            int(img.width / (len(pal) + 1)),
                            int(img.height / (len(pal) + 1))),
                                                         side='bottom')
                    small_ini = small_ini.adaptiveScale(
                        (int(img.width), int(img.height)))
                    toDisplay = img.sideBySide(small_ini)
                else:
                    toDisplay = img
                    #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

                # Search for binary large objects representing potential target
                target = target_img.findBlobs(minsize=500)

                if target:  # If a target was found

                    if wasTargetFoundInPreviousFrame:
                        predictedTargetPosition = (
                            width * maxRelativeMotionPerFrame / 2,
                            height * maxRelativeMotionPerFrame / 2
                        )  # Target will most likely be close to the center of the ROI
                    else:
                        predictedTargetPosition = previous_coord_px
                        # If there are several targets in the image, take the one closest to the predicted position
                    target = target.sortDistance(predictedTargetPosition)

                    # Get target coordinates according to minimal bounding rectangle or centroid.
                    coordMinRect = ROITopLeftCorner + np.array(
                        (target[0].minRectX(), target[0].minRectY()))
                    coord_px = ROITopLeftCorner + np.array(
                        target[0].centroid())

                    # Rotate the coordinates of roll angle around the middle of the screen
                    rot_coord_px = np.dot(
                        ctm, coord_px -
                        np.array([img.width / 2, img.height / 2])) + np.array(
                            [img.width / 2, img.height / 2])
                    if useBasemap:
                        coord = sp.deg2rad(
                            m(rot_coord_px[0],
                              img.height - rot_coord_px[1],
                              inverse=True))
                    else:
                        coord = localProjection(
                            rot_coord_px[0] - img.width / 2,
                            img.height / 2 - rot_coord_px[1],
                            radius,
                            mobile.yaw,
                            mobile.pitch,
                            inverse=True)
                    target_bearing, target_elevation = coord

                    # Get minimum bounding rectangle for display purpose
                    minR = ROITopLeftCorner + np.array(target[0].minRect())

                    contours = target[0].contour()

                    contours = [
                        ROITopLeftCorner + np.array(contour)
                        for contour in contours
                    ]

                    # Get target features
                    angle = sp.deg2rad(target[0].angle()) + mobile.roll
                    angle = sp.deg2rad(
                        unwrap180(sp.rad2deg(angle),
                                  sp.rad2deg(previous_angle)))
                    width = target[0].width()
                    height = target[0].height()

                    # Check if the kite is upside down
                    # First rotate the kite
                    ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
                        [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
                    rotated_contours = [
                        np.dot(ctm2, contour - coordMinRect)
                        for contour in contours
                    ]
                    y = [-tmp[1] for tmp in rotated_contours]
                    itop = np.argmax(y)  # Then look at the point at the top
                    ibottom = np.argmin(y)  # and the point at the bottom
                    # The most off-centre point is at the bottom
                    if abs(rotated_contours[itop][0]) > abs(
                            rotated_contours[ibottom][0]):
                        isInverted = True
                    else:
                        isInverted = False

                    if isInverted:
                        angle = angle + sp.pi

                        # Filter the data
                    alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
                    if not (isPaused):
                        dCoord = np.array(previous_dCoord) * (
                            1 - alpha) + alpha * (
                                np.array(coord_px) - previous_coord_px
                            )  # related to the speed only if cam is fixed
                        dAngle = np.array(previous_dAngle) * (
                            1 - alpha) + alpha * (np.array(angle) -
                                                  previous_angle)
                    else:
                        dCoord = np.array([0, 0])
                        dAngle = np.array([0])


                    #print coord_px, angle, width, height, dCoord

                    # Record important data
                    times.append(timestamp)
                    coords_px.append(coord_px)
                    angles.append(angle)
                    target_elevations.append(target_elevation)
                    target_bearings.append(target_bearing)

                    # Export data to controller
                    self.elevation = target_elevation
                    self.bearing = target_bearing
                    self.orientation = angle
                    dt = time.time() - timeLastTarget
                    self.ROT = dAngle / dt
                    self.lastUpdateTime = t

                    # Save for initialisation of next step
                    previous_dCoord = dCoord
                    previous_angle = angle
                    previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                    wasTargetFoundInPreviousFrame = True
                    timeLastTarget = time.time()

                else:
                    wasTargetFoundInPreviousFrame = False

                if useHDF5:
                    hdfSize = hdfSize + 1
                    dset.resize((hdfSize, 7))
                    imset.resize((hdfSize, img.width, img.height, 3))
                    dset[hdfSize - 1, :] = [
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ]
                    imset[hdfSize - 1, :, :, :] = img.getNumpy()
                    recordFile.flush()
                else:
                    csv_writer.writerow([
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ])

                if display:
                    if target:
                        # Add target features to layer
                        # Minimal rectange and its center in RED
                        layer.polygon(minR[(0, 1, 3, 2), :],
                                      color=Color.RED,
                                      width=5)
                        layer.circle(
                            (int(coordMinRect[0]), int(coordMinRect[1])),
                            10,
                            filled=True,
                            color=Color.RED)

                        # Target contour and centroid in BLUE
                        layer.circle((int(coord_px[0]), int(coord_px[1])),
                                     10,
                                     filled=True,
                                     color=Color.BLUE)
                        layer.polygon(contours, color=Color.BLUE, width=5)

                        # Speed vector in BLACK
                        layer.line((int(coord_px[0]), int(coord_px[1])),
                                   (int(coord_px[0] + 20 * dCoord[0]),
                                    int(coord_px[1] + 20 * dCoord[1])),
                                   width=3)

                        # Line giving angle
                        layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                    int(coord_px[1] + 200 * sp.sin(angle))),
                                   (int(coord_px[0] - 200 * sp.cos(angle)),
                                    int(coord_px[1] - 200 * sp.sin(angle))),
                                   color=Color.RED)

                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                # Add the layer to the raw image
                    toDisplay.addDrawingLayer(layer)
                    toDisplay.addDrawingLayer(selectionLayer)

                    # Add time metadata
                    toDisplay.drawText(str(i_frame) + " " + str(timestamp),
                                       x=0,
                                       y=0,
                                       fontsize=20)

                    # Add Line giving horizon
                    #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                    # Plot parallels
                    for lat in range(-90, 90, 15):
                        r = range(0, 361, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m(r, [lat] * len(r))
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad(r), \
                                    sp.deg2rad([lat]*len(r)), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                # Plot meridians
                    for lon in range(0, 360, 15):
                        r = range(-90, 91, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m([lon] * len(r), r)
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l= localProjection(sp.deg2rad([lon]*len(r)), \
                                    sp.deg2rad(r), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                # Text giving bearing
                # \todo improve for high roll
                    for bearing_deg in range(0, 360, 30):
                        l = localProjection(sp.deg2rad(bearing_deg),
                                            sp.deg2rad(0),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(
                            str(bearing_deg),
                            (img.width / 2 + int(l[0]), img.height - 20),
                            color=Color.RED)

                # Text giving elevation
                # \todo improve for high roll
                    for elevation_deg in range(-60, 91, 30):
                        l = localProjection(0,
                                            sp.deg2rad(elevation_deg),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(str(elevation_deg),
                                   (img.width / 2, img.height / 2 - int(l[1])),
                                   color=Color.RED)

                    #toDisplay.save(js)
                    toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
        recordFile.close()
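
The per-colour filter at the heart of Example #12 (colorDistance plus a histogram-derived threshold, unioned over the target palette) can be sketched in isolation; this is a simplified, hedged reconstruction that uses a single pixel fraction instead of the example's min/max pair:

import numpy as np

def color_mask(img, palette, fraction=0.02):
    """Union of per-colour masks, keeping roughly `fraction` of the pixels
    closest to each palette colour."""
    mask = img - img  # start from a black image, as in the example
    for col in palette:
        c = tuple(int(col[i]) for i in range(3))
        dist = img.colorDistance(color=c)
        h = dist.histogram(numbins=256)
        cs = np.cumsum(h)
        th = np.argmin(abs(cs - fraction * img.width * img.height))
        mask = mask + dist.threshold(int(th)).invert()
    return mask.threshold(150)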
Example #13

from SimpleCV import JpegStreamCamera
from mpl_toolkits.basemap import Basemap
import scipy as sp
import mobileState  # project-local module (see Example #15's sys.path setup)

def isPixelInImage((x, y), image):
    return (x > 0 and x < image.width and y > 0 and y < image.height)


width = 640
lon_0 = 270
lat_0 = 80
pixelPerRadians = 640
height = 480
radius = pixelPerRadians

max_length = 0

cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  #640 * 480
mobile = mobileState.mobileState()
mobile.open()
while True:
    mobile.checkUpdate()
    if mobile.isToUpdate:
        mobile.computeRPY()
    image = cam.getImage().rotate(-sp.rad2deg(mobile.roll), fixed=False)
    m = Basemap(width=image.width,
                height=image.height,
                projection='aeqd',
                lat_0=sp.rad2deg(mobile.pitch),
                lon_0=sp.rad2deg(mobile.yaw),
                rsphere=radius)
    # fill background.
    #m.drawmapboundary(fill_color='aqua')
Example #14
from SimpleCV import Display, JpegStreamCamera, Camera, Color
cam = JpegStreamCamera('http://192.168.1.50:8080/videofeed')
#cam = Camera(0)
disp = Display( (1000,1000))
colorToDisplay = Color.RED
while True:

  img = cam.getImage()
  img.drawRectangle(0, 0, 999, 999, color = colorToDisplay, width=0, alpha=255)
  img.save(disp)
  red_dist = img.colorDistance(color=Color.RED)
  col = red_dist.getPixel(0,0)
  #red_dist.save(disp)
  summation = col[0]+col[1]+col[2]
  print summation
  if  summation < 400:
    colorToDisplay = Color.BLUE
  else:
    colorToDisplay = Color.RED
Example #15
# -*- coding: utf8 -*-
from SimpleCV import Camera, Image, VirtualCamera, Display, DrawingLayer, Color, JpegStreamCamera
import time
import os
import sys
import threading
sys.path.append(os.getcwd())
sys.path.append('../../Sensors')
import mobileState
# Open video to analyse or live stream
#cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
#cam = VirtualCamera('/media/bat/DATA/images/2013/05/18/00169.MTS','video')

cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  #640 * 480
#cam = Camera(0)
img = cam.getImage()
disp = Display((img.width * 2, img.height * 2))
t0 = time.time()
previousTime = t0
FPSList = []
mobile = mobileState.mobileState()
#a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
#a.start()
i = 0
while time.time() - t0 < 10:
    mobile.computeRPY()
    i = i + 1
    t = time.time()
    img = cam.getImage()
    print img.getPixel(0, 0)
    FPS = 1 / (t - previousTime)
    FPSList.append(FPS)
    previousTime = t
Example #16
class Hover:
    """Uses vision tracking and PID-control to hover at a given altitude

    1. Run camera capture in its own thread; update altitude
    2. Run hover loop in its own thread; output altitude
    """

    def __init__(self):
        logger.info("{name} init()".format(name=self.__class__.__name__))
        self.settings = shelve.open('/tmp/hover.settings')
        self.initControl()
        self.initCamera()
        self.initTracking()
        self.initHover()
        self.initCF()
        self.settings.close()

    def initControl(self):
        """Setup control-flow variables"""
        self.exit = False

    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG.  Uses shelve to remember last entered
        value
        """
        
        # get settings
        lastCamUri = self.settings.get('camUri')
        lastCamRotate = self.settings.get('camRotate')

        # prompt for camUri, camRotate
        if lastCamUri:
            _input = raw_input("Specify camera: 'cam' for webcam, " +
                              "an ip of network camera or <ENTER> for the " +
                              "previous value of '{lastCamUri}':\n"
                              .format(lastCamUri=lastCamUri))
            if not _input:
                self.camUri = lastCamUri
            else:
                self.camUri = _input
        else:
            _input = raw_input("Specify camera: 'cam'/<ENTER> for webcam or " +
                                "an ip of network camera:\n")
            if not _input:
                self.camUri = 'cam'
            else:
                self.camUri = _input
        
        logger.info("CamUri = '{camUri}'".format(camUri=self.camUri))
        
        if lastCamRotate:
            _input = raw_input("Specify camera rotation or <ENTER> for the " +
                               "previous value of '{lastCamRotate}':\n"
                               .format(lastCamRotate=lastCamRotate))
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = lastCamRotate
        else:
            _input = raw_input("Specify camera rotation or <ENTER> for " +
                               "no rotation':\n")
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = 0

        logger.info("CamRotate = '{camRotate}'"\
                    .format(camRotate=self.camRotate))
        
        # save settings
        self.settings['camUri'] = self.camUri
        self.settings['camRotate'] = self.camRotate

        # setup camera
        if self.camUri == "cam":
            self.cam = Camera()
        elif '.' not in self.camUri:
            self.cam = JpegStreamCamera("http://192.168.1.{ip}:8080/video"
                                        .format(ip=self.camUri))
        else:
            self.cam = JpegStreamCamera("http://{ip}:8080/video"
                                        .format(ip=self.camUri))

        # additional values -- for now, just hard coded
        self.camRes = (800,600)
        logger.info("Camera resolution={res}".format(res=self.camRes))

    def initTracking(self):
        """Setup tracking variables
        
        Will prompt the user for tracking variables.  Uses shelve to remember
        last entered value
        """

        # get last values
        lastTrackingColor = self.settings.get('trackingColor')

        # prompt for override
        if lastTrackingColor:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for previous value " +
                               "{lastTrackingColor}:\n"
                               .format(lastTrackingColor=lastTrackingColor))
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = lastTrackingColor
        else:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for default of BLUE:\n")
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = Color.BLUE

        # save settings
        self.settings['trackingColor'] = self.trackingColor

        # additional values
        self.trackingBlobMin = 5
        self.trackingBlobMax = 1000
        self.x = -1
        self.y = -1
        self.target = (300,400)
        logger.info(("Tracking color={color}; min={min}; max={max}; " +
                     "target={target}")
                    .format(color=self.trackingColor,
                            min=self.trackingBlobMin,
                            max=self.trackingBlobMax,
                            target=self.target))
        self.trackingFrameQ = Queue()

    def initHover(self):
        self.hoverFrames = []
        self.eSum = 0

    def initCF(self):
        self.cfConnected = False
        self.cf = None
        self.cfUri = None

        logger.debug("Initializing Drivers; Debug=FALSE")
        InitDrivers(enable_debug_driver=False)

        logger.info("Scanning")
        available = Scan()

        logger.info("Found:")
        for cf in available:
            logger.debug(cf[0])
        
        if len(available) > 0:
            self.cfUri = available[0][0]
            self.cf = Crazyflie()
            self.cf.connected.add_callback(self.cfOnConnected)
            self.cf.disconnected.add_callback(self.cfOnDisconnected)
            self.cf.connection_failed.add_callback(self.cfOnFailed)
            self.cf.connection_lost.add_callback(self.cfOnLost)
            logger.info("Crazyflie @{uri} Initialized".format(uri=self.cfUri)) 
        else:
            logger.info("No Crazyflies found")
    
    def cfOnConnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = True

    def cfOnDisconnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = False

    def cfOnFailed(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))

    def cfOnLost(self, uri, msg):
        logger.info("{func} {uri} {msg}".format(func=inspect.stack()[0][3], 
                                                uri=uri, msg=msg))

    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()
            
            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            if self.camRotate != 0:
                img = img.rotate(self.camRotate)
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin, 
                                        self.trackingBlobMax)
            '''
            blobs = img.findBlobs((255,60,60), self.trackingBlobMin,
                                  self.trackingBlobMax)
            '''

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))
                
                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)
                
                # draw a circle around the main blob
                roiLayer.circle((self.x,self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()
            
            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0/(now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}"
                         .format(func=inspect.stack()[0][3],
                                 x=self.x, y=self.y, fps=fps))
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def hoverLoop(self):
        while not self.exit:
            # hover throttled to camera frame rate
            sleep(1.0/30.0)

            # fps
            now = datetime.utcnow()
            self.hoverFrames.append(now)
            if len(self.hoverFrames) < 30:
                fps = 0.0
            else:
                fps = 30.0/(now - self.hoverFrames[-30]).total_seconds()
            
            # pid variables
            kp = (3000.0/400.0)
            ki = 0.18

            # determine immediate error e
            e = self.y - self.target[1]
            
            # determine accumulated errors eSum
            if len(self.hoverFrames) > 1:
                dt = (now - self.hoverFrames[-2]).total_seconds()
            else:
                dt = 0
            self.eSum = self.eSum + e * dt

            # calculate thrust
            hoverThrust = 37000
            thrust = hoverThrust + (kp * e) + (ki * self.eSum)

            # set the throttle
            self.setThrust(thrust)

            # logging
            logger.debug(("{func} e={e}, dt={dt:0.4f}, eSum={eSum:0.4f}, " +
                          "thrust={thrust},  [{fps:5.2f}]")
                         .format(func=inspect.stack()[0][3],
                                 e=e, dt=dt, eSum=self.eSum, thrust=thrust,
                                 fps=fps)) 
        
        
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def setThrust(self, thrust):
        """ sets thrust - but if control has exited, will kill thrust """
        if self.exit:
            thrust = 0
        
        if self.cf is not None:
            self.cf.commander.send_setpoint(0,0,0, thrust)

    def cfOpenLink(self):
        if self.cf is not None:
            self.cf.open_link(self.cfUri)
            logger.info("Linked opened to {uri}".format(uri=self.cfUri))

    def start(self):
        logger.info("Starting VisionLoop")
        threading.Thread(target=self.visionLoop).start()

        logger.info("Starting HoverLoop")
        threading.Thread(target=self.hoverLoop).start()

        logger.info("Opening Crazyflie link")
        self.cfOpenLink()

        raw_input("Press enter to stop")
        self.stop()
        
    def stop(self):
        # kill our loops
        self.exit = True

        # explicitly kill thrust
        self.setThrust(0)

        # kill crazyflie
        if self.cf and self.cfConnected:
            self.cf.close_link()
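
The thrust computation in hoverLoop() above is a straightforward PI controller; pulled out of the class, the update step looks roughly like this (the gains, the error convention and the hover thrust are taken from the example, the function itself is a sketch):

def pi_thrust(y, target_y, e_sum, dt, kp=3000.0 / 400.0, ki=0.18,
              hover_thrust=37000):
    """One PI update: returns (thrust, accumulated error)."""
    e = y - target_y        # vertical pixel error from the set-point
    e_sum = e_sum + e * dt  # integrate the error over time
    thrust = hover_thrust + kp * e + ki * e_sum
    return thrust, e_sum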
Example #17
from SimpleCV import Image, JpegStreamCamera 
import time

#cam = JpegStreamCamera('http://192.168.1.102:8080/videofeed')
cam = JpegStreamCamera('http://172.17.200.241:8080/videofeed')
img = cam.getImage().flipHorizontal().flipVertical()
#img = Image('/home/jinyuan/Downloads/handmagician/finger.jpg')
ts = []
bb = (300,400,20,20) # get Bounding Box from some method

while True:
    img1 = cam.getImage().flipHorizontal().flipVertical()
    ts = img1.track(method="camshift", ts=ts, img=img, bb=bb)
    #ts = img1.track(method="camshift", ts=ts, img=img, bb=bb)
    ts = img1.track(method="mftrack", ts=ts, img=img, bb=bb)
    ts.drawBB()
    img1.show()
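
The bounding box bb is hard-coded in Example #17; a hedged sketch of deriving an initial bb from a blob search instead (the minimum blob size is a placeholder):

# assumes `cam` from Example #17; take the largest blob as the initial target
first = cam.getImage().flipHorizontal().flipVertical()
blobs = first.findBlobs(minsize=200)
if blobs is not None:
    b = blobs[-1]  # blobs are sorted by area, so the last one is the largest
    bb = (b.minX(), b.minY(), b.width(), b.height())  # (x, y, w, h)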
Example #18
#!/usr/bin/python
# coding=utf-8
# vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}'

from SimpleCV import JpegStreamCamera, Display, Image
cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed')
disp = Display()
while disp.isNotDone():
    img = cam.getImage()
    img.save(disp)
Example #19
class Hover:
    """Uses vision tracking and PID-control to hover at a given altitude

    1. Run camera capture in its own thread; update altitude
    2. Run hover loop in its own thread; output altitude
    """
    def __init__(self):
        logger.info("{name} init()".format(name=self.__class__.__name__))
        self.settings = shelve.open('/tmp/hover.settings')
        self.initControl()
        self.initCamera()
        self.initTracking()
        self.initHover()
        self.initCF()
        self.settings.close()

    def initControl(self):
        """Setup control-flow variables"""
        self.exit = False

    def initCamera(self):
        """Setup camera variables

        Will prompt the user for feedback.  Capable of loading webcam or an
        ip-camera streaming JPEG.  Uses shelve to remember last entered
        value
        """

        # get settings
        lastCamUri = self.settings.get('camUri')
        lastCamRotate = self.settings.get('camRotate')

        # prompt for camUri, camRotate
        if lastCamUri:
            _input = raw_input("Specify camera: 'cam' for webcam, " +
                               "an ip of network camera or <ENTER> for the " +
                               "previous value of '{lastCamUri}':\n".format(
                                   lastCamUri=lastCamUri))
            if not _input:
                self.camUri = lastCamUri
            else:
                self.camUri = _input
        else:
            _input = raw_input("Specify camera: 'cam'/<ENTER> for webcam or " +
                               "an ip of network camera:\n")
            if not _input:
                self.camUri = 'cam'
            else:
                self.camUri = _input

        logger.info("CamUri = '{camUri}'".format(camUri=self.camUri))

        if lastCamRotate:
            _input = raw_input("Specify camera rotation or <ENTER> for the " +
                               "previous value of '{lastCamRotate}':\n".format(
                                   lastCamRotate=lastCamRotate))
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = lastCamRotate
        else:
            _input = raw_input("Specify camera rotation or <ENTER> for " +
                               "no rotation':\n")
            if _input:
                self.camRotate = int(_input)
            else:
                self.camRotate = 0

        logger.info("CamRotate = '{camRotate}'"\
                    .format(camRotate=self.camRotate))

        # save settings
        self.settings['camUri'] = self.camUri
        self.settings['camRotate'] = self.camRotate

        # setup camera
        if self.camUri == "cam":
            self.cam = Camera()
        elif '.' not in self.camUri:
            self.cam = JpegStreamCamera(
                "http://192.168.1.{ip}:8080/video".format(ip=self.camUri))
        else:
            self.cam = JpegStreamCamera(
                "http://{ip}:8080/video".format(ip=self.camUri))

        # additional values -- for now, just hard coded
        self.camRes = (800, 600)
        logger.info("Camera resolution={res}".format(res=self.camRes))

    def initTracking(self):
        """Setup tracking variables
        
        Will prompt the user for tracking variables.  Uses shelve to remember
        last entered value
        """

        # get last values
        lastTrackingColor = self.settings.get('trackingColor')

        # prompt for override
        if lastTrackingColor:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for previous value " +
                               "{lastTrackingColor}:\n".format(
                                   lastTrackingColor=lastTrackingColor))
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = lastTrackingColor
        else:
            _input = raw_input("Specify tracking color as (R,G,B)" +
                               "or <ENTER> for default of BLUE:\n")
            if _input:
                self.trackingColor = make_tuple(_input)
            else:
                self.trackingColor = Color.BLUE

        # save settings
        self.settings['trackingColor'] = self.trackingColor

        # additional values
        self.trackingBlobMin = 5
        self.trackingBlobMax = 1000
        self.x = -1
        self.y = -1
        self.target = (300, 400)
        logger.info(("Tracking color={color}; min={min}; max={max}; " +
                     "target={target}").format(color=self.trackingColor,
                                               min=self.trackingBlobMin,
                                               max=self.trackingBlobMax,
                                               target=self.target))
        self.trackingFrameQ = Queue()

    def initHover(self):
        self.hoverFrames = []
        self.eSum = 0

    def initCF(self):
        self.cfConnected = False
        self.cf = None
        self.cfUri = None

        logger.debug("Initializing Drivers; Debug=FALSE")
        InitDrivers(enable_debug_driver=False)

        logger.info("Scanning")
        available = Scan()

        logger.info("Found:")
        for cf in available:
            logger.debug(cf[0])

        if len(available) > 0:
            self.cfUri = available[0][0]
            self.cf = Crazyflie()
            self.cf.connected.add_callback(self.cfOnConnected)
            self.cf.disconnected.add_callback(self.cfOnDisconnected)
            self.cf.connection_failed.add_callback(self.cfOnFailed)
            self.cf.connection_lost.add_callback(self.cfOnLost)
            logger.info("Crazyflie @{uri} Initialized".format(uri=self.cfUri))
        else:
            logger.info("No Crazyflies found")

    def cfOnConnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = True

    def cfOnDisconnected(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))
        self.cfConnected = False

    def cfOnFailed(self, uri):
        logger.info("{func} {uri}".format(func=inspect.stack()[0][3], uri=uri))

    def cfOnLost(self, uri, msg):
        logger.info("{func} {uri} {msg}".format(func=inspect.stack()[0][3],
                                                uri=uri,
                                                msg=msg))

    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()

            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            if self.camRotate != 0:
                img = img.rotate(self.camRotate)
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin,
                                        self.trackingBlobMax)
            '''
            blobs = img.findBlobs((255,60,60), self.trackingBlobMin,
                                  self.trackingBlobMax)
            '''

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))

                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)

                # draw a circle around the main blob
                roiLayer.circle((self.x, self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()

            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def hoverLoop(self):
        while not self.exit:
            # hover throttled to camera frame rate
            sleep(1.0 / 30.0)

            # fps
            now = datetime.utcnow()
            self.hoverFrames.append(now)
            if len(self.hoverFrames) < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.hoverFrames[-30]).total_seconds()

            # pid variables
            kp = (3000.0 / 400.0)
            ki = 0.18

            # determine immediate error e
            e = self.y - self.target[1]

            # determine accumulated errors eSum
            if len(self.hoverFrames) > 1:
                dt = (now - self.hoverFrames[-2]).total_seconds()
            else:
                dt = 0
            self.eSum = self.eSum + e * dt

            # calculate thrust
            hoverThrust = 37000
            thrust = hoverThrust + (kp * e) + (ki * self.eSum)

            # set the throttle
            self.setThrust(thrust)

            # logging
            logger.debug(("{func} e={e}, dt={dt:0.4f}, eSum={eSum:0.4f}, " +
                          "thrust={thrust},  [{fps:5.2f}]").format(
                              func=inspect.stack()[0][3],
                              e=e,
                              dt=dt,
                              eSum=self.eSum,
                              thrust=thrust,
                              fps=fps))

        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))

    def setThrust(self, thrust):
        """ sets thrust - but if control has exited, will kill thrust """
        if self.exit:
            thrust = 0

        if self.cf is not None:
            self.cf.commander.send_setpoint(0, 0, 0, thrust)

    def cfOpenLink(self):
        if self.cf is not None:
            self.cf.open_link(self.cfUri)
            logger.info("Linked opened to {uri}".format(uri=self.cfUri))

    def start(self):
        logger.info("Starting VisionLoop")
        threading.Thread(target=self.visionLoop).start()

        logger.info("Starting HoverLoop")
        threading.Thread(target=self.hoverLoop).start()

        logger.info("Opening Crazyflie link")
        self.cfOpenLink()

        raw_input("Press enter to stop")
        self.stop()

    def stop(self):
        # kill our loops
        self.exit = True

        # explicitly kill thrust
        self.setThrust(0)

        # kill crazyflie
        if self.cf and self.cfConnected:
            self.cf.close_link()
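
The Hover class above splits the work between a vision thread (blob tracking that updates self.y) and a hover thread that turns the vertical pixel error into a thrust setpoint. That hover loop is essentially a PI controller; the following is a minimal, self-contained sketch of one update step, reusing the kp, ki and hoverThrust values from hoverLoop above (everything else, including the function name pi_thrust, is illustrative rather than part of the original code):

def pi_thrust(y, target_y, e_sum, dt,
              kp=3000.0 / 400.0, ki=0.18, hover_thrust=37000):
    """One PI update mirroring Hover.hoverLoop: a proportional term on the
    pixel error plus an integral term on the accumulated error."""
    e = y - target_y              # vertical error in pixels
    e_sum = e_sum + e * dt        # accumulate error over elapsed time
    thrust = hover_thrust + (kp * e) + (ki * e_sum)
    return thrust, e_sum

# hypothetical usage: blob sits 40 px below the target, one frame at 30 fps
thrust, e_sum = pi_thrust(y=440, target_y=400, e_sum=0.0, dt=1.0 / 30.0)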
Example #20
0
import os
import sys
sys.path.append(os.getcwd())
sys.path.append('../Sensors')

# imports inferred from the calls used below (sp.rad2deg, Basemap, JpegStreamCamera)
import scipy as sp
from mpl_toolkits.basemap import Basemap
from SimpleCV import JpegStreamCamera

import mobileState

def isPixelInImage((x, y), image):
    return (x > 0 and x < image.width and y > 0 and y < image.height)

width = 640; lon_0 = 270; lat_0 = 80
pixelPerRadians = 640
height = 480
radius = pixelPerRadians

max_length = 0

cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  # 640 * 480
mobile = mobileState.mobileState()
while True:
    mobile.checkUpdate()
    if mobile.isToUpdate:
        mobile.computeRPY()
    image = cam.getImage().rotate(-sp.rad2deg(mobile.roll), fixed=False)
    m = Basemap(width=image.width, height=image.height, projection='aeqd',
                lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw),
                rsphere=radius)
    # fill background.
    #m.drawmapboundary(fill_color='aqua')
    # draw coasts and fill continents.
    #m.drawcoastlines(linewidth=0.5)
    #m.fillcontinents(color='coral',lake_color='aqua')
    # 20 degree graticule.
    # m.drawparallels(np.arange(-90,90,30))
from SimpleCV import JpegStreamCamera
from SimpleCV import Color, Display, RunningSegmentation, Circle
import os, time

cam = JpegStreamCamera("https://192.168.1.102:8080/videofeed")
frame = cam.getImage()
display = Display((frame.width, frame.height))

diff_seg = RunningSegmentation()
default_area = 1000

while display.isNotDone(): 
    original = cam.getImage().flipHorizontal()
    diff_seg.addImage(original)
    diff_image = diff_seg.getSegmentedImage(False).invert()
    active = original - diff_image
    
    if active:
        hue = active.hueDistance(Color.RED).binarize(50)
        blobs = hue.findBlobs()
        if blobs:
            original.dl().polygon(blobs[-1].mConvexHull, color=Color.RED, width=3)
    original.save(display)
    time.sleep(0.1)
    
    
Example #22
0
def carMov(flag, faceX, faceY, update='', movSet=0.5):
    ''' Takes 5 arguments:
        flag: 'u','r','d','l','s','t','web'
        faceX, faceY: x,y coordinates of the detected face
        update: URL used to update the database; defaults to an empty string
        movSet: motor step value, defaults to 0.5; for the car it is used as
                the run time in seconds

        Returns:
        the update string with the GET parameters appended
        '''
    # check which Bluetooth button was pressed ---start---

    if flag == 'u' or flag == 'r':  # up or right button pressed
        if flag == 'u':
            GPIO.output(17, False)
            GPIO.output(18, True)  # current direction 18->17, 23->22
            GPIO.output(22, False)
            GPIO.output(23, True)
            #movSet=1;
            update += 'move=up'
            print 'Moving forward..... ' + update
        elif flag == 'r':
            GPIO.output(17, False)
            GPIO.output(18, True)
            GPIO.output(22, False)
            GPIO.output(23, False)
            #movSet=1;
            update += 'move=right'
            print 'Turning right..... ' + update
    elif flag == 'd' or flag == 'l':  # down or left button pressed
        if flag == 'd':
            GPIO.output(17, True)
            GPIO.output(18, False)
            GPIO.output(22, True)
            GPIO.output(23, False)
            update += 'move=down'
            print 'Moving backward..... ' + update
        elif flag == 'l':
            GPIO.output(17, False)
            GPIO.output(18, False)
            GPIO.output(22, False)
            GPIO.output(23, True)
            update += 'move=left'
            print 'Turning left..... ' + update
    elif flag == 's':
        GPIO.output(17, False)
        GPIO.output(18, False)
        GPIO.output(22, False)
        GPIO.output(23, False)
        update += 'move=stop'
        print 'Stopping..... ' + update

    elif flag == 't':  #trace face
        cam = JpegStreamCamera("http://114.32.209.33:1234/?action=stream")
        #cam = JpegStreamCamera("http://172.20.10.13:8080/?action=stream")
        while True:
            img = cam.getImage()
            faces = img.findHaarFeatures('face.xml')
            if faces:
                bigFaces = faces.sortArea()[-1]
                bigFaces.draw()  #draw green line
                img.save(js)  # js: a JpegStreamer assumed to be created elsewhere in the original script
                time.sleep(0.01)
                location = bigFaces.coordinates()
                print "Face detected....x position:", location[0]
                print "Face detected....y position:", location[1]
                faceX = location[0]
                faceY = location[1]
                x = location[0] - 160
                y = location[1] - 120
                print "Offset from image center (160,120) in x:", x
                print "Offset from image center (160,120) in y:", y
                break  # a face was found, so leave the while loop
            else:
                print "No face detected"
            # check which Bluetooth button was pressed ---end---
        update += 'move=trace&X=' + str(faceX) + "&Y=" + str(faceY)

    time.sleep(0.1)
    print 'Request URL: ' + update + '&WEB=0'
    return update
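
A short, hedged usage sketch of carMov follows. It assumes the GPIO pins (17/18/22/23) have been configured as outputs and that the js JpegStreamer used in the 't' branch exists, as elsewhere in the original script; the base URL below is purely illustrative:

# assumed setup elsewhere: RPi.GPIO configured for pins 17/18/22/23,
# and js = JpegStreamer(...) for the 't' (face-trace) branch
base = 'http://example.local/update.php?'   # hypothetical update endpoint
query = carMov('u', 0, 0, update=base)      # drive forward; returns base + 'move=up'
time.sleep(0.5)
carMov('s', 0, 0, update=base)              # stop the motors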
Example #25
0
def motorMov(flag, argX, argY, update='', movSet=0.5):
    ''' Takes 5 arguments:
            flag: 'u','r','d','l','web'
            argX, argY: x,y coordinates stored in the database
            update: URL used to update the database; defaults to an empty string
            movSet: motor step value, defaults to 0.5

        Returns:
            the update string with the GET parameters appended
    '''
    # check which Bluetooth button was pressed ---start---
    if flag == 'u' or flag == 'r':  # up or right button pressed
        if flag == 'u':
            argY += movSet
            update += 'move=up'
        elif flag == 'r':
            argX += movSet
            update += 'move=right'

    elif flag == 'd' or flag == 'l':  # down or left button pressed
        if flag == 'd':
            argY += -movSet
            update += 'move=down'
        elif flag == 'l':
            argX += -movSet
            update += 'move=left'
    # check which Bluetooth button was pressed ---end---

    elif flag == 'web':  # if Bluetooth is idle, react to the Web controls
        global isYMov
        # check the Web buttons and drive the motors ---start---
        if isYMov:
            dutyResultY = round(transDuty(argY), 2)  # round to 2 decimal places
            print 'Web Y-axis dutyCycle: ' + str(dutyResultY)
            pwm_motorY.ChangeDutyCycle(dutyResultY)  # drive the Y-axis motor
        else:
            dutyResultX = round(transDuty(argX), 2)
            print 'Web X-axis dutyCycle: ' + str(dutyResultX)
            pwm_motorX.ChangeDutyCycle(dutyResultX)  # drive the X-axis motor

        update += 'move=web'
        print 'Request URL: ' + update
        time.sleep(0.1)
        return update
        # check the Web buttons and drive the motors ---end---

    elif flag == 'trace':
        cam = JpegStreamCamera("http://172.20.10.4:8080/?action=stream")
        while True:
            img = cam.getImage()
            eyes = img.findHaarFeatures('right_eye.xml')
            if eyes:
                bigEyes = eyes.sortArea()[-1]
                location = bigEyes.coordinates()
                print "偵測到臉部....x位置為:", location[0]
                print "偵測到臉部....y位置為:", location[1]
                x = location[0] - 160
                y = location[1] - 120
                print "距離x座標中心(160,120):", x
                print "距離y座標中心(160,120):", y

                movX = round(transDuty(argX), 2)
                movY = round(transDuty(argY), 2)

                if y >= 60:
                    movY += 1
                elif y <= -60:
                    movY -= 1
                elif 60 > y >= 35:
                    movY += 0.5
                elif -60 < y <= -35:
                    movY -= 0.5

                if movY > 11.5:
                    movY = 11.5
                elif movY < 3:
                    movY = 3

                if x >= 80:
                    movX -= 1
                elif x <= -80:
                    movX += 1
                elif 80 > x >= 50:
                    movX -= 0.5
                elif -80 < x <= -50:
                    movX += 0.5

                if movX > 11.5:
                    movX = 11.5
                elif movX < 3:
                    movX = 3

                backX = round((7.25 - movX) / 17 * 40, 2)
                backY = round((7.25 - movY) / 17 * 40, 2)
                break

        pwm_motorX.start(movX)
        pwm_motorY.start(movY)
        update += 'move=trace&X=' + str(backX) + '&Y=' + str(backY)
        time.sleep(0.1)
        print '執行的網址: ' + update
        return update

    # resultY is assumed to be a global holding the previous Y value, defined elsewhere in the original script
    if (argY - resultY) != 0:
        dutyResultY = round(transDuty(argY), 2)  # convert the Y coordinate to a duty cycle
        print 'BT Y-axis dutyCycle: ' + str(dutyResultY)
        pwm_motorY.ChangeDutyCycle(dutyResultY)
    else:
        dutyResultX = round(transDuty(argX), 2)  # convert the X coordinate to a duty cycle
        print 'BT X-axis dutyCycle: ' + str(dutyResultX)
        pwm_motorX.ChangeDutyCycle(dutyResultX)
    time.sleep(0.1)

    print 'Request URL: ' + update
    return update
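
Similarly, a hedged usage sketch for motorMov, assuming pwm_motorX / pwm_motorY have been started, transDuty() is defined and resultY holds the previous Y position, as elsewhere in the original script; the update URL is illustrative only:

# assumed setup elsewhere: pwm_motorX / pwm_motorY started, transDuty() defined,
# resultY tracking the previous Y coordinate
base = 'http://example.local/update.php?'    # hypothetical update endpoint
url = motorMov('u', 0.0, 0.0, update=base)   # step the Y axis up by movSet (0.5)
url = motorMov('l', 0.5, 0.5, update=base)   # step the X axis left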