Example #1
    def __init__(self, pitchnum, stdout, sourcefile, resetPitchSize, resetThresholds, displayBlur, normalizeAtStartup, noDribbling):
               
        self.running = True
        self.connected = False
   
        self.stdout = stdout 

        if sourcefile is None:  
            self.cap = Camera()
        else:
            filetype = 'video'
            if sourcefile.endswith(('jpg', 'png')):
                filetype = 'image'

            self.cap = VirtualCamera(sourcefile, filetype)
        
        calibrationPath = os.path.join('calibration', 'pitch{0}'.format(pitchnum))
        self.cap.loadCalibration(os.path.join(sys.path[0], calibrationPath))

        self.preprocessor = Preprocessor(pitchnum, resetPitchSize)
        if self.preprocessor.hasPitchSize:
            self.gui = Gui(self.preprocessor.pitch_size)
        else:
            self.gui = Gui()
        self.threshold = Threshold(pitchnum, resetThresholds, displayBlur, normalizeAtStartup)
        self.thresholdGui = ThresholdGui(self.threshold, self.gui)
        self.features = Features(self.gui, self.threshold)
        self.filter = Filter(noDribbling)
        
        eventHandler = self.gui.getEventHandler()
        eventHandler.addListener('q', self.quit)

        while self.running:
            try:
                if not self.stdout:
                    self.connect()
                else:
                    self.connected = True

                if self.preprocessor.hasPitchSize:
                    self.outputPitchSize()
                    self.gui.setShowMouse(False)
                else:
                    eventHandler.setClickListener(self.setNextPitchCorner)

                while self.running:
                    self.doStuff()

            except socket.error:
                self.connected = False
                # If the rest of the system is not up yet/gets quit,
                # just wait for it to become available.
                time.sleep(1)

                # Strange things seem to happen to X sometimes if the
                # display isn't updated for a while
                self.doStuff()

        if not self.stdout:
            self.socket.close()
Example #2
def main():
    parser = argparse.ArgumentParser(description='Run background subtraction.')

    parser.add_argument('--prefix', help='Data directory prefix', required=True)
    parser.add_argument('--bg', default='bg.mp4', help='Background video name')
    parser.add_argument('--fg', default='fg.mp4', help='Foreground video name')

    args = parser.parse_args()

    src_dir = args.prefix

    bgvid = src_dir + "/" + args.bg
    fgvid = src_dir + "/" + args.fg

    fg_raw_dir  = src_dir + "/raw"
    fg_mask_dir = src_dir + "/mask"
    bg_raw_dir  = fg_raw_dir  + "_bg"
    bg_mask_dir = fg_mask_dir + "_bg"

    bg_sub = BackgroundSubtractor()

    for curr_dir in (fg_raw_dir, fg_mask_dir, bg_raw_dir, bg_mask_dir):
        if not os.path.exists(curr_dir):
            os.makedirs(curr_dir)

    bgcam = VirtualCamera(bgvid, "video")
    # Process background frames
    print "Processing background"
    i = 0
    while True:
        img = bgcam.getImage()
        if img.getBitmap() == '':
            print "Could not get image"
            break
        img.save(bg_raw_dir + "/frame_{}.png".format(i))
        print "Processing frame", i
        mask = bg_sub.update(img)
        mask.save(bg_mask_dir + "/frame_{}.png".format(i))
        print "Processed frame", i
        i += 1

    vidcam = VirtualCamera(fgvid, "video")
    i = 0
    while True:
        img = vidcam.getImage()
        if img.getBitmap() == '':
            print "Could not get frame {}".format(i)
            break
        img.save(fg_raw_dir + "/frame_{}.png".format(i))
        print "Processing frame", i
        mask = bg_sub.get_mask(img)
        mask.save(fg_mask_dir + "/frame_{}.png".format(i))
        print "Processed frame", i
        i += 1
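
Both loops above detect the end of the stream by checking for an empty bitmap. That idiom factors out into a small generator; a minimal sketch (the frames() helper is hypothetical, assuming the VirtualCamera.getImage() behaviour shown above):

def frames(cam):
    # Yield images until the camera runs dry (empty bitmap = end of stream)
    while True:
        img = cam.getImage()
        if img is None or img.getBitmap() == '':
            return
        yield img

# Possible usage, mirroring the foreground loop above:
# for i, img in enumerate(frames(VirtualCamera(fgvid, "video"))):
#     img.save(fg_raw_dir + "/frame_{}.png".format(i))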
Example #3
def frame_by_frame(config, vid_path):
    vid = VirtualCamera(vid_path, 'video')
    with open(config) as c:
        conf = json.load(c)
    fields = conf.get('inputs')
    img = vid.getImage()
    # An empty bitmap is the reliable end-of-video test here; SimpleCV keeps
    # returning Image objects after the stream ends (compare Example #2)
    while img and img.getBitmap() != '':
        for f in fields:
            img.drawRectangle(f['x'], f['y'], f['h'], f['w'], color=Color.RED)
        img.show()
        raw_input()
        img = vid.getImage()
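
frame_by_frame expects a JSON file whose 'inputs' entry is a list of rectangles keyed by 'x', 'y', 'h' and 'w'. A hypothetical config matching those keys (values invented for illustration; note the code passes f['h'] before f['w'] to drawRectangle):

{
    "inputs": [
        {"x": 100, "y": 50, "h": 40, "w": 80},
        {"x": 300, "y": 200, "h": 40, "w": 80}
    ]
}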
Example #4
    def __init__(self, camera=None):
        """Initialize the finger tracker

        :param TrackerIn ti: Tracker input
        :param camera: Camera index, filename, or None
        """

        if camera is None:
            self.cam = Camera(0)
        elif type(camera) == int:
            self.cam = Camera(camera)
        else:
            self.cam = VirtualCamera(camera, "video")
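
A usage sketch for the three constructor forms (class name per Example #28; the file name is hypothetical):

tracker = FingerTracker()            # default device, Camera(0)
tracker = FingerTracker(1)           # explicit device index, Camera(1)
tracker = FingerTracker("clip.mp4")  # replay a file via VirtualCamera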
Example #5
class Video:
    def __init__(self, path):
        self.capture = VirtualCamera(path, "video")
        self.im = None

    def step(self, stepsize=1, scale=0.50):
        # Skip stepsize-1 frames, then keep a scaled copy of the next one
        for i in range(stepsize - 1):
            self.capture.getImage()
        self.im = self.capture.getImage().copy().scale(scale)
        return self.get_image()

    def show(self):
        # imshow must come before show(), otherwise a blank figure appears
        plt.figure()
        plt.imshow(self.im, cmap="gray")
        plt.show()

    def get_image(self):
        return self.im

    def save(self):
        pass
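
A possible driver for the class above (path hypothetical); step(n) discards n-1 frames, then keeps and returns the n-th frame scaled down:

v = Video("clip.mp4")       # hypothetical path
frame = v.step(stepsize=5)  # advance five frames, keep the fifth at half size
v.show()                    # render the kept frame with matplotlib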
Example #6
def main_method():

    vid = VirtualCamera('../20141112-071614.mpeg', 'video')
    v = VirtualCamera('../20141112-071614.mpeg', 'video')
    field_1 = get_empty(v, fieldDim1[0], fieldDim1[1], fieldDim1[2], fieldDim1[3], 25)
    field_1.show()
    raw_input()

    background = vid.getImage()
    # field_1 = background
    # field_1 = crop_image(field_1, fieldDim1[0], fieldDim1[1], fieldDim1[2], fieldDim1[3])

    field_2 = background
    field_2 = crop_image(field_2, fieldDim2[0], fieldDim2[1], fieldDim2[2], fieldDim2[3])
    field_2.show()
    # background = crop_image(background, 0, 0, 50, 50)
    #field_1.show()
    #time.sleep(100)
    numOfCars = 0
    carPass = False
    while True:
        #time.sleep(1)
        current = vid.getImage()
        fld_1 = current
        fld_1 = crop_image(fld_1, fieldDim1[0], fieldDim1[1], fieldDim1[2], fieldDim1[3])
        #fld_1.show()
        #field_1.show()
        diff = fld_1 - field_1
        #diff.show()
        matrix = diff.getNumpy()
        mean = matrix.mean()
        print mean
        threshold = 10.0
        if mean >= threshold:
            if not carPass:
                carPass = True
                numOfCars = numOfCars + 1
                print numOfCars
                current.drawRectangle(fieldDim1[0], fieldDim1[1], fieldDim1[2], fieldDim1[3], color=Color.RED)
        else:
            carPass = False
        current.show()
Example #7
    def _extract(self, src, maxFrames, verbose):
        # get the full dataset and append it to the data vector dictionary.
        self.data = {"r": [], "g": [], "b": [], "i": [], "h": []}
        if isinstance(src, ImageSet):
            src = VirtualCamera(src, st="imageset")  # this could cause a bug
        elif isinstance(src, (VirtualCamera, Camera)):
            count = 0
            for i in range(0, maxFrames):
                img = src.getImage()
                count = count + 1
                if verbose:
                    print "Got Frame {0}".format(count)
                if isinstance(src, Camera):
                    time.sleep(0.05)  # let the camera sleep
                if img is None:
                    break
                else:
                    self._getDataFromImg(img)

        else:
            raise Exception("Not a valid training source")
            return None
    def _extract(self, src, maxFrames, verbose):
        # get the full dataset and append it to the data vector dictionary.
        self.data = {'r': [], 'g': [], 'b': [], 'i': [], 'h': []}
        if isinstance(src, ImageSet):
            src = VirtualCamera(src, st='imageset')  # this could cause a bug
        elif isinstance(src, (VirtualCamera, Camera)):
            count = 0
            for i in range(0, maxFrames):
                img = src.getImage()
                count = count + 1
                if verbose:
                    print("Got Frame {0}".format(count))
                if isinstance(src, Camera):
                    time.sleep(0.05)  # let the camera sleep
                if img is None:
                    break
                else:
                    self._getDataFromImg(img)

        else:
            raise Exception('Not a valid training source')
Example #9
 def __init__(self, name='default', crop=None, **cinfo):
     self.name = name
     self.crop = crop
     if 'virtual' in cinfo:
         cam = VirtualCamera(cinfo['source'], cinfo['virtual'])
     elif 'scanner' in cinfo:
         cinfo.pop('scanner')
         id = cinfo.pop('id')
         cam = Scanner(id, cinfo)
     elif 'directory' in cinfo:
         cam = DirectoryCamera(cinfo['directory'])
     elif 'kinect' in cinfo:
         cam = Kinect()
         cam._usedepth = 0
         cam._usematrix = 0
         if cinfo["kinect"] == "depth":
             cam._usedepth = 1
         elif cinfo["kinect"] == "matrix":
             cam._usematrix = 1
     else:
         cam = ScvCamera(cinfo['id'], prop_set=cinfo)
     self._scv_cam = cam
Example #10
	#return map1, map2

# do the unwarping 
def unwarp(img,xmap,ymap):
    output = cv2.remap(img.getNumpyCv2(),xmap,ymap,cv2.INTER_LINEAR)
    result = Image(output, cv2image=True)
    return result


#disp = Display((800,600))
disp = Display((1296,972))
vals = []
last = (0,0)
# Load the video from the rpi
vc = VirtualCamera("/var/www/html/vid_files/video.h264","video")
# Sometimes there is crud at the beginning, buffer it out
for i in range(0,10):
    img = vc.getImage()
    img.save(disp)
# Show the user a frame and let them left-click the center
# of the "donut", then the right inner and outer edges,
# in that order. Press Esc to exit the display
while not disp.isDone():
    test = disp.leftButtonDownPosition()
    if test != last and test is not None:
        last = test
        vals.append(test)

# 0 = xc yc
# 1 = r1
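
The truncated comments hint at the layout of vals: click 0 is the donut center, click 1 the inner edge, click 2 the outer edge. A continuation sketch, assuming exactly three clicks were recorded (the same decoding appears, commented out, in Example #13):

Cx, Cy = vals[0]        # donut center
R1 = vals[1][0] - Cx    # inner radius
R2 = vals[2][0] - Cx    # outer radius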
Example #11
#!/usr/bin/env python

# /media/ajay/5056EE7956EE5EEA/Users/Ajay/Desktop/gopro/DCIM/100GOPRO/repaired

from __future__ import print_function
from SimpleCV import Image, Color, VirtualCamera, Display

video = VirtualCamera(
    '/media/ajay/5056EE7956EE5EEA/Users/Ajay/Desktop/gopro/DCIM/100GOPRO/repaired/GOPR0429.MP4',
    'video')
display = Display()

while display.isNotDone():
    img = video.getImage()
    try:
        dist = img - img.colorDistance(Color.BLUE)
        dist.show()
    except KeyboardInterrupt:
        display.done = True
    if display.mouseRight:
        display.done = True
display.quit()
Example #12
from SimpleCV import Display, Camera, Image, DrawingLayer, VirtualCamera

disp = Display((600, 800))
#cam = Camera()
cam = VirtualCamera(
    '../../Recording/Videos/kiteTest from Zenith Wind Power-fNMO3kAX0PE.mp4',
    'video')
isPaused = False
updateSelection = False
while disp.isNotDone():
    if not isPaused:
        img_flip = cam.getImage().flipHorizontal()
        img = img_flip.edges(150, 100).dilate()
    if disp.rightButtonDown:
        isPaused = not isPaused
    selectionLayer = DrawingLayer((img.width, img.height))
    if disp.leftButtonDown:
        corner1 = (disp.mouseX, disp.mouseY)
        updateSelection = True
    if updateSelection:
        corner2 = (disp.mouseX, disp.mouseY)
        bb = disp.pointsToBoundingBox(corner1, corner2)
        if disp.leftButtonUp:
            updateSelection = False
        if corner1 != corner2:
            selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]))
    img.addDrawingLayer(selectionLayer)
    img.save(disp)
    img.removeDrawingLayer(0)
Example #13
    def _Convert(self):
        # selectfile = self.listView.DirModel.fileName(self.listView.currentIndex())
        selectfile = self.tree.DirModel.filePath(self.tree.currentIndex())
        sfilename = self.tree.DirModel.fileName(self.tree.currentIndex())
        strfn = sfilename[0:13]
        print selectfile + " convert"
        #os.system('sh /home/pi/panoBox/dewarp.sh')
        disp = Display((800,480))
        vals = []
        last = (0,0)
        # Load the video from the rpi
        vc = VirtualCamera(str(selectfile),"video")
        #vc = picamera.PiCamera()
        # Sometimes there is crud at the beginning, buffer it out
        for i in range(0,10):
            img = vc.getImage()
        #    img = vc.capture()
            img.save(disp)
        
        """
        cnt = 0
        while not disp.isDone():
            test = disp.leftButtonDownPosition()
            if( test != last and test is not None):
                last= test
                vals.append(test)
                cnt += 1
                if cnt == 3:
                    break
        """
        ###############################################
        #480
        Cx = 260
        Cy = 195
        R1x = 320
        R1y = 195
        R2x = 380
        R2y = 195
        """
        #1200
        Cx = 645
        Cy = 490
        R1x = 787
        R1y = 490
        R2x = 937
        R2y = 490
        """
        ##############################################
        """
        Cx = vals[0][0]
        Cy = vals[0][1]
        R1x = vals[1][0]
        R1y = vals[1][1]
        R2x = vals[2][0]
        R2y = vals[2][1]
        print Cx
        print Cy 
        print R1x 
        print R1y 
        print R2x 
        print R2y
        """
        ##############################################
        R1 = R1x-Cx
        R2 = R2x-Cx
        Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
        Hd = (R2-R1)
        Ws = img.width
        Hs = img.height
        # build the pixel map, this could be sped up
        print "BUILDING MAP!"
        xmap,ymap = buildMap(Ws,Hs,Wd,Hd,R1,R2,Cx,Cy)
        print "MAP DONE!"
        # do an unwarping and show it to us
        result = unwarp(img,xmap,ymap)
        result.save(disp)
        # I used these params for converting the raw frames to video
        # avconv -f image2 -r 30 -v:b 1024K -i samples/lapinsnipermin/%03d.jpeg output.mpeg
        i = 0
        while img is not None:
            print img.width, img.height
            result = unwarp(img,xmap,ymap)

            result.save(disp)
            # Save to file
            fname = "/home/pi/box1/panoImageFiles/FRAME{num:05d}.png".format(num=i)
            result.save(fname)
            #vs.writeFrame(derp)
            # get the next frame
            img = vc.getImage()
            i = i + 1
        disp.quit()
        ff = "sudo avconv -r 12 -i /home/pi/box1/panoImageFiles/FRAME%05d.png -vf 'scale=trunc(iw/2)*2:trunc(ih/2)*2 , transpose=1, transpose=1' -c:v libx264 /home/pi/box1/storage/panoVideoFiles/"+str(strfn)+".mp4&&sudo rm /home/pi/box1/panoImageFiles/*.png"
        os.system(ff)
Example #14
from SimpleCV import VirtualCamera
# Load an existing video into the virtual camera
vir = VirtualCamera("mov.mp4", "video")
#vir.getImage().show()
vir.live()
Example #15
from SimpleCV import VirtualCamera, Display

cam = VirtualCamera("/home/seg/Desktop/Hackers.mkv", "video")

disp = Display(cam.getImage().size())

while disp.isNotDone():
    img = cam.getImage()

    faces = img.findHaarFeatures('face')

    if faces is not None:
        faces = faces.sortArea()
        bigFace = faces[-1]

        bigFace.draw()
    img.save(disp)
Example #16
from SimpleCV import Display, Image, VirtualCamera
import os, sys

if len(sys.argv) == 1:
    print("Give (at least one) file name(s) of the image(s) to process")
    sys.exit()

disp = Display((1280,1024))
virtual_cam = VirtualCamera(sys.argv[1], "image")
img = virtual_cam.getImage()
img.save(disp)

points = []
last_pt = (0, 0)

# Show the first image; the user has to left-click the center of the donut,
# followed by the right inner and outer edge (in that order). Press "Esc" to exit.
# All the images are processed with the same parameters
while not disp.isDone():
    temp_pt = disp.leftButtonDownPosition()
    if temp_pt != last_pt and temp_pt is not None:
        last_pt = temp_pt
        points.append(temp_pt)

# Center of the donut 
Cx = points[0][0]
Cy = points[0][1]
# Inner donut radius
R1x = points[1][0]
R1y = points[1][1]
R1 = R1x-Cx
Example #17
from SimpleCV import JpegStreamer, Display, VirtualCamera, VideoStream
import time
import numpy as np

cam = VirtualCamera(
    '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv',
    'video')
port = 8000 + np.random.randint(100)
print port
js = JpegStreamer(hostandport=port, st=0.1)
vs = VideoStream("out.avi", fps=15)
endReached = False
t0 = time.time()
FPS = 25
while not endReached:
    t = time.time() - t0
    toDisplay = cam.getFrame(np.ceil(t * FPS))
    if toDisplay.size() != (0, 0):
        toDisplay.save(js)
    else:
        endReached = True
    time.sleep(0.05)
Example #18
#
# Python Tutorial Session 10
# (c) Oliver Dressler, 2014
#
# Quantifying cell ordering
#
#-------------------------------------------------

from SimpleCV import VirtualCamera
from matplotlib import pyplot as plt
import time

PATH = '60ul_center.avi'

# We will load the video using the VirtualCamera from simplecv
video = VirtualCamera(PATH, 'video')
# video.getImage().show() ; time.sleep(10)

# Calculate average of first 100 frames
def average_image(video, num_frames = 100):
	avg = video.getImage()
	for i in range(num_frames - 1):
		# Pairwise averaging: an exponentially weighted average
		# that favours recent frames, not a true mean
		avg = (avg + video.getImage()) / 2
	return avg

avg = average_image(video)
# avg.show() ; time.sleep(10)


# Open video again to be able to analyze full video
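
As noted in the comment above, pairwise averaging gives each new frame weight 1/2, so early frames decay geometrically. A sketch of an unbiased mean over the same frames, accumulating in a float numpy array (assumes the video yields at least num_frames frames):

import numpy as np
from SimpleCV import Image

def true_average(video, num_frames=100):
	# Accumulate in float to avoid uint8 overflow, divide once at the end
	acc = video.getImage().getNumpy().astype(np.float64)
	for i in range(num_frames - 1):
		acc += video.getImage().getNumpy()
	return Image(np.uint8(acc / num_frames))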
Example #19
    def track(self):
        print "Press right mouse button to pause or play"
        print "Use left mouse button to select target"
        print "Target color must be different from background"
        print "Target must have width larger than height"
        print "Target can be upside down"

        #Parameters
        isUDPConnection = False  # Currently switched manually in the code
        display = True
        displayDebug = True
        useBasemap = False
        maxRelativeMotionPerFrame = 2  # How far the target can move between two successive frames
        pixelPerRadians = 320
        radius = pixelPerRadians
        referenceImage = '../ObjectTracking/kite_detail.jpg'
        scaleFactor = 0.5
        isVirtualCamera = True
        useHDF5 = False

        # Open reference image: this is used at initialisation
        target_detail = Image(referenceImage)

        # Get RGB color palette of target (was found to work better than using hue)
        pal = target_detail.getPalette(bins=2, hue=False)

        # Open video to analyse or live stream
        #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
        if isVirtualCamera:
            #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
            #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
            #cam = VirtualCamera('output.avi', 'video')
            cam = VirtualCamera(
                '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
                'video')
            virtualCameraFPS = 25
        else:
            cam = JpegStreamCamera(
                'http://192.168.43.1:8080/videofeed')  #640 * 480
            #cam = Camera()

        # Get a sample image to initialize the display at the same size
        img = cam.getImage().scale(scaleFactor)
        print img.width, img.height
        # Create a pygame display
        if display:
            if img.width > img.height:
                disp = Display(
                    (27 * 640 / 10, 25 * 400 / 10)
                )  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
            else:
                disp = Display((810, 1080))
        #js = JpegStreamer()

        # Initialize variables
        previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
        previous_coord_px = (
            0, 0)  # Initialized to top left corner, which always exists
        previous_dCoord = previous_coord_px
        previous_dAngle = previous_angle
        angles = []
        coords_px = []
        coord_px = [0, 0]
        angle = 0
        target_elevations = []
        target_bearings = []
        times = []
        wasTargetFoundInPreviousFrame = False
        i_frame = 0
        isPaused = False
        selectionInProgress = False
        th = [100, 100, 100]
        skycolor = Color.BLUE
        timeLastTarget = 0

        # Prepare recording
        recordFilename = datetime.datetime.utcnow().strftime(
            "%Y%m%d_%Hh%M_") + 'simpleTrack'
        if useHDF5:
            try:
                os.remove(recordFilename + '.hdf5')
            except:
                print('Creating file ' + recordFilename + '.hdf5')
            """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
            h5py._errors.silence_errors()
            recordFile = h5py.File(
                os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'),
                'a')
            hdfSize = 0
            dset = recordFile.create_dataset('kite', (2, 2),
                                             maxshape=(None, 7))
            imset = recordFile.create_dataset('image',
                                              (2, img.width, img.height, 3),
                                              maxshape=(None, img.width,
                                                        img.height, 3))
        else:
            try:
                os.remove(recordFilename + '.csv')
            except:
                print('Creating file ' + recordFilename + '.csv')
            recordFile = file(
                os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
            csv_writer = csv.writer(recordFile)
            csv_writer.writerow([
                'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'
            ])

        # Launch a thread to get UDP message with orientation of the camera
        mobile = mobileState.mobileState()
        if isUDPConnection:
            mobile.open()
        # Loop while not canceled by user
        t0 = time.time()
        previousTime = t0
        while not display or disp.isNotDone():
            t = time.time()
            deltaT = (t - previousTime)
            FPS = 1.0 / deltaT
            #print 'FPS =', FPS
            if isVirtualCamera:
                deltaT = 1.0 / virtualCameraFPS
            previousTime = t
            i_frame = i_frame + 1
            timestamp = datetime.datetime.utcnow()

            # Receive orientation of the camera
            if isUDPConnection:
                mobile.computeRPY([2, 0, 1], [-1, 1, 1])
            ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
                    [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

            if useBasemap:
                # Warning this really slows down the computation
                m = Basemap(width=img.width,
                            height=img.height,
                            projection='aeqd',
                            lat_0=sp.rad2deg(mobile.pitch),
                            lon_0=sp.rad2deg(mobile.yaw),
                            rsphere=radius)

            # Get an image from camera
            if not isPaused:
                img = cam.getImage()
                img = img.resize(int(scaleFactor * img.width),
                                 int(scaleFactor * img.height))

            if display:
                # Pause image when right button is pressed
                dwn = disp.rightButtonDownPosition()
                if dwn is not None:
                    isPaused = not isPaused
                    dwn = None

            if display:
                # Create a layer to enable user to make a selection of the target
                selectionLayer = DrawingLayer((img.width, img.height))

            if img:
                if display:
                    # Create a new layer to host information retrieved from video
                    layer = DrawingLayer((img.width, img.height))
                    # Selection is a rectangle drawn while holding mouse left button down
                    if disp.leftButtonDown:
                        corner1 = (disp.mouseX, disp.mouseY)
                        selectionInProgress = True
                    if selectionInProgress:
                        corner2 = (disp.mouseX, disp.mouseY)
                        bb = disp.pointsToBoundingBox(
                            corner1,
                            corner2)  # Display the temporary selection
                        if disp.leftButtonUp:  # User has finished his selection
                            selectionInProgress = False
                            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                            if selection is not None:
                                # The 3 main colors in the area selected are considered.
                                # Note that the selection should be included in the target and not contain background
                                try:
                                    selection.save('../ObjectTracking/' +
                                                   'kite_detail_tmp.jpg')
                                    img0 = Image(
                                        "kite_detail_tmp.jpg"
                                    )  # For unknown reason I have to reload the image...
                                    pal = img0.getPalette(bins=2, hue=False)
                                except:  # getPalette is sometimes bugging and raising LinalgError because matrix not positive definite
                                    pal = pal
                                wasTargetFoundInPreviousFrame = False
                                previous_coord_px = (bb[0] + bb[2] / 2,
                                                     bb[1] + bb[3] / 2)
                        if corner1 != corner2:
                            selectionLayer.rectangle((bb[0], bb[1]),
                                                     (bb[2], bb[3]),
                                                     width=5,
                                                     color=Color.YELLOW)

                # If the target was already found, we can save computation time by
                # reducing the Region Of Interest around predicted position
                if wasTargetFoundInPreviousFrame:
                    ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                              max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
                    ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                                         maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                             centered = False)
                    if display:
                        # Draw the rectangle corresponding to the ROI on the complete image
                        layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                                 previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                              (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                               color = Color.GREEN, width = 2)
                else:
                    # Search on the whole image if no clue of where is the target
                    ROITopLeftCorner = (0, 0)
                    ROI = img
                    '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

                # Find sky color
                sky = (img - img.binarize()).findBlobs(minsize=10000)
                if sky:
                    skycolor = sky[0].meanColor()
                # Option 3
                target_img = ROI - ROI  # Black image

                # Loop through palette of target colors
                if display and displayDebug:
                    decomposition = []
                i_col = 0
                for col in pal:
                    c = tuple([int(col[i]) for i in range(0, 3)])
                    # Search the target based on color
                    ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                    img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                    filter_img = img1.colorDistance(color=c)
                    h = filter_img.histogram(numbins=256)
                    cs = np.cumsum(h)
                    thmax = np.argmin(
                        abs(cs - 0.02 * img.width * img.height)
                    )  # find the threshold that keeps 2% of the pixels in the expected color
                    thmin = np.argmin(
                        abs(cs - 0.005 * img.width * img.height)
                    )  # find the threshold that keeps 0.5% of the pixels in the expected color
                    if thmin == thmax:
                        newth = thmin
                    else:
                        newth = np.argmin(h[thmin:thmax]) + thmin
                    alpha = 0.5
                    th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                    filter_img = filter_img.threshold(
                        max(40, min(200, th[i_col]))).invert()
                    target_img = target_img + filter_img
                    #print th
                    i_col = i_col + 1
                    if display and displayDebug:
                        [R, G, B] = filter_img.splitChannels()
                        white = (R - R).invert()
                        r = R * 1.0 / 255 * c[0]
                        g = G * 1.0 / 255 * c[1]
                        b = B * 1.0 / 255 * c[2]
                        tmp = white.mergeChannels(r, g, b)
                        decomposition.append(tmp)

                # Get a black background with white target foreground
                target_img = target_img.threshold(150)

                target_img = target_img - ROI.colorDistance(
                    color=skycolor).threshold(80).invert()

                if display and displayDebug:
                    small_ini = target_img.resize(
                        int(img.width / (len(pal) + 1)),
                        int(img.height / (len(pal) + 1)))
                    for tmp in decomposition:
                        small_ini = small_ini.sideBySide(tmp.resize(
                            int(img.width / (len(pal) + 1)),
                            int(img.height / (len(pal) + 1))),
                                                         side='bottom')
                    small_ini = small_ini.adaptiveScale(
                        (int(img.width), int(img.height)))
                    toDisplay = img.sideBySide(small_ini)
                else:
                    toDisplay = img
                    #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

                # Search for binary large objects representing potential target
                target = target_img.findBlobs(minsize=500)

                if target:  # If a target was found

                    if wasTargetFoundInPreviousFrame:
                        predictedTargetPosition = (
                            width * maxRelativeMotionPerFrame / 2,
                            height * maxRelativeMotionPerFrame / 2
                        )  # Target will most likely be close to the center of the ROI
                    else:
                        predictedTargetPosition = previous_coord_px
                    # If there are several targets in the image, take the one closest to the predicted position
                    target = target.sortDistance(predictedTargetPosition)

                    # Get target coordinates according to minimal bounding rectangle or centroid.
                    coordMinRect = ROITopLeftCorner + np.array(
                        (target[0].minRectX(), target[0].minRectY()))
                    coord_px = ROITopLeftCorner + np.array(
                        target[0].centroid())

                    # Rotate the coordinates of roll angle around the middle of the screen
                    rot_coord_px = np.dot(
                        ctm, coord_px -
                        np.array([img.width / 2, img.height / 2])) + np.array(
                            [img.width / 2, img.height / 2])
                    if useBasemap:
                        coord = sp.deg2rad(
                            m(rot_coord_px[0],
                              img.height - rot_coord_px[1],
                              inverse=True))
                    else:
                        coord = localProjection(
                            rot_coord_px[0] - img.width / 2,
                            img.height / 2 - rot_coord_px[1],
                            radius,
                            mobile.yaw,
                            mobile.pitch,
                            inverse=True)
                    target_bearing, target_elevation = coord

                    # Get minimum bounding rectangle for display purpose
                    minR = ROITopLeftCorner + np.array(target[0].minRect())

                    contours = target[0].contour()

                    contours = [
                        ROITopLeftCorner + np.array(contour)
                        for contour in contours
                    ]

                    # Get target features
                    angle = sp.deg2rad(target[0].angle()) + mobile.roll
                    angle = sp.deg2rad(
                        unwrap180(sp.rad2deg(angle),
                                  sp.rad2deg(previous_angle)))
                    width = target[0].width()
                    height = target[0].height()

                    # Check if the kite is upside down
                    # First rotate the kite
                    ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
                        [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
                    rotated_contours = [
                        np.dot(ctm2, contour - coordMinRect)
                        for contour in contours
                    ]
                    y = [-tmp[1] for tmp in rotated_contours]
                    itop = np.argmax(y)  # then look at the point at the top
                    ibottom = np.argmin(y)  # and the point at the bottom
                    # The most off-center point is at the bottom
                    if abs(rotated_contours[itop][0]) > abs(
                            rotated_contours[ibottom][0]):
                        isInverted = True
                    else:
                        isInverted = False

                    if isInverted:
                        angle = angle + sp.pi

                    # Filter the data
                    alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
                    if not isPaused:
                        dCoord = np.array(previous_dCoord) * (
                            1 - alpha) + alpha * (
                                np.array(coord_px) - previous_coord_px
                            )  # related to the speed only if cam is fixed
                        dAngle = np.array(previous_dAngle) * (
                            1 - alpha) + alpha * (np.array(angle) -
                                                  previous_angle)
                    else:
                        dCoord = np.array([0, 0])
                        dAngle = np.array([0])


                    #print coord_px, angle, width, height, dCoord

                    # Record important data
                    times.append(timestamp)
                    coords_px.append(coord_px)
                    angles.append(angle)
                    target_elevations.append(target_elevation)
                    target_bearings.append(target_bearing)

                    # Export data to controller
                    self.elevation = target_elevation
                    self.bearing = target_bearing
                    self.orientation = angle
                    dt = time.time() - timeLastTarget
                    self.ROT = dAngle / dt
                    self.lastUpdateTime = t

                    # Save for initialisation of next step
                    previous_dCoord = dCoord
                    previous_angle = angle
                    previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                    wasTargetFoundInPreviousFrame = True
                    timeLastTarget = time.time()

                else:
                    wasTargetFoundInPreviousFrame = False

                if useHDF5:
                    hdfSize = hdfSize + 1
                    dset.resize((hdfSize, 7))
                    imset.resize((hdfSize, img.width, img.height, 3))
                    dset[hdfSize - 1, :] = [
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ]
                    imset[hdfSize - 1, :, :, :] = img.getNumpy()
                    recordFile.flush()
                else:
                    csv_writer.writerow([
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ])

                if display:
                    if target:
                        # Add target features to layer
                        # Minimal rectangle and its center in RED
                        layer.polygon(minR[(0, 1, 3, 2), :],
                                      color=Color.RED,
                                      width=5)
                        layer.circle(
                            (int(coordMinRect[0]), int(coordMinRect[1])),
                            10,
                            filled=True,
                            color=Color.RED)

                        # Target contour and centroid in BLUE
                        layer.circle((int(coord_px[0]), int(coord_px[1])),
                                     10,
                                     filled=True,
                                     color=Color.BLUE)
                        layer.polygon(contours, color=Color.BLUE, width=5)

                        # Speed vector in BLACK
                        layer.line((int(coord_px[0]), int(coord_px[1])),
                                   (int(coord_px[0] + 20 * dCoord[0]),
                                    int(coord_px[1] + 20 * dCoord[1])),
                                   width=3)

                        # Line giving angle
                        layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                    int(coord_px[1] + 200 * sp.sin(angle))),
                                   (int(coord_px[0] - 200 * sp.cos(angle)),
                                    int(coord_px[1] - 200 * sp.sin(angle))),
                                   color=Color.RED)

                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                    # Add the layer to the raw image
                    toDisplay.addDrawingLayer(layer)
                    toDisplay.addDrawingLayer(selectionLayer)

                    # Add time metadata
                    toDisplay.drawText(str(i_frame) + " " + str(timestamp),
                                       x=0,
                                       y=0,
                                       fontsize=20)

                    # Add Line giving horizon
                    #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                    # Plot parallels
                    for lat in range(-90, 90, 15):
                        r = range(0, 361, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m(r, [lat] * len(r))
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad(r), \
                                    sp.deg2rad([lat]*len(r)), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Plot meridians
                    for lon in range(0, 360, 15):
                        r = range(-90, 91, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m([lon] * len(r), r)
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l= localProjection(sp.deg2rad([lon]*len(r)), \
                                    sp.deg2rad(r), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Text giving bearing
                    # \todo improve for high roll
                    for bearing_deg in range(0, 360, 30):
                        l = localProjection(sp.deg2rad(bearing_deg),
                                            sp.deg2rad(0),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(
                            str(bearing_deg),
                            (img.width / 2 + int(l[0]), img.height - 20),
                            color=Color.RED)

                    # Text giving elevation
                    # \todo improve for high roll
                    for elevation_deg in range(-60, 91, 30):
                        l = localProjection(0,
                                            sp.deg2rad(elevation_deg),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(str(elevation_deg),
                                   (img.width / 2, img.height / 2 - int(l[1])),
                                   color=Color.RED)

                    #toDisplay.save(js)
                    toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
        recordFile.close()
Example #20
class Vision:
    
    def __init__(self, pitchnum, stdout, sourcefile, resetPitchSize):
               
        self.running = True
        self.connected = False
        
        self.stdout = stdout 

        if sourcefile is None:  
            self.cap = Camera()
        else:
            filetype = 'video'
            if sourcefile.endswith(('jpg', 'png')):
                filetype = 'image'

            self.cap = VirtualCamera(sourcefile, filetype)
        
        calibrationPath = os.path.join('calibration', 'pitch{0}'.format(pitchnum))
        self.cap.loadCalibration(os.path.join(sys.path[0], calibrationPath))

        self.gui = Gui()
        self.threshold = Threshold(pitchnum)
        self.thresholdGui = ThresholdGui(self.threshold, self.gui)
        self.preprocessor = Preprocessor(resetPitchSize)
        self.features = Features(self.gui, self.threshold)
        
        eventHandler = self.gui.getEventHandler()
        eventHandler.addListener('q', self.quit)

        while self.running:
            try:
                if not self.stdout:
                    self.connect()
                else:
                    self.connected = True

                if self.preprocessor.hasPitchSize:
                    self.outputPitchSize()
                    self.gui.setShowMouse(False)
                else:
                    eventHandler.setClickListener(self.setNextPitchCorner)

                while self.running:
                    self.doStuff()

            except socket.error:
                self.connected = False
                # If the rest of the system is not up yet/gets quit,
                # just wait for it to become available.
                time.sleep(1)

                # Strange things seem to happen to X sometimes if the
                # display isn't updated for a while
                self.doStuff()

        if not self.stdout:
            self.socket.close()
        
    def connect(self):
        print("Attempting to connect...")
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect( (HOST, PORT) )
        self.connected = True

    def quit(self):
        self.running = False
        
    def doStuff(self):
        # getCameraMatrix is a method; the original test compared the bound
        # method object (never None), so the undistorted path always ran
        if self.cap.getCameraMatrix() is None:
            frame = self.cap.getImage()
        else:
            frame = self.cap.getImageUndistort()

        frame = self.preprocessor.preprocess(frame)
        
        self.gui.updateLayer('raw', frame)

        ents = self.features.extractFeatures(frame)
        self.outputEnts(ents)

        self.gui.loop()

    def setNextPitchCorner(self, where):
        self.preprocessor.setNextPitchCorner(where)
        
        if self.preprocessor.hasPitchSize:
            print("Pitch size: {0!r}".format(self.preprocessor.pitch_size))
            self.outputPitchSize()
            self.gui.setShowMouse(False)
            self.gui.updateLayer('corner', None)
        else:
            self.gui.drawCrosshair(where, 'corner')
    
    def outputPitchSize(self):
        print(self.preprocessor.pitch_size)
        self.send('{0} {1} {2} \n'.format(
                PITCH_SIZE_BIT, self.preprocessor.pitch_size[0], self.preprocessor.pitch_size[1]))

    def outputEnts(self, ents):

        # Messyyy
        if not self.connected or not self.preprocessor.hasPitchSize:
            return

        self.send("{0} ".format(ENTITY_BIT))

        for name in ['yellow', 'blue', 'ball']:
            entity = ents[name]
            x, y = entity.coordinates()

            # The rest of the system needs (0, 0) at the bottom left
            if y != -1:
                y = self.preprocessor.pitch_size[1] - y

            if name == 'ball':
                self.send('{0} {1} '.format(x, y))
            else:
                angle = 360 - (((entity.angle() * (180/math.pi)) - 360) % 360)
                self.send('{0} {1} {2} '.format(x, y, angle))

        self.send(str(int(time.time() * 1000)) + " \n")
        
    def send(self, string):
        if self.stdout:
            sys.stdout.write(string)
        else:
            self.socket.send(string)
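
The angle conversion in outputEnts packs radians-to-degrees, a direction flip, and wrapping into one expression. A standalone check of the same formula (helper name hypothetical):

import math

def system_angle(rad):
    # Same expression as outputEnts: degrees, flipped direction, wrapped
    return 360 - (((rad * (180 / math.pi)) - 360) % 360)

print system_angle(math.pi / 2)  # prints 270.0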
Example #21
 def __init__(self, name='default', crop=None, **cinfo):
     self.name = name
     self.localtz = cinfo.get('timezone', 'UTC')
     self.crop = crop
     if 'virtual' in cinfo:
         cam = VirtualCamera(cinfo['source'], cinfo['virtual'])
     elif 'scanner' in cinfo:
         cinfo.pop('scanner')
         id = cinfo.pop('id')
         cam = Scanner(id, cinfo)
     elif 'directory' in cinfo:
         cam = DirectoryCamera(cinfo['directory'])
     elif 'avt' in cinfo:
         from SimpleCV import AVTCamera
         cam = AVTCamera(cinfo['id'], cinfo)
     elif 'kinect' in cinfo:
         cam = Kinect()
         cam._usedepth = 0
         cam._usematrix = 0
         if cinfo["kinect"] == "depth":
             cam._usedepth = 1
         elif cinfo["kinect"] == "matrix":
             cam._usematrix = 1
     elif 'jpegstream' in cinfo:
         cam = JpegStreamCamera(cinfo['jpegstream'])
     else:
         cam = ScvCamera(cinfo['id'], prop_set=cinfo)
     self._scv_cam = cam
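
The branches above dispatch on which key is present in cinfo. Hypothetical mappings for each camera type (keys taken from the code, values invented):

# virtual:    {'virtual': 'video', 'source': 'clip.mp4'}
# scanner:    {'scanner': True, 'id': 0}
# directory:  {'directory': '/data/frames'}
# avt:        {'avt': True, 'id': 0}
# kinect:     {'kinect': 'depth'}   # or 'matrix'
# jpegstream: {'jpegstream': 'http://host:8080/videofeed'}
# fallback:   {'id': 0}             # plain ScvCamera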
Example #22
def open_vid(vid_path):
    vid = VirtualCamera(vid_path, 'video')
    empty = vid.getImage()
    return vid, empty
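
A possible caller, assuming the first frame serves as an empty-scene reference to difference against (path hypothetical):

vid, empty = open_vid('traffic.mp4')
current = vid.getImage()
diff = current - empty  # pixels that changed relative to the reference frame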
Example #23
        map_x[y] = Cx + rMap[y] * sinMap
        map_y[y] = Cy + rMap[y] * cosMap
        
    return map_x, map_y
# do the unwarping 
def unwarp(img,xmap,ymap):
    output = cv2.remap(img.getNumpyCv2(),xmap,ymap,cv2.INTER_LINEAR)
    result = Image(output,cv2image=True)
    return result


disp = Display((800,600))
vals = []
last = (0,0)
# Load the video from the rpi
vc = VirtualCamera("video.h264","video")
# Sometimes there is crud at the beginning, buffer it out
for i in range(0,10):
    img = vc.getImage()
    img.save(disp)
# Show the user a frame and let them left-click the center
# of the "donut", then the right inner and outer edges,
# in that order. Press Esc to exit the display
while not disp.isDone():
    test = disp.leftButtonDownPosition()
    if test != last and test is not None:
        last = test
        vals.append(test)

# 0 = xc yc
# 1 = r1
Example #24
from SimpleCV import ColorSegmentation, Image, Camera, VirtualCamera, Display, Color

# Open reference video
cam = VirtualCamera(
    '../../Recording/Videos/kiteFlying from Zenith Wind Power-jn9RrUCiWKM.mp4',
    'video')
# Select reference image
img = cam.getFrame(50)
modelImage = img.crop(255, 180, 70, 20)
modelImage = Image('../kite_detail.jpg')
ts = []
disp = Display()
for i in range(0, 50):
    img = cam.getImage()
while disp.isNotDone():
    img = cam.getImage()
    bb = (255, 180, 70, 20)
    ts = img.track("camshift", ts, modelImage, bb, num_frames=1)
    modelImage = Image('../kite_detail.jpg')
    # now here in first loop iteration since ts is empty,
    # img0 and bb will be considered.
    # New tracking object will be created and added in ts (TrackSet)
    # After first iteration, ts is not empty and hence the previous
    # image frames and bounding box will be taken from ts and img0
    # and bb will be ignored.
    ts.draw()
    ts.drawBB()
    ts.showCoordinates()
    img.show()
Example #25
 def __init__(self,path):
     self.capture = VirtualCamera(path,"video")
     self.im = None
Example #26
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT


from SimpleCV import Camera, Image, VirtualCamera, Display, DrawingLayer, Color, JpegStreamCamera, JpegStreamer
import scipy as sp
import numpy as np

cam = VirtualCamera('../Recording/Videos/Kite with leds in night - YouTube [360p].mp4','video')
img = cam.getImage()
disp = Display((810,1080))
display = True
predictedTargetPosition = (img.size()[0]/2, img.size()[1]/2)
while (not display or disp.isNotDone()) and img.size() != (0, 0):
    img = cam.getImage()
    if img.size() != (0, 0):
        if img:
            if display:
                # Create a new layer to host information retrieved from video
                layer = DrawingLayer((img.width, img.height))
            maskred = img.colorDistance(color=(200, 50, 70)).invert().threshold(170)
            imgred = (img * (maskred / 255)).dilate(3)
            targetred = imgred.findBlobs(maxsize=200)
            maskwhite = img.colorDistance(color=(200, 200, 200)).invert().threshold(230)
            imgwhite = (img * (maskwhite / 255)).dilate(3)
Example #27
from SimpleCV import ColorSegmentation, Image, Camera, VirtualCamera, Display, Color

# Open reference video
cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
# Select reference image
img = cam.getFrame(50)
modelImage = img.crop(255, 180, 70, 20)
modelImage = Image('kite_detail.jpg')
ts = []
disp = Display()
for i in range(0,50):
    img = cam.getImage()
while disp.isNotDone():
    img = cam.getImage()
    bb = (255, 180, 70, 20)
    ts = img.track("camshift", ts, modelImage, bb, num_frames=1)
    modelImage = Image('kite_detail.jpg')
    # now here in first loop iteration since ts is empty,
    # img0 and bb will be considered.
    # New tracking object will be created and added in ts (TrackSet)
    # After first iteration, ts is not empty and hence the previous
    # image frames and bounding box will be taken from ts and img0
    # and bb will be ignored.
    ts.draw()
    ts.drawBB()
    ts.showCoordinates()
    img.show()


Example #28
class FingerTracker(object):
    """Finger tracking base class
    """

    def __init__(self, camera=None):
        """Initialize the finger tracker

        :param TrackerIn ti: Tracker input
        :param camera: Camera index, filename, or None
        """

        if camera is None:
            self.cam = Camera(0)
        elif type(camera) == int:
            self.cam = Camera(camera)
        else:
            self.cam = VirtualCamera(camera, "video")

    @staticmethod
    def get_tracker(algorithm):
        """Get the tracker for a given algorithm.

        Supported algorithms:
          default:  Same as 'skeleton'
          peaks:    peak-finding
          skeleton: skeleton-finding
        """
        if algorithm == "default":
            algorithm = "peaks"

        if algorithm == "peaks":
            from fingertracker_peaks import FingerTrackerPeaks
            return FingerTrackerPeaks
        elif algorithm == "skeleton":
            from fingertracker_skeleton import FingerTrackerSkeleton
            return FingerTrackerSkeleton
        else:
            print "Unknown algorithm: {}".format(algorithm)

    def add_two_positions(self, ti, positions):
        """Adds two finger positions to ti

        :param ti: TrackerIn object
        :param list positions: List of (x,y) coordinates
        """
        pos = []
        last_x = -100
        for x, y in sorted(positions):
            if x - last_x > 10:
                pos.append((x, y))
                last_x = x
        if len(pos) == 1:
            pos *= 2
        elif len(pos) > 2:
            return
        ti.add_positions(pos)

    def add_positions(self, ti, positions):
        """Adds two finger positions to ti

        :param ti: TrackerIn object
        :param list positions: List of (x,y) coordinates
        """
        ti.add_positions(positions)

    def run_frame(self, ti, img):
        """Run the algorithm for one frame

        :param TrackerIn ti: TrackerIn object to send events to
        :return: True if I should be called with the next frame
        """
        pass

    def finish(self):
        """Finish tracking (called when the tracker is interrupted or closed"""
        pass

    def run(self, ti):
        """Run the algorithm

        :param TrackerIn ti: TrackerIn object to send events to
        """
        if sys.platform == "darwin": 
            dis = Display() # needed to work on macs
            time.sleep(2) # needed to work on macs
        try:
            loop = True
            while loop:
                img = self.cam.getImage()
                if img.getBitmap() == '':
                    break
                loop = self.run_frame(ti, img)
        except KeyboardInterrupt:
            pass
        self.finish()
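
A hedged usage sketch for the class above: TrackerIn is assumed to be a caller-supplied object exposing add_positions(), and the video filename is hypothetical.

tracker_cls = FingerTracker.get_tracker("default")  # resolves to the 'peaks' tracker
tracker = tracker_cls(camera="hand.avi")            # hypothetical input video
ti = TrackerIn()                                    # assumed sink with add_positions()
tracker.run(ti)                                     # loops run_frame() until the video ends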
Example #29
0
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT

from SimpleCV import Camera, Image, VirtualCamera, Display, DrawingLayer, Color, JpegStreamCamera, JpegStreamer
import scipy as sp
import numpy as np

cam = VirtualCamera(
    '../Recording/Videos/Kite with leds in night-LgvpmMt-SA0.mp4', 'video')
img = cam.getImage()
disp = Display((810, 1080))
display = True
predictedTargetPosition = (img.size()[0] / 2, img.size()[1] / 2)
while (not (display) or disp.isNotDone()) and img.size() != (0, 0):
    img = cam.getImage()
    if img.size() != (0, 0):
        if img:
            if display:
                # Create a new layer to host information retrieved from video
                layer = DrawingLayer((img.width, img.height))
            maskred = img.colorDistance(color=(200, 50, 70)).invert().threshold(170)
            imgred = (img * (maskred / 255)).dilate(3)
            targetred = imgred.findBlobs(maxsize=200)
            maskwhite = img.colorDistance(color=(200, 200, 200)).invert().threshold(230)
            imgwhite = (img * (maskwhite / 255)).dilate(3)
Example #30
0
from SimpleCV import ColorSegmentation, Image, Camera, VirtualCamera

# Open reference video
cam=VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
# Select reference image
img=cam.getFrame(50)
modelImage = img.crop(255, 180, 70, 20)
cs = ColorSegmentation()

cs.addToModel(modelImage)


img = cam.getImage()
cs.addImage(img)
res=cs.getSegmentedImage()
res.show()
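
To segment every frame rather than a single one, the same objects can be reused in a loop; a sketch, using the end-of-video check from the other examples.

while True:
    img = cam.getImage()
    if img.getBitmap() == '':  # end of video
        break
    cs.addImage(img)
    res = cs.getSegmentedImage()  # image separating model colours from the rest
    res.show()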
Example #31
0
from SimpleCV import Display, Camera, Image, DrawingLayer, VirtualCamera
disp = Display((600,800))
#cam = Camera()
cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteTest.avi','video')
isPaused = False
updateSelection = False
while disp.isNotDone():
  if not isPaused:
    img_flip = cam.getImage().flipHorizontal()
    img = img_flip.edges(150, 100).dilate()
  if disp.rightButtonDown:
    isPaused = not isPaused
  selectionLayer = DrawingLayer((img.width, img.height))
  if disp.leftButtonDown:
    corner1 = (disp.mouseX, disp.mouseY)
    updateSelection = True
  if updateSelection:
    corner2 = (disp.mouseX, disp.mouseY)
    bb = disp.pointsToBoundingBox(corner1, corner2)
    if disp.leftButtonUp:
      updateSelection = False
    if corner1 != corner2:
      selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]))
  img.addDrawingLayer(selectionLayer)
  img.save(disp)
  img.removeDrawingLayer(0)
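
The same click-drag selection idiom reappears in Example #33 below; distilled, it is just two saved mouse positions turned into a box. A sketch using only Display attributes already exercised above:

def update_selection(disp, state):
    # state is a dict holding 'corner1' and 'selecting', mutated in place
    if disp.leftButtonDown:
        state['corner1'] = (disp.mouseX, disp.mouseY)
        state['selecting'] = True
    if state.get('selecting'):
        corner2 = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            state['selecting'] = False
        if state['corner1'] != corner2:
            return disp.pointsToBoundingBox(state['corner1'], corner2)
    return None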

Example #32
0
class Vision:

    def __init__(self, pitch_num, stdout, reset_pitch_size, reset_thresh,
                 scale, colour_order, render_tlayers, file_input=None):
        self.running = True
        self.connected = False
        self.scale = scale
        self.stdout = stdout
        self._logger = Logger('vision_errors.log')

        if file_input is None:
            self.cam = Camera(prop_set = {"width": 720, "height": 540})
        else:
            file_type = 'video'
            if file_input.endswith(('jpg', 'png')):
                file_type = 'image'
            self.cam = VirtualCamera(file_input, file_type)

        try:
            calibration_path = os.path.join('calibration', 'pitch{0}'.format(pitch_num))
            self.cam.loadCalibration(os.path.join(sys.path[0], calibration_path))
        except TypeError:
            error_msg = 'Calibration file not found.'
            self._logger.log(error_msg)
            print error_msg

        self.cropper = Cropper(pitch_num=pitch_num, reset_pitch=reset_pitch_size)
        self.processor = Processor(pitch_num, reset_pitch_size, reset_thresh, scale)
        if self.cropper.is_ready():
            self.gui = Gui(self.cropper.pitch_size)
        else:
            self.gui = Gui()
        self.threshold_gui = ThresholdGui(self.processor, self.gui, pitch_num = pitch_num)
        self.detection = Detection(self.gui, self.processor, colour_order, scale, pitch_num,
                                   render_tlayers=render_tlayers)
        self.event_handler = self.gui.get_event_handler()
        self.event_handler.add_listener('q', self.quit)

        while self.running:
            try:
                if not self.stdout:
                    self.connect()
                else:
                    self.connected = True
                if self.cropper.is_ready():
                    #self.output_pitch_size()
                    self.detection.set_coord_rect(self.cropper.get_coord_rect())
                    self.detection.set_pitch_dims(self.cropper.pitch_size)
                    self.processor.set_crop_rect(self.cropper.get_crop_rect())
                    self.gui.set_show_mouse(False)
                else:
                    self.event_handler.set_click_listener(self.set_next_pitch_corner)
                while self.running:
                    self.process_frame()
            except socket.error:
                self.connected = False
                # If the rest of the system is not up yet/gets quit,
                # just wait for it to come available.
                time.sleep(1)
                error_msg = 'Connection error, sleeping 1s...' 
                self._logger.log(error_msg)
                print error_msg
                self.process_frame()

        if not self.stdout:
            self.socket.close()

    def process_frame(self):
        """Get frame, detect objects and display frame
        """
        # This is where calibration comes in
        if self.cam.getCameraMatrix is None:
            frame = self.cam.getImage()
        else:
            frame = self.cam.getImageUndistort()

        self.processor.preprocess(frame, self.scale)
        if self.cropper.is_ready():
            self.gui.update_layer('raw', self.processor.get_bgr_frame())
        else:
            self.gui.update_layer('raw', frame)

        if self.cropper.is_ready():
            entities = self.detection.detect_objects()
            self.output_entities(entities)

        self.gui.process_update()

    def set_next_pitch_corner(self, where):

        self.cropper.set_point(where)

        if self.cropper.is_ready():
            #self.output_pitch_size()
            self.processor.set_crop_rect(self.cropper.get_crop_rect())
            self.detection.set_pitch_dims(self.cropper.pitch_size)
            self.detection.set_coord_rect(self.cropper.get_coord_rect())
            self.gui.draw_crosshair(self.cropper.get_coord_rect()[0], 'corner1')
            self.gui.draw_crosshair(self.cropper.get_coord_rect()[1], 'corner2')
            self.gui.set_show_mouse(False)
            self.gui.update_layer('corner', None)
        else:
            self.gui.draw_crosshair(where, 'corner')

    def output_pitch_size(self):
        self.send('{0} {1}\n'.format(PITCH_SIZE_BIT, self.processor.pitch_points_string))
        

    def output_entities(self, entities):

        if not self.connected or not self.cropper.is_ready():
            return

        self.send('{0} '.format(ENTITY_BIT))

        for i in range(0, 4):
            entity = entities[i]
            x, y = entity.get_coordinates()
            angle = -1 if entity.get_angle() is None else entity.get_angle()
            self.send('{0} {1} {2} '.format(x, y, angle))

        x, y = entities[BALL].get_coordinates()
        self.send('{0} {1} '.format(x, y))
        self.send(str(int(time.time() * 1000)) + "\n")

    def send(self, string):
        if self.stdout:
            sys.stdout.write(string)
        else:
            self.socket.send(string)

    def connect(self):
        print('Attempting to connect...')
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((HOST, PORT))
        self.connected = True
        print('Successfully connected.')

    def quit(self):
        self.running = False
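
For reference, a sketch of what a receiver might do with the line written by output_entities() above, assuming ENTITY_BIT is a single token and the field order shown in the send() calls: four robots as "x y angle", then the ball as "x y", then a millisecond timestamp.

def parse_entities(line):
    parts = line.split()
    robots = []
    for i in range(4):
        x, y, angle = parts[1 + 3*i : 4 + 3*i]  # skip the ENTITY_BIT prefix
        robots.append((float(x), float(y), float(angle)))
    ball = (float(parts[13]), float(parts[14]))
    timestamp_ms = int(parts[15])
    return robots, ball, timestamp_ms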
Example #33
0
 def track(self):
  print "Press right mouse button to pause or play"
  print "Use left mouse button to select target"
  print "Target color must be different from background"
  print "Target must have width larger than height"
  print "Target can be upside down"

  #Parameters
  isUDPConnection = False # Currently switched manually in the code
  display = True
  displayDebug = True
  useBasemap = False
  maxRelativeMotionPerFrame = 2 # How far the target can move between two successive frames
  pixelPerRadians = 320
  radius = pixelPerRadians
  referenceImage = '../ObjectTracking/kite_detail.jpg'
  scaleFactor = 0.5
  isVirtualCamera = True
  useHDF5 = False

  # Open reference image: this is used at initialisation
  target_detail = Image(referenceImage)

  # Get RGB color palette of target (was found to work better than using hue)
  pal = target_detail.getPalette(bins = 2, hue = False)

  # Open video to analyse or live stream
  #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
  if isVirtualCamera:
    #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
    #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
    #cam = VirtualCamera('output.avi', 'video')
    cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv','video')
    virtualCameraFPS = 25
  else:
    cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')#640 * 480
    #cam = Camera() 

  # Get a sample image to initialize the display at the same size
  img = cam.getImage().scale(scaleFactor)
  print img.width, img.height
  # Create a pygame display
  if display:
    if img.width > img.height:
      disp = Display((27*640/10, 25*400/10))  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor))
    else:
      disp = Display((810, 1080))
  #js = JpegStreamer()



  # Initialize variables
  previous_angle = 0 # target has to be upright when starting. Target width has to be larger than target height.
  previous_coord_px = (0, 0) # Initialized to top left corner, which always exists
  previous_dCoord = previous_coord_px
  previous_dAngle = previous_angle
  angles = []
  coords_px = []
  coord_px = [0, 0]
  angle = 0
  target_elevations = []
  target_bearings = []
  times = []
  wasTargetFoundInPreviousFrame = False
  i_frame = 0
  isPaused = False
  selectionInProgress = False
  th = [100, 100, 100]
  skycolor = Color.BLUE
  timeLastTarget = 0

  # Prepare recording
  recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_")+ 'simpleTrack'
  if useHDF5:
    try:
      os.remove(recordFilename + '.hdf5') 
    except:
      print('Creating file ' + recordFilename + '.hdf5')
    """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
    h5py._errors.silence_errors()
    recordFile = h5py.File(recordFilename + '.hdf5', 'a') 
    hdfSize = 0    
    dset = recordFile.create_dataset('kite', (2,2), maxshape=(None,7))
    imset = recordFile.create_dataset('image', (2,img.width,img.height,3 ), maxshape=(None, img.width, img.height, 3))
  else:
    try:
      os.remove(recordFilename + '.csv')   
    except:
      print('Creating file ' + recordFilename + '.csv') 
    recordFile = file(recordFilename + '.csv', 'a')
    csv_writer = csv.writer(recordFile)
    csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)', 'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'])

  # Launch a thread to get UDP message with orientation of the camera
  mobile = mobileState.mobileState()
  if isUDPConnection:
    a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
    a.start()

  # Loop while not canceled by user
  t0 = time.time()
  previousTime = t0
  while not(display) or disp.isNotDone():
    t = time.time()
    deltaT = (t-previousTime)
    FPS = 1.0/deltaT
    #print 'FPS =', FPS
    if isVirtualCamera:
      deltaT = 1.0/virtualCameraFPS
    previousTime = t
    i_frame = i_frame + 1
    timestamp = datetime.datetime.utcnow()

    # Receive orientation of the camera
    if isUDPConnection:
      mobile.computeRPY([2, 0, 1], [-1, 1, 1])
    ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
            [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

    if useBasemap:
      # Warning: this really slows down the computation
      m = Basemap(width=img.width, height=img.height, projection='aeqd',
            lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw), rsphere = radius)

    # Get an image from camera
    if not isPaused:
      img = cam.getImage()
      img = img.resize(int(scaleFactor*img.width), int(scaleFactor*img.height))
    
    if display:
      # Pause image when right button is pressed
      dwn = disp.rightButtonDownPosition()
      if dwn is not None:
        isPaused = not(isPaused)
        dwn = None

    if display:
      # Create a layer to let the user make a selection of the target
      selectionLayer = DrawingLayer((img.width, img.height))

    if img:
      if display:
        # Create a new layer to host information retrieved from video
        layer = DrawingLayer((img.width, img.height))
        # Selection is a rectangle drawn while holding mouse left button down
        if disp.leftButtonDown:
          corner1 = (disp.mouseX, disp.mouseY)
          selectionInProgress = True
        if selectionInProgress:
          corner2 = (disp.mouseX, disp.mouseY)
          bb = disp.pointsToBoundingBox(corner1, corner2)  # Display the temporary selection
          if disp.leftButtonUp:  # User has finished his selection
            selectionInProgress = False
            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
            if selection != None:
              # The 3 main colors in the selected area are considered.
              # Note that the selection should be included in the target and not contain background
              try:
                selection.save('../ObjectTracking/' + 'kite_detail_tmp.jpg')
                img0 = Image("kite_detail_tmp.jpg")  # For unknown reason I have to reload the image...
                pal = img0.getPalette(bins=2, hue=False)
              except:  # getPalette sometimes raises LinAlgError because the matrix is not positive definite
                pal = pal
              wasTargetFoundInPreviousFrame = False
              previous_coord_px = (bb[0] + bb[2]/2, bb[1] + bb[3]/2)
          if corner1 != corner2:
            selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), width=5, color=Color.YELLOW)
      # If the target was already found, we can save computation time by
      # reducing the Region Of Interest around predicted position
      if wasTargetFoundInPreviousFrame:
        ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                  max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
        ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                             maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                 centered = False)
        if display:
          # Draw the rectangle corresponding to the ROI on the complete image
          layer.rectangle((previous_coord_px[0] - maxRelativeMotionPerFrame/2*width,
                           previous_coord_px[1] - maxRelativeMotionPerFrame/2*height),
                          (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height),
                          color = Color.GREEN, width = 2)
      else:
        # Search the whole image if there is no clue of where the target is
        ROITopLeftCorner = (0, 0)
        ROI = img

        '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

      # Find sky color
      sky = (img - img.binarize()).findBlobs(minsize=10000)
      if sky:
        skycolor = sky[0].meanColor()
      # Option 3
      target_img = ROI - ROI  # Black image
              
      # Loop through palette of target colors
      if display and displayDebug:
        decomposition = []
      i_col = 0
      for col in pal:
        c = tuple([int(col[i]) for i in range(0, 3)])
        # Search the target based on color
        ROI.save('../ObjectTracking/'+ 'ROI_tmp.jpg')
        img1 = Image('../ObjectTracking/'+ 'ROI_tmp.jpg')
        filter_img = img1.colorDistance(color = c)
        h = filter_img.histogram(numbins=256)
        cs = np.cumsum(h)
        thmax = np.argmin(abs(cs - 0.02*img.width*img.height))  # find the threshold keeping 2% of the pixels in the expected color
        thmin = np.argmin(abs(cs - 0.005*img.width*img.height))  # find the threshold keeping 0.5% of the pixels in the expected color
        if thmin==thmax:
          newth = thmin
        else:
          newth = np.argmin(h[thmin:thmax]) + thmin
        alpha = 0.5
        th[i_col] = alpha*th[i_col]+(1-alpha)*newth
        filter_img = filter_img.threshold(max(40,min(200,th[i_col]))).invert()
        target_img = target_img + filter_img
        #print th
        i_col = i_col + 1
        if display and displayDebug:
          [R, G, B] = filter_img.splitChannels()
          white = (R-R).invert()
          r = R*1.0/255*c[0]
          g = G*1.0/255*c[1]
          b = B*1.0/255*c[2]
          tmp = white.mergeChannels(r, g, b)
          decomposition.append(tmp)

      # Get a black background with white target foreground
      target_img = target_img.threshold(150)
  
      target_img = target_img - ROI.colorDistance(color = skycolor).threshold(80).invert()

      if display and displayDebug:
        small_ini = target_img.resize(int(img.width/(len(pal)+1)),  int(img.height/(len(pal)+1)))
        for tmp in decomposition:
          small_ini = small_ini.sideBySide(tmp.resize(int(img.width/(len(pal)+1)), int(img.height/(len(pal)+1))), side = 'bottom')
        small_ini = small_ini.adaptiveScale((int(img.width), int(img.height)))
        toDisplay = img.sideBySide(small_ini)
      else:
        toDisplay = img
          #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

      # Search for binary large objects representing potential target
      target = target_img.findBlobs(minsize = 500)
      
      if target: # If a target was found
      
        if wasTargetFoundInPreviousFrame:
          predictedTargetPosition = (width*maxRelativeMotionPerFrame/2, height*maxRelativeMotionPerFrame/2) # Target will most likely be close to the center of the ROI   
        else:
          predictedTargetPosition = previous_coord_px
        # If there are several targets in the image, take the one closest to the predicted position
        target = target.sortDistance(predictedTargetPosition)

        # Get target coordinates according to minimal bounding rectangle or centroid.
        coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY()))
        coord_px = ROITopLeftCorner + np.array(target[0].centroid())

        # Rotate the coordinates of roll angle around the middle of the screen
        rot_coord_px = np.dot(ctm, coord_px - np.array([img.width/2, img.height/2])) + np.array([img.width/2, img.height/2])
        if useBasemap:
          coord = sp.deg2rad(m(rot_coord_px[0], img.height-rot_coord_px[1], inverse = True))
        else:
          coord = localProjection(rot_coord_px[0]-img.width/2, img.height/2-rot_coord_px[1], radius, mobile.yaw, mobile.pitch, inverse = True)
        target_bearing, target_elevation = coord

        # Get minimum bounding rectangle for display purposes
        minR = ROITopLeftCorner + np.array(target[0].minRect())

        contours = target[0].contour()

        contours = [ROITopLeftCorner + np.array(contour) for contour in contours]

        # Get target features
        angle = sp.deg2rad(target[0].angle()) + mobile.roll
        angle =  sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle)))
        width = target[0].width()
        height = target[0].height()

        # Check if the kite is upside down
        # First rotate the kite
        ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
            [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
        rotated_contours = [np.dot(ctm2, contour-coordMinRect) for contour in contours]  
        y = [-tmp[1] for tmp in rotated_contours]
        itop = np.argmax(y) # Then looks at the points at the top
        ibottom = np.argmin(y) # and the point at the bottom
        # The most off-centered point is at the bottom
        if abs(rotated_contours[itop][0]) > abs(rotated_contours[ibottom][0]):
          isInverted = True
        else:
          isInverted = False

        if isInverted:
          angle = angle + sp.pi

        
        # Filter the data
        alpha = 1 - sp.exp(-deltaT/self.filterTimeConstant)
        if not isPaused:
          dCoord = np.array(previous_dCoord)*(1-alpha) + alpha*(np.array(coord_px) - previous_coord_px)  # related to the speed only if cam is fixed
          dAngle = np.array(previous_dAngle)*(1-alpha) + alpha*(np.array(angle) - previous_angle)
        else:
          dCoord = np.array([0, 0])
          dAngle = np.array([0])
        #print coord_px, angle, width, height, dCoord
    
        # Record important data
        times.append(timestamp)
        coords_px.append(coord_px)
        angles.append(angle)
        target_elevations.append(target_elevation)
        target_bearings.append(target_bearing)
        
        # Export data to controller
        self.elevation = target_elevation
        self.bearing = target_bearing
        self.orientation = angle
        dt = time.time()-timeLastTarget
        self.ROT = dAngle/dt
        self.lastUpdateTime = t
        
        # Save for initialisation of next step
        previous_dCoord = dCoord
        previous_angle = angle
        previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
        wasTargetFoundInPreviousFrame = True
        timeLastTarget = time.time()
      
      else:
        wasTargetFoundInPreviousFrame = False
        
      if useHDF5:
        hdfSize = hdfSize+1
        dset.resize((hdfSize, 7))
        imset.resize((hdfSize, img.width, img.height, 3))
        dset[hdfSize-1,:] = [time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT]
        imset[hdfSize-1,:,:,:] = img.getNumpy()
        recordFile.flush()
      else:
        csv_writer.writerow([time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT])



      if display:
        if target:
          # Add target features to layer
          # Minimal rectangle and its center in RED
          layer.polygon(minR[(0, 1, 3, 2), :], color = Color.RED, width = 5)
          layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10, filled = True, color = Color.RED)

          # Target contour and centroid in BLUE
          layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled = True, color = Color.BLUE)
          layer.polygon(contours, color = Color.BLUE, width = 5)

          # Speed vector in BLACK
          layer.line((int(coord_px[0]), int(coord_px[1])), (int(coord_px[0]+20*dCoord[0]), int(coord_px[1]+20*dCoord[1])), width = 3)

          # Line giving angle
          layer.line((int(coord_px[0]+200*sp.cos(angle)), int(coord_px[1]+200*sp.sin(angle))), (int(coord_px[0]-200*sp.cos(angle)), int(coord_px[1]-200*sp.sin(angle))), color = Color.RED)

          # Line giving rate of turn
          #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

        # Add the layer to the raw image
        toDisplay.addDrawingLayer(layer)
        toDisplay.addDrawingLayer(selectionLayer)

        # Add time metadata
        toDisplay.drawText(str(i_frame) + " " + str(timestamp), x=0, y=0, fontsize=20)

        # Add line giving horizon
        #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)), (img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

      # Plot parallels
        for lat in range(-90, 90, 15):
          r = range(0, 361, 10)
          if useBasemap:
            # \todo improve for high roll
            l = m(r, [lat]*len(r))
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l = localProjection(sp.deg2rad(r), \
                    sp.deg2rad([lat]*len(r)), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Plot meridians
        for lon in range(0, 360, 15):
          r = range(-90, 91, 10)
          if useBasemap:
            # \todo improve for high roll
            l = m([lon]*len(r), r)
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l = localProjection(sp.deg2rad([lon]*len(r)), \
                    sp.deg2rad(r), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Text giving bearing
      # \todo improve for high roll
        for bearing_deg in range(0, 360, 30):
          l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(bearing_deg), ( img.width/2+int(l[0]), img.height-20), color = Color.RED)

      # Text giving elevation
      # \todo improve for high roll
        for elevation_deg in range(-60, 91, 30):
          l = localProjection(0, sp.deg2rad(elevation_deg), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(elevation_deg), ( img.width/2 ,img.height/2-int(l[1])), color = Color.RED)

        #toDisplay.save(js)
        toDisplay.save(disp)
    if display:
      toDisplay.removeDrawingLayer(1)
      toDisplay.removeDrawingLayer(0)
  recordFile.close()
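
The dCoord/dAngle update in the loop above is a first-order low-pass filter whose gain follows the frame period. A standalone sketch of the same filter (the time constant, like filterTimeConstant in the enclosing class, is assumed to be in seconds):

import scipy as sp

def low_pass(previous, measurement, deltaT, time_constant):
    # alpha tends to 1 when frames are slow relative to the time constant
    # (trust the new measurement) and to 0 when they are fast (smooth more)
    alpha = 1 - sp.exp(-deltaT / time_constant)
    return (1 - alpha) * previous + alpha * measurement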
Example #35
0
import cv2
import numpy as np
from SimpleCV import Image, Display, VirtualCamera


# Build the polar -> cartesian pixel maps (this snippet begins mid-file;
# the function body matches buildMap in Example #37 below)
def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
    map_x = np.zeros((Hd, Wd), np.float32)
    map_y = np.zeros((Hd, Wd), np.float32)
    rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
    thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
    sinMap = np.sin(thetaMap)
    cosMap = np.cos(thetaMap)

    for y in xrange(0, int(Hd - 1)):
        map_x[y] = Cx + rMap[y] * sinMap
        map_y[y] = Cy + rMap[y] * cosMap

    return map_x, map_y


# do the unwarping
def unwarp(img, xmap, ymap):
    output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
    result = Image(output, cv2image=True)
    return result


disp = Display((800, 600))
vals = []
last = (0, 0)
# Load the video from the rpi
vc = VirtualCamera("video.h264", "video")
# Sometimes there is crud at the beginning, buffer it out
for i in range(0, 10):
    img = vc.getImage()
    img.save(disp)
# Show the user a frame and let them left-click the center
# of the "donut", then the inner and outer edges on the right,
# in that order. Press Esc to exit the display
while not disp.isDone():
    test = disp.leftButtonDownPosition()
    if (test != last and test is not None):
        last = test
        vals.append(test)

# 0 = xc yc
# 1 = r1
# 2 = r2
Example #36
0
from SimpleCV import JpegStreamer, Display, VirtualCamera, VideoStream
import time
import numpy as np

cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv','video')
port = 8000+np.random.randint(100)
print port
js = JpegStreamer(hostandport=port, st=0.1)
vs = VideoStream("out.avi", fps=15)
endReached = False
t0 = time.time()
FPS = 25
while not endReached:
  t = time.time() - t0
  # Pick the frame matching elapsed wall-clock time, so playback stays real-time
  toDisplay = cam.getFrame(np.ceil(t*FPS))
  if toDisplay.size() != (0, 0):
    toDisplay.save(js)
  else:
    endReached = True
  time.sleep(0.05)
Example #37
0
    def new_dewarp(self):
        vidpath = self.iVidPath  #get input video path

        # isInROI is deprecated and not used in this program
        def isInROI(x, y, R1, R2, Cx, Cy):
            isInOuter = False
            isInInner = False
            xv = x - Cx
            yv = y - Cy
            rt = (xv * xv) + (yv * yv)
            if (rt < R2 * R2):
                isInOuter = True
                if (rt < R1 * R1):
                    isInInner = True
            return isInOuter and not isInInner

        """ ws = width of input video
            hs = height of input video
            wd = width of destination/output video
            Hd = height of destinaton/output video
          
        """

        def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
            #the function throws type error, if Wd and Hd are not converted to integers
            Hd = int(Hd)
            Wd = int(Wd)
            map_x = np.zeros((Hd, Wd), np.float32)
            map_y = np.zeros((Hd, Wd), np.float32)
            rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
            thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
            sinMap = np.sin(thetaMap)
            cosMap = np.cos(thetaMap)

            for y in xrange(0, int(Hd - 1)):
                map_x[y] = Cx + rMap[y] * sinMap
                map_y[y] = Cy + rMap[y] * cosMap

            return map_x, map_y

        # do the unwarping
        def unwarp(img, xmap, ymap):
            output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
            result = Image(output, cv2image=True)
            return result

        disp = Display(
            (800, 600))  #initialise a 800x600 simplecv display to show preview
        #disp = Display((1296,972))
        vals = []
        last = (0, 0)
        # Load the video
        vc = VirtualCamera(vidpath, "video")
        # Sometimes there is crud at the beginning, buffer it out
        for i in range(0, 10):
            img = vc.getImage()
            img.save(disp)
        # Show the user a frame and let them left-click the center
        # of the "donut", then the inner and outer edges on the right,
        # in that order. Press Esc to exit the display
        while not disp.isDone():
            test = disp.leftButtonDownPosition()
            if test != last and test is not None:
                last = test
                print "[360fy]------- center = {0}\n".format(last)

                vals.append(test)
        print "[360fy]------- Dewarping video and generating frames using center, offset1, offset2\n"

        Cx = vals[0][0]
        Cy = vals[0][1]
        #print str(Cx) + " " + str(Cy)
        # Inner donut radius
        R1x = vals[1][0]
        R1y = vals[1][1]
        R1 = R1x - Cx
        #print str(R1)
        # outer donut radius
        R2x = vals[2][0]
        R2y = vals[2][1]
        R2 = R2x - Cx
        #print str(R2)
        # our input and output image sizes
        Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
        #Wd = 2.0*((R2+R1)/2)*np.pi
        #Hd = (2.0*((R2+R1)/2)*np.pi) * (90/360)
        Hd = (R2 - R1)
        Ws = img.width
        Hs = img.height
        # build the pixel map, this could be sped up
        print "BUILDING MAP"

        xmap, ymap = buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy)
        print "MAP DONE"

        result = unwarp(img, xmap, ymap)

        result.save(disp)

        print "[360fy]------- Storing frames into ../temp_data/frames\n"
        i = 0
        while img is not None:
            print bcolors.OKBLUE + "\rFrame Number: {0}".format(i) + bcolors.ENDC,

            # flush stdout so that frame numbers print continually without skipping
            sys.stdout.flush()
            #print " percent complete         \r",
            result = unwarp(img, xmap, ymap)
            result.save(disp)
            # Save to file
            fname = "../temp_data/frames/FY{num:06d}.png".format(num=i)
            result.save(fname)

            img = vc.getImage()
            i = i + 1
        print " \n"

        if img is None:
            self.statusText.setText(str("Status: Done"))
            disp.quit()
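
A sketch of exercising the map construction above on its own, assuming buildMap() is hoisted to module level; the donut geometry values here are hypothetical, not taken from a real calibration.

import numpy as np

Cx, Cy = 400, 300   # donut center (hypothetical)
R1, R2 = 80, 240    # inner and outer radii (hypothetical)
Wd = int(round(float(max(R1, R2)) * 2.0 * np.pi))  # output width: outer circumference
Hd = int(R2 - R1)                                  # output height: ring thickness
xmap, ymap = buildMap(800, 600, Wd, Hd, R1, R2, Cx, Cy)
print xmap.shape, ymap.shape  # (Hd, Wd) float32 maps, ready for cv2.remap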
Example #38
0
writeData([0x55])
writeCommand([0xC7])  # CONTRASTMASTER
writeData([0x0F])
writeCommand([0xB4])  # SETVSL
writeData([0xA0])
writeData([0xB5])
writeData([0x55])
writeCommand([0xB6])  # PRECHARGE2
writeData([0x01])
writeCommand([0xAF])  # DISPLAYON

#end Initialization Sequence for display SSD1351

# Setup video

video = VirtualCamera("Pirate.mp4",
                      "video")  # Load an existing video into a virtual camera

#end set up

# main program

image = video.getImage()
image.embiggen((128, 96), Color.BLUE, (0, 11))  #.save("test.jpg") - remove '#' to save a test image. Enlarges image and adds selected colour bars

#image test
'''
image.show()
print image.getPixel(50,25)
pixelRGB = image.getPixel(50,25)