Example #1
from SimpleCV import Camera, Display


def main():
    x = 0
    cam = Camera(prop_set={'width': 640, 'height': 480})
    disp = Display(resolution=(320, 240))
    while disp.isNotDone():
        img = cam.getImage()
        img = img.scale(0.5)
        faces = img.findHaarFeatures("eye.xml")
        if faces:
            for face in faces:
                face.draw()
                print "eyes Detected"
            # x = 0
        else:
            # x += 1
            print "close eyes"
            # if x > 10:
            #     print "HOY GISING"
            #     return main()
        img.save(disp)
Example #2
from SimpleCV import Camera, Display, VideoStream


def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'motiontest.avi'

    # create the video stream for saving the video file
    #vs = VideoStream(fps=24, filename=fname, framefill=True)
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)

    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))

    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

    # Finished the acquisition of images now Transform into a film
    #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    #self.makefilmProcess.start()
    saveFilmToDisk(BUFFER_NAME, outputFile)
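
A note on saveFilmToDisk: both this example and Example #6 below call it without defining it at module level. A minimal standalone sketch, assuming avconv is on the PATH and borrowing the encoding flags that Example #6 uses:

from subprocess import call

def saveFilmToDisk(bufferName, outname):
    # re-encode the raw capture buffer to MPEG-4 (flags mirror Example #6)
    params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(bufferName, outname)
    call('avconv' + params, shell=True)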
Example #4
class GUIManager:

  def __init__(self):
    self.dis = Display(title='FRC Team 3341 Targeting')  # creates a window
    
  def setImage(self, image):
    self.img = image
    
  def setImageText(self, imageText):
    self.img.drawText(imageText, self.img.height/2, self.img.width/2)
    # the height and width may need to be switched around
    # it could also be that the image is bigger than the display
    
  def setFeatures(self, blobs=None, ls=None):  # draws the features like blobs and Ls
    if blobs:
      for b in blobs:
        b.draw()

    if ls:
      for l in ls:
        l.draw()  # the L draw function draws the Ls upside down for some reason
    
  def show(self):  
    # I took out the isFile parameter because a video is not displayed differently than an image in simplecv
    # the thing where it waits for a key to be pressed was taken out too because it made the program crash
    self.img.save(self.dis)
    
  def disIsNotDone(self):
    # this is used to find if the window has been exited out of
    return self.dis.isNotDone()
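
A hypothetical driver loop for GUIManager (the camera source and blob detection below are illustrative assumptions, not part of the original class):

from SimpleCV import Camera

gui = GUIManager()  # __init__ opens the display window
cam = Camera()
while gui.disIsNotDone():
    img = cam.getImage()
    gui.setImage(img)
    gui.setFeatures(blobs=img.findBlobs())  # draw any detected blobs
    gui.show()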
Example #5
from SimpleCV import Camera, Display
import time


def control_by_cam():
    scale_amount = (200, 150)
    d = Display(scale_amount)
    cam = Camera(0)
    prev = cam.getImage().flipHorizontal().scale(scale_amount[0],
                                                 scale_amount[1])
    time.sleep(0.5)
    t = 0.5
    buffer = 20
    count = 0
    motionStr = ""
    while d.isNotDone():
        current = cam.getImage().flipHorizontal()
        current = current.scale(scale_amount[0], scale_amount[1])
        if count < buffer:
            count = count + 1
        else:
            fs = current.findMotion(prev, window=15, method="BM")
            if fs:
                dx = 0
                for f in fs:
                    dx = dx + f.dx
                dx = dx / len(fs)
                motionStr = movement_check(dx, t)
                current.drawText(motionStr, 10, 10)
        prev = current
        time.sleep(0.01)
        current.save(d)
    # return once the display is closed, not on the first frame
    return motionStr
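
movement_check is not shown in this example. A minimal hedged sketch, assuming it only maps the averaged horizontal flow dx against the threshold t to a label:

def movement_check(dx, t):
    # hypothetical helper: classify mean horizontal optical flow
    if dx > t:
        return "Moving right"
    elif dx < -t:
        return "Moving left"
    return "Still"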
Example #6
from SimpleCV import Camera, Display, VideoStream
from multiprocessing import Process
from subprocess import call


def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'buffer.avi'

    # create the video stream for saving the video file
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    
    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))
    
    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    
    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        
        # write the frame to videostream
        vs.writeFrame(img)
        
        # show the image on the display
        img.save(disp)
    
    # Finished the acquisition of images, now transform the buffer into a film
    makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    makefilmProcess.start()


def saveFilmToDisk(bufferName, outname):
    # construct the encoding arguments
    params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(bufferName, outname)

    # run avconv to compress the video since ffmpeg is (going to be) deprecated
    call('avconv' + params, shell=True)
Example #7
from time import time
from SimpleCV import Camera, Display, VideoStream
import skimage.io


def main(cameraNumber, camWidth, camHeight, outputFile):

    BUFFER_NAME = 'cloud3.avi'
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)

    disp = Display((camWidth, camHeight))
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    # while the user does not press 'esc'
    start_time = time()
    count = 0
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        print type(img)

        skimage.io.push(img)

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

        current_time = time()
        if current_time-start_time>=5:
            outputFile = "testing_chunk_%d.mp4" % (count)
            print "Saving %s" % (outputFile)
            saveFilmToDisk(BUFFER_NAME, outputFile)
            start_time = time()
            count += 1
Example #9
    def run(self):
        m = alsaaudio.Mixer()  # defined alsaaudio.Mixer to change volume
        scale = (300, 250)  # increased from (200,150). works well
        d = Display(scale)
        cam = Camera()
        prev = cam.getImage().scale(scale[0], scale[1])
        sleep(0.5)
        buffer = 20
        count = 0
        prev_t = time()  # Note initial time
        while d.isNotDone():
            current = cam.getImage()
            current = current.scale(scale[0], scale[1])
            if (count < buffer):
                count = count + 1
            else:
                fs = current.findMotion(prev, method="LK")  # find motion
                # Tried BM, and LK, LK is better. need to learn more about LK
                if fs:  # if featureset found
                    dx = 0
                    dy = 0
                    for f in fs:
                        dx = dx + f.dx  # add all the optical flow detected
                        dy = dy + f.dy

                    dx = (dx / len(fs))  # Taking average
                    dy = (dy / len(fs))

                    prev = current
                    sleep(0.01)
                    current.save(d)

                    if dy > 2 or dy < -2:
                        vol = int(m.getvolume()[0])  # getting master volume
                        vol = vol + (-dy * 3)  # -dy raises volume on upward motion, lowers it on downward
                        if vol > 100:
                            vol = 100
                        elif vol < 0:
                            vol = 0
                        print vol
                        m.setvolume(int(vol))  # setting master volume

                    if dx > 3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:  # adding some time delay
                            self.play("next")  # changing next
                            prev_t = cur_t

                    if dx < -3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:
                            prev_t = cur_t
                            self.play("previous")  # changing previous
Example #10
from SimpleCV import Display
import time


def show_img(img):
    display = Display()
    img.save(display)

    # Wait for the user to close the window or right-click to break out of it.
    while display.isNotDone():
        try:
            time.sleep(0.1)  # avoid busy-waiting
        except KeyboardInterrupt:
            display.done = True
        if display.mouseRight:
            display.done = True
    display.quit()
Example #11
def interactiveTranslation():
	cam = Camera()
	disp = Display()
	current = " "
	while disp.isNotDone():
		image = cam.getImage()
		if disp.mouseLeft: break
		if disp.mouseRight:
			text = image.readText()
			text = cleanText(text)
			translated = trans.translate(text, langpair)
			if translated: current = translated
		image.drawText(current, 0, 0, color=Color.BLACK, fontsize=40)
		image.save(disp)
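
cleanText, trans, and langpair come from outside this snippet. A minimal hedged sketch of cleanText, assuming it only needs to tidy OCR output before translation:

def cleanText(text):
    # hypothetical helper: drop non-alphanumeric OCR noise, collapse whitespace
    text = ''.join(ch for ch in text if ch.isalnum() or ch.isspace())
    return ' '.join(text.split())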
Example #12
from datetime import datetime
from glob import glob
from SimpleCV import Display, Image
import os
import re
import time


def Run(cmdPipe):
    steadyStateFPS = 10
    desiredBuffer = 60*60  # one hour of footage: 60 seconds * 60 minutes
    numberOfFrames = steadyStateFPS*desiredBuffer
    fmt = '%Y-%m-%d %H:%M:%S'

    disp = Display()

    filelist = []
    frameCounter = 101
    sleepTime = .1

    while disp.isNotDone():
        # check command
        if cmdPipe.poll():
            cmd = cmdPipe.recv()
            if cmd=='shutdown':
                print('player', 0, "Shutting down.")
                break

        if frameCounter > 100 or len(filelist) == 0:
            frameCounter = 0
            filelist = glob("images/*.jpg")
            if len(filelist)>numberOfFrames:
                sleepTime = 1.0/steadyStateFPS
                print("player", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))
            else:
                sleepTime = (1.0/steadyStateFPS)+.01
                print("player", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))


        filename = filelist.pop(0)
        img = Image(filename)
        matchObj = re.search(r'[0-9- :]+', filename)

        d1_ts = time.mktime(datetime.strptime(matchObj.group(), fmt).timetuple())
        d2_ts = time.mktime(datetime.utcnow().timetuple())
        offset = int(d1_ts-d2_ts)/60
        img.drawText(str(offset),  x=600, y=470)
        img.save(disp)
        os.remove(filename)
        frameCounter = frameCounter+1
        time.sleep(sleepTime)
Example #13
def main():
    global ON_CIRCLE

    colour = Color.RED
    cam  = Camera()
    disp = Display()
    obj_x = 150
    obj_y = 75
    radius = 25
    normaldisplay = True
    while disp.isNotDone():
        if disp.mouseRight:
            normaldisplay = not(normaldisplay)
            print "Display Mode:", "Normal" if normaldisplay else "Segmented"

        img = cam.getImage()
        img = img.scale(0.5).flipHorizontal()
        dist = img.colorDistance(Color.BLACK).dilate(2)
        img.dl().circle((obj_x, obj_y), radius, colour, filled = True)
        segmented = dist.stretch(200,255)
        palm = img.findHaarFeatures('/home/malithsen/downloads/palm.xml')
        fist = img.findHaarFeatures('/home/malithsen/downloads/agest.xml')
        if palm:
            # palm = palm.sortArea()
            palm = palm[-1]
            colour = parm_on_obj(obj_x, obj_y, radius, palm)
            palm.draw()
        elif fist:
            # fist = fist.sortArea()
            fist = fist[-1]
            fist.draw()
            if ON_CIRCLE:
                colour = Color.GREEN
                obj_x, obj_y = fist.x, fist.y
        if normaldisplay:
            img.show()
        else:
            segmented.show()
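
ON_CIRCLE and parm_on_obj are defined outside this snippet. A hedged sketch, assuming parm_on_obj is a hit test that flags when the detected palm covers the circle:

import math
from SimpleCV import Color

ON_CIRCLE = False

def parm_on_obj(obj_x, obj_y, radius, palm):
    # hypothetical hit test: palm centre inside the circle marks it as grabbed
    global ON_CIRCLE
    ON_CIRCLE = math.hypot(palm.x - obj_x, palm.y - obj_y) < radius
    return Color.BLUE if ON_CIRCLE else Color.RED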
Example #14
def record(filename):
    from SimpleCV import Camera, Display
    import os
    import time
    neg_dir = "rawdata/%s" % filename
    if not os.path.exists(neg_dir):
        os.makedirs(neg_dir)
    cam = Camera()
    dis = Display()
    time.sleep(2)
    targetFps = 15.0
    fps = targetFps
    sleepTime = 1/targetFps
    start = time.time()
    prevTime = None
    count = 0
    try:
        print "Recording... [keyboard interrupt to quit]"
        while dis.isNotDone():
            img = cam.getImage()
            img = scaleDown(img)
            if fps > targetFps + .5:
                sleepTime += 0.005
            elif fps < targetFps:
                sleepTime = max(sleepTime - 0.005, 0.01)
            if prevTime is not None:
                fps = 1.0 / (time.time() - prevTime)
            prevTime = time.time()
            img.save("%s/%05d.jpg" % (neg_dir, count + 1600))
            count += 1
            img.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
            img.save(dis)
            if dis.mouseRight:
                dis.quit()
            time.sleep(sleepTime)

    except KeyboardInterrupt:
        print "Done recording"
Example #15
import sys
import time
import ConfigParser
from SimpleCV import Display, ImageSet


def main():
    # Get file name
    if len(sys.argv) < 2:
        print "<%s> usage : <%s> <directory name>" % (sys.argv[0], sys.argv[0])
        sys.exit()
    else:
        filename = sys.argv[1]

    config = ConfigParser.RawConfigParser()
    try:
        config.readfp(open("./experiments/" + filename + "/config.ini"))
    except IOError:
        print "Error: can't find file or read data"
        sys.exit()

    # Display
    display = Display()

    # Set of slide images
    slideSet = ImageSet("./experiments/" + filename)

    lineList = config.items('sequence')
    length = len(lineList)
    sleepList = list()

    # Sleep times
    for line in lineList:
        sleepList.append(int(line[1]) / 1000.0)  # config values are in milliseconds

    # Main loop    
    while display.isNotDone():
        for index, img in enumerate(slideSet):
            img.show()
            time.sleep(sleepList[index])

    sys.exit()
Example #16
# coding: utf-8

# # Hello World


from SimpleCV import Camera,Color,Display,Image
camera = Camera()
disp = Display()
while disp.isNotDone():
    image = camera.getImage()
    image.save(disp)
print("Done")
exit()


# # Detect Yellow Object


from SimpleCV import Camera,Color,Display,Image
camera = Camera()
disp = Display()
while disp.isNotDone():
    image = camera.getImage()
    yellow = image.colorDistance(Color.YELLOW).binarize(140).invert()
    onlyYellow = image-yellow
    onlyYellow.save(disp)
    
print("Done")
Example #17
def track(self):
  print "Press right mouse button to pause or play"
  print "Use left mouse button to select target"
  print "Target color must be different from background"
  print "Target must have width larger than height"
  print "Target can be upside down"

  #Parameters
  isUDPConnection = False # Currently switched manually in the code
  display = True
  displayDebug = True
  useBasemap = False
  maxRelativeMotionPerFrame = 2 # How much the target can move between two successive frames
  pixelPerRadians = 320
  radius = pixelPerRadians
  referenceImage = '../ObjectTracking/kite_detail.jpg'
  scaleFactor = 0.5
  isVirtualCamera = True
  useHDF5 = False

  # Open reference image: this is used at initialisation
  target_detail = Image(referenceImage)

  # Get RGB color palette of target (was found to work better than using hue)
  pal = target_detail.getPalette(bins = 2, hue = False)

  # Open video to analyse or live stream
  #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
  if isVirtualCamera:
    #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
    #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
    #cam = VirtualCamera('output.avi', 'video')
    cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv','video')
    virtualCameraFPS = 25
  else:
    cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')#640 * 480
    #cam = Camera() 

  # Get a sample image to initialize the display at the same size
  img = cam.getImage().scale(scaleFactor)
  print img.width, img.height
  # Create a pygame display
  if display:
    if img.width > img.height:
      disp = Display((27*640/10, 25*400/10)) #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
    else:
      disp = Display((810, 1080))
  #js = JpegStreamer()



  # Initialize variables
  previous_angle = 0 # target has to be upright when starting. Target width has to be larger than target height.
  previous_coord_px = (0, 0) # Initialized to top left corner, which always exists
  previous_dCoord = previous_coord_px
  previous_dAngle = previous_angle
  angles = []
  coords_px = []
  coord_px = [0, 0]
  angle = 0
  target_elevations = []
  target_bearings = []
  times = []
  wasTargetFoundInPreviousFrame = False
  i_frame = 0
  isPaused = False
  selectionInProgress = False
  th = [100, 100, 100]
  skycolor = Color.BLUE
  timeLastTarget = 0

  # Prepare recording
  recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_")+ 'simpleTrack'
  if useHDF5:
    try:
      os.remove(recordFilename + '.hdf5') 
    except:
      print('Creating file ' + recordFilename + '.hdf5')
    """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
    h5py._errors.silence_errors()
    recordFile = h5py.File(recordFilename + '.hdf5', 'a') 
    hdfSize = 0    
    dset = recordFile.create_dataset('kite', (2,2), maxshape=(None,7))
    imset = recordFile.create_dataset('image', (2,img.width,img.height,3 ), maxshape=(None, img.width, img.height, 3))
  else:
    try:
      os.remove(recordFilename + '.csv')   
    except:
      print('Creating file ' + recordFilename + '.csv') 
    recordFile = file(recordFilename + '.csv', 'a')
    csv_writer = csv.writer(recordFile)
    csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)', 'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'])

  # Launch a thread to get UDP message with orientation of the camera
  mobile = mobileState.mobileState()
  if isUDPConnection:
    a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
    a.start()

  # Loop while not canceled by user
  t0 = time.time()
  previousTime = t0
  while not(display) or disp.isNotDone():
    t = time.time()
    deltaT = (t-previousTime)
    FPS = 1.0/deltaT
    #print 'FPS =', FPS
    if isVirtualCamera:
      deltaT = 1.0/virtualCameraFPS
    previousTime = t
    i_frame = i_frame + 1
    timestamp = datetime.datetime.utcnow()

    # Receive orientation of the camera
    if isUDPConnection:
      mobile.computeRPY([2, 0, 1], [-1, 1, 1])
    ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
            [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

    if useBasemap:
    # Warning this really slows down the computation
      m = Basemap(width=img.width, height=img.height, projection='aeqd',
            lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw), rsphere = radius)

    # Get an image from camera
    if not isPaused:
      img = cam.getImage()
      img = img.resize(int(scaleFactor*img.width), int(scaleFactor*img.height))
    
    if display:
      # Pause image when right button is pressed
      dwn = disp.rightButtonDownPosition()
      if dwn is not None:
        isPaused = not(isPaused)
        dwn = None

    if display:
    # Create a layer to enable user to make a selection of the target
      selectionLayer = DrawingLayer((img.width, img.height))

    if img:
      if display: 
      # Create a new layer to host information retrieved from video
        layer = DrawingLayer((img.width, img.height))
          # Selection is a rectangle drawn while holding mouse left button down
        if disp.leftButtonDown:
          corner1 = (disp.mouseX, disp.mouseY)
          selectionInProgress = True
        if selectionInProgress:
          corner2 = (disp.mouseX, disp.mouseY)
          bb = disp.pointsToBoundingBox(corner1, corner2)# Display the temporary selection
          if disp.leftButtonUp: # User has finished his selection
            selectionInProgress = False
            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
            if selection is not None:
              # The 3 main colors in the area selected are considered.
              # Note that the selection should be included in the target and not contain background
              try:
                selection.save('../ObjectTracking/'+ 'kite_detail_tmp.jpg')
                img0 = Image("kite_detail_tmp.jpg") # For unknown reason I have to reload the image...
                pal = img0.getPalette(bins = 2, hue = False)
              except: # getPalette is sometimes bugging and raising LinalgError because matrix not positive definite
                pal = pal
              wasTargetFoundInPreviousFrame = False
              previous_coord_px = (bb[0] + bb[2]/2, bb[1] + bb[3]/2)
          if corner1 != corner2:
            selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), width = 5, color = Color.YELLOW)
                       
   
      # If the target was already found, we can save computation time by
      # reducing the Region Of Interest around predicted position
      if wasTargetFoundInPreviousFrame:
        ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                  max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
        ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                             maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                 centered = False)
        if display :
      # Draw the rectangle corresponding to the ROI on the complete image
          layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                   previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                 color = Color.GREEN, width = 2)
      else:
        # Search on the whole image if no clue of where is the target
        ROITopLeftCorner = (0, 0)
        ROI = img

        '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''
    
      # Find sky color
      sky = (img-img.binarize()).findBlobs(minsize=10000)
      if sky:
        skycolor = sky[0].meanColor()
      # Option 3
      target_img = ROI-ROI # Black image
              
      # Loop through palette of target colors
      if display and displayDebug:
        decomposition = []
      i_col = 0
      for col in pal: 
        c = tuple([int(col[i]) for i in range(0,3)])
            # Search the target based on color
        ROI.save('../ObjectTracking/'+ 'ROI_tmp.jpg')
        img1 = Image('../ObjectTracking/'+ 'ROI_tmp.jpg')
        filter_img = img1.colorDistance(color = c)
        h = filter_img.histogram(numbins=256)
        cs = np.cumsum(h)
        thmax = np.argmin(abs(cs - 0.02*img.width*img.height)) # threshold putting about 2% of the pixels in the expected color
        thmin = np.argmin(abs(cs - 0.005*img.width*img.height)) # threshold putting about 0.5% of the pixels in the expected color
        if thmin==thmax:
          newth = thmin
        else:
          newth = np.argmin(h[thmin:thmax]) + thmin
        alpha = 0.5
        th[i_col] = alpha*th[i_col]+(1-alpha)*newth
        filter_img = filter_img.threshold(max(40,min(200,th[i_col]))).invert()
        target_img = target_img + filter_img
        #print th
        i_col = i_col + 1
        if display and displayDebug:
          [R, G, B] = filter_img.splitChannels()
          white = (R-R).invert()
          r = R*1.0/255*c[0]
          g = G*1.0/255*c[1]
          b = B*1.0/255*c[2]
          tmp = white.mergeChannels(r, g, b)
          decomposition.append(tmp)

      # Get a black background with white target foreground
      target_img = target_img.threshold(150)
  
      target_img = target_img - ROI.colorDistance(color = skycolor).threshold(80).invert()

      if display and displayDebug:
        small_ini = target_img.resize(int(img.width/(len(pal)+1)),  int(img.height/(len(pal)+1)))
        for tmp in decomposition:
          small_ini = small_ini.sideBySide(tmp.resize(int(img.width/(len(pal)+1)), int(img.height/(len(pal)+1))), side = 'bottom')
        small_ini = small_ini.adaptiveScale((int(img.width), int(img.height)))
        toDisplay = img.sideBySide(small_ini)
      else:
        toDisplay = img
          #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

      # Search for binary large objects representing potential target
      target = target_img.findBlobs(minsize = 500)
      
      if target: # If a target was found
      
        if wasTargetFoundInPreviousFrame:
          predictedTargetPosition = (width*maxRelativeMotionPerFrame/2, height*maxRelativeMotionPerFrame/2) # Target will most likely be close to the center of the ROI   
        else:
          predictedTargetPosition = previous_coord_px
              # If there are several targets in the image, take the one which is the closest of the predicted position
        target = target.sortDistance(predictedTargetPosition)

        # Get target coordinates according to minimal bounding rectangle or centroid.
        coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY()))
        coord_px = ROITopLeftCorner + np.array(target[0].centroid())

        # Rotate the coordinates of roll angle around the middle of the screen
        rot_coord_px = np.dot(ctm, coord_px - np.array([img.width/2, img.height/2])) + np.array([img.width/2, img.height/2])
        if useBasemap:
          coord = sp.deg2rad(m(rot_coord_px[0], img.height-rot_coord_px[1], inverse = True))
        else:
          coord = localProjection(rot_coord_px[0]-img.width/2, img.height/2-rot_coord_px[1], radius, mobile.yaw, mobile.pitch, inverse = True)
        target_bearing, target_elevation = coord

      # Get minimum bounding rectangle for display purpose
        minR = ROITopLeftCorner + np.array(target[0].minRect())

        contours = target[0].contour()

        contours = [ ROITopLeftCorner + np.array(contour) for contour in contours]


  
        # Get target features
        angle = sp.deg2rad(target[0].angle()) + mobile.roll
        angle =  sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle)))
        width = target[0].width()
        height = target[0].height()

        # Check if the kite is upside down
        # First rotate the kite
        ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
            [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
        rotated_contours = [np.dot(ctm2, contour-coordMinRect) for contour in contours]  
        y = [-tmp[1] for tmp in rotated_contours]
        itop = np.argmax(y) # Then looks at the points at the top
        ibottom = np.argmin(y) # and the point at the bottom
        # The point the most excentered is at the bottom
        if abs(rotated_contours[itop][0])>abs(rotated_contours[ibottom][0]):
          isInverted = True
        else:
          isInverted = False    
        
        if isInverted:
            angle = angle + sp.pi    

        
                # Filter the data
        alpha = 1-sp.exp(-deltaT/self.filterTimeConstant)
        if not(isPaused):
          dCoord = np.array(previous_dCoord)*(1-alpha) + alpha*(np.array(coord_px) - previous_coord_px) # related to the speed only if cam is fixed
          dAngle = np.array(previous_dAngle)*(1-alpha) + alpha*(np.array(angle) - previous_angle)
        else : 
          dCoord = np.array([0, 0])
          dAngle = np.array([0]) 
#print coord_px, angle, width, height, dCoord
    
        # Record important data
        times.append(timestamp)
        coords_px.append(coord_px)
        angles.append(angle)
        target_elevations.append(target_elevation)
        target_bearings.append(target_bearing)
        
        # Export data to controller
        self.elevation = target_elevation
        self.bearing = target_bearing
        self.orientation = angle
        dt = time.time()-timeLastTarget
        self.ROT = dAngle/dt
        self.lastUpdateTime = t
        
        # Save for initialisation of next step
        previous_dCoord = dCoord
        previous_angle = angle
        previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
        wasTargetFoundInPreviousFrame = True
        timeLastTarget = time.time()
      
      else:
        wasTargetFoundInPreviousFrame = False
        
      if useHDF5:
        hdfSize = hdfSize+1
        dset.resize((hdfSize, 7))
        imset.resize((hdfSize, img.width, img.height, 3))
        dset[hdfSize-1,:] = [time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT]
        imset[hdfSize-1,:,:,:] = img.getNumpy()
        recordFile.flush()
      else:
        csv_writer.writerow([time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT])



      if display :
        if target:
        # Add target features to layer
        # Minimal rectange and its center in RED
          layer.polygon(minR[(0, 1, 3, 2), :], color = Color.RED, width = 5)
          layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10, filled = True, color = Color.RED)
        
                # Target contour and centroid in BLUE
          layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled = True, color = Color.BLUE)
          layer.polygon(contours, color = Color.BLUE, width = 5)

        # Speed vector in BLACK
          layer.line((int(coord_px[0]), int(coord_px[1])), (int(coord_px[0]+20*dCoord[0]), int(coord_px[1]+20*dCoord[1])), width = 3)
        
        # Line giving angle
          layer.line((int(coord_px[0]+200*sp.cos(angle)), int(coord_px[1]+200*sp.sin(angle))), (int(coord_px[0]-200*sp.cos(angle)), int(coord_px[1]-200*sp.sin(angle))), color = Color.RED)

        # Line giving rate of turn
        #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))
            
      # Add the layer to the raw image 
        toDisplay.addDrawingLayer(layer)
        toDisplay.addDrawingLayer(selectionLayer)

      # Add time metadata
        toDisplay.drawText(str(i_frame)+" "+ str(timestamp), x=0, y=0, fontsize=20)

      # Add Line giving horizon
          #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

      # Plot parallels
        for lat in range(-90, 90, 15):
          r = range(0, 361, 10)
          if useBasemap:
            # \todo improve for high roll
            l = m (r, [lat]*len(r))
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l = localProjection(sp.deg2rad(r), \
                    sp.deg2rad([lat]*len(r)), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Plot meridians
        for lon in range(0, 360, 15):
          r = range(-90, 91, 10)
          if useBasemap:
        # \todo improve for high roll
            l = m ([lon]*len(r), r)
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l= localProjection(sp.deg2rad([lon]*len(r)), \
                    sp.deg2rad(r), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Text giving bearing
      # \todo improve for high roll
        for bearing_deg in range(0, 360, 30):
          l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(bearing_deg), ( img.width/2+int(l[0]), img.height-20), color = Color.RED)

      # Text giving elevation
      # \todo improve for high roll
        for elevation_deg in range(-60, 91, 30):
          l = localProjection(0, sp.deg2rad(elevation_deg), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(elevation_deg), ( img.width/2 ,img.height/2-int(l[1])), color = Color.RED)

        #toDisplay.save(js)
        toDisplay.save(disp)
    if display : 
      toDisplay.removeDrawingLayer(1)
      toDisplay.removeDrawingLayer(0)
  recordFile.close()
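
track() leans on several helpers defined elsewhere in the robokite project (localProjection, unwrap180, isPixelInImage, mobileState). Hedged sketches of the two small geometric ones, inferred from their call sites:

def isPixelInImage(pt, img):
    # bounds test used when clipping the parallels/meridians overlay
    x, y = pt
    return 0 <= x < img.width and 0 <= y < img.height

def unwrap180(angle_deg, previous_deg):
    # assumed behaviour: shift by 180 deg steps so the angle stays near the previous estimate
    while angle_deg - previous_deg > 90:
        angle_deg -= 180
    while angle_deg - previous_deg < -90:
        angle_deg += 180
    return angle_deg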
Example #18
# Check out the video here:
# http://www.youtube.com/watch?v=cAL6u6Q0Xuc
from SimpleCV import Image, Display
import time
 
#webcam-URLs
marktplatz = 'http://www.tuebingen.de/camera/webcam...'
marktgasse = 'http://leuchtengalerie.com/webcam/leu...'
neckarbruecke1 = 'http://www.tagblatt.de/cms_media/webc...'
neckarbruecke2 = 'http://tuebingen-info.de/fileadmin/we...'
 
display = Display((1240, 960))
 
counter = 0
 
while display.isNotDone():
    img1 = Image(marktplatz)
    img1 = img1.adaptiveScale((640, 480))
    img2 = Image(marktgasse)
    img2 = img2.adaptiveScale((640, 480))
    img3 = Image(neckarbruecke1)
    img3 = img3.adaptiveScale((640, 480))
    img4 = Image(neckarbruecke2)
    img4 = img4.adaptiveScale((640, 480))
    top = img1.sideBySide(img2)
    bottom = img3.sideBySide(img4)
    combined = top.sideBySide(bottom, side="bottom")
    combined.save(display)
    combined.save("webcam" + str(counter).zfill(4) + ".jpg")
    time.sleep(60)
    counter = counter + 1
Example #19
from SimpleCV import Image, Display, Color, Camera
cam = Camera(0)  # Get the first camera
disp = Display((640, 480))  # Create a 640x480 display
while (disp.isNotDone()):  # While we don't exit the display
    img = cam.getImage().binarize()  # Get an image and make it black and white
    # Draw the text "Hello World" at (40,40) in red.
    img.drawText("Hello World!", 40, 40, fontsize=60, color=Color.RED)
    img.save(disp)  # Save it to the screen
Example #20
from SimpleCV import Display, Camera, Image, DrawingLayer, VirtualCamera
disp = Display((600,800))
#cam = Camera()
cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteTest.avi','video')
isPaused = False
updateSelection = False
while(disp.isNotDone()):  
  if not isPaused:
    img_flip = cam.getImage().flipHorizontal()
    img = img_flip.edges(150, 100).dilate()
  if disp.rightButtonDown:
    isPaused = not(isPaused)
  selectionLayer = DrawingLayer((img.width, img.height))
  if disp.leftButtonDown:
    corner1 = (disp.mouseX, disp.mouseY)
    updateSelection = True
  if updateSelection:
    corner2 = (disp.mouseX, disp.mouseY)
    bb = disp.pointsToBoundingBox(corner1, corner2)
    if disp.leftButtonUp: 
      updateSelection = False
    if corner1 != corner2:
      selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]))
  img.addDrawingLayer(selectionLayer)
  img.save(disp)
  img.removeDrawingLayer(0)

Example #21
from SimpleCV import Color, Camera, Display
import os
import webbrowser

cam = Camera()  #starts the camera
display = Display()

while (display.isNotDone()):

    img = cam.getImage()  #gets image from the camera

    barcode = img.findBarcode()  #finds barcode data from image
    if (barcode is not None):  #if there is some data processed
        barcode = barcode[0]
        result = str(barcode.data)
        print result  #prints result of barcode in python shell
        webbrowser.open(result)  #launches a browser to the scanned link. Returns error if not a URL
        barcode = []  #reset barcode data to empty set
        img.save(display)  #shows the image on the screen
Example #22
from SimpleCV import ColorSegmentation, Image, Camera, VirtualCamera, Display, Color

# Open reference video
cam = VirtualCamera(
    '../../Recording/Videos/kiteFlying from Zenith Wind Power-jn9RrUCiWKM.mp4',
    'video')
# Select reference image
img = cam.getFrame(50)
modelImage = img.crop(255, 180, 70, 20)
modelImage = Image('../kite_detail.jpg')
ts = []
disp = Display()
for i in range(0, 50):
    img = cam.getImage()
while (disp.isNotDone()):
    img = cam.getImage()
    bb = (255, 180, 70, 20)
    ts = img.track("camshift", ts, modelImage, bb, num_frames=1)
    modelImage = Image('../kite_detail.jpg')
    # now here in first loop iteration since ts is empty,
    # img0 and bb will be considered.
    # New tracking object will be created and added in ts (TrackSet)
    # After first iteration, ts is not empty and hence the previous
    # image frames and bounding box will be taken from ts and img0
    # and bb will be ignored.
    ts.draw()
    ts.drawBB()
    ts.showCoordinates()
    img.show()
Example #24
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT

from SimpleCV import Camera, Image, VirtualCamera, Display, DrawingLayer, Color, JpegStreamCamera, JpegStreamer
import scipy as sp
import numpy as np

cam = VirtualCamera(
    '../Recording/Videos/Kite with leds in night-LgvpmMt-SA0.mp4', 'video')
img = cam.getImage()
disp = Display((810, 1080))
display = True
predictedTargetPosition = (img.size()[0] / 2, img.size()[1] / 2)
while (not (display) or disp.isNotDone()) and img.size() != (0, 0):
    img = cam.getImage()
    if img.size() != (0, 0):
        if img:
            if display:
                # Create a new layer to host information retrieved from video
                layer = DrawingLayer((img.width, img.height))
            maskred = img.colorDistance(color=(200, 50,
                                               70)).invert().threshold(170)
            imgred = (img * (maskred / 255)).dilate(3)
            targetred = imgred.findBlobs(maxsize=200)
            maskwhite = img.colorDistance(color=(200, 200,
                                                 200)).invert().threshold(230)
            imgwhite = (img * (maskwhite / 255)).dilate(3)
            targetwhite = imgwhite.findBlobs(maxsize=200)
Example #25
from SimpleCV import Image, Color, Display
import time

car_in_lot = Image("parking-car.png")

car = car_in_lot.crop(470,200,200,200)

yellow_car = car.colorDistance(Color.YELLOW)

only_car = car - yellow_car
only_car = only_car.toRGB()

displayObject = Display()

print only_car.meanColor()

# Show the results
only_car.save(displayObject)

while displayObject.isNotDone():
    time.sleep(0.5)
Example #27
'''
This program super imposes the camera onto the television in the picture
'''
print __doc__

from SimpleCV import Camera, Image, Display

tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
tv_mask = Image(tv_original.size()).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()
d = Display(tv.size())

while d.isNotDone():
    bwimage = c.getImage().grayscale().resize(tv.width, tv.height)
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.save(d)
Example #28
import cv2
from SimpleCV import Display, Image

# assumed setup for this excerpt: an OpenCV capture source and detector params
cap = cv2.VideoCapture(0)
params = cv2.SimpleBlobDetector_Params()

params.filterByConvexity = False
params.filterByColor = False
params.filterByCircularity = False
params.filterByArea = True
params.minArea = 5.0
params.maxArea = 200.0
params.minThreshold = 15
params.maxThreshold = 255

b = cv2.SimpleBlobDetector(params)

display = Display()
counter = 0
box_dim = 48

while display.isNotDone():

    # Capture frame-by-frame
    ret, frame = cap.read()
    blob = b.detect(frame)

    fcount = 0
    for beest in blob:

        if fcount > 100:
            continue
        tmpImg = Image(frame, cv2image=True).crop(int(beest.pt[0]),
                                                  int(beest.pt[1]),
                                                  box_dim,
                                                  box_dim,
                                                  centered=True)
Example #29
# 
# Released under the BSD license. See LICENSE file for details.
"""
This program does face detection and pixelates the detected face.
"""
print __doc__

from SimpleCV import Camera, Display, HaarCascade

# Initialize the camera
cam = Camera()

# Create the display to show the image
display = Display()

# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")

# Loop forever
while display.isNotDone():
    # Get image, flip it so it looks mirrored, scale to speed things up
    img = cam.getImage().flipHorizontal().scale(0.5)
    # Load in trained face file
    faces = img.findHaarFeatures(haarcascade)
    # Pixelize the detected face
    if faces:
        bb = faces[-1].boundingBox()
        img = img.pixelize(10, region=(bb[0], bb[1], bb[2], bb[3]))
    # Display the image
    img.save(display)
Example #31
    GPIO.setup(7, GPIO.OUT)
    GPIO.output(ledPin, True)

    # start OSC
    osc = OSC.OSCClient()
    osc.connect((send_address, send_port))
    print "Sending OSC to", send_address, "port:", send_port

    cam = Camera()  #starts the camera
    
    prevScan = None
    while(1):
        img = cam.getImage() #gets image from the camera
        if args.display:
            img.save(display)
            display.isNotDone()
        barcode = img.findBarcode() #finds barcode data from image
        if barcode:
            barcode = barcode[0] 
            result = str(barcode.data)
            accountID = result[-4:]
            
            # reset the 2 second delay timer 
            timer = time.time() 
            
            # special command to shutdown the pi 
            if accountID == "0000":
            	os.system("sudo shutdown now -h")

            if accountID == "0001":
                os.system("sudo reboot now")
Example #32
from SimpleCV import Image, Display, Camera, DrawingLayer
import time
print "Select one of the following tracking applications: "
print "\t1.- Detection and tracking of a white rectangle"  ## Menu for choosing the desired application
print "\t2.- Detection and tracking of several white rectangles"
print "\t3.- Medical application"
print "\t4.- Exit"

while True:
    opc = raw_input("Enter the number of an option: ")
    if opc == "1":
        d = Display()  ## Keep a handle on the display so the while loop below can poll it
        c = Camera()  ## Camera handle
        while d.isNotDone():  ## Loop until the user closes the display window
            img = c.getImage()  ## Capture an image from the camera
            dis = img.colorDistance((0, 0, 0))  ## Color distance of every pixel from black
            seg = dis.stretch(220, 255)  ## Stretch the histogram to keep only the pixels of the object of interest and improve contrast
            blobs = seg.findBlobs()  ## Look for groups of pixels
            if blobs:
                blobs.sortArea()  ## Sort the blobs by area, ascending
                rect = blobs.filter([b.isRectangle(0.15) for b in blobs])  ## Keep the blobs that resemble a rectangle, with some tolerance
                if rect:
                    fl = DrawingLayer((img.width, img.height))  ## Layer on which to draw the rectangles
                    fbd = (rect[-1].width(), rect[-1].height())  ## Dimensions of the largest rectangle
                    tlc = rect[-1].topLeftCorner()  ## Top-left corner of that rectangle
                    fb = fl.rectangle(tlc, fbd, (0, 200, 0), 3)  ## Draw the rectangle from the data above
                    img.addDrawingLayer(fl)  ## Add the layer on top of the original image
                    img.applyLayers()
            img.show()  ## Show the image in "real" time
    elif opc == "2":
        d = Display()  ## Keep a handle on the display so the while loop below can poll it
Example #33
#!/usr/bin/env python

from SimpleCV import Color,Display,Image

display = Display() 

while(display.isNotDone()):
 
    img = Image('example.jpg')
    barcode = img.findBarcode() #finds barcode data from image

    if(barcode is not None): #if there is some data processed
        barcode = barcode[0] 
        result = str(barcode.data)
        print result #prints result of barcode in python shell
        barcode = [] #reset barcode data to empty set

    img.save(display) #shows the image on the screen
Example #34
def doface(aa, f1, cc, f2, ee):
    from picamera import PiCamera  # Raspberry Pi camera module
    from SimpleCV import Image, Display

    camera = PiCamera()
    #imgg = Image('img1.jpg')
    #disp = Display(imgg.size())
    dsize = (640, 480)
    disp = Display(dsize)
    #drawing = Image('mustache.png')
    #maskk = drawing.createAlphaMask()

    #camera.start_preview()
    #sleep(2)

    #['right_eye.xml', 'lefteye.xml', 'face3.xml', 'glasses.xml',
    # 'right_ear.xml', 'fullbody.xml', 'profile.xml', 'upper_body2.xml',
    # 'face.xml', 'face4.xml', 'two_eyes_big.xml', 'right_eye2.xml',
    # 'left_ear.xml', 'nose.xml', 'upper_body.xml', 'left_eye2.xml',
    # 'two_eyes_small.xml', 'face2.xml', 'eye.xml', 'face_cv2.xml',
    # 'mouth.xml', 'lower_body.xml']

    while disp.isNotDone():
        camera.capture('img2.png')
        img = Image('img2.png')
        img = img.resize(640, 480)
        #whatt = img.listHaarFeatures()
        faces = img.findHaarFeatures('face.xml')
        print 'faces:', faces
        if faces:  #is not None:
            face = faces.sortArea()[-1]
            #print 'size:',face.size
            if aa == 'none':
                break
            elif aa == 'block':
                face.draw()
            else:
                f0draw = aa + '.png'
                draw0 = Image('use/' + f0draw)
                face = face.blit(draw0, pos=(100, 200))
            #bigFace = face[-1]

            myface = face.crop()
            if f1 and cc is not None:
                feature01 = f1 + '.xml'
                f1draw = cc + '.png'
                draw1 = Image('/home/pi/cv/use/' + f1draw)

                feature1s = myface.findHaarFeatures(feature01)
                if feature1s is not None:
                    feature1 = feature1s.sortArea()[-1]
                    xpos1 = face.points[0][0] + feature1.x - (draw1.width / 2)
                    ypos1 = face.points[0][
                        1] + feature1.y  #+ (2*draw1.height/3)
                    #pos = (xmust,ymust)
                    img = img.blit(draw1, pos=(xpos1, ypos1))  #mask=maskk)

            if f2 and ee is not None:
                feature02 = f2 + '.xml'
                f2draw = ee + '.png'
                draw2 = Image('/home/pi/cv/use/' + f2draw)

                feature2s = myface.findHaarFeatures(feature02)
                if feature2s is not None:
                    feature2 = feature2s.sortArea()[-1]
                    xpos2 = face.points[0][0] + feature2.x - (draw2.width / 2)
                    ypos2 = face.points[0][
                        1] + feature2.y  #+ (2*draw2.height/3)
                    #pos = (xmust,ymust)
                    img = img.blit(draw2, pos=(xpos2, ypos2))  #mask=maskk)

            img.save(disp)
        else:
            print 'no face~~'
Example #35
from SimpleCV import ColorSegmentation, Display, Image, ImageSet

# redBlock and greenBlock are assumed reference swatches, mirroring blueBlock
redBlock = Image('redblock.png')
greenBlock = Image('greenblock.png')
blueBlock = Image('blueblock.png')

cs = ColorSegmentation()
cs.addToModel(redBlock)
cs.addToModel(greenBlock)
cs.addToModel(blueBlock)

cards = ImageSet('cards')
card = None

disp = Display((320, 240))

score = 0
isPrimary = False

while (cards or card) and disp.isNotDone():

    if card is None:
        card = cards.pop()

        cs.addImage(card)
        res = cs.getSegmentedImage()

        color = res.meanColor()
        if (color[0] < 254) and (color[1] < 254) and (color[2] < 254):
            isPrimary = True
        else:
            isPrimary = False

        card.drawText('Click left if primary, otherwise right', 0, 0)
        card.drawText(str(score) + ' correct answers', 0, 210)
Example #36
    def track(self):
        print "Press right mouse button to pause or play"
        print "Use left mouse button to select target"
        print "Target color must be different from background"
        print "Target must have width larger than height"
        print "Target can be upside down"

        #Parameters
        isUDPConnection = False  # Currently switched manually in the code
        display = True
        displayDebug = True
        useBasemap = False
        maxRelativeMotionPerFrame = 2  # How much the target can move between two successive frames
        pixelPerRadians = 320
        radius = pixelPerRadians
        referenceImage = '../ObjectTracking/kite_detail.jpg'
        scaleFactor = 0.5
        isVirtualCamera = True
        useHDF5 = False

        # Open reference image: this is used at initialisation
        target_detail = Image(referenceImage)

        # Get RGB color palette of target (was found to work better than using hue)
        pal = target_detail.getPalette(bins=2, hue=False)

        # Open video to analyse or live stream
        #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
        if isVirtualCamera:
            #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
            #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
            #cam = VirtualCamera('output.avi', 'video')
            cam = VirtualCamera(
                '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
                'video')
            virtualCameraFPS = 25
        else:
            cam = JpegStreamCamera(
                'http://192.168.43.1:8080/videofeed')  #640 * 480
            #cam = Camera()

        # Get a sample image to initialize the display at the same size
        img = cam.getImage().scale(scaleFactor)
        print img.width, img.height
        # Create a pygame display
        if display:
            if img.width > img.height:
                disp = Display(
                    (27 * 640 / 10, 25 * 400 / 10)
                )  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
            else:
                disp = Display((810, 1080))
        #js = JpegStreamer()

        # Initialize variables
        previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
        previous_coord_px = (
            0, 0)  # Initialized to top left corner, which always exists
        previous_dCoord = previous_coord_px
        previous_dAngle = previous_angle
        angles = []
        coords_px = []
        coord_px = [0, 0]
        angle = 0
        target_elevations = []
        target_bearings = []
        times = []
        wasTargetFoundInPreviousFrame = False
        i_frame = 0
        isPaused = False
        selectionInProgress = False
        th = [100, 100, 100]
        skycolor = Color.BLUE
        timeLastTarget = 0

        # Prepare recording
        recordFilename = datetime.datetime.utcnow().strftime(
            "%Y%m%d_%Hh%M_") + 'simpleTrack'
        if useHDF5:
            try:
                os.remove(
                    os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'))
            except OSError:
                print('Creating file ' + recordFilename + '.hdf5')
            """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
            h5py._errors.silence_errors()
            recordFile = h5py.File(
                os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'),
                'a')
            hdfSize = 0
            dset = recordFile.create_dataset('kite', (2, 2),
                                             maxshape=(None, 7))
            imset = recordFile.create_dataset('image',
                                              (2, img.width, img.height, 3),
                                              maxshape=(None, img.width,
                                                        img.height, 3))
        else:
            try:
                os.remove(
                    os.path.join(os.getcwd(), 'log', recordFilename + '.csv'))
            except OSError:
                print('Creating file ' + recordFilename + '.csv')
            recordFile = open(
                os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
            csv_writer = csv.writer(recordFile)
            csv_writer.writerow([
                'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'
            ])

        # Launch a thread to get UDP message with orientation of the camera
        mobile = mobileState.mobileState()
        if isUDPConnection:
            mobile.open()
        # Loop while not canceled by user
        t0 = time.time()
        previousTime = t0
        while not (display) or disp.isNotDone():
            t = time.time()
            deltaT = (t - previousTime)
            FPS = 1.0 / deltaT
            #print 'FPS =', FPS
            if isVirtualCamera:
                deltaT = 1.0 / virtualCameraFPS
            previousTime = t
            i_frame = i_frame + 1
            timestamp = datetime.datetime.utcnow()

            # Receive orientation of the camera
            if isUDPConnection:
                mobile.computeRPY([2, 0, 1], [-1, 1, 1])
            ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
                    [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

            if useBasemap:
                # Warning this really slows down the computation
                m = Basemap(width=img.width,
                            height=img.height,
                            projection='aeqd',
                            lat_0=sp.rad2deg(mobile.pitch),
                            lon_0=sp.rad2deg(mobile.yaw),
                            rsphere=radius)

            # Get an image from camera
            if not isPaused:
                img = cam.getImage()
                img = img.resize(int(scaleFactor * img.width),
                                 int(scaleFactor * img.height))

            if display:
                # Pause image when right button is pressed
                dwn = disp.rightButtonDownPosition()
                if dwn is not None:
                    isPaused = not (isPaused)
                    dwn = None

            if display:
                # Create a layer to enable user to make a selection of the target
                selectionLayer = DrawingLayer((img.width, img.height))

            if img:
                if display:
                    # Create a new layer to host information retrieved from video
                    layer = DrawingLayer((img.width, img.height))
                    # Selection is a rectangle drawn while holding mouse left button down
                    if disp.leftButtonDown:
                        corner1 = (disp.mouseX, disp.mouseY)
                        selectionInProgress = True
                    if selectionInProgress:
                        corner2 = (disp.mouseX, disp.mouseY)
                        bb = disp.pointsToBoundingBox(
                            corner1,
                            corner2)  # Display the temporary selection
                        if disp.leftButtonUp:  # User has finished his selection
                            selectionInProgress = False
                            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                            if selection is not None:
                                # The 3 main colors in the area selected are considered.
                                # Note that the selection should be included in the target and not contain background
                                try:
                                    selection.save('../ObjectTracking/' +
                                                   'kite_detail_tmp.jpg')
                                    img0 = Image(
                                        "kite_detail_tmp.jpg"
                                    )  # For an unknown reason the image has to be reloaded...
                                    pal = img0.getPalette(bins=2, hue=False)
                                except:  # getPalette sometimes raises a LinAlgError (matrix not positive definite)
                                    pal = pal  # keep the previous palette
                                wasTargetFoundInPreviousFrame = False
                                previous_coord_px = (bb[0] + bb[2] / 2,
                                                     bb[1] + bb[3] / 2)
                        if corner1 != corner2:
                            selectionLayer.rectangle((bb[0], bb[1]),
                                                     (bb[2], bb[3]),
                                                     width=5,
                                                     color=Color.YELLOW)

                # If the target was already found, we can save computation time by
                # reducing the Region Of Interest around predicted position
                if wasTargetFoundInPreviousFrame:
                    ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                              max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
                    ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                                         maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                             centered = False)
                    if display:
                        # Draw the rectangle corresponding to the ROI on the complete image
                        layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                                 previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                              (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                               color = Color.GREEN, width = 2)
                else:
                    # Search the whole image if there is no clue where the target is
                    ROITopLeftCorner = (0, 0)
                    ROI = img
                    '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

                # Find sky color
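                # binarize() maps dark pixels to white, so subtracting it keeps
                # only the bright regions; the largest bright blob is taken as
                # sky, and its mean color is reused below to clean the mask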
                sky = (img - img.binarize()).findBlobs(minsize=10000)
                if sky:
                    skycolor = sky[0].meanColor()
                # Option 3
                target_img = ROI - ROI  # Black image

                # Loop through palette of target colors
                if display and displayDebug:
                    decomposition = []
                i_col = 0
                for col in pal:
                    c = tuple([int(col[i]) for i in range(0, 3)])
                    # Search the target based on color
                    ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                    img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                    filter_img = img1.colorDistance(color=c)
                    h = filter_img.histogram(numbins=256)
                    cs = np.cumsum(h)
                    thmax = np.argmin(
                        abs(cs - 0.02 * img.width * img.height)
                    )  # threshold below which 2% of the pixels are closest to the expected color
                    thmin = np.argmin(
                        abs(cs - 0.005 * img.width * img.height)
                    )  # threshold below which 0.5% of the pixels are closest to the expected color
                    if thmin == thmax:
                        newth = thmin
                    else:
                        newth = np.argmin(h[thmin:thmax]) + thmin
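                    # Exponentially smooth the per-color threshold over frames
                    # to avoid flicker from noisy histograms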
                    alpha = 0.5
                    th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                    filter_img = filter_img.threshold(
                        max(40, min(200, th[i_col]))).invert()
                    target_img = target_img + filter_img
                    #print th
                    i_col = i_col + 1
                    if display and displayDebug:
                        [R, G, B] = filter_img.splitChannels()
                        white = (R - R).invert()
                        r = R * 1.0 / 255 * c[0]
                        g = G * 1.0 / 255 * c[1]
                        b = B * 1.0 / 255 * c[2]
                        tmp = white.mergeChannels(r, g, b)
                        decomposition.append(tmp)

                # Get a black background with a white target foreground
                target_img = target_img.threshold(150)

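                # Remove pixels close to the sky color from the target mask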
                target_img = target_img - ROI.colorDistance(
                    color=skycolor).threshold(80).invert()

                if display and displayDebug:
                    small_ini = target_img.resize(
                        int(img.width / (len(pal) + 1)),
                        int(img.height / (len(pal) + 1)))
                    for tmp in decomposition:
                        small_ini = small_ini.sideBySide(tmp.resize(
                            int(img.width / (len(pal) + 1)),
                            int(img.height / (len(pal) + 1))),
                                                         side='bottom')
                    small_ini = small_ini.adaptiveScale(
                        (int(img.width), int(img.height)))
                    toDisplay = img.sideBySide(small_ini)
                else:
                    toDisplay = img
                    #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

                # Search for binary large objects representing potential target
                target = target_img.findBlobs(minsize=500)

                if target:  # If a target was found

                    if wasTargetFoundInPreviousFrame:
                        predictedTargetPosition = (
                            width * maxRelativeMotionPerFrame / 2,
                            height * maxRelativeMotionPerFrame / 2
                        )  # Target will most likely be close to the center of the ROI
                    else:
                        predictedTargetPosition = previous_coord_px
                        # If there are several targets in the image, take the one closest to the predicted position
                    target = target.sortDistance(predictedTargetPosition)

                    # Get target coordinates according to minimal bounding rectangle or centroid.
                    coordMinRect = ROITopLeftCorner + np.array(
                        (target[0].minRectX(), target[0].minRectY()))
                    coord_px = ROITopLeftCorner + np.array(
                        target[0].centroid())

                    # Rotate the coordinates of roll angle around the middle of the screen
                    rot_coord_px = np.dot(
                        ctm, coord_px -
                        np.array([img.width / 2, img.height / 2])) + np.array(
                            [img.width / 2, img.height / 2])
                    if useBasemap:
                        coord = sp.deg2rad(
                            m(rot_coord_px[0],
                              img.height - rot_coord_px[1],
                              inverse=True))
                    else:
                        coord = localProjection(
                            rot_coord_px[0] - img.width / 2,
                            img.height / 2 - rot_coord_px[1],
                            radius,
                            mobile.yaw,
                            mobile.pitch,
                            inverse=True)
                    target_bearing, target_elevation = coord

                    # Get minimum bounding rectangle for display purpose
                    minR = ROITopLeftCorner + np.array(target[0].minRect())

                    contours = target[0].contour()

                    contours = [
                        ROITopLeftCorner + np.array(contour)
                        for contour in contours
                    ]

                    # Get target features
                    angle = sp.deg2rad(target[0].angle()) + mobile.roll
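                    # Unwrap relative to the previous angle to avoid 180 deg jumps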
                    angle = sp.deg2rad(
                        unwrap180(sp.rad2deg(angle),
                                  sp.rad2deg(previous_angle)))
                    width = target[0].width()
                    height = target[0].height()

                    # Check if the kite is upside down
                    # First rotate the kite
                    ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
                        [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
                    rotated_contours = [
                        np.dot(ctm2, contour - coordMinRect)
                        for contour in contours
                    ]
                    y = [-tmp[1] for tmp in rotated_contours]
                    itop = np.argmax(y)  # then look at the topmost point
                    ibottom = np.argmin(y)  # and at the bottommost point
                    # The most off-center point should be at the bottom
                    if abs(rotated_contours[itop][0]) > abs(
                            rotated_contours[ibottom][0]):
                        isInverted = True
                    else:
                        isInverted = False

                    if isInverted:
                        angle = angle + sp.pi

                    # Low-pass filter the data (first-order exponential smoothing)
                    alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
                    if not (isPaused):
                        dCoord = np.array(previous_dCoord) * (
                            1 - alpha) + alpha * (
                                np.array(coord_px) - previous_coord_px
                            )  # related to the speed only if cam is fixed
                        dAngle = np.array(previous_dAngle) * (
                            1 - alpha) + alpha * (np.array(angle) -
                                                  previous_angle)
                    else:
                        dCoord = np.array([0, 0])
                        dAngle = np.array([0])


                    #print coord_px, angle, width, height, dCoord

                    # Record important data
                    times.append(timestamp)
                    coords_px.append(coord_px)
                    angles.append(angle)
                    target_elevations.append(target_elevation)
                    target_bearings.append(target_bearing)

                    # Export data to controller
                    self.elevation = target_elevation
                    self.bearing = target_bearing
                    self.orientation = angle
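                    # Rate of turn: filtered angle increment over the time
                    # elapsed since the target was last seen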
                    dt = time.time() - timeLastTarget
                    self.ROT = dAngle / dt
                    self.lastUpdateTime = t

                    # Save for initialisation of next step
                    previous_dCoord = dCoord
                    previous_angle = angle
                    previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                    wasTargetFoundInPreviousFrame = True
                    timeLastTarget = time.time()

                else:
                    wasTargetFoundInPreviousFrame = False

                if useHDF5:
                    hdfSize = hdfSize + 1
                    dset.resize((hdfSize, 7))
                    imset.resize((hdfSize, img.width, img.height, 3))
                    dset[hdfSize - 1, :] = [
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ]
                    imset[hdfSize - 1, :, :, :] = img.getNumpy()
                    recordFile.flush()
                else:
                    csv_writer.writerow([
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ])

                if display:
                    if target:
                        # Add target features to layer
                        # Minimal rectangle and its center in RED
                        layer.polygon(minR[(0, 1, 3, 2), :],
                                      color=Color.RED,
                                      width=5)
                        layer.circle(
                            (int(coordMinRect[0]), int(coordMinRect[1])),
                            10,
                            filled=True,
                            color=Color.RED)

                        # Target contour and centroid in BLUE
                        layer.circle((int(coord_px[0]), int(coord_px[1])),
                                     10,
                                     filled=True,
                                     color=Color.BLUE)
                        layer.polygon(contours, color=Color.BLUE, width=5)

                        # Speed vector in BLACK
                        layer.line((int(coord_px[0]), int(coord_px[1])),
                                   (int(coord_px[0] + 20 * dCoord[0]),
                                    int(coord_px[1] + 20 * dCoord[1])),
                                   width=3)

                        # Line giving angle
                        layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                    int(coord_px[1] + 200 * sp.sin(angle))),
                                   (int(coord_px[0] - 200 * sp.cos(angle)),
                                    int(coord_px[1] - 200 * sp.sin(angle))),
                                   color=Color.RED)

                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                    # Add the layer to the raw image
                    toDisplay.addDrawingLayer(layer)
                    toDisplay.addDrawingLayer(selectionLayer)

                    # Add time metadata
                    toDisplay.drawText(str(i_frame) + " " + str(timestamp),
                                       x=0,
                                       y=0,
                                       fontsize=20)

                    # Add Line giving horizon
                    #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                    # Plot parallels
                    for lat in range(-90, 90, 15):
                        r = range(0, 361, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m(r, [lat] * len(r))
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad(r), \
                                    sp.deg2rad([lat]*len(r)), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Plot meridians
                    for lon in range(0, 360, 15):
                        r = range(-90, 91, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m([lon] * len(r), r)
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l= localProjection(sp.deg2rad([lon]*len(r)), \
                                    sp.deg2rad(r), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Text giving bearing
                    # \todo improve for high roll
                    for bearing_deg in range(0, 360, 30):
                        l = localProjection(sp.deg2rad(bearing_deg),
                                            sp.deg2rad(0),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(
                            str(bearing_deg),
                            (img.width / 2 + int(l[0]), img.height - 20),
                            color=Color.RED)

                    # Text giving elevation
                    # \todo improve for high roll
                    for elevation_deg in range(-60, 91, 30):
                        l = localProjection(0,
                                            sp.deg2rad(elevation_deg),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(str(elevation_deg),
                                   (img.width / 2, img.height / 2 - int(l[1])),
                                   color=Color.RED)

                    #toDisplay.save(js)
                    toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
        recordFile.close()
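Stripped of the recording, projection, and debug layers, the detection core of the loop above is a color-distance threshold followed by blob extraction. A minimal sketch of that core, assuming a default webcam and a click-to-sample target color; the threshold of 60 and minsize of 500 are illustrative, not taken from the original:

from SimpleCV import Camera, Color, Display

cam = Camera()
disp = Display((640, 480))
target_color = Color.RED  # placeholder until the user clicks a sample

while disp.isNotDone():
    img = cam.getImage()
    down = disp.leftButtonDownPosition()
    if down is not None:
        target_color = img.getPixel(down[0], down[1])  # sample clicked pixel
    # Distance from the target color, thresholded into a binary mask
    mask = img.colorDistance(color=target_color).threshold(60).invert()
    blobs = mask.findBlobs(minsize=500)
    if blobs:
        x, y = blobs[-1].centroid()  # findBlobs sorts by area, largest last
        img.drawCircle((int(x), int(y)), 10, Color.RED, 3)
    img.save(disp)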
Example #37
0
import math

import pygame
from SimpleCV import Color, Display, DrawingLayer, Image

# Skip, BadImage and scale() are defined elsewhere in the original file;
# see the sketch after this example for assumed versions.

def get_bounding_box(keyword, url, filename):
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim/800.0)
        print "   resizing..."
        img = img.resize(w=int(w/ratio), h=int(h/ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))
    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb


        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
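A sketch of how get_bounding_box() might be driven. Skip, BadImage, and scale() are elided from the original file, so the versions below are assumptions that match how they are used above:

class Skip(Exception):
    pass  # raised to skip the current image

class BadImage(Exception):
    pass  # raised to mark an image as invalid

def scale(ratio, point):
    # Map a display-space point back to the original image resolution
    return (int(point[0] * ratio), int(point[1] * ratio))

try:
    bb = get_bounding_box('kite', 'http://example.com/kite.jpg', 'kite.jpg')
    print 'bounding box:', bb
except Skip:
    print 'skipped'
except BadImage:
    print 'bad image'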
Example #38
0
'''
This program superimposes the camera feed onto the television in the picture
'''
print __doc__

from SimpleCV import Camera, Image, Display

tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
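# A blank image is inverted to white and warped into the TV-screen
# quadrilateral; subtracting it blacks out the screen region of the photo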
tv_mask = Image(tv_original.size()).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()
d = Display(tv.size())

while d.isNotDone():
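    # Grayscale camera frame warped into the same quadrilateral, then added
    # into the blacked-out region so the rest of the scene is untouched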
    bwimage = c.getImage().grayscale().resize(tv.width, tv.height)
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.save(d)