Example #1
    def find(self):
        new_file_path = self.nfn('dot-blobs')
        img = Image(self.img_path)
        new_img = img.colorDistance((160, 255, 160)).invert().binarize(
            (200, 200, 200)).invert().erode(1)
        for blob in new_img.findBlobs():
            print str(blob) + " --> " + str(
                self.chance_blob_is_an_ellipse(blob))

        dots = sorted(new_img.findBlobs()[-5:],
                      key=lambda blob: blob.centroid()[1])
        for blob in dots:
            blob.draw()
            new_img.dl().circle(blob.centroid(), 5, Color.RED)

        centroids = map(lambda blob: blob.centroid(), dots)

        bottom_screws = sorted(centroids[2:4],
                               key=lambda centroid: centroid[0])

        shoe_measurements = ShoeMeasurements(self.shoe.left_or_right,
                                             centroids[0], centroids[1],
                                             bottom_screws[0],
                                             bottom_screws[1], centroids[4])
        new_img = shoe_measurements.draw_on_img(new_img)
        new_img.save(new_file_path)
        return (new_file_path, shoe_measurements)
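
The chained colorDistance → invert → binarize → invert → erode pipeline above recurs throughout these examples. A minimal standalone sketch of it, assuming a hypothetical input file 'dots.jpg' and guarding against findBlobs() returning None when nothing matches:

from SimpleCV import Image

img = Image('dots.jpg')  # hypothetical input file
# Distance to the dot color, inverted and thresholded into a binary mask
mask = img.colorDistance((160, 255, 160)).invert().binarize(200).invert().erode(1)
blobs = mask.findBlobs()
if blobs is not None:
    print '%d blobs found' % len(blobs)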
Example #2
	def trataImagen(self):
		
		img = Image(self.rutaImagenReducida)
		result = img.colorDistance((self.ajustes.r, self.ajustes.g, self.ajustes.b))
		result.save(self.rutaImagenTratada_Fase1) 
		result = result.invert()
		result = result.binarize(float(self.ajustes.umbralBinarizado)).invert()
		result.save(self.rutaImagenTratada_Fase2) 
Example #3
    def trataImagen(self):

        img = Image(self.rutaImagenReducida)
        result = img.colorDistance(
            (self.ajustes.r, self.ajustes.g, self.ajustes.b))
        result.save(self.rutaImagenTratada_Fase1)
        result = result.invert()
        result = result.binarize(float(self.ajustes.umbralBinarizado)).invert()
        result.save(self.rutaImagenTratada_Fase2)
Example #4
	def trataImagen(self, r, g, b, umbralBinarizado):
		
		inicio = time.time()
		
		img = Image(self.rutaImagenReducida)
		result = img.colorDistance((r, g, b)).invert()
		result = result.binarize(umbralBinarizado).invert()
		result.save(self.rutaImagenTratada)
		
		fin = time.time()
		self.tiempoTratamiento = fin - inicio
Example #5
def filter_to_point(filter, imgname):
    img = Image(imgname)

    zones = img.colorDistance(filter).invert()
    blobs = zones.binarize(230)

    blobs.findBlobs().draw(color=Color.BLACK, width=20)
    blobs.save('blobs.bmp')
    blobs_img = Image('blobs.bmp').invert()

    points = blobs_img.findBlobs(minsize=100).coordinates()
    return list(points[0])
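
filter_to_point assumes both findBlobs() calls succeed; on an image with no matching region, findBlobs() returns None and the indexing raises a TypeError. A hedged variant (hypothetical name filter_to_point_safe) that skips the save/reload round trip and returns None instead of crashing; note it omits the width=20 redraw step, so blob boundaries may differ slightly:

from SimpleCV import Image

def filter_to_point_safe(filter_color, imgname):
    # Same masking steps as filter_to_point, without the intermediate bitmap.
    img = Image(imgname)
    zones = img.colorDistance(filter_color).invert()
    blobs = zones.binarize(230).findBlobs(minsize=100)
    if blobs is None:
        return None
    return list(blobs.coordinates()[0])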
Example #6
File: align.py Project: sachinsiby/SPARQ
def findYellowMask(image_file):
	original = Image(image_file)

	yellow_only = original.colorDistance((232,166,30))
	yellow_only = yellow_only*4

	mask = yellow_only.invert()
	#mask.save("yellow_mask.jpg")

	binarizedMask = mask.binarize().invert()
	#binarizedMask.save("binarized_mask_yellow.jpg")

	return binarizedMask
Example #7
def dot_blobs(si, image_path):
	new_file_path = step_file_path(si, 'dot-blobs')
	img = Image(image_path)
	new_img = img.colorDistance((160, 255, 160)).invert().binarize((200, 200, 200)).invert().erode(1)
	dots = sorted(new_img.findBlobs()[-5:], key=lambda blob: blob.centroid()[1])
	for blob in dots:
		blob.draw()
		new_img.dl().circle(blob.centroid(), 5, Color.RED)

	centroids = map(lambda blob: blob.centroid(), dots)

	bottom_screws = sorted(centroids[2:4], key=lambda centroid: centroid[0])

	shoe_measurements = ShoeMeasurements(si.foot, centroids[0], centroids[1], bottom_screws[0], bottom_screws[1], centroids[4])
	new_img = shoe_measurements.draw_on_img(new_img)
	new_img.save(new_file_path)
	si.step_outputs.append(new_file_path)
	return (new_file_path, shoe_measurements)
Example #8
	def find(self):
		new_file_path = self.nfn('dot-blobs')
		img = Image(self.img_path)
		new_img = img.colorDistance((160, 255, 160)).invert().binarize((200, 200, 200)).invert().erode(1)
		for blob in new_img.findBlobs():
			print str(blob) + " --> " + str(self.chance_blob_is_an_ellipse(blob))

		dots = sorted(new_img.findBlobs()[-5:], key=lambda blob: blob.centroid()[1])
		for blob in dots:
			blob.draw()
			new_img.dl().circle(blob.centroid(), 5, Color.RED)

		centroids = map(lambda blob: blob.centroid(), dots)

		bottom_screws = sorted(centroids[2:4], key=lambda centroid: centroid[0])

		shoe_measurements = ShoeMeasurements(self.shoe.left_or_right, centroids[0], centroids[1], bottom_screws[0], bottom_screws[1], centroids[4])
		new_img = shoe_measurements.draw_on_img(new_img)
		new_img.save(new_file_path)
		return (new_file_path, shoe_measurements)
Example #9
def dot_blobs(si, image_path):
    new_file_path = step_file_path(si, 'dot-blobs')
    img = Image(image_path)
    new_img = img.colorDistance((160, 255, 160)).invert().binarize(
        (200, 200, 200)).invert().erode(1)
    dots = sorted(new_img.findBlobs()[-5:],
                  key=lambda blob: blob.centroid()[1])
    for blob in dots:
        blob.draw()
        new_img.dl().circle(blob.centroid(), 5, Color.RED)

    centroids = map(lambda blob: blob.centroid(), dots)

    bottom_screws = sorted(centroids[2:4], key=lambda centroid: centroid[0])

    shoe_measurements = ShoeMeasurements(si.foot, centroids[0], centroids[1],
                                         bottom_screws[0], bottom_screws[1],
                                         centroids[4])
    new_img = shoe_measurements.draw_on_img(new_img)
    new_img.save(new_file_path)
    si.step_outputs.append(new_file_path)
    return (new_file_path, shoe_measurements)
Example #10
File: align.py Project: sachinsiby/SPARQ
def detectCenter(image_file):

	original = Image(image_file)

	center_only = original.colorDistance((155,9,49))*8

	mask = center_only.invert()
	#mask.save("center_mask.jpg")

	binarizedMask = mask.binarize().invert()
	#binarizedMask.save("binarized_mask_center.jpg")

	blobs = original.findBlobsFromMask(binarizedMask)

	if blobs is None:
		#print "No red found"
		return detectGreenLowQuality(image_file)


	bestBlob = blobs[-1]
	bestBlob.drawMinRect(color=Color.RED,width =10)

	bestBlob.image = original

	original.save("align.png")


	centroidX = bestBlob.minRectX()
	centroidY = bestBlob.minRectY()

	#Have to find out which part of the screen centroid is in
	maxX = original.getNumpy().shape[0]
	maxY = original.getNumpy().shape[1]+100
	

	#assume width of 150 pixels
	return align_center(maxX,maxY,centroidX,centroidY,80,80)
Example #11
File: align.py Project: sachinsiby/SPARQ
def detectYellowLowQuality(image_file):

	original = Image(image_file)

	yellow_only = original.colorDistance((156,130,76))*2
	#yellow_only = yellow_only*4

	mask = yellow_only.invert()
	#mask.save("yellow_mask.jpg")

	binarizedMask = mask.binarize().invert()
	#binarizedMask.save("binarized_mask_yellow.jpg")

	blobs = original.findBlobsFromMask(binarizedMask)

	if blobs is None:
		#print "No yellow found"
		return -1

	blobs[-1].drawMinRect(color=Color.RED,width =10)
	blobs.image = original

	#original.save("foundBlobs_yellow.jpg")

	bestBlob = blobs[-1]


	centroidX = bestBlob.minRectX()
	centroidY = bestBlob.minRectY()

	#Have to find out which part of the screen centroid is in
	maxX = original.getNumpy().shape[0]
	maxY = original.getNumpy().shape[1]+100
	

	#assume width of 150 pixels
	return align_center(maxX,maxY,centroidX,centroidY,50,50)
Example #12
File: align.py Project: sachinsiby/SPARQ
def detectGreenLowQuality(image_file):
	
	original = Image(image_file)

	#binarizedYellowMask = findYellowMask(image_file)
	#subtractedMask = original - binarizedYellowMask
	#subtractedMask.save("subtractedMask.jpg")

	#green_only = subtractedMask.colorDistance((94,116,33))
	#green_only = subtractedMask.colorDistance((50,116,45))
	green_only = original.colorDistance((0,70,6))

	green_only = green_only*6

	mask = green_only.invert()
	#mask.save("green_mask.jpg")


	binarizedMask = mask.binarize().invert()
	#binarizedMask.save("binarized_mask_green.jpg")


	blobs = original.findBlobsFromMask(binarizedMask)


	if blobs is None:
		#print "No green found"
		return detectYellowLowQuality(image_file)


	blobs.image = original
	
	#Assume best blob is the largest blob
	bestBlob = blobs[-1]

	bestBlob.drawMinRect(color=Color.RED,width =10)
	#original.save("foundBlobs_green.jpg")
	

	'''
	#blobs[-1].drawRect(color=Color.RED,width =10)
	coordinates = bestBlob.minRect()


	#Find the center point
	centroidX = bestBlob.minRectX()
	centroidY = bestBlob.minRectY()

	minLeftY = 0
	minRightY = 0

	#Find the bottom left and bottom right coordinates
	for coordinate in coordinates:
		if coordinate[0] < centroidX and coordinate[1] > minLeftY:
			bottomLeft = coordinate
			minLeftY = coordinate[1]
		elif coordinate[0] > centroidX and coordinate[1] > minRightY:
			bottomRight = coordinate
			minRightY = coordinate[1]
			
	'''


	centroidX = bestBlob.minRectX()
	centroidY = bestBlob.minRectY()

	#Have to find out which part of the screen centroid is in
	maxX = original.getNumpy().shape[0]
	maxY = original.getNumpy().shape[1]+100
	

	#assume width of 150 pixels
	return align_center(maxX,maxY,centroidX,centroidY,50,50)
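
Examples #10-#12 repeat the same recipe: scale a colorDistance image by a gain, invert it, binarize, and feed the mask to findBlobsFromMask(). A condensed sketch of that shared step (hypothetical helper name; the gain values in the examples range from 2 to 8):

from SimpleCV import Image

def findBlobsByColor(image_file, rgb, gain):
    # Build a binarized mask from the color distance and return blobs (or None).
    original = Image(image_file)
    mask = (original.colorDistance(rgb) * gain).invert()
    binarizedMask = mask.binarize().invert()
    return original.findBlobsFromMask(binarizedMask)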
Example #13
#!/usr/bin/env python
# coding=utf-8
#
# Author: Archer Reilly
# Desc: Find object blobs by color
# File: FindBlobs.py
# Date: 30/July/2016
#
from SimpleCV import Color, Image

# img = Image('/home/archer/Downloads/Chapter 8/mandms-dark.png')
img = Image('/home/archer/Downloads/1185391864.jpg')

# blue_distance = img.colorDistance(Color.BLUE).invert()
blue_distance = img.colorDistance(Color.BLACK).invert()

blobs = blue_distance.findBlobs(minsize=15)

blobs.draw(color=Color.RED, width=3)

blue_distance.show()

img.addDrawingLayer(blue_distance.dl())
img.save('res.png')
img.show()
Example #14
 def track(self):
  print "Press right mouse button to pause or play"
  print "Use left mouse button to select target"
  print "Target color must be different from background"
  print "Target must have width larger than height"
  print "Target can be upside down"

  #Parameters
  isUDPConnection = False # Currently switched manually in the code
  display = True
  displayDebug = True
  useBasemap = False
  maxRelativeMotionPerFrame = 2 # How far the target can move between two successive frames
  pixelPerRadians = 320
  radius = pixelPerRadians
  referenceImage = '../ObjectTracking/kite_detail.jpg'
  scaleFactor = 0.5
  isVirtualCamera = True
  useHDF5 = False

  # Open reference image: this is used at initialisation
  target_detail = Image(referenceImage)

  # Get RGB color palette of target (was found to work better than using hue)
  pal = target_detail.getPalette(bins = 2, hue = False)

  # Open video to analyse or live stream
  #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
  if isVirtualCamera:
    #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
    #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
    #cam = VirtualCamera('output.avi', 'video')
    cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv','video')
    virtualCameraFPS = 25
  else:
    cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')#640 * 480
    #cam = Camera() 

  # Get a sample image to initialize the display at the same size
  img = cam.getImage().scale(scaleFactor)
  print img.width, img.height
  # Create a pygame display
  if display:
   if img.width>img.height:
     disp = Display((27*640/10,25*400/10))#(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
   else:
     disp = Display((810,1080))
  #js = JpegStreamer()



  # Initialize variables
  previous_angle = 0 # target has to be upright when starting. Target width has to be larger than target height.
  previous_coord_px = (0, 0) # Initialized to top left corner, which always exists
  previous_dCoord = previous_coord_px
  previous_dAngle = previous_angle
  angles = []
  coords_px = []
  coord_px = [0, 0]
  angle = 0
  target_elevations = []
  target_bearings = []
  times = []
  wasTargetFoundInPreviousFrame = False
  i_frame = 0
  isPaused = False
  selectionInProgress = False
  th = [100, 100, 100]
  skycolor = Color.BLUE
  timeLastTarget = 0

  # Prepare recording
  recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_")+ 'simpleTrack'
  if useHDF5:
    try:
      os.remove(recordFilename + '.hdf5') 
    except:
      print('Creating file ' + recordFilename + '.hdf5')
    """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
    h5py._errors.silence_errors()
    recordFile = h5py.File(recordFilename + '.hdf5', 'a') 
    hdfSize = 0    
    dset = recordFile.create_dataset('kite', (2,2), maxshape=(None,7))
    imset = recordFile.create_dataset('image', (2,img.width,img.height,3 ), maxshape=(None, img.width, img.height, 3))
  else:
    try:
      os.remove(recordFilename + '.csv')   
    except:
      print('Creating file ' + recordFilename + '.csv') 
    recordFile = file(recordFilename + '.csv', 'a')
    csv_writer = csv.writer(recordFile)
    csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)', 'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'])

  # Launch a thread to get UDP message with orientation of the camera
  mobile = mobileState.mobileState()
  if isUDPConnection:
   a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
   a.start()

  # Loop while not canceled by user
  t0 = time.time()
  previousTime = t0
  while not(display) or disp.isNotDone():
    t = time.time()
    deltaT = (t-previousTime)
    FPS = 1.0/deltaT
    #print 'FPS =', FPS
    if isVirtualCamera:
      deltaT = 1.0/virtualCameraFPS
    previousTime = t
    i_frame = i_frame + 1
    timestamp = datetime.datetime.utcnow()

    # Receive orientation of the camera
    if isUDPConnection:
      mobile.computeRPY([2, 0, 1], [-1, 1, 1])
    ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
            [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

    if useBasemap:
    # Warning this really slows down the computation
      m = Basemap(width=img.width, height=img.height, projection='aeqd',
            lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw), rsphere = radius)

    # Get an image from camera
    if not isPaused:
      img = cam.getImage()
      img = img.resize(int(scaleFactor*img.width), int(scaleFactor*img.height))
    
    if display:
      # Pause image when right button is pressed
      dwn = disp.rightButtonDownPosition()
      if dwn is not None:
        isPaused = not(isPaused)
        dwn = None

    if display:
    # Create a layer to enable user to make a selection of the target
      selectionLayer = DrawingLayer((img.width, img.height))

    if img:
      if display: 
      # Create a new layer to host information retrieved from video
        layer = DrawingLayer((img.width, img.height))
          # Selection is a rectangle drawn while holding mouse left button down
        if disp.leftButtonDown:
          corner1 = (disp.mouseX, disp.mouseY)
          selectionInProgress = True
        if selectionInProgress:
          corner2 = (disp.mouseX, disp.mouseY)
          bb = disp.pointsToBoundingBox(corner1, corner2)# Display the temporary selection
          if disp.leftButtonUp: # User has finished his selection
            selectionInProgress = False
            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
            if selection is not None:
                    # The 3 main colors in the area selected are considered.
            # Note that the selection should be included in the target and not contain background
              try:
                selection.save('../ObjectTracking/'+ 'kite_detail_tmp.jpg')
                img0 = Image("kite_detail_tmp.jpg") # For unknown reason I have to reload the image...
                pal = img0.getPalette(bins = 2, hue = False)
              except: # getPalette sometimes bugs out, raising LinAlgError because the matrix is not positive definite
                pal = pal
              wasTargetFoundInPreviousFrame = False
              previous_coord_px = (bb[0] + bb[2]/2, bb[1] + bb[3]/2)
          if corner1 != corner2:
            selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), width = 5, color = Color.YELLOW)
                       
   
      # If the target was already found, we can save computation time by
      # reducing the Region Of Interest around predicted position
      if wasTargetFoundInPreviousFrame:
        ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                  max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
        ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                             maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                 centered = False)
        if display :
      # Draw the rectangle corresponding to the ROI on the complete image
          layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                   previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                 color = Color.GREEN, width = 2)
      else:
        # Search the whole image if there is no clue where the target is
        ROITopLeftCorner = (0, 0)
        ROI = img

        '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''
    
          # Find sky color
      sky = (img-img.binarize()).findBlobs(minsize=10000)
      if sky:
       skycolor = sky[0].meanColor()
      # Option 3
      target_img = ROI-ROI # Black image
              
      # Loop through palette of target colors
      if display and displayDebug:
            decomposition = []
      i_col = 0
      for col in pal: 
        c = tuple([int(col[i]) for i in range(0,3)])
            # Search the target based on color
        ROI.save('../ObjectTracking/'+ 'ROI_tmp.jpg')
        img1 = Image('../ObjectTracking/'+ 'ROI_tmp.jpg')
        filter_img = img1.colorDistance(color = c)
        h = filter_img.histogram(numbins=256)
        cs = np.cumsum(h)
        thmax = np.argmin(abs(cs- 0.02*img.width*img.height)) # threshold putting ~2% of the pixels in the expected color
        thmin = np.argmin(abs(cs- 0.005*img.width*img.height)) # threshold putting ~0.5% of the pixels in the expected color
        if thmin==thmax:
          newth = thmin
        else:
          newth = np.argmin(h[thmin:thmax]) + thmin
        alpha = 0.5
        th[i_col] = alpha*th[i_col]+(1-alpha)*newth
        filter_img = filter_img.threshold(max(40,min(200,th[i_col]))).invert()
        target_img = target_img + filter_img
        #print th
        i_col = i_col + 1
        if display and displayDebug:
          [R, G, B] = filter_img.splitChannels()
          white = (R-R).invert()
          r = R*1.0/255*c[0]
          g = G*1.0/255*c[1]
          b = B*1.0/255*c[2]
          tmp = white.mergeChannels(r, g, b)
          decomposition.append(tmp)

      # Get a black background with white target foreground
      target_img = target_img.threshold(150)
  
      target_img = target_img - ROI.colorDistance(color = skycolor).threshold(80).invert()

      if display and displayDebug:
        small_ini = target_img.resize(int(img.width/(len(pal)+1)),  int(img.height/(len(pal)+1)))
        for tmp in decomposition:
          small_ini = small_ini.sideBySide(tmp.resize(int(img.width/(len(pal)+1)), int(img.height/(len(pal)+1))), side = 'bottom')
        small_ini = small_ini.adaptiveScale((int(img.width), int(img.height)))
        toDisplay = img.sideBySide(small_ini)
      else:
        toDisplay = img
          #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

      # Search for binary large objects representing potential target
      target = target_img.findBlobs(minsize = 500)
      
      if target: # If a target was found
      
        if wasTargetFoundInPreviousFrame:
          predictedTargetPosition = (width*maxRelativeMotionPerFrame/2, height*maxRelativeMotionPerFrame/2) # Target will most likely be close to the center of the ROI   
        else:
          predictedTargetPosition = previous_coord_px
              # If there are several targets in the image, take the one which is the closest to the predicted position
        target = target.sortDistance(predictedTargetPosition)

        # Get target coordinates according to minimal bounding rectangle or centroid.
        coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY()))
        coord_px = ROITopLeftCorner + np.array(target[0].centroid())

        # Rotate the coordinates of roll angle around the middle of the screen
        rot_coord_px = np.dot(ctm, coord_px - np.array([img.width/2, img.height/2])) + np.array([img.width/2, img.height/2])
        if useBasemap:
          coord = sp.deg2rad(m(rot_coord_px[0], img.height-rot_coord_px[1], inverse = True))
        else:
          coord = localProjection(rot_coord_px[0]-img.width/2, img.height/2-rot_coord_px[1], radius, mobile.yaw, mobile.pitch, inverse = True)
        target_bearing, target_elevation = coord

      # Get minimum bounding rectangle for display purpose
        minR = ROITopLeftCorner + np.array(target[0].minRect())

        contours = target[0].contour()

        contours = [ ROITopLeftCorner + np.array(contour) for contour in contours]


  
        # Get target features
        angle = sp.deg2rad(target[0].angle()) + mobile.roll
        angle =  sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle)))
        width = target[0].width()
        height = target[0].height()

        # Check if the kite is upside down
        # First rotate the kite
        ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
            [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
        rotated_contours = [np.dot(ctm2, contour-coordMinRect) for contour in contours]  
        y = [-tmp[1] for tmp in rotated_contours]
        itop = np.argmax(y) # Then look at the point at the top
        ibottom = np.argmin(y) # and the point at the bottom
        # The most off-center point is at the bottom
        if abs(rotated_contours[itop][0])>abs(rotated_contours[ibottom][0]):
          isInverted = True
        else:
          isInverted = False    
        
        if isInverted:
            angle = angle + sp.pi    

        
        # Filter the data
        alpha = 1-sp.exp(-deltaT/self.filterTimeConstant)
        if not(isPaused):
          dCoord = np.array(previous_dCoord)*(1-alpha) + alpha*(np.array(coord_px) - previous_coord_px) # related to the speed only if cam is fixed
          dAngle = np.array(previous_dAngle)*(1-alpha) + alpha*(np.array(angle) - previous_angle)
        else : 
          dCoord = np.array([0, 0])
          dAngle = np.array([0]) 
        #print coord_px, angle, width, height, dCoord
    
        # Record important data
        times.append(timestamp)
        coords_px.append(coord_px)
        angles.append(angle)
        target_elevations.append(target_elevation)
        target_bearings.append(target_bearing)
        
        # Export data to controller
        self.elevation = target_elevation
        self.bearing = target_bearing
        self.orientation = angle
        dt = time.time()-timeLastTarget
        self.ROT = dAngle/dt
        self.lastUpdateTime = t
        
        # Save for initialisation of next step
        previous_dCoord = dCoord
        previous_angle = angle
        previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
        wasTargetFoundInPreviousFrame = True
        timeLastTarget = time.time()
      
      else:
        wasTargetFoundInPreviousFrame = False
        
      if useHDF5:
        hdfSize = hdfSize+1
        dset.resize((hdfSize, 7))
        imset.resize((hdfSize, img.width, img.height, 3))
        dset[hdfSize-1,:] = [time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT]
        imset[hdfSize-1,:,:,:] = img.getNumpy()
        recordFile.flush()
      else:
        csv_writer.writerow([time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT])



      if display :
        if target:
        # Add target features to layer
        # Minimal rectange and its center in RED
          layer.polygon(minR[(0, 1, 3, 2), :], color = Color.RED, width = 5)
          layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10, filled = True, color = Color.RED)
        
                # Target contour and centroid in BLUE
          layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled = True, color = Color.BLUE)
          layer.polygon(contours, color = Color.BLUE, width = 5)

        # Speed vector in BLACK
          layer.line((int(coord_px[0]), int(coord_px[1])), (int(coord_px[0]+20*dCoord[0]), int(coord_px[1]+20*dCoord[1])), width = 3)
        
        # Line giving angle
          layer.line((int(coord_px[0]+200*sp.cos(angle)), int(coord_px[1]+200*sp.sin(angle))), (int(coord_px[0]-200*sp.cos(angle)), int(coord_px[1]-200*sp.sin(angle))), color = Color.RED)

        # Line giving rate of turn
        #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))
            
      # Add the layer to the raw image 
        toDisplay.addDrawingLayer(layer)
        toDisplay.addDrawingLayer(selectionLayer)

      # Add time metadata
        toDisplay.drawText(str(i_frame)+" "+ str(timestamp), x=0, y=0, fontsize=20)

      # Add Line giving horizon
          #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

      # Plot parallels
        for lat in range(-90, 90, 15):
          r = range(0, 361, 10)
          if useBasemap:
            # \todo improve for high roll
            l = m (r, [lat]*len(r))
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l = localProjection(sp.deg2rad(r), \
                    sp.deg2rad([lat]*len(r)), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Plot meridians
        for lon in range(0, 360, 15):
          r = range(-90, 91, 10)
          if useBasemap:
        # \todo improve for high roll
            l = m ([lon]*len(r), r)
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l= localProjection(sp.deg2rad([lon]*len(r)), \
                    sp.deg2rad(r), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Text giving bearing
      # \todo improve for high roll
        for bearing_deg in range(0, 360, 30):
          l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(bearing_deg), ( img.width/2+int(l[0]), img.height-20), color = Color.RED)

      # Text giving elevation
      # \todo improve for high roll
        for elevation_deg in range(-60, 91, 30):
          l = localProjection(0, sp.deg2rad(elevation_deg), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(elevation_deg), ( img.width/2 ,img.height/2-int(l[1])), color = Color.RED)

        #toDisplay.save(js)
        toDisplay.save(disp)
    if display : 
      toDisplay.removeDrawingLayer(1)
      toDisplay.removeDrawingLayer(0)
  recordFile.close()
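
Both the per-color threshold update above (th[i_col] = alpha*th[i_col] + (1-alpha)*newth) and the dCoord/dAngle smoothing are the same first-order exponential filter, with the roles of alpha swapped between the two uses and, in the second, alpha derived from the frame interval. A standalone sketch of the update rule (plain Python, illustrative values):

def exp_filter(previous, new, alpha):
    # First-order low-pass: alpha near 1 trusts history,
    # alpha near 0 trusts the new measurement.
    return alpha * previous + (1 - alpha) * new

th = 100.0
for newth in [120, 118, 125, 90]:
    th = exp_filter(th, newth, 0.5)
    print th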
Example #15
from SimpleCV import Image, Color

bluePincels = Image("ex19.jpg")  # Open mm.png too :)

# Read the color value of the pixel at (100, 100)
pixel = bluePincels.getPixel(100, 100)
print pixel

# Compute the distance of every pixel from the color picked above
colorDist = bluePincels.colorDistance(pixel)

# Binarize the image and invert the colors to show most of the blue parts of the image
colorDistBin = colorDist.binarize(50).invert()
colorDistBin.show()
Example #16
    blobs = justColor.findBlobs()
    if blobs:
        # To do: put in check for a reasonably sized blob. We don't want to return specks or the background/sky.
        return blobs[-1] # largest blob
    else:
        print "No {0} blobs found".format(color)




if __name__ == "__main__":
    #img = Image("mm.jpg")
    img = Image("rocks.png")
    for color in COLORS:
        print COLORS[color]
        color_distance = img - img.colorDistance(color)
        blobs = color_distance.findBlobs()[-1] #only biggest one
        half = halfsies(img,color_distance.binarize(10).invert().erode(1).dilate(1)) 
            #http://www.reedbushey.com/65Practical%20Computer%20Vision%20with%20Simplecv.pdf
            #http://my.safaribooksonline.com/book/programming/python/9781449337865/image-morphology/id2749011
        my_show(half,length=70)
        # img.addDrawingLayer(color_distance.dl())
        # my_show(color_distance.binarize(),length=50)


    # filename = sys.argv[1]
    # # testImg = Image("officialRocks.JPG")
    # testImg = Image(filename)
    # for color in colors:
    #     print colors[color]
    #     b = findColorBlob(testImg, color)
Example #17
#import the modules
import time
import SimpleCV
from SimpleCV import Image
from SimpleCV import Color

hay_alguien = Image("me.jpg")

win = hay_alguien.show()
time.sleep(3)
win.quit()

extraccion_color = hay_alguien.colorDistance(Color.BLACK)
win = extraccion_color.show()
time.sleep(3)
win.quit()

me = hay_alguien - extraccion_color
win = me.show()
time.sleep(3)
win.quit()

print('results of the first comparison:')
R = me.meanColor()[2]
print('R: ' + str(R))
G = me.meanColor()[1]
print('G: ' + str(G))
B = me.meanColor()[0]
print('B: ' + str(B))

cam = SimpleCV.Camera()
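
The example above stops right after opening the camera. A sketch of how the comparison might continue, assuming presence is declared when the masked channel means drift past a tolerance (the threshold value is a guess, to be tuned empirically):

frame = cam.getImage()
extraccion = frame.colorDistance(Color.BLACK)
masked = frame - extraccion
TOLERANCE = 10  # assumed tolerance, tune empirically
if abs(masked.meanColor()[2] - R) > TOLERANCE or \
   abs(masked.meanColor()[1] - G) > TOLERANCE or \
   abs(masked.meanColor()[0] - B) > TOLERANCE:
    print('something changed in front of the camera')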
Example #18
# encoding: utf-8

__author__ = 'yonka'

import SimpleCV
from SimpleCV import Image

img = Image("lenna")  # SimpleCV's bundled sample image
img.colorDistance(SimpleCV.Color.BLACK).show()
Example #19
from SimpleCV import Image, Color

bluePincels = Image("ex19.jpg") # Open mm.png too :)

# Read the color value of the pixel at (100, 100)
pixel = bluePincels.getPixel(100,100)
print pixel 

# Compute the distance of every pixel from the color picked above
colorDist = bluePincels.colorDistance(pixel)

# Binarize the image and invert the colors to show most of the blue parts of the image
colorDistBin = colorDist.binarize(50).invert()
colorDistBin.show()
Example #20
from SimpleCV import Color, Image

img = Image("ex23b.png") #Open ex23b.png too :)

colorDist = img.colorDistance(Color.BLUE).invert()
blobs = colorDist.findBlobs()

# Draw a BLACK border at blobs
blobs.draw(color=Color.BLACK, width=3)

# Transfer colorDist's drawing layer onto the original image
img.addDrawingLayer(colorDist.dl())
img.show()
Example #21
##    img = tableroAjustado

    # if we are not adjusting color, we must apply the null transformation
    img = funcionNula(tableroEncuadrado)  


    if runInLinux == True:
        img.save ('temp/tempFile.png')
        img = Image('temp/tempFile.png')    

    if guardar_detalles_encuadre == True:
        img.save ('temp/tablero_encuadrado%02d.png' %(contador))


    # Detect the robot (or any other object on the board)
    robot_filter = img.colorDistance(COLOR_OBJETIVO)#.invert()
    robot_filter = robot_filter.binarize(UMBRAL_BINARIZADO)#.invert()
    blobs_Robot = robot_filter.findBlobs()

    # Only compute positions and angles if there are robot candidates
    if blobs_Robot and len(blobs_Robot) >= 2:
        culo_ROBOT = blobs_Robot.pop()   # the largest blob is the rear
        cabeza_ROBOT = blobs_Robot.pop() # the small blob is the head

        # translate the coordinates to axes centered on the image
        ofssetX = int((tableroANCHO * COEFICIENTE_AMPLIACION) /2)
        ofssetY = int((tableroALTO * COEFICIENTE_AMPLIACION) /2)

        cabezaX = cabeza_ROBOT.x - ofssetX
        cabezaY = ofssetY - cabeza_ROBOT.y
Example #22
from SimpleCV import Color, Image

img = Image("ex23b.png")  #Open ex23b.png too :)

colorDist = img.colorDistance(Color.BLUE).invert()
blobs = colorDist.findBlobs()

# Draw a BLACK border at blobs
blobs.draw(color=Color.BLACK, width=3)

# Transfer colorDist's drawing layer onto the original image
img.addDrawingLayer(colorDist.dl())
img.show()
Example #23
from SimpleCV import Image
import time

#image Segmentation using Color Difference
img1 = Image('/home/pi/book/test_set/4.1.03.tiff')
img1.show()
time.sleep(5)

greendist = img1.colorDistance((108, 139, 133))

greendistbin = greendist.binarize(30).invert()
greendistbin.show()
time.sleep(5)

onlygreen = img1 - greendistbin
onlygreen.show()
time.sleep(5)
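
The subtraction idiom above (and in several other examples here) works because the binarized, inverted distance image is white everywhere except near the reference color, and SimpleCV's image subtraction saturates at zero, so only the near-color pixels survive. Compactly, assuming SimpleCV's bundled 'lenna' sample:

from SimpleCV import Image

img = Image('lenna')
mask = img.colorDistance((108, 139, 133)).binarize(30).invert()
only_target = img - mask  # white mask pixels clamp to black; target pixels survive
only_target.show()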
Example #24
from SimpleCV import Image, Color
import time

yellowTool = Image('yellowtool.png')

yellowDist = yellowTool.colorDistance((223, 191, 29))

yellowDist.show()
time.sleep(3)

yellowDistBin = yellowDist.binarize(50).invert()

yellowDistBin.show()
time.sleep(3)

onlyYellow = yellowTool - yellowDistBin

onlyYellow.show()
time.sleep(3)
Example #25
from __future__ import print_function
from SimpleCV import Image, Color

im = Image("img/ardrone.jpg")
# im = im.erode()
im = im.crop(2000, 1500, 2000, 2000, centered=True)
# im = im.erode()
# blobs = im.findBlobs()
# blobs.draw()

only_orange = im - im.colorDistance(Color.ORANGE)
only_black 	= im - im.colorDistance(Color.BLACK)
only_drone  = only_orange + only_black


only_drone.save("img/ardrone2.jpg")
# raw_input("Hit enter to quit: ")
# window.quit()
Example #26
from SimpleCV import Image, Color
from picamera import PiCamera
from picamera.array import PiRGBArray
import time

w = 320
h = 240
threshold = 64

camera = PiCamera()
camera.resolution = (w, h)

rawcapture = PiRGBArray(camera)
camera.capture(rawcapture, format='rgb')

img = Image(rawcapture.array)

red_object = img.colorDistance(Color.RED)
red_object.show()
time.sleep(1)

matrix = red_object.getNumpy()

#print matrix
c = 0
for i in range(0, h - 1):
    for l in range(0, w - 1):
        if matrix[i, l, 0] <= threshold:
            c += 1
        # print matrix[i,l,0], i, l

percentage = 100.00 * c / (w * h)
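
The nested loop above is pure Python and also skips the last row and column (range(0, h - 1) stops at h - 2). The same count as a single vectorized numpy expression (a sketch, reusing matrix, threshold, w and h from above):

import numpy as np

# Count pixels whose first channel is at or below the threshold,
# then convert to a percentage of the whole frame.
c = int(np.count_nonzero(matrix[:, :, 0] <= threshold))
percentage = 100.0 * c / (w * h)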
Example #27
from SimpleCV import Color, Image 
import time
import math
import sys

if len(sys.argv) < 2:
	couleur = Color.BLUE
elif sys.argv[1] == "rouge":
	couleur = Color.RED
elif sys.argv[1] == "bleu":
	couleur = Color.BLUE
elif sys.argv[1] == "jaune":
	couleur = Color.YELLOW
elif sys.argv[1] == "vert":
	couleur = Color.GREEN

img = Image("test.jpg") 
color_distance = img.colorDistance(couleur).invert() 
blobs = color_distance.findBlobs() 
coord = blobs[-1].centroid() 

x = math.floor(coord[0])

sys.exit(int(x))

# points = [(x+10,y+10),(x+10,y-10),(x-10,y-10),(x-10,y+10)]
# img.dl().polygon(points, filled=True, color=Color.RED)
# img.show()
#time.sleep(10)
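
Returning a pixel x-coordinate through sys.exit() is fragile: POSIX exit statuses are truncated to 0-255, so any x beyond 255 wraps, and a non-integer argument makes Python print it and exit with status 1. A sketch that reports the value on stdout for the caller to capture instead:

print int(x)  # caller reads stdout instead of the exit status
sys.exit(0)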
Example #28
def process(filename):
    # Preprocessing
    input_img = Image('./images/' + filename)
    img = input_img.colorDistance(Color.BLACK).stretch(PARAM_STRETCH_THRESH_LOW, PARAM_STRETCH_THRESH_HIGH).scale(2)
    img.save('./processed/' + filename)


    # OCR
    ocr_bin = shlex.split('./ocr ./processed/' + filename)
    process = subprocess.Popen(ocr_bin, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()


    # Parse output
    with open('eng.user-words', 'r') as f:
        choices = f.read().strip().split('\n')

    lines = out.split('\n')
    results = []
    confidences = []
    ratios = []
    flags = []
    ignore_count = 0
    for ln in lines:
        # don't process empty lines
        if(len(ln) == 0):
            continue

        # ignore 1st x lines of heading eg. FAIRPRICE XTRA
        if(ignore_count<HEADING_LINES_COUNT):
            ignore_count+=1
            continue

        # line in format "confidence || line"
        ocr_line = ln.split('||')
        conf, item_and_price = ocr_line[0], ocr_line[1].split(' ')

        # assume price is last word in line
        item, price = ' '.join(item_and_price[:-1]).strip(), item_and_price[-1].strip()

        # Use fuzzy string matching to correct result
        predicted, ratio = FProcess.extractOne(item, choices)
        if ratio >= FLAG_CRITERIA_FUZZY and float(conf) >= FLAG_CRITERIA_OCR:
            output_line = predicted
            flag = ''
        else:
            output_line = item
            flag = 'Flag'

        results.append(output_line + ' ' + price)
        confidences.append(conf)
        ratios.append(str(ratio))
        flags.append(flag)


    # Write results with flags for manual checking
    zipped = zip(results, confidences, ratios, flags)
    mapped = map(lambda tup: ' || '.join(tup), zipped)

    outfile = './results/' + filename + '.txt'
    with open(outfile, 'w') as f:
        f.write('\n'.join(mapped))


    return zipped
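
FProcess.extractOne above matches the signature of fuzzywuzzy's process.extractOne (an assumption; the import is not shown in the snippet). A standalone sketch of that fuzzy-correction step:

from fuzzywuzzy import process as FProcess

choices = ['MILK 1L', 'WHOLEMEAL BREAD', 'FREE RANGE EGGS']
predicted, ratio = FProcess.extractOne('WH0LEMEAL BREAD', choices)
print predicted, ratio  # best match and its 0-100 similarity score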
Example #29
from gpiozero import LED
import subprocess
from SimpleCV import Image
from SimpleCV import Color

import time 


figure_here = Image("figure.png")
figure_here.show()
time.sleep(2)
red_figure = figure_here.colorDistance(Color.RED)
red_figure.show()
time.sleep(2)

only_figure = figure_here-red_figure
only_figure.show()
time.sleep(2)
figure_color_val = only_figure.meanColor()
print "color val with figure" 
print figure_color_val



figure_not_here = Image("figure.png")
figure_not_here.show()
time.sleep(2)
red_figure = figure_not_here.colorDistance(Color.RED)
red_figure.show()
time.sleep(2)
Example #30
import SimpleCV
from SimpleCV import Image
 
display = SimpleCV.Display()

img = Image("ping/source/image3.jpeg")

# parameters for image1: (dilate=2), (stretch(200,255), isCircle(0.2)
# parameters for image2: not found
# parameters for image3: (dilate=2), (stretch(2,120), isCircle(0.2)
# parameters for image4: (dilate=2), (stretch(2,120), isCircle(0.2)
# parameters for image5: (dilate=2), (stretch(2,120), isCircle(0.2)
# parameters for image6: crashes
# parameters for image7: not found
            
dist = img.colorDistance(SimpleCV.Color.ORANGE).dilate(2)

segmented = dist.stretch(2,120).invert()

blobs = segmented.findBlobs()

if blobs:

        balles = blobs.filter([b.isCircle(0.2) for b in blobs])
                
        if balles:
                for balle in balles:
                        img.drawCircle((balle.x,balle.y),balle.radius(),SimpleCV.Color.BLUE,3 )

img.show()
Example #31
    def track(self):
        print "Press right mouse button to pause or play"
        print "Use left mouse button to select target"
        print "Target color must be different from background"
        print "Target must have width larger than height"
        print "Target can be upside down"

        #Parameters
        isUDPConnection = False  # Currently switched manually in the code
        display = True
        displayDebug = True
        useBasemap = False
        maxRelativeMotionPerFrame = 2  # How far the target can move between two successive frames
        pixelPerRadians = 320
        radius = pixelPerRadians
        referenceImage = '../ObjectTracking/kite_detail.jpg'
        scaleFactor = 0.5
        isVirtualCamera = True
        useHDF5 = False

        # Open reference image: this is used at initialisation
        target_detail = Image(referenceImage)

        # Get RGB color palette of target (was found to work better than using hue)
        pal = target_detail.getPalette(bins=2, hue=False)

        # Open video to analyse or live stream
        #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
        if isVirtualCamera:
            #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
            #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
            #cam = VirtualCamera('output.avi', 'video')
            cam = VirtualCamera(
                '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
                'video')
            virtualCameraFPS = 25
        else:
            cam = JpegStreamCamera(
                'http://192.168.43.1:8080/videofeed')  #640 * 480
            #cam = Camera()

        # Get a sample image to initialize the display at the same size
        img = cam.getImage().scale(scaleFactor)
        print img.width, img.height
        # Create a pygame display
        if display:
            if img.width > img.height:
                disp = Display(
                    (27 * 640 / 10, 25 * 400 / 10)
                )  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
            else:
                disp = Display((810, 1080))
        #js = JpegStreamer()

        # Initialize variables
        previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
        previous_coord_px = (
            0, 0)  # Initialized to top left corner, which always exists
        previous_dCoord = previous_coord_px
        previous_dAngle = previous_angle
        angles = []
        coords_px = []
        coord_px = [0, 0]
        angle = 0
        target_elevations = []
        target_bearings = []
        times = []
        wasTargetFoundInPreviousFrame = False
        i_frame = 0
        isPaused = False
        selectionInProgress = False
        th = [100, 100, 100]
        skycolor = Color.BLUE
        timeLastTarget = 0

        # Prepare recording
        recordFilename = datetime.datetime.utcnow().strftime(
            "%Y%m%d_%Hh%M_") + 'simpleTrack'
        if useHDF5:
            try:
                os.remove(recordFilename + '.hdf5')
            except:
                print('Creating file ' + recordFilename + '.hdf5')
            """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
            h5py._errors.silence_errors()
            recordFile = h5py.File(
                os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'),
                'a')
            hdfSize = 0
            dset = recordFile.create_dataset('kite', (2, 2),
                                             maxshape=(None, 7))
            imset = recordFile.create_dataset('image',
                                              (2, img.width, img.height, 3),
                                              maxshape=(None, img.width,
                                                        img.height, 3))
        else:
            try:
                os.remove(recordFilename + '.csv')
            except:
                print('Creating file ' + recordFilename + '.csv')
            recordFile = file(
                os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
            csv_writer = csv.writer(recordFile)
            csv_writer.writerow([
                'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'
            ])

        # Launch a thread to get UDP message with orientation of the camera
        mobile = mobileState.mobileState()
        if isUDPConnection:
            mobile.open()
        # Loop while not canceled by user
        t0 = time.time()
        previousTime = t0
        while not (display) or disp.isNotDone():
            t = time.time()
            deltaT = (t - previousTime)
            FPS = 1.0 / deltaT
            #print 'FPS =', FPS
            if isVirtualCamera:
                deltaT = 1.0 / virtualCameraFPS
            previousTime = t
            i_frame = i_frame + 1
            timestamp = datetime.datetime.utcnow()

            # Receive orientation of the camera
            if isUDPConnection:
                mobile.computeRPY([2, 0, 1], [-1, 1, 1])
            ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
                    [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

            if useBasemap:
                # Warning this really slows down the computation
                m = Basemap(width=img.width,
                            height=img.height,
                            projection='aeqd',
                            lat_0=sp.rad2deg(mobile.pitch),
                            lon_0=sp.rad2deg(mobile.yaw),
                            rsphere=radius)

            # Get an image from camera
            if not isPaused:
                img = cam.getImage()
                img = img.resize(int(scaleFactor * img.width),
                                 int(scaleFactor * img.height))

            if display:
                # Pause image when right button is pressed
                dwn = disp.rightButtonDownPosition()
                if dwn is not None:
                    isPaused = not (isPaused)
                    dwn = None

            if display:
                # Create a layer to enable user to make a selection of the target
                selectionLayer = DrawingLayer((img.width, img.height))

            if img:
                if display:
                    # Create a new layer to host information retrieved from video
                    layer = DrawingLayer((img.width, img.height))
                    # Selection is a rectangle drawn while holding mouse left button down
                    if disp.leftButtonDown:
                        corner1 = (disp.mouseX, disp.mouseY)
                        selectionInProgress = True
                    if selectionInProgress:
                        corner2 = (disp.mouseX, disp.mouseY)
                        bb = disp.pointsToBoundingBox(
                            corner1,
                            corner2)  # Display the temporary selection
                        if disp.leftButtonUp:  # User has finished his selection
                            selectionInProgress = False
                            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                            if selection is not None:
                                # The 3 main colors in the area selected are considered.
                                # Note that the selection should be included in the target and not contain background
                                try:
                                    selection.save('../ObjectTracking/' +
                                                   'kite_detail_tmp.jpg')
                                    img0 = Image(
                                        "kite_detail_tmp.jpg"
                                    )  # For unknown reason I have to reload the image...
                                    pal = img0.getPalette(bins=2, hue=False)
                                except:  # getPalette sometimes bugs out, raising LinAlgError because the matrix is not positive definite
                                    pal = pal
                                wasTargetFoundInPreviousFrame = False
                                previous_coord_px = (bb[0] + bb[2] / 2,
                                                     bb[1] + bb[3] / 2)
                        if corner1 != corner2:
                            selectionLayer.rectangle((bb[0], bb[1]),
                                                     (bb[2], bb[3]),
                                                     width=5,
                                                     color=Color.YELLOW)

                # If the target was already found, we can save computation time by
                # reducing the Region Of Interest around predicted position
                if wasTargetFoundInPreviousFrame:
                    ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                              max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
                    ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                                         maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                             centered = False)
                    if display:
                        # Draw the rectangle corresponding to the ROI on the complete image
                        layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                                 previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                              (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                               color = Color.GREEN, width = 2)
                else:
                    # Search the whole image if there is no clue where the target is
                    ROITopLeftCorner = (0, 0)
                    ROI = img
                    '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

                    # Find sky color
                sky = (img - img.binarize()).findBlobs(minsize=10000)
                if sky:
                    skycolor = sky[0].meanColor()
                # Option 3
                target_img = ROI - ROI  # Black image

                # Loop through palette of target colors
                if display and displayDebug:
                    decomposition = []
                i_col = 0
                for col in pal:
                    c = tuple([int(col[i]) for i in range(0, 3)])
                    # Search the target based on color
                    ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                    img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                    filter_img = img1.colorDistance(color=c)
                    h = filter_img.histogram(numbins=256)
                    cs = np.cumsum(h)
                    thmax = np.argmin(
                        abs(cs - 0.02 * img.width * img.height)
                    )  # threshold putting ~2% of the pixels in the expected color
                    thmin = np.argmin(
                        abs(cs - 0.005 * img.width * img.height)
                    )  # threshold putting ~0.5% of the pixels in the expected color
                    if thmin == thmax:
                        newth = thmin
                    else:
                        newth = np.argmin(h[thmin:thmax]) + thmin
                    alpha = 0.5
                    th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                    filter_img = filter_img.threshold(
                        max(40, min(200, th[i_col]))).invert()
                    target_img = target_img + filter_img
                    #print th
                    i_col = i_col + 1
                    if display and displayDebug:
                        [R, G, B] = filter_img.splitChannels()
                        white = (R - R).invert()
                        r = R * 1.0 / 255 * c[0]
                        g = G * 1.0 / 255 * c[1]
                        b = B * 1.0 / 255 * c[2]
                        tmp = white.mergeChannels(r, g, b)
                        decomposition.append(tmp)

                # Get a black background with white target foreground
                target_img = target_img.threshold(150)

                target_img = target_img - ROI.colorDistance(
                    color=skycolor).threshold(80).invert()

                if display and displayDebug:
                    small_ini = target_img.resize(
                        int(img.width / (len(pal) + 1)),
                        int(img.height / (len(pal) + 1)))
                    for tmp in decomposition:
                        small_ini = small_ini.sideBySide(tmp.resize(
                            int(img.width / (len(pal) + 1)),
                            int(img.height / (len(pal) + 1))),
                                                         side='bottom')
                    small_ini = small_ini.adaptiveScale(
                        (int(img.width), int(img.height)))
                    toDisplay = img.sideBySide(small_ini)
                else:
                    toDisplay = img
                    #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

                # Search for binary large objects representing potential target
                target = target_img.findBlobs(minsize=500)

                if target:  # If a target was found

                    if wasTargetFoundInPreviousFrame:
                        predictedTargetPosition = (
                            width * maxRelativeMotionPerFrame / 2,
                            height * maxRelativeMotionPerFrame / 2
                        )  # Target will most likely be close to the center of the ROI
                    else:
                        predictedTargetPosition = previous_coord_px
                        # If there are several targets in the image, take the one which is the closest to the predicted position
                    target = target.sortDistance(predictedTargetPosition)

                    # Get target coordinates according to minimal bounding rectangle or centroid.
                    coordMinRect = ROITopLeftCorner + np.array(
                        (target[0].minRectX(), target[0].minRectY()))
                    coord_px = ROITopLeftCorner + np.array(
                        target[0].centroid())

                    # Rotate the coordinates of roll angle around the middle of the screen
                    rot_coord_px = np.dot(
                        ctm, coord_px -
                        np.array([img.width / 2, img.height / 2])) + np.array(
                            [img.width / 2, img.height / 2])
                    if useBasemap:
                        coord = sp.deg2rad(
                            m(rot_coord_px[0],
                              img.height - rot_coord_px[1],
                              inverse=True))
                    else:
                        coord = localProjection(
                            rot_coord_px[0] - img.width / 2,
                            img.height / 2 - rot_coord_px[1],
                            radius,
                            mobile.yaw,
                            mobile.pitch,
                            inverse=True)
                    target_bearing, target_elevation = coord

                    # Get minimum bounding rectangle for display purpose
                    minR = ROITopLeftCorner + np.array(target[0].minRect())

                    contours = target[0].contour()

                    contours = [
                        ROITopLeftCorner + np.array(contour)
                        for contour in contours
                    ]

                    # Get target features
                    angle = sp.deg2rad(target[0].angle()) + mobile.roll
                    angle = sp.deg2rad(
                        unwrap180(sp.rad2deg(angle),
                                  sp.rad2deg(previous_angle)))
                    width = target[0].width()
                    height = target[0].height()

                    # Check if the kite is upside down:
                    # first rotate the contours into the kite frame
                    # (rotation by -(angle - mobile.roll), the coordinate
                    # transform matrix undoing the kite's image-frame angle)
                    ctm2 = np.array(
                        [[sp.cos(-angle + mobile.roll), -sp.sin(-angle + mobile.roll)],
                         [sp.sin(-angle + mobile.roll), sp.cos(-angle + mobile.roll)]])
                    rotated_contours = [
                        np.dot(ctm2, contour - coordMinRect)
                        for contour in contours
                    ]
                    y = [-tmp[1] for tmp in rotated_contours]
                    itop = np.argmax(y)  # index of the topmost point
                    ibottom = np.argmin(y)  # index of the bottommost point
                    # The most off-centre of the two extreme points should be at
                    # the bottom; if the top one is more off-centre, the kite
                    # is upside down
                    isInverted = abs(rotated_contours[itop][0]) > abs(
                        rotated_contours[ibottom][0])

                    if isInverted:
                        angle = angle + sp.pi

                    # Filter the data
                    alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
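                    # (alpha is the gain of a discrete first-order low-pass:
                    # deltaT << filterTimeConstant gives alpha near 0, heavy
                    # smoothing; deltaT >> filterTimeConstant gives alpha near
                    # 1, i.e. almost no smoothing)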
                    if not isPaused:
                        dCoord = np.array(previous_dCoord) * (
                            1 - alpha) + alpha * (
                                np.array(coord_px) - previous_coord_px
                            )  # related to the speed only if cam is fixed
                        dAngle = np.array(previous_dAngle) * (
                            1 - alpha) + alpha * (np.array(angle) -
                                                  previous_angle)
                    else:
                        dCoord = np.array([0, 0])
                        dAngle = np.array([0])


                    # print coord_px, angle, width, height, dCoord

                    # Record important data
                    times.append(timestamp)
                    coords_px.append(coord_px)
                    angles.append(angle)
                    target_elevations.append(target_elevation)
                    target_bearings.append(target_bearing)

                    # Export data to controller
                    self.elevation = target_elevation
                    self.bearing = target_bearing
                    self.orientation = angle
                    dt = time.time() - timeLastTarget
                    self.ROT = dAngle / dt
                    self.lastUpdateTime = t

                    # Save for initialisation of next step
                    previous_dCoord = dCoord
                    previous_angle = angle
                    previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                    wasTargetFoundInPreviousFrame = True
                    timeLastTarget = time.time()

                else:
                    wasTargetFoundInPreviousFrame = False

                if useHDF5:
                    hdfSize = hdfSize + 1
                    dset.resize((hdfSize, 7))
                    imset.resize((hdfSize, img.width, img.height, 3))
                    dset[hdfSize - 1, :] = [
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ]
                    imset[hdfSize - 1, :, :, :] = img.getNumpy()
                    recordFile.flush()
                else:
                    csv_writer.writerow([
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ])
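                # (both branches record the same 7 columns: timestamp, pixel
                # x/y, orientation angle, elevation, bearing and rate of turn)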

                if display:
                    if target:
                        # Add target features to layer
                        # Minimal rectangle and its center in RED
                        layer.polygon(minR[(0, 1, 3, 2), :],
                                      color=Color.RED,
                                      width=5)
                        layer.circle(
                            (int(coordMinRect[0]), int(coordMinRect[1])),
                            10,
                            filled=True,
                            color=Color.RED)

                        # Target contour and centroid in BLUE
                        layer.circle((int(coord_px[0]), int(coord_px[1])),
                                     10,
                                     filled=True,
                                     color=Color.BLUE)
                        layer.polygon(contours, color=Color.BLUE, width=5)

                        # Speed vector in BLACK
                        layer.line((int(coord_px[0]), int(coord_px[1])),
                                   (int(coord_px[0] + 20 * dCoord[0]),
                                    int(coord_px[1] + 20 * dCoord[1])),
                                   width=3)

                        # Line giving angle
                        layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                    int(coord_px[1] + 200 * sp.sin(angle))),
                                   (int(coord_px[0] - 200 * sp.cos(angle)),
                                    int(coord_px[1] - 200 * sp.sin(angle))),
                                   color=Color.RED)

                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                    # Add the layer to the raw image
                    toDisplay.addDrawingLayer(layer)
                    toDisplay.addDrawingLayer(selectionLayer)

                    # Add time metadata
                    toDisplay.drawText(str(i_frame) + " " + str(timestamp),
                                       x=0,
                                       y=0,
                                       fontsize=20)

                    # Add Line giving horizon
                    #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                    # Plot parallels
                    for lat in range(-90, 90, 15):
                        r = range(0, 361, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m(r, [lat] * len(r))
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad(r), \
                                    sp.deg2rad([lat]*len(r)), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Plot meridians
                    for lon in range(0, 360, 15):
                        r = range(-90, 91, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m([lon] * len(r), r)
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad([lon]*len(r)), \
                                    sp.deg2rad(r), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Text giving bearing
                    # \todo improve for high roll
                    for bearing_deg in range(0, 360, 30):
                        l = localProjection(sp.deg2rad(bearing_deg),
                                            sp.deg2rad(0),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(
                            str(bearing_deg),
                            (img.width / 2 + int(l[0]), img.height - 20),
                            color=Color.RED)

                    # Text giving elevation
                    # \todo improve for high roll
                    for elevation_deg in range(-60, 91, 30):
                        l = localProjection(0,
                                            sp.deg2rad(elevation_deg),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(str(elevation_deg),
                                   (img.width / 2, img.height / 2 - int(l[1])),
                                   color=Color.RED)

                    #toDisplay.save(js)
                    toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
        recordFile.close()
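
# NOTE: a minimal, self-contained sketch of the detection core used in the
# tracker above, assuming SimpleCV is available; "frame.jpg", skycolor and
# the predicted position are stand-in values, not taken from the original.
from SimpleCV import Image

skycolor = (135, 206, 235)  # assumed sky colour sample
img = Image("frame.jpg")  # hypothetical input frame
# colorDistance is dark where pixels match skycolor, so threshold + invert
# yields a white mask over the sky; subtracting it blacks out the sky and
# leaves only candidate targets
target_img = img - img.colorDistance(color=skycolor).threshold(80).invert()
blobs = target_img.findBlobs(minsize=500)
if blobs:
    predicted = (img.width / 2, img.height / 2)  # assumed predicted position
    blobs = blobs.sortDistance(predicted)  # closest blob first
    print blobs[0].centroid()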
Example #32
0
from SimpleCV import Color, Image
import time
img = Image("mandms.jpg")

blue_distance = img.colorDistance(Color.BLUE).invert()
blobs = blue_distance.findBlobs()
blobs.draw(color=Color.PUCE, width=3)
blue_distance.show()
img.addDrawingLayer(blue_distance.dl())
img.show()
time.sleep(10)
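
# A hedged variant of the same pipeline: keep only reasonably large blobs
# and report their positions; minsize=50 is an assumed noise cut-off, not a
# value from the original example.
blue_blobs = blue_distance.findBlobs(minsize=50)
if blue_blobs:
    for b in blue_blobs:
        print "blue blob at %s, area %d" % (str(b.centroid()), int(b.area()))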
    if paso1:
        for marca in marcas:
            SCREEN.blit(marca[0], marca[1])  # image, rect

    if not paso1 and not paso2:
        dibujar_Textos('GRACIAS,', font40, COLOR_AZUL_CIELO, SCREEN, 10, 100, 'center')
        dibujar_Textos('CALIBRACION DE TAPETE FINALIZADA', font40, COLOR_AZUL_CIELO, SCREEN, 10, 150, 'center')

    if paso2:
        img = camara.getImage()
        nivel_R = control_RED.valorLevel
        nivel_G = control_GREEN.valorLevel
        nivel_B = control_BLUE.valorLevel
        UMBRAL_BINARIZADO = control_BINARIZE.valorLevel

        # Distance to the calibrated colour, then a binary mask at the chosen threshold
        img = img.colorDistance((nivel_R, nivel_G, nivel_B))
        img = img.binarize(UMBRAL_BINARIZADO)
        img.save('temp/vistaTapete.png')
        imagen_calibracion = pygame.image.load('temp/vistaTapete.png')
        SCREEN.blit(imagen_calibracion, (0, 0))

        # Draw the slider controls
        for control_deslizable in listaControles:
            control_deslizable.draw(SCREEN)

        # Draw labels (and other text)
        dibujar_Textos('(R)   %0d' % (nivel_R), font20, COLOR_BLANCO_SUCIO, SCREEN, 10, 15, 0)
        dibujar_Textos('(G)   %0d' % (nivel_G), font20, COLOR_BLANCO_SUCIO, SCREEN, 10, 65, 0)
        dibujar_Textos('(B)   %0d' % (nivel_B), font20, COLOR_BLANCO_SUCIO, SCREEN, 10, 115, 0)
        dibujar_Textos('BINARIZADO   %0d' % (UMBRAL_BINARIZADO), font20, COLOR_BLANCO_SUCIO, SCREEN, 10, 160, 0)
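
# NOTE: a minimal sketch (hypothetical helper, not part of the original
# script) wrapping the calibration pipeline above into a reusable function.
def mascara_tapete(img, r, g, b, umbral):
    # colorDistance is dark where a pixel matches (r, g, b); binarize then
    # turns the distance map into a black/white mask at the slider threshold
    return img.colorDistance((r, g, b)).binarize(umbral)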