Example No. 1
 def initialize_scope_layer(self):
     self.__scope_layer = DrawingLayer(
         self.window_size)  # same as window size
     # circle(center coordinates, radius, color, line width)
     self.__scope_layer.circle(self.__window_center,
                               50,
                               Color.BLUE,
                               width=3)
     self.__scope_layer.circle(self.__window_center,
                               100,
                               Color.BLUE,
                               width=2)
     self.__scope_layer.line(
         (self.__window_center[0], self.__window_center[1] - 50),
         (self.__window_center[0], 0),
         Color.BLACK,
         width=2)
     self.__scope_layer.line(
         (self.__window_center[0], self.__window_center[1] + 50),
         (self.__window_center[0], self.window_size[1]),
         Color.BLACK,
         width=2)
     self.__scope_layer.line(
         (self.__window_center[0] - 50, self.__window_center[1]),
         (0, self.__window_center[1]),
         Color.BLACK,
         width=2)
     self.__scope_layer.line(
         (self.__window_center[0] + 50, self.__window_center[1]),
         (self.window_size[0], self.__window_center[1]),
         Color.BLACK,
         width=2)
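
A minimal, self-contained sketch of the same scope overlay applied to a single camera frame (not from the original source; Camera, DrawingLayer, addDrawingLayer, and applyLayers are standard SimpleCV calls):

from SimpleCV import Camera, Color, DrawingLayer

cam = Camera()
img = cam.getImage()
center = (img.width / 2, img.height / 2)
scope = DrawingLayer((img.width, img.height))
scope.circle(center, 50, Color.BLUE, width=3)   # inner ring
scope.circle(center, 100, Color.BLUE, width=2)  # outer ring
scope.line((center[0], center[1] - 50), (center[0], 0), Color.BLACK, width=2)  # top tick
img.addDrawingLayer(scope)
img.applyLayers()
img.show()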
Example No. 2
 def _WriteText(self, disp, img, txt, color):
     if disp is not None:
         txt = ' ' + txt + ' '
         img = img.adaptiveScale(disp.resolution)
         layer = DrawingLayer((img.width, img.height))
         layer.setFontSize(60)
         layer.ezViewText(txt, (20, 20), fgcolor=color)
         img.addDrawingLayer(layer)
         img.applyLayers()
         img.save(disp)
Example No. 3
    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()

            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin,
                                        self.trackingBlobMax)

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))

                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)

                # draw a circle around the main blob
                roiLayer.circle((self.x, self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()

            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0 / (now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}".format(
                func=inspect.stack()[0][3], x=self.x, y=self.y, fps=fps))
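
The FPS bookkeeping above keeps a rolling queue of the last 30 frame timestamps. A self-contained sketch of the same idea (the names here are illustrative, not from the original):

from Queue import Queue            # Python 2, matching these examples
from datetime import datetime

def update_fps(frame_times, window=30):
    # Push the newest timestamp; once `window` samples exist, FPS is the
    # window size divided by the span from the oldest sample to now.
    now = datetime.utcnow()
    frame_times.put(now)
    if frame_times.qsize() < window:
        return 0.0
    oldest = frame_times.get()     # popping also keeps the queue bounded
    return float(window) / (now - oldest).total_seconds()

frame_times = Queue()
# inside the vision loop: fps = update_fps(frame_times)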
Example No. 4
def loop():
    try:
        # NOTE: this local name shadows the standard "json" module
        json = get_jsonparsed_data(quakeURL)
        setRGB(0, 128, 64)
        setRGB(0, 255, 0)
        # Read distance value from Ultrasonic
        # distant = ultrasonicRead(ultrasonic_ranger)
        # button_state = digitalRead(button)
        # flushLCD('+++ ' + str(distant) + ':' + str(trigger))

        analogWrite(led, 255)
        # count down for photo!
        flushLCD('SMILE!')
        beep(0.01)
        time.sleep(1)
        beep(0.02)
        time.sleep(1)
        beep(0.1)
        time.sleep(1)

        frame = myCamera.getImage()
        flushLCD('processing ...')
        ts = time.time()
        now = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d-%H%M%S')
        photo = '/home/pi/tmp/' + now + '.jpg'  # NOTE: MUST use an absolute path here!
        psize = frame.width * frame.height
        print "Photo Size: " + photo + " " + str(psize) + " w:" + str(
            frame.width) + " h: " + str(frame.height)
        myDL = DrawingLayer((frame.width, frame.height))
        myDL.setFontSize(80)
        myDL.text("Quake report from " + json["Ort"] + " with Strength of " +
                  json["LastStrengthString"] + " at: " + json["LastOccured"],
                  (10, 10),
                  color=Color.RED)
        frame.addDrawingLayer(myDL)
        frame.applyLayers()

        frame.save(photo)
        analogWrite(led, 0)
        beep(0.05)
        time.sleep(0.3)
        beep(0.2)

        time.sleep(1)  # wait for the save to complete ...
        status = twitterText1 + ' ' + hashtag + ' ' + twitterText2 + ' ' + json[
            "Ort"] + ' @:' + now + ' ' + twitterText3 + ' ' + json["Name"]
        # tweet ...
        api.update_with_media(photo, status=status)
        logLCD('TWEETed!')

    except TypeError:
        print "Type Error"
    except IOError:
        print "IO Error"
Example No. 5
def draw_center_square_and_circle(img):

    from SimpleCV import DrawingLayer

    facelayer = DrawingLayer((img.width, img.height))
    facebox_dim = (200,200)
    center_point = (img.width / 2, img.height / 2)
    facebox = facelayer.centeredRectangle(center_point, facebox_dim)
    circlelayer = DrawingLayer((img.width, img.height))
    circlelayer.circle(center_point, 10)
    img.addDrawingLayer(circlelayer)
    img.addDrawingLayer(facelayer)
    img.applyLayers()
    return img
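
A possible call, using the 'lenna' test image that ships with SimpleCV:

from SimpleCV import Image

img = Image('lenna')  # built-in SimpleCV sample image
draw_center_square_and_circle(img).show()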
Example No. 6
    def draw_crosshair(self, pos, layer_name=None):

        size = self._layers['raw'].size()

        if layer_name is not None:
            layer = DrawingLayer(self._layers['raw'].size())
        else:
            layer = self._layers['raw'].dl()

        layer.line((0, pos[1]), (size[0], pos[1]), color=(0, 0, 255))
        layer.line((pos[0], 0), (pos[0], size[1]), color=(0, 0, 255))

        if layer_name is not None:
            self.update_layer(layer_name, layer)
Example No. 7
def face_recognize(filename):
    from SimpleCV import Image, Display, DrawingLayer
    
    image = Image(filename)
    faces = image.findHaarFeatures('face.xml')
    if faces:
        for face in faces:
            face_layer = DrawingLayer((image.width, image.height))
            face_box = face_layer.centeredRectangle(face.coordinates(), (face.width(), face.height()))
            image.addDrawingLayer(face_layer)
            image.applyLayers()
        image.save(filename)
        print('Detected {} face(s)'.format(len(faces)))
    else:
        print('No face detected')
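
A hypothetical call (note that the function saves the annotated result back over the input file):

face_recognize('group_photo.jpg')  # illustrative path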
Example No. 8
def main(arg):
    # Get all the images files
    # files = filter(lambda f: str(f).__contains__('.png'), os.listdir('.'))
    # print files

    # for f in files:
    #     img = scvImage(f)
    #     lines = img.findLines()
    #     corners = img.findCorners()
    #
    #     print img.filename
    #     print 'Lines   : ', lines
    #     print 'Corners : ', corners
    #     print
    #
    #     img.save('/Users/xor/Desktop/{0}'.format(img.filename.replace('.png', '_OUT.png')))


    # Load Images
    setting_img = scvImage('/Users/xor/Desktop/settings.png', sample=True)
    friend_img = scvImage('/Users/xor/Desktop/friend_list.png', sample=True)
    hs_game_img = scvImage('/Users/xor/Desktop/up_left.png', sample=True)

    # Open ourself a drawing layer for display only
    dl = DrawingLayer((hs_game_img.width, hs_game_img.height))

    # Check for template matching
    i = 0
    matches = hs_game_img.findTemplate(setting_img, 5, 'CCOEFF_NORM')
    if matches:
        for match in matches:
            dl.rectangle((match.x, match.y), (match.width(), match.height()),
                         color=Color.RED)

        result = scvImage('/Users/xor/Desktop/up_left.png', sample=True)
        result.addDrawingLayer(dl)
        result.applyLayers()
        result.save('/Users/xor/Desktop/MATCHES{0}.png'.format(i))
        i = i + 1
        time.sleep(3)
    else:
        print 'No match'


    return 0
Example No. 9
def addText(fileName, text):
    image = Image(fileName)
    draw = DrawingLayer((IMAGE_WIDTH, IMAGE_HEIGHT))
    draw.rectangle((8, 8), (121, 18), filled=True, color=Color.YELLOW)
    draw.setFontSize(20)
    draw.text(text, (10, 9), color=Color.BLUE)
    image.addDrawingLayer(draw)
    image.save(fileName)
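
The drawing layer is sized from module-level IMAGE_WIDTH and IMAGE_HEIGHT rather than from the loaded image, so those constants must match the file's resolution. A hedged usage sketch (the values and path are illustrative):

IMAGE_WIDTH, IMAGE_HEIGHT = 640, 480  # assumed to be defined at module level
addText('snapshot.jpg', 'cam01 09:41')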
Example No. 10
def drawImage():
    # Load map
    d = Display((1240, 820), title="London Map - Scotland Yard")
    lMap = Image("maps/map.jpg")

    # Check the players' positions

    # Look up the corresponding pixel in the list

    # Draw a circle for each player
    circlesLayer = DrawingLayer((lMap.width, lMap.height))
    circlesLayer.circle((191, 44), 20, color=Color.BLACK, filled=True, alpha=255)
    lMap.addDrawingLayer(circlesLayer)

    # Display
    lMap.applyLayers()
    lMap.save(d)

    '''Later, create "draw possibilities" areas on the map for the thief.'''
Example No. 11
def draw_blobs(img, corners, boat):
    centers = [blob.centroid() for blob in corners]
    tl = topLeft(centers)
    br = bottomRight(centers)
    maplayer = DrawingLayer((img.width, img.height))
    maplayer.rectangle(tl, (br[0] - tl[0], br[1] - tl[1]))

    for blob in corners:
        maplayer.circle(blob.centroid(), radius=20)
    maplayer.circle(boat.centroid(), radius=10)

    pos = boat_lat_lon(boat, corners)
    maplayer.text("(%.2f, %.2f)" % (pos[0], pos[1]),
                  boat.centroid(),
                  color=Color.WHITE)

    img.addDrawingLayer(maplayer)
    img.applyLayers()
    img.show()
Example No. 12
    def __init__(self, camara_id, retraso_video=10, framerate=4.0, color=False, size=(320, 240), ruido=True):
        self.buffer_size = int(retraso_video*framerate) + 1             # (make sure it is never zero)
        self.intervalo_refresco = float(1.0/framerate)                  # how often the video buffer data is refreshed
        self.momento_refresco = time.time() + self.intervalo_refresco   # moment at which data must be popped from and pushed into the video buffer
        time.sleep(self.intervalo_refresco)                             # safety pause while the buffer is being generated
        self.video_buffer = []                                          # holds the frames that cover the delay time
        self.imagen = None                                              # temporary storage for the camera capture, used for processing
        self.camara = Camera(camara_id)                                 # create an instance of the SimpleCV Camera() class
        self.Flag_color = color                                         # if False, the image is processed and returned in grayscale
        self.Flag_resize = False                                        # set to True when a valid resolution is passed in, to allow rescaling
        self.size = size                                                # if it is a valid resolution, the image is rescaled
        if size[0] > 0 and size[1] > 0:
            self.Flag_resize = True

        # noise control
        self.duracion_interferencia = (2, 6)                            # how long, in seconds, an interference burst may last
        self.tiempo_entre_interferencias = (5, 20)                      # periods of signal without interference, in seconds
        self.FLAG_ruido_activo = ruido                                  # if True, interference is injected at random moments
        self.FLAG_aplicar_ruido_ahora = True                            # when FLAG_ruido_activo is True, indicates whether it is time to inject interference
        if self.Flag_color == False:
            self.video_ruido = [self.resize(self.gris(Image("VideoBuffer_SimpleCV/c_ruido%d.png" % x))) for x in range(5)]    # load the list of frames that make up the noise
        else:
            self.video_ruido = [self.resize(Image("VideoBuffer_SimpleCV/c_ruido%d.png" % x)) for x in range(5)]  # load the list of frames that make up the noise

        self.frame_ruido_index = 0                                      # noise frame to be displayed
        self.incremento_aleatorio = retraso_video + random.randrange(self.duracion_interferencia[0], self.duracion_interferencia[1])
        self.momento_cambio_bandera = time.time() + self.incremento_aleatorio
        self.nivel_ruido_maximo = 10                                    # intensity at which the interference is displayed

        # create and initially fill the video buffer
        self.video_buffer = []                                          # define the buffer as a list
        self.imagen = self.getImage()                                   # capture a frame from the camera
        self.imagen = self.resize(self.gris(self.imagen))               # rescale it and convert it to grayscale
        self.imagen = self.imagen.blur(45, 45)                          # until the delay has elapsed, the image will be blurry

        # this does not work :(   It does not play well with the noise
        textLayer = DrawingLayer((self.imagen.width, self.imagen.height))   # create an empty layer to write text on
        textLayer.text("CONECTANDO...", (40, 70), color=Color.RED)      # put the "CONECTANDO..." ("CONNECTING...") message on the image
        self.imagen.addDrawingLayer(textLayer)                          # merge the image layer and the text layer
        self.video_buffer = [self.imagen for x in range(self.buffer_size)]  # fill the video buffer with a static image
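
The buffer is what implements the video delay: each refresh appends the newest frame and pops the oldest, so the frame handed to the display is always roughly retraso_video seconds old. A hedged sketch of that update step (the method name and details are assumptions, not part of the original class):

    def refresh(self):
        # Hypothetical update step: push the newest capture, pop the oldest.
        # With buffer_size = retraso_video*framerate + 1, the popped frame is
        # approximately retraso_video seconds old.
        if time.time() >= self.momento_refresco:
            self.momento_refresco = time.time() + self.intervalo_refresco
            frame = self.getImage()
            if not self.Flag_color:
                frame = self.gris(frame)
            if self.Flag_resize:
                frame = self.resize(frame)
            self.video_buffer.append(frame)
            return self.video_buffer.pop(0)   # the delayed frame to display
        return None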
Example No. 13
def renderGUI():
    global bNums
    initPos()
    while True:
        img = cam.getImage().flipHorizontal()
        img = img.binarize()
        blobs = img.findBlobs()
        if blobs is None:
            blobs = []  # findBlobs() returns None when nothing is found
        rect = DrawingLayer((img.width, img.height))
        rect_dim = (200, 200)
        l1.configure(text="Total Blobs: " + str(len(blobs)))
        for b in blobs:
            # print b.centroid()
            # print b.isCircle()
            if b.area() > 300:
                center_point = b.centroid()
                rect.centeredRectangle(center_point, rect_dim, color=Color.RED)
        img.addDrawingLayer(rect)  # add the layer once, after all rectangles are drawn
        #img.applyLayers()
        img.show()
        servos = [str(w1.get()), str(w2.get()), str(w3.get()), str(w4.get()), str(w5.get())]
        for i in servos:
            pass
Example No. 14
def transform_classify_blobs(processing_path, img, db_blobs):
    log("transform_classify_blobs(processing_path=%s, img=%s, db_blobs=%s)" %
        (processing_path, img, db_blobs))
    # create markup layer
    img_markups = DrawingLayer(img.size())
    # traverse blobs
    for db_blob in db_blobs:
        b = db_blob["bounds"]
        # call an external service to classify the blob image as a class
        # category ("c1ass" is spelled with a digit to avoid the reserved
        # word "class")
        c1ass = classify_blob(db_blob["img_url"])
        db_blob["c1ass"] = c1ass
        db_blob["c1ass_state"] = "auto"
        # get the state color
        state = db_blob["state"]
        color = Color.RED if state == BLOB_STATE_REMOVED else (
            Color.ORANGE if state == BLOB_STATE_DUPLICATE else Color.GREEN)
        # draw blob rectangle
        img_markups.rectangle(topLeft=(b["x"], b["y"]),
                              dimensions=(b["w"], b["h"]),
                              color=color,
                              width=3)
        # draw class text
        img_markups.text(text=c1ass,
                         location=(b["x"], b["y"] - 15),
                         color=color)
    # copy image and add markups
    img_marked = img.copy()
    img_marked.addDrawingLayer(img_markups)
    img_marked_name = TR_TYPE_BLOBS_CLASSIFIED + ".png"
    img_marked_path = processing_path + img_marked_name
    img_marked.save(img_marked_path)
    return db_transform(uid=ObjectId(),
                        title="classify blobs",
                        type=TR_TYPE_BLOBS_CLASSIFIED,
                        description="use the classifier to classify the blob",
                        img_path=img_marked_path,
                        img_url=url_for("static",
                                        filename=os.path.relpath(
                                            img_marked_path, "static"),
                                        _external=True)), db_blobs
Example No. 15
    def track(self):
        print "Press right mouse button to pause or play"
        print "Use left mouse button to select target"
        print "Target color must be different from background"
        print "Target must have width larger than height"
        print "Target can be upside down"

        #Parameters
        isUDPConnection = False  # Currently switched manually in the code
        display = True
        displayDebug = True
        useBasemap = False
        maxRelativeMotionPerFrame = 2  # How much the target can move between two successive frames
        pixelPerRadians = 320
        radius = pixelPerRadians
        referenceImage = '../ObjectTracking/kite_detail.jpg'
        scaleFactor = 0.5
        isVirtualCamera = True
        useHDF5 = False

        # Open reference image: this is used at initialisation
        target_detail = Image(referenceImage)

        # Get RGB color palette of target (was found to work better than using hue)
        pal = target_detail.getPalette(bins=2, hue=False)

        # Open video to analyse or live stream
        #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
        if isVirtualCamera:
            #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
            #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
            #cam = VirtualCamera('output.avi', 'video')
            cam = VirtualCamera(
                '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
                'video')
            virtualCameraFPS = 25
        else:
            cam = JpegStreamCamera(
                'http://192.168.43.1:8080/videofeed')  #640 * 480
            #cam = Camera()

        # Get a sample image to initialize the display at the same size
        img = cam.getImage().scale(scaleFactor)
        print img.width, img.height
        # Create a pygame display
        if display:
            if img.width > img.height:
                disp = Display(
                    (27 * 640 / 10, 25 * 400 / 10)
                )  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
            else:
                disp = Display((810, 1080))
        #js = JpegStreamer()

        # Initialize variables
        previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
        previous_coord_px = (
            0, 0)  # Initialized to top left corner, which always exists
        previous_dCoord = previous_coord_px
        previous_dAngle = previous_angle
        angles = []
        coords_px = []
        coord_px = [0, 0]
        angle = 0
        target_elevations = []
        target_bearings = []
        times = []
        wasTargetFoundInPreviousFrame = False
        i_frame = 0
        isPaused = False
        selectionInProgress = False
        th = [100, 100, 100]
        skycolor = Color.BLUE
        timeLastTarget = 0

        # Prepare recording
        recordFilename = datetime.datetime.utcnow().strftime(
            "%Y%m%d_%Hh%M_") + 'simpleTrack'
        if useHDF5:
            try:
                os.remove(recordFilename + '.hdf5')
            except:
                print('Creating file ' + recordFilename + '.hdf5')
            """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
            h5py._errors.silence_errors()
            recordFile = h5py.File(
                os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'),
                'a')
            hdfSize = 0
            dset = recordFile.create_dataset('kite', (2, 2),
                                             maxshape=(None, 7))
            imset = recordFile.create_dataset('image',
                                              (2, img.width, img.height, 3),
                                              maxshape=(None, img.width,
                                                        img.height, 3))
        else:
            try:
                os.remove(recordFilename + '.csv')
            except:
                print('Creating file ' + recordFilename + '.csv')
            recordFile = file(
                os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
            csv_writer = csv.writer(recordFile)
            csv_writer.writerow([
                'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'
            ])

        # Launch a thread to get UDP message with orientation of the camera
        mobile = mobileState.mobileState()
        if isUDPConnection:
            mobile.open()
        # Loop while not canceled by user
        t0 = time.time()
        previousTime = t0
        while not (display) or disp.isNotDone():
            t = time.time()
            deltaT = (t - previousTime)
            FPS = 1.0 / deltaT
            #print 'FPS =', FPS
            if isVirtualCamera:
                deltaT = 1.0 / virtualCameraFPS
            previousTime = t
            i_frame = i_frame + 1
            timestamp = datetime.datetime.utcnow()

            # Receive orientation of the camera
            if isUDPConnection:
                mobile.computeRPY([2, 0, 1], [-1, 1, 1])
            ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
                    [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

            if useBasemap:
                # Warning this really slows down the computation
                m = Basemap(width=img.width,
                            height=img.height,
                            projection='aeqd',
                            lat_0=sp.rad2deg(mobile.pitch),
                            lon_0=sp.rad2deg(mobile.yaw),
                            rsphere=radius)

            # Get an image from camera
            if not isPaused:
                img = cam.getImage()
                img = img.resize(int(scaleFactor * img.width),
                                 int(scaleFactor * img.height))

            if display:
                # Pause image when right button is pressed
                dwn = disp.rightButtonDownPosition()
                if dwn is not None:
                    isPaused = not (isPaused)
                    dwn = None

            if display:
                # Create a layer to enable user to make a selection of the target
                selectionLayer = DrawingLayer((img.width, img.height))

            if img:
                if display:
                    # Create a new layer to host information retrieved from video
                    layer = DrawingLayer((img.width, img.height))
                    # Selection is a rectangle drawn while holding mouse left button down
                    if disp.leftButtonDown:
                        corner1 = (disp.mouseX, disp.mouseY)
                        selectionInProgress = True
                    if selectionInProgress:
                        corner2 = (disp.mouseX, disp.mouseY)
                        bb = disp.pointsToBoundingBox(
                            corner1,
                            corner2)  # Display the temporary selection
                        if disp.leftButtonUp:  # User has finished his selection
                            selectionInProgress = False
                            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                            if selection != None:
                                # The 3 main colors in the area selected are considered.
                                # Note that the selection should be included in the target and not contain background
                                try:
                                    selection.save('../ObjectTracking/' +
                                                   'kite_detail_tmp.jpg')
                                    img0 = Image(
                                        "kite_detail_tmp.jpg"
                                    )  # For unknown reason I have to reload the image...
                                    pal = img0.getPalette(bins=2, hue=False)
                                except:  # getPalette is sometimes bugging and raising LinalgError because matrix not positive definite
                                    pal = pal
                                wasTargetFoundInPreviousFrame = False
                                previous_coord_px = (bb[0] + bb[2] / 2,
                                                     bb[1] + bb[3] / 2)
                        if corner1 != corner2:
                            selectionLayer.rectangle((bb[0], bb[1]),
                                                     (bb[2], bb[3]),
                                                     width=5,
                                                     color=Color.YELLOW)

                # If the target was already found, we can save computation time by
                # reducing the Region Of Interest around predicted position
                if wasTargetFoundInPreviousFrame:
                    ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                              max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
                    ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                                         maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                             centered = False)
                    if display:
                        # Draw the rectangle corresponding to the ROI on the complete image
                        layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                                 previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                              (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                               color = Color.GREEN, width = 2)
                else:
                    # Search on the whole image if no clue of where is the target
                    ROITopLeftCorner = (0, 0)
                    ROI = img
                    '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

                # Find sky color
                sky = (img - img.binarize()).findBlobs(minsize=10000)
                if sky:
                    skycolor = sky[0].meanColor()
                # Option 3
                target_img = ROI - ROI  # Black image

                # Loop through palette of target colors
                if display and displayDebug:
                    decomposition = []
                i_col = 0
                for col in pal:
                    c = tuple([int(col[i]) for i in range(0, 3)])
                    # Search the target based on color
                    ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                    img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                    filter_img = img1.colorDistance(color=c)
                    h = filter_img.histogram(numbins=256)
                    cs = np.cumsum(h)
                    thmax = np.argmin(
                        abs(cs - 0.02 * img.width * img.height)
                    )  # find the threshold that keeps 2% of the pixels in the expected color
                    thmin = np.argmin(
                        abs(cs - 0.005 * img.width * img.height)
                    )  # find the threshold that keeps 0.5% of the pixels in the expected color
                    if thmin == thmax:
                        newth = thmin
                    else:
                        newth = np.argmin(h[thmin:thmax]) + thmin
                    alpha = 0.5
                    th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                    filter_img = filter_img.threshold(
                        max(40, min(200, th[i_col]))).invert()
                    target_img = target_img + filter_img
                    #print th
                    i_col = i_col + 1
                    if display and displayDebug:
                        [R, G, B] = filter_img.splitChannels()
                        white = (R - R).invert()
                        r = R * 1.0 / 255 * c[0]
                        g = G * 1.0 / 255 * c[1]
                        b = B * 1.0 / 255 * c[2]
                        tmp = white.mergeChannels(r, g, b)
                        decomposition.append(tmp)

                # Get a black background with a white target foreground
                target_img = target_img.threshold(150)

                target_img = target_img - ROI.colorDistance(
                    color=skycolor).threshold(80).invert()

                if display and displayDebug:
                    small_ini = target_img.resize(
                        int(img.width / (len(pal) + 1)),
                        int(img.height / (len(pal) + 1)))
                    for tmp in decomposition:
                        small_ini = small_ini.sideBySide(tmp.resize(
                            int(img.width / (len(pal) + 1)),
                            int(img.height / (len(pal) + 1))),
                                                         side='bottom')
                    small_ini = small_ini.adaptiveScale(
                        (int(img.width), int(img.height)))
                    toDisplay = img.sideBySide(small_ini)
                else:
                    toDisplay = img
                    #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

                # Search for binary large objects representing potential target
                target = target_img.findBlobs(minsize=500)

                if target:  # If a target was found

                    if wasTargetFoundInPreviousFrame:
                        predictedTargetPosition = (
                            width * maxRelativeMotionPerFrame / 2,
                            height * maxRelativeMotionPerFrame / 2
                        )  # Target will most likely be close to the center of the ROI
                    else:
                        predictedTargetPosition = previous_coord_px
                        # If there are several targets in the image, take the one which is the closest of the predicted position
                    target = target.sortDistance(predictedTargetPosition)

                    # Get target coordinates according to minimal bounding rectangle or centroid.
                    coordMinRect = ROITopLeftCorner + np.array(
                        (target[0].minRectX(), target[0].minRectY()))
                    coord_px = ROITopLeftCorner + np.array(
                        target[0].centroid())

                    # Rotate the coordinates of roll angle around the middle of the screen
                    rot_coord_px = np.dot(
                        ctm, coord_px -
                        np.array([img.width / 2, img.height / 2])) + np.array(
                            [img.width / 2, img.height / 2])
                    if useBasemap:
                        coord = sp.deg2rad(
                            m(rot_coord_px[0],
                              img.height - rot_coord_px[1],
                              inverse=True))
                    else:
                        coord = localProjection(
                            rot_coord_px[0] - img.width / 2,
                            img.height / 2 - rot_coord_px[1],
                            radius,
                            mobile.yaw,
                            mobile.pitch,
                            inverse=True)
                    target_bearing, target_elevation = coord

                    # Get minimum bounding rectangle for display purpose
                    minR = ROITopLeftCorner + np.array(target[0].minRect())

                    contours = target[0].contour()

                    contours = [
                        ROITopLeftCorner + np.array(contour)
                        for contour in contours
                    ]

                    # Get target features
                    angle = sp.deg2rad(target[0].angle()) + mobile.roll
                    angle = sp.deg2rad(
                        unwrap180(sp.rad2deg(angle),
                                  sp.rad2deg(previous_angle)))
                    width = target[0].width()
                    height = target[0].height()

                    # Check if the kite is upside down
                    # First rotate the kite
                    ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
                        [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
                    rotated_contours = [
                        np.dot(ctm2, contour - coordMinRect)
                        for contour in contours
                    ]
                    y = [-tmp[1] for tmp in rotated_contours]
                    itop = np.argmax(y)  # Then looks at the points at the top
                    ibottom = np.argmin(y)  # and the point at the bottom
                    # The most off-center point is at the bottom
                    if abs(rotated_contours[itop][0]) > abs(
                            rotated_contours[ibottom][0]):
                        isInverted = True
                    else:
                        isInverted = False

                    if isInverted:
                        angle = angle + sp.pi

                        # Filter the data
                    alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
                    if not (isPaused):
                        dCoord = np.array(previous_dCoord) * (
                            1 - alpha) + alpha * (
                                np.array(coord_px) - previous_coord_px
                            )  # related to the speed only if cam is fixed
                        dAngle = np.array(previous_dAngle) * (
                            1 - alpha) + alpha * (np.array(angle) -
                                                  previous_angle)
                    else:
                        dCoord = np.array([0, 0])
                        dAngle = np.array([0])


                    # print coord_px, angle, width, height, dCoord

                    # Record important data
                    times.append(timestamp)
                    coords_px.append(coord_px)
                    angles.append(angle)
                    target_elevations.append(target_elevation)
                    target_bearings.append(target_bearing)

                    # Export data to controller
                    self.elevation = target_elevation
                    self.bearing = target_bearing
                    self.orientation = angle
                    dt = time.time() - timeLastTarget
                    self.ROT = dAngle / dt
                    self.lastUpdateTime = t

                    # Save for initialisation of next step
                    previous_dCoord = dCoord
                    previous_angle = angle
                    previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                    wasTargetFoundInPreviousFrame = True
                    timeLastTarget = time.time()

                else:
                    wasTargetFoundInPreviousFrame = False

                if useHDF5:
                    hdfSize = hdfSize + 1
                    dset.resize((hdfSize, 7))
                    imset.resize((hdfSize, img.width, img.height, 3))
                    dset[hdfSize - 1, :] = [
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ]
                    imset[hdfSize - 1, :, :, :] = img.getNumpy()
                    recordFile.flush()
                else:
                    csv_writer.writerow([
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ])

                if display:
                    if target:
                        # Add target features to layer
                        # Minimal rectange and its center in RED
                        layer.polygon(minR[(0, 1, 3, 2), :],
                                      color=Color.RED,
                                      width=5)
                        layer.circle(
                            (int(coordMinRect[0]), int(coordMinRect[1])),
                            10,
                            filled=True,
                            color=Color.RED)

                        # Target contour and centroid in BLUE
                        layer.circle((int(coord_px[0]), int(coord_px[1])),
                                     10,
                                     filled=True,
                                     color=Color.BLUE)
                        layer.polygon(contours, color=Color.BLUE, width=5)

                        # Speed vector in BLACK
                        layer.line((int(coord_px[0]), int(coord_px[1])),
                                   (int(coord_px[0] + 20 * dCoord[0]),
                                    int(coord_px[1] + 20 * dCoord[1])),
                                   width=3)

                        # Line giving angle
                        layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                    int(coord_px[1] + 200 * sp.sin(angle))),
                                   (int(coord_px[0] - 200 * sp.cos(angle)),
                                    int(coord_px[1] - 200 * sp.sin(angle))),
                                   color=Color.RED)

                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                # Add the layer to the raw image
                    toDisplay.addDrawingLayer(layer)
                    toDisplay.addDrawingLayer(selectionLayer)

                    # Add time metadata
                    toDisplay.drawText(str(i_frame) + " " + str(timestamp),
                                       x=0,
                                       y=0,
                                       fontsize=20)

                    # Add Line giving horizon
                    #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                    # Plot parallels
                    for lat in range(-90, 90, 15):
                        r = range(0, 361, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m(r, [lat] * len(r))
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad(r), \
                                    sp.deg2rad([lat]*len(r)), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                # Plot meridians
                    for lon in range(0, 360, 15):
                        r = range(-90, 91, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m([lon] * len(r), r)
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l= localProjection(sp.deg2rad([lon]*len(r)), \
                                    sp.deg2rad(r), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                # Text giving bearing
                # \todo improve for high roll
                    for bearing_deg in range(0, 360, 30):
                        l = localProjection(sp.deg2rad(bearing_deg),
                                            sp.deg2rad(0),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(
                            str(bearing_deg),
                            (img.width / 2 + int(l[0]), img.height - 20),
                            color=Color.RED)

                # Text giving elevation
                # \todo improve for high roll
                    for elevation_deg in range(-60, 91, 30):
                        l = localProjection(0,
                                            sp.deg2rad(elevation_deg),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(str(elevation_deg),
                                   (img.width / 2, img.height / 2 - int(l[1])),
                                   color=Color.RED)

                    #toDisplay.save(js)
                    toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
        recordFile.close()
Example No. 16
from SimpleCV import Display, Camera, Image, DrawingLayer, VirtualCamera

disp = Display((600, 800))
#cam = Camera()
cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteTest.avi', 'video')
isPaused = False
updateSelection = False
while disp.isNotDone():
  if not isPaused:
    img_flip = cam.getImage().flipHorizontal()
    img = img_flip.edges(150, 100).dilate()
  if disp.rightButtonDown:
    isPaused = not isPaused
  selectionLayer = DrawingLayer((img.width, img.height))
  if disp.leftButtonDown:
    corner1 = (disp.mouseX, disp.mouseY)
    updateSelection = True
  if updateSelection:
    corner2 = (disp.mouseX, disp.mouseY)
    bb = disp.pointsToBoundingBox(corner1, corner2)
    if disp.leftButtonUp:
      updateSelection = False
    if corner1 != corner2:
      selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]))
  img.addDrawingLayer(selectionLayer)
  img.save(disp)
  img.removeDrawingLayer(0)  # drop the selection layer so layers don't pile up frame after frame

Example No. 17
while True:
    opc = raw_input("Enter the number of an option: ")
    if opc == "1":
        d = Display()  # keep a handle on the display so the while loop can control it
        c = Camera()   # keep a handle on the camera
        while d.isNotDone():  # loop until the user closes the display window
            img = c.getImage()  # capture an image from the camera
            dis = img.colorDistance((0, 0, 0))  # color distance of every pixel with respect to black
            seg = dis.stretch(220, 255)  # stretch the histogram to keep only the pixels of the object of interest and improve contrast
            blobs = seg.findBlobs()  # look for groups of pixels
            if blobs:
                blobs.sortArea()  # sort the groups by area, ascending (findBlobs() returns None when nothing is found, so sort inside the guard)
                rect = blobs.filter([b.isRectangle(0.15) for b in blobs])  # keep the groups that resemble a rectangle, within a tolerance
                if rect:
                    fl = DrawingLayer((img.width, img.height))  # create a layer on which to draw the rectangles
                    fbd = (rect[-1].width(), rect[-1].height())  # store the dimensions of the largest rectangle
                    tlc = rect[-1].topLeftCorner()  # get the coordinates of its top-left corner
                    fb = fl.rectangle(tlc, fbd, (0, 200, 0), 3)  # draw the rectangle with the data above
                    img.addDrawingLayer(fl)  # add the layer on top of the original image
                    img.applyLayers()
            img.show()  # show the image in "real" time
    elif opc == "2":
        d = Display()  # keep a handle on the display so the while loop can control it
        c = Camera()   # keep a handle on the camera
        while d.isNotDone():
            img = c.getImage()  # capture an image from the camera
            dis = img.colorDistance((0, 0, 0))  # color distance of every pixel with respect to black
            seg = dis.stretch(220, 255)  # stretch the histogram to keep only the pixels of the object of interest and improve contrast
            blobs = seg.findBlobs()  # look for groups of pixels
            blobs.sortArea()  # sort the groups by area, ascending
Example No. 18
def fancify():
    if request.method == 'POST':
        print request.data
        cur_request = json.loads(request.data)
    else:
        #cur_request = """{"url": "", "debug":true}"""
        cur_request = """{"url": "http://localhost/images/scrubs.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.newrichstrategies.com/wp-content/uploads/2012/03/How-to-Find-Good-People-in-Your-Life.jpg", "debug":false}"""
        #cur_request = """{"url": "http://greenobles.com/data_images/frank-lampard/frank-lampard-02.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.billslater.com/barack__obama.jpg"}"""
        #cur_request = """{"url": "http://celebrityroast.com/wp-content/uploads/2011/01/arnold-schwarzenegger-body-building.jpg", "debug":false}"""
        #cur_request = """{"url": "http://face2face.si.edu/.a/6a00e550199efb8833010536a5483e970c-800wi", "debug":true}"""
        #cur_request = """{"url": "http://collider.com/uploads/imageGallery/Scrubs/scrubs_cast_image__medium_.jpg", "debug":false}"""
        #cur_request = """{"url": "http://localhost/images/Kevin_Bacon_at_the_2010_SAG_Awards.jpg", "debug":false}"""
        #cur_request = """{"url": "http://cdn02.cdn.justjared.com/wp-content/uploads/headlines/2012/02/anna-faris-oscars-red-carpet-2012.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.viewzone.com/attractive.female.jpg", "debug":true}"""
        cur_request = json.loads(cur_request)

    print cur_request["url"]
    img = Image(str(cur_request["url"]))
    img = img.scale(2.0)

    debug = True
    #if "debug" in cur_request:
    #    debug = cur_request["debug"]

    chosen_faces = []
    faces = img.findHaarFeatures(face_cascade)
    if faces is not None:
        for face in faces:
            face_features = []
            invalid_face = False
            face_rect = Rect(face.x - (face.width() / 2), face.y - (face.height() / 2), face.width(), face.height())
            for chosen_face in chosen_faces:
                if face_rect.colliderect(chosen_face):
                    invalid_face = True
                    break
            if invalid_face:
                continue  # skip this overlapping face, but keep checking the rest

            nose = None
            mouth = None
            left_eye = None
            right_eye = None
            cur_face = img.crop(face.x, face.y, face.width(), face.height(), centered=True)
            #cur_face = face.crop()

            noses = cur_face.findHaarFeatures(nose_cascade)
            mouths = cur_face.findHaarFeatures(mouth_cascade)
            eyes = cur_face.findHaarFeatures(eye_cascade)

            face_left_edge = face.x - (face.width() / 2)
            face_top_edge = face.y - (face.height() / 2)

            if noses is not None:
                nose = noses[0]
                nose_dist = (abs(nose.x - (face.width() / 2)) +
                             abs(nose.y - (face.height() * 5 / 9)) +
                             abs(nose.width() - (face.width() / 4)))
                for cur_nose in noses:
                    cur_dist = (abs(cur_nose.x - (face.width() / 2)) +
                                abs(cur_nose.y - (face.height() * 5 / 9)) +
                                abs(cur_nose.width() - (face.width() / 4)))
                    if cur_dist < nose_dist:
                        nose = cur_nose
                        nose_dist = cur_dist

            if nose and (nose.y < (face.height() / 3)):
                nose = None

            if nose and mouths is not None:
                mouth = mouths[0]
                mouth_dist = abs(mouth.x - nose.x) + (abs(mouth.y - (face.height() * 4 / 5)) * 2)

                for cur_mouth in mouths:
                    cur_dist = abs(cur_mouth.x - nose.x) + (abs(cur_mouth.y - (face.height() * 4 / 5)) * 2)
                    if (cur_dist < mouth_dist) and (cur_mouth.y > nose.y):
                        mouth = cur_mouth
                        mouth_dist = cur_dist

            if nose and eyes:
                right_eye = eyes[0]
                right_eye_dist = (abs(right_eye.x - (3 * face.width() / 4)) * 2 +
                                  abs(right_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                  abs(right_eye.width() - (face.width() / 3)))
                for cur_eye in eyes:
                    cur_right_dist = (abs(cur_eye.x - (3 * face.width() / 4)) +
                                      abs(cur_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                      abs(cur_eye.width() - (face.width() / 3)))

                    if (cur_right_dist <= right_eye_dist): # and (cur_eye.y < nose.y):
                        right_eye = cur_eye
                        right_eye_dist = cur_right_dist

            if nose and right_eye and (((right_eye.y - (right_eye.height() / 2)) > nose.y) or (right_eye.x < nose.x)):
                print "Culling right_eye"
                right_eye = None

            if nose and mouth:
                chosen_faces.append(face_rect)
                x_face = face.x - (face.width() / 2)
                y_face = face.y - (face.height() / 2)

                x_nose = nose.x - (nose.width() / 2)
                y_nose = nose.y - (nose.height() / 2)

                # Setup TopHat Image
                scale_factor = face.width() / 175.0
                cur_hat = hat.copy()
                cur_hat = cur_hat.scale(scale_factor)
                cur_hat_mask = hat_mask.copy()
                cur_hat_mask = cur_hat_mask.scale(scale_factor)
                cur_hat_mask = cur_hat_mask.createAlphaMask(hue_lb=0, hue_ub=100)

                # Calculate the hat position
                if (face.y - face.height() / 2) > cur_hat.height:
                    x_hat = face.x - (cur_hat.width / 2)
                    y_hat = face.y - (face.height() * 7 / 10) - (cur_hat.height / 2)
                    img = img.blit(cur_hat, pos=(x_hat, y_hat), alphaMask=cur_hat_mask)

                if mouth:
                    x_mouth = mouth.x - (mouth.width() / 2)
                    y_mouth = mouth.y - (mouth.height() / 2)
                    # Setup Mustache Image
                    cur_stache = stache.copy()
                    scale_factor = ((nose.width() / 300.0) + (face.width() / 600.0)) / 2.0
                    cur_stache = cur_stache.scale(scale_factor)
                    stache_mask = cur_stache.createAlphaMask(hue_lb=0, hue_ub=10).invert()

                    # Calculate the mustache position
                    bottom_of_nose = y_nose + (nose.height() * 4 / 5)
                    top_of_mouth = y_mouth
                    # if top_of_mouth > bottom_of_nose:
                    #    top_of_mouth = bottom_of_nose
                    y_must = y_face + ((bottom_of_nose + top_of_mouth) / 2) - (cur_stache.height / 2)

                    middle_of_nose = nose.x
                    middle_of_mouth = mouth.x
                    x_must = x_face + ((middle_of_nose + middle_of_mouth) / 2) - (cur_stache.width / 2)

                if right_eye:
                    x_right_eye = right_eye.x - (right_eye.width() / 2)
                    y_right_eye = right_eye.y - (right_eye.height() / 2)

                    # Setup Monocle Image
                    cur_mono = monocle.copy()
                    scale_factor = ((right_eye.width() / 65.0) + (face.width() / 200.0)) / 2.0
                    cur_mono = cur_mono.scale(scale_factor)
                    mono_mask = cur_mono.createAlphaMask(hue_lb=0, hue_ub=100).invert()

                    # Calculate Monocle Position
                    x_mono = x_face + x_right_eye
                    y_mono = y_face + y_right_eye
                    img = img.blit(cur_mono, pos=(x_mono, y_mono), alphaMask=mono_mask)

                img = img.blit(cur_stache, pos=(x_must, y_must), alphaMask=stache_mask)

                if debug:
                    noselayer = DrawingLayer((img.width, img.height))
                    nosebox_dimensions = (nose.width(), nose.height())
                    center_point = (face.x - (face.width() / 2) + nose.x,
                                    face.y - (face.height() / 2) + nose.y)
                    nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions, width=3)
                    img.addDrawingLayer(noselayer)
                    img = img.applyLayers()

            else:
                print "Face culled:"
                if not nose:
                    print "  No Nose"
                if not mouth:
                    print "  No mouth"
                if not right_eye:
                    print "  No right eye"
                    print

            if debug:
                face_left_edge = face.x - (face.width() / 2)
                face_top_edge = face.y - (face.height() / 2)

                facelayer = DrawingLayer((img.width, img.height))
                facebox_dimensions = (face.width(), face.height())
                center_point = (face.x, face.y)
                facebox = facelayer.centeredRectangle(center_point, facebox_dimensions, Color.BLUE)
                img.addDrawingLayer(facelayer)

                if noses:
                    for nose in noses:
                        noselayer = DrawingLayer((img.width, img.height))
                        nosebox_dimensions = (nose.width(), nose.height())
                        center_point = (face.x - (face.width() / 2) + nose.x,
                                    face.y - (face.height() / 2) + nose.y)
                        nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions)
                        img.addDrawingLayer(noselayer)

                if mouths:
                    for mouth in mouths:
                        mouthlayer = DrawingLayer((img.width, img.height))
                        mouthbox_dimensions = (mouth.width(), mouth.height())
                        center_point = (face.x - (face.width() / 2) + mouth.x,
                                face.y - (face.height() / 2) + mouth.y)
                        mouthbox = mouthlayer.centeredRectangle(center_point, mouthbox_dimensions, Color.GREEN)
                        img.addDrawingLayer(mouthlayer)

                if eyes:
                    for right_eye in eyes:
                        right_eyelayer = DrawingLayer((img.width, img.height))
                        right_eyebox_dimensions = (right_eye.width(), right_eye.height())
                        right_eye_center_point = (face_left_edge + right_eye.x, face_top_edge + right_eye.y)
                        right_eyebox = right_eyelayer.centeredRectangle(right_eye_center_point, right_eyebox_dimensions)
                        img.addDrawingLayer(right_eyelayer)

                img = img.applyLayers()

    img = img.scale(0.5)
    w_ratio = img.width / 800.0
    h_ratio = img.height / 600.0

    if h_ratio > 1.0 or w_ratio > 1.0:
        if h_ratio > w_ratio:
            img = img.resize(h=600)
        else:
            img = img.resize(w=800)

    output = StringIO.StringIO()
    img.getPIL().save(output, format="JPEG") #, quality=85, optimize=True)
    img_contents = output.getvalue()

    mimetype = "image/jpeg"
    return app.response_class(img_contents, mimetype=mimetype, direct_passthrough=False)
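
The nose-picking loop above scores every Haar candidate by how far it sits from where a nose usually is in a face crop: horizontally centered, about 5/9 of the way down, and roughly a quarter of the face width. A minimal standalone sketch of that heuristic, with hypothetical (x, y, width) tuples standing in for the Haar features:

# Sketch of the nose-scoring heuristic (candidates are hypothetical tuples).
def nose_score(cand, face_w, face_h):
    x, y, w = cand
    return (abs(x - face_w / 2) +      # horizontally centered
            abs(y - face_h * 5 / 9) +  # slightly below mid-face
            abs(w - face_w / 4))       # about a quarter of the face wide

def best_nose(candidates, face_w, face_h):
    # lowest score = best match to the expected nose position/size
    return min(candidates, key=lambda c: nose_score(c, face_w, face_h))

print(best_nose([(80, 100, 40), (75, 30, 45), (20, 90, 10)], 160, 180))
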
Ejemplo n.º 23
0
def loop():
    while True:
        try:
            setRGB(0,128,64)
            setRGB(0,255,0)
            # Read distance value from Ultrasonic
            distant = ultrasonicRead(ultrasonic_ranger)
            button_state=digitalRead(button)
            if (distant <= trigger) and (button_state):
    #            print 'Alarm ', distant,'cm', 'trigger', trigger
                flushLCD('+++ ' + str(distant) + ':'  + str(trigger))

                analogWrite(led,255)
                # count down for photo !
                flushLCD('SMILE!')
                beep (0.01)
                time.sleep (1)
                beep (0.02)
                time.sleep(1)
                beep (0.1)
                time.sleep (1)
                
                frame=myCamera.getImage()
                flushLCD('processing ...')
                faces=frame.findHaarFeatures('face')
                if faces:
                    print str(len(faces)) + " faces"
                    fct=0
                    ts = time.time()
                    now = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d-%H%M%S')

                    for face in faces:
                        fct+=1
                        print "face " + str(fct) + " at: " + str(face.coordinates())
                        myFace=face.crop()  # tweet all faces ...
                        photo='/home/pi/tmp/' + now + 'F-' + str(fct) + '.jpg' # NOTE MUST use absolute path here!
                        psize=myFace.width*myFace.height 
                        if psize>20000: #looks like smaller images are trash 
                            print "Photo Size: " + photo + " " + str(psize)
                            myDL=DrawingLayer((myFace.width,myFace.height))
                            myDL.setFontSize(25)
                            myDL.text("I am " + str(distant) + " cm next to a PiCam!",(myFace.width/2 - 140,10),color=Color.WHITE)
                            myFace.addDrawingLayer(myDL)
                            myFace.applyLayers()
                            
                            myFace.save(photo)
                            beep (0.05)
                            time.sleep(0.3)
                            beep(0.2)

                            time.sleep(1) # wait for the save to complete ...
                            status = 'Look Ma, I did the #lnf16 @ #FHburgenland just now: ' + now
                            # tweet ...
                            api.update_with_media(photo, status=status)
                            logLCD('TWEETed!')

                        else:
                            print "Face skipped too small: " + str(psize)
                            logLCD("Face " + str(fct) + " skipped too small: " + str(psize))

    #                print 'Sleep before next watch cycle ...'
                else:
                    logLCD('NO faces detected!')
                    
            else:
    #            print 'No Alarm ', distant,'cm' , 'trigger', trigger
                flushLCD('--- ' + str(distant) + ':'  + str(trigger))
                analogWrite(led,0)
                
            time.sleep(1)
            
        except TypeError:
            print "Error"
        except IOError:
            print "Error"
Ejemplo n.º 24
0
def detectChargingStation(image_file):
	debug = False

	myColor1 = (8,33,64)
	myColor2 = (70,80,100)

	original = Image(image_file)

	only_station = onlyBlueColor(original, myColor1)

	#Different findBlobs
	maskMean = original.hueDistance(color=(200,160,150))
	mask = only_station.hueDistance(color=myColor1).binarize()
	meanColor = (round(((maskMean.meanColor()[0]+maskMean.meanColor()[1]+maskMean.meanColor()[2])/3) * 10000)/10000)
	blobs = original.findBlobsFromMask(mask, minsize=400)

	if(meanColor > 190):
		return 6

	#print "Number of blobs found" , len(blobs)
	try: 
		blobs.image = original
	except Exception:
		only_station = onlyBlueColor(original, myColor2)
		mask = only_station.hueDistance(color=myColor2).binarize()
		blobs = original.findBlobsFromMask(mask, minsize=400)
		blobs.image = original

	station_blob = chooseBestBlobCosine(blobs)
	station_blob.drawMinRect(color=Color.RED)

	centroidX = station_blob.minRectX()
	centroidY = station_blob.minRectY()

	#Have to find out which part of the screen centroid is in
	maxX = original.getNumpy().shape[0]
	maxY = original.getNumpy().shape[1]+100

	if(debug):
		centroidLayer = DrawingLayer((maxX,maxY))

		centroidLayer.line((0,(1/3.0)*maxY),(maxX, (1/3.0)*maxY), color=Color.GREEN, width=2)
		centroidLayer.line((0,(2/3.0)*maxY),(maxX, (2/3.0)*maxY), color=Color.GREEN, width=2)
		centroidLayer.circle((int(centroidX), int(centroidY)), color=Color.GREEN, radius=5, filled=True)

		original.addDrawingLayer(centroidLayer)
		original.applyLayers()

		mask.save("binarizeMask.png")
		original.save("blobs.png")
		only_station.save("blueFilter.png")

	#print "Coordinates of centroid are "+str(centroidX)+", "+str(centroidY)
	#print "Coordinates of max are "+str(maxX)+", "+str(maxY)

	#if(station_blob.width() * station_blob.height() < 4000):
	#	return 2

	blobArea = station_blob.width() * station_blob.height()

	if(blobArea < 10000):
		return 2

	return chargingStationLocation_New(maxX,maxY,centroidX,centroidY,200, station_blob.width() / float(station_blob.height()), blobArea)
Ejemplo n.º 25
0
from SimpleCV import Camera, Image, VirtualCamera, Display, DrawingLayer, Color, JpegStreamCamera, JpegStreamer
import scipy as sp
import numpy as np

cam = VirtualCamera('../Recording/Videos/Kite with leds in night - YouTube [360p].mp4','video')
img = cam.getImage()
disp = Display((810,1080))
display = True
predictedTargetPosition = (img.size()[0]/2, img.size()[1]/2)
while (not(display) or disp.isNotDone()) and img.size()!= (0, 0) :
    img = cam.getImage()
    if img.size()!= (0, 0):
     if img:
      if display:
        # Create a new layer to host information retrieved from video
        layer = DrawingLayer((img.width, img.height))
      maskred = img.colorDistance(color=(200,50,70)).invert().threshold(170)
      imgred = (img*(maskred/255)).dilate(3)
      targetred=imgred.findBlobs(maxsize=200)
      maskwhite = img.colorDistance(color=(200,200,200)).invert().threshold(230)
      imgwhite = (img*(maskwhite/255)).dilate(3)
      targetwhite=imgwhite.findBlobs(maxsize=200)
      
      
      if targetred:
        targetred.draw()
        print targetred.meanColor()
        targetred = targetred.sortDistance(predictedTargetPosition)
        coord_px = np.array(targetred[0].centroid())
        layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled = False, color = Color.RED)
      if targetwhite:
Ejemplo n.º 26
0
 def track(self):
  print "Press right mouse button to pause or play"
  print "Use left mouse button to select target"
  print "Target color must be different from background"
  print "Target must have width larger than height"
  print "Target can be upside down"

  #Parameters
  isUDPConnection = False # Currently switched manually in the code
  display = True
  displayDebug = True
  useBasemap = False
  maxRelativeMotionPerFrame = 2 # How far the target can move between two successive frames
  pixelPerRadians = 320
  radius = pixelPerRadians
  referenceImage = '../ObjectTracking/kite_detail.jpg'
  scaleFactor = 0.5
  isVirtualCamera = True
  useHDF5 = False

  # Open reference image: this is used at initialisation
  target_detail = Image(referenceImage)

  # Get RGB color palette of target (was found to work better than using hue)
  pal = target_detail.getPalette(bins = 2, hue = False)

  # Open video to analyse or live stream
  #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
  if isVirtualCamera:
    #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
    #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
    #cam = VirtualCamera('output.avi', 'video')
    cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv','video')
    virtualCameraFPS = 25
  else:
    cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')#640 * 480
    #cam = Camera() 

  # Get a sample image to initialize the display at the same size
  img = cam.getImage().scale(scaleFactor)
  print img.width, img.height
  # Create a pygame display
  if display:
   if img.width>img.height:
     disp = Display((27*640/10,25*400/10))#(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
   else:
     disp = Display((810,1080))
  #js = JpegStreamer()



  # Initialize variables
  previous_angle = 0 # target has to be upright when starting. Target width has to be larger than target height.
  previous_coord_px = (0, 0) # Initialized to top left corner, which always exists
  previous_dCoord = previous_coord_px
  previous_dAngle = previous_angle
  angles = []
  coords_px = []
  coord_px = [0, 0]
  angle = 0
  target_elevations = []
  target_bearings = []
  times = []
  wasTargetFoundInPreviousFrame = False
  i_frame = 0
  isPaused = False
  selectionInProgress = False
  th = [100, 100, 100]
  skycolor = Color.BLUE
  timeLastTarget = 0

  # Prepare recording
  recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_")+ 'simpleTrack'
  if useHDF5:
    try:
      os.remove(recordFilename + '.hdf5') 
    except:
      print('Creating file ' + recordFilename + '.hdf5')
    """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
    h5py._errors.silence_errors()
    recordFile = h5py.File(recordFilename + '.hdf5', 'a') 
    hdfSize = 0    
    dset = recordFile.create_dataset('kite', (2,2), maxshape=(None,7))
    imset = recordFile.create_dataset('image', (2,img.width,img.height,3 ), maxshape=(None, img.width, img.height, 3))
  else:
    try:
      os.remove(recordFilename + '.csv')   
    except:
      print('Creating file ' + recordFilename + '.csv') 
    recordFile = file(recordFilename + '.csv', 'a')
    csv_writer = csv.writer(recordFile)
    csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)', 'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'])

  # Launch a thread to get UDP message with orientation of the camera
  mobile = mobileState.mobileState()
  if isUDPConnection:
   a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
   a.start()

  # Loop while not canceled by user
  t0 = time.time()
  previousTime = t0
  while not(display) or disp.isNotDone():
    t = time.time()
    deltaT = (t-previousTime)
    FPS = 1.0/deltaT
    #print 'FPS =', FPS
    if isVirtualCamera:
      deltaT = 1.0/virtualCameraFPS
    previousTime = t
    i_frame = i_frame + 1
    timestamp = datetime.datetime.utcnow()

    # Receive orientation of the camera
    if isUDPConnection:
      mobile.computeRPY([2, 0, 1], [-1, 1, 1])
    ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
            [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

    if useBasemap:
    # Warning this really slows down the computation
      m = Basemap(width=img.width, height=img.height, projection='aeqd',
            lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw), rsphere = radius)

    # Get an image from camera
    if not isPaused:
      img = cam.getImage()
      img = img.resize(int(scaleFactor*img.width), int(scaleFactor*img.height))
    
    if display:
      # Pause image when right button is pressed
      dwn = disp.rightButtonDownPosition()
      if dwn is not None:
        isPaused = not(isPaused)
        dwn = None

    if display:
    # Create a layer to enable user to make a selection of the target
      selectionLayer = DrawingLayer((img.width, img.height))

    if img:
      if display: 
      # Create a new layer to host information retrieved from video
        layer = DrawingLayer((img.width, img.height))
          # Selection is a rectangle drawn while holding mouse left button down
        if disp.leftButtonDown:
          corner1 = (disp.mouseX, disp.mouseY)
          selectionInProgress = True
        if selectionInProgress:
          corner2 = (disp.mouseX, disp.mouseY)
          bb = disp.pointsToBoundingBox(corner1, corner2)# Display the temporary selection
          if disp.leftButtonUp: # User has finished his selection
            selectionInProgress = False
            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
            if selection != None:
              # The 3 main colors in the area selected are considered.
              # Note that the selection should be included in the target and not contain background
              try:
                selection.save('../ObjectTracking/'+ 'kite_detail_tmp.jpg')
                img0 = Image("kite_detail_tmp.jpg") # For unknown reason I have to reload the image...
                pal = img0.getPalette(bins = 2, hue = False)
              except: # getPalette is sometimes bugging and raising LinalgError because matrix not positive definite
                pal = pal
              wasTargetFoundInPreviousFrame = False
              previous_coord_px = (bb[0] + bb[2]/2, bb[1] + bb[3]/2)
          if corner1 != corner2:
            selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), width = 5, color = Color.YELLOW)
                       
   
      # If the target was already found, we can save computation time by
      # reducing the Region Of Interest around predicted position
      if wasTargetFoundInPreviousFrame:
        ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                  max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
        ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                             maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                 centered = False)
        if display :
      # Draw the rectangle corresponding to the ROI on the complete image
          layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                   previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                 color = Color.GREEN, width = 2)
      else:
        # Search the whole image if there is no clue of where the target is
        ROITopLeftCorner = (0, 0)
        ROI = img

        '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''
    
          # Find sky color
      sky = (img-img.binarize()).findBlobs(minsize=10000)
      if sky:
        skycolor = sky[0].meanColor()
      # Option 3
      target_img = ROI-ROI # Black image
              
      # Loop through palette of target colors
      if display and displayDebug:
        decomposition = []
      i_col = 0
      for col in pal: 
        c = tuple([int(col[i]) for i in range(0,3)])
            # Search the target based on color
        ROI.save('../ObjectTracking/'+ 'ROI_tmp.jpg')
        img1 = Image('../ObjectTracking/'+ 'ROI_tmp.jpg')
        filter_img = img1.colorDistance(color = c)
        h = filter_img.histogram(numbins=256)
        cs = np.cumsum(h)
        thmax = np.argmin(abs(cs - 0.02*img.width*img.height)) # find the threshold that keeps 2% of the pixels in the expected color
        thmin = np.argmin(abs(cs - 0.005*img.width*img.height)) # find the threshold that keeps 0.5% of the pixels in the expected color
        if thmin==thmax:
          newth = thmin
        else:
          newth = np.argmin(h[thmin:thmax]) + thmin
        alpha = 0.5
        th[i_col] = alpha*th[i_col]+(1-alpha)*newth
        filter_img = filter_img.threshold(max(40,min(200,th[i_col]))).invert()
        target_img = target_img + filter_img
        #print th
        i_col = i_col + 1
        if display and displayDebug:
          [R, G, B] = filter_img.splitChannels()
          white = (R-R).invert()
          r = R*1.0/255*c[0]
          g = G*1.0/255*c[1]
          b = B*1.0/255*c[2]
          tmp = white.mergeChannels(r, g, b)
          decomposition.append(tmp)

      # Get a black background with a white target foreground
      target_img = target_img.threshold(150)
  
      target_img = target_img - ROI.colorDistance(color = skycolor).threshold(80).invert()

      if display and displayDebug:
        small_ini = target_img.resize(int(img.width/(len(pal)+1)),  int(img.height/(len(pal)+1)))
        for tmp in decomposition:
          small_ini = small_ini.sideBySide(tmp.resize(int(img.width/(len(pal)+1)), int(img.height/(len(pal)+1))), side = 'bottom')
        small_ini = small_ini.adaptiveScale((int(img.width), int(img.height)))
        toDisplay = img.sideBySide(small_ini)
      else:
        toDisplay = img
          #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

      # Search for binary large objects representing potential target
      target = target_img.findBlobs(minsize = 500)
      
      if target: # If a target was found
      
        if wasTargetFoundInPreviousFrame:
          predictedTargetPosition = (width*maxRelativeMotionPerFrame/2, height*maxRelativeMotionPerFrame/2) # Target will most likely be close to the center of the ROI   
        else:
          predictedTargetPosition = previous_coord_px
              # If there are several targets in the image, take the one closest to the predicted position
        target = target.sortDistance(predictedTargetPosition)

        # Get target coordinates according to minimal bounding rectangle or centroid.
        coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY()))
        coord_px = ROITopLeftCorner + np.array(target[0].centroid())

        # Rotate the coordinates of roll angle around the middle of the screen
        rot_coord_px = np.dot(ctm, coord_px - np.array([img.width/2, img.height/2])) + np.array([img.width/2, img.height/2])
        if useBasemap:
          coord = sp.deg2rad(m(rot_coord_px[0], img.height-rot_coord_px[1], inverse = True))
        else:
          coord = localProjection(rot_coord_px[0]-img.width/2, img.height/2-rot_coord_px[1], radius, mobile.yaw, mobile.pitch, inverse = True)
        target_bearing, target_elevation = coord

      # Get minimum bounding rectangle for display purpose
        minR = ROITopLeftCorner + np.array(target[0].minRect())

        contours = target[0].contour()

        contours = [ ROITopLeftCorner + np.array(contour) for contour in contours]


  
        # Get target features
        angle = sp.deg2rad(target[0].angle()) + mobile.roll
        angle =  sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle)))
        width = target[0].width()
        height = target[0].height()

        # Check if the kite is upside down
        # First rotate the kite
        ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
            [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
        rotated_contours = [np.dot(ctm2, contour-coordMinRect) for contour in contours]  
        y = [-tmp[1] for tmp in rotated_contours]
        itop = np.argmax(y) # Then looks at the points at the top
        ibottom = np.argmin(y) # and the point at the bottom
        # The most off-centre point should be at the bottom
        if abs(rotated_contours[itop][0])>abs(rotated_contours[ibottom][0]):
          isInverted = True
        else:
          isInverted = False    
        
        if isInverted:
            angle = angle + sp.pi    

        
                # Filter the data
        alpha = 1-sp.exp(-deltaT/self.filterTimeConstant)
        if not(isPaused):
          dCoord = np.array(previous_dCoord)*(1-alpha) + alpha*(np.array(coord_px) - previous_coord_px) # related to the speed only if cam is fixed
          dAngle = np.array(previous_dAngle)*(1-alpha) + alpha*(np.array(angle) - previous_angle)
        else : 
          dCoord = np.array([0, 0])
          dAngle = np.array([0]) 
#print coord_px, angle, width, height, dCoord
    
        # Record important data
        times.append(timestamp)
        coords_px.append(coord_px)
        angles.append(angle)
        target_elevations.append(target_elevation)
        target_bearings.append(target_bearing)
        
        # Export data to controller
        self.elevation = target_elevation
        self.bearing = target_bearing
        self.orientation = angle
        dt = time.time()-timeLastTarget
        self.ROT = dAngle/dt
        self.lastUpdateTime = t
        
        # Save for initialisation of next step
        previous_dCoord = dCoord
        previous_angle = angle
        previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
        wasTargetFoundInPreviousFrame = True
        timeLastTarget = time.time()
      
      else:
        wasTargetFoundInPreviousFrame = False
        
      if useHDF5:
        hdfSize = hdfSize+1
        dset.resize((hdfSize, 7))
        imset.resize((hdfSize, img.width, img.height, 3))
        dset[hdfSize-1,:] = [time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT]
        imset[hdfSize-1,:,:,:] = img.getNumpy()
        recordFile.flush()
      else:
        csv_writer.writerow([time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT])



      if display :
        if target:
        # Add target features to layer
        # Minimal rectange and its center in RED
          layer.polygon(minR[(0, 1, 3, 2), :], color = Color.RED, width = 5)
          layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10, filled = True, color = Color.RED)
        
                # Target contour and centroid in BLUE
          layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled = True, color = Color.BLUE)
          layer.polygon(contours, color = Color.BLUE, width = 5)

        # Speed vector in BLACK
          layer.line((int(coord_px[0]), int(coord_px[1])), (int(coord_px[0]+20*dCoord[0]), int(coord_px[1]+20*dCoord[1])), width = 3)
        
        # Line giving angle
          layer.line((int(coord_px[0]+200*sp.cos(angle)), int(coord_px[1]+200*sp.sin(angle))), (int(coord_px[0]-200*sp.cos(angle)), int(coord_px[1]-200*sp.sin(angle))), color = Color.RED)

        # Line giving rate of turn
        #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))
            
      # Add the layer to the raw image 
        toDisplay.addDrawingLayer(layer)
        toDisplay.addDrawingLayer(selectionLayer)

      # Add time metadata
        toDisplay.drawText(str(i_frame)+" "+ str(timestamp), x=0, y=0, fontsize=20)

      # Add Line giving horizon
          #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

      # Plot parallels
        for lat in range(-90, 90, 15):
          r = range(0, 361, 10)
          if useBasemap:
            # \todo improve for high roll
            l = m (r, [lat]*len(r))
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l = localProjection(sp.deg2rad(r), \
                    sp.deg2rad([lat]*len(r)), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Plot meridians
        for lon in range(0, 360, 15):
          r = range(-90, 91, 10)
          if useBasemap:
        # \todo improve for high roll
            l = m ([lon]*len(r), r)
            pix = [np.array(l[0]), img.height-np.array(l[1])]
          else:
            l= localProjection(sp.deg2rad([lon]*len(r)), \
                    sp.deg2rad(r), \
                    radius, \
                    lon_0 = mobile.yaw, \
                    lat_0 = mobile.pitch, \
                    inverse = False)
            l = np.dot(ctm, l)
            pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])]

          for i in range(len(r)-1):
            if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img):
              layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2)

      # Text giving bearing
      # \todo improve for high roll
        for bearing_deg in range(0, 360, 30):
          l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(bearing_deg), ( img.width/2+int(l[0]), img.height-20), color = Color.RED)

      # Text giving elevation
      # \todo improve for high roll
        for elevation_deg in range(-60, 91, 30):
          l = localProjection(0, sp.deg2rad(elevation_deg), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False)
          l = np.dot(ctm, l)
          layer.text(str(elevation_deg), ( img.width/2 ,img.height/2-int(l[1])), color = Color.RED)

        #toDisplay.save(js)
        toDisplay.save(disp)
    if display : 
      toDisplay.removeDrawingLayer(1)
      toDisplay.removeDrawingLayer(0)
  recordFile.close()
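
The filtering step above (alpha = 1 - exp(-deltaT/filterTimeConstant)) is a standard first-order low-pass: each new measurement is blended with the previous estimate, with a weight that grows with the time elapsed since the last sample. A minimal sketch of the same filter, with a hypothetical time constant:

import math

# First-order low-pass, as used for dCoord/dAngle above; tau is hypothetical.
def smooth(previous, measurement, dt, tau=0.5):
    alpha = 1 - math.exp(-dt / tau)  # weight of the new sample
    return (1 - alpha) * previous + alpha * measurement

estimate = 0.0
for sample in [1.0, 1.0, 1.0, 1.0]:
    estimate = smooth(estimate, sample, dt=0.04)  # ~25 FPS frames
    print(estimate)

With dt fixed, this reduces to the familiar exponential moving average, which is why the tracker's speed estimate settles smoothly instead of jumping with every noisy frame.
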
Ejemplo n.º 27
0
# This example modified based on examples by Matt Richardson and Shawn Wallace "Getting Started with Raspberry Pi"


from SimpleCV import Camera,Display, DrawingLayer, Color
from time import sleep

myCamera = Camera(prop_set={'width':320,'height':240})
myDisplay = Display(resolution=(320,240))
myDrawingLayer = DrawingLayer((320,240))
while not myDisplay.isDone():
    frame = myCamera.getImage()
    faces = frame.findHaarFeatures('face')
    if faces:
        for face in faces:
            print "Face at: " + str(face.coordinates())
            myDrawingLayer.setFontSize(80)
            myDrawingLayer.rectangle((18,148),(250,60),filled=True)
            myDrawingLayer.text("Toadman",(20,150),color=Color.GREEN)
            frame.addDrawingLayer(myDrawingLayer)
            frame.applyLayers()
            frame.save(myDisplay)

    else:
        print "No faces detected."
    frame.save(myDisplay)
    sleep(.1)
Ejemplo n.º 28
0
img = util.tomaFoto("objeto.jpg", brillo = 55)
#img = Image("lineaDe4.jpg")
#img.live()

# The trick is to look for the blue color of the board instead of going straight for the pieces
img_tratada = img.binarize() 
#img_tratada.live()

blobs = img_tratada.findBlobs()
blobGrande = blobs.sortArea()[-1] if blobs else None

if blobGrande:
	
	i_prima = blobGrande.length() / FACTOR_CONVERSION_PIXEL_A_MM
	d = (ALTURA_OBJETO * DISTANCIA_FOCAL / i_prima) / 10
	
	textLayer = DrawingLayer((img.width,img.height))
	textLayer.setFontSize(36)
	textLayer.text("Distancia = " + str(d) + " centimetros", (10,10), color=Color.RED)
	
	blobGrande.draw(width = 5, color = Color.RED)	
	img.addDrawingLayer(img_tratada.dl())
	img.addDrawingLayer(textLayer)

	img.show()
	util.pausa()
else:
	print "No se han encontrado Blobs"



Ejemplo n.º 29
0
	def encuentraYDibujaAngulos(self, img):
		""" Ademas de dibujar la estructura de los huesos del brazo
		devuelve los angulos de dichos huesos con la horizontal """
		
		# Draw the coordinate axes
		img.dl().line((20, img.height - 20), (20, img.height - 60), Color.RED, width=5)
		img.dl().line((20, img.height - 20), (60, img.height - 20), Color.RED, width=5)
		textLayer = DrawingLayer((img.width, img.height))
		textLayer.setFontSize(20)
		textLayer.text("90 grados", (20, img.height - 80), Color.RED)
		textLayer.text("0 grados", (70, img.height - 20), Color.RED)
		img.addDrawingLayer(textLayer)
				 
		angulosHuesos = []
		if self.articulaciones != []:
			self.articulaciones = aux.ordenaListaPorDistanciaApunto(self.articulaciones, [0,480])
			puntoInicial = self.articulaciones.pop()
			img.dl().circle(puntoInicial, 10, Color.BLUE, width=5)
			numAngulo = 1
			while self.articulaciones != []:
				p = self.articulaciones.pop()
				img.dl().line(puntoInicial, p, Color.BLUE, width=5)
				img.dl().circle(p, 10, Color.BLUE, width=5)
				textLayer = DrawingLayer((img.width, img.height))
				textLayer.setFontSize(24)
				textLayer.text(str(numAngulo), (p[0] , p[1] ), Color.RED)
				img.addDrawingLayer(textLayer)
				numAngulo += 1
				img.applyLayers()
				angulosHuesos.append(aux.anguloLineaEntreDosPuntos(p, puntoInicial))
				puntoInicial = p
				
		if len(angulosHuesos) == 3:
			return angulosHuesos
		else:
			return []
Ejemplo n.º 30
0
def get_bounding_box(keyword, url, filename):
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim/800.0)
        print "   resizing..."
        img = img.resize(w=int(w/ratio), h=int(h/ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))
    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
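    # draw the keyword in black at +/-2 px offsets to form an outline, then in white on top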
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb


        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
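
The RETURN branch above calls a scale() helper that is not shown in the snippet; presumably it maps points picked on the shrunken display back to original-image coordinates by undoing the resize ratio. A minimal reconstruction under that assumption:

# Hypothetical reconstruction of the scale() helper used above.
def scale(ratio, point):
    # undo the shrink applied before display
    return (int(point[0] * ratio), int(point[1] * ratio))
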
Ejemplo n.º 31
0
    def visionLoop(self):
        while not self.exit:
            # acquire image
            img = self.cam.getImage()
            
            # exit if we've got nothing
            if img is None:
                break

            # adjust image
            if self.camRotate != 0:
                img = img.rotate(self.camRotate)  # rotate() returns a new image
            '''
            img = img.resize(self.camRes[0], self.camRes[1])
            img = img.rotate90()
            '''

            # blob search
            colorDiff = img - img.colorDistance(self.trackingColor)
            blobs = colorDiff.findBlobs(-1, self.trackingBlobMin, 
                                        self.trackingBlobMax)
            '''
            blobs = img.findBlobs((255,60,60), self.trackingBlobMin,
                                  self.trackingBlobMax)
            '''

            # blob find
            if blobs is not None:
                self.x = blobs[-1].x
                self.y = blobs[-1].y

            # blob show
            if blobs is not None:
                # roi = region of interest
                roiLayer = DrawingLayer((img.width, img.height))
                
                # draw all blobs
                for blob in blobs:
                    blob.draw(layer=roiLayer)
                
                # draw a circle around the main blob
                roiLayer.circle((self.x,self.y), 50, Color.RED, 2)

                # apply roi to img
                img.addDrawingLayer(roiLayer)
                img = img.applyLayers()
            
            img.show()

            # fps
            now = datetime.utcnow()
            self.trackingFrameQ.put(now)
            if self.trackingFrameQ.qsize() < 30:
                fps = 0.0
            else:
                fps = 30.0/(now - self.trackingFrameQ.get()).total_seconds()

            # logging
            logger.debug("{func} ({x},{y}) {fps:5.2f}"
                         .format(func=inspect.stack()[0][3],
                                 x=self.x, y=self.y, fps=fps))
        # loop has ended
        logger.debug("{func} stopped.".format(func=inspect.stack()[0][3]))
Ejemplo n.º 32
0
def drawSunglasses(img, coordinates):
    imgLayer = DrawingLayer((img.width, img.height))
    imgLayer.circle(coordinates, 100)
    img.addDrawingLayer(imgLayer)
    img.applyLayers()
Ejemplo n.º 33
0
from SimpleCV import Image, DrawingLayer, Color, Display


d = Display((1240, 820), title="London Map - Scotland Yard")
lMap = Image("C:\\Users\\flavio\\Documents\\Python\\Scotland Yard\\maps\\map.jpg")
circlesLayer = DrawingLayer((lMap.width, lMap.height))

circlesLayer.circle ((191,44), 20,color=Color.ORANGE, filled=True, alpha = 255)
lMap.addDrawingLayer(circlesLayer)
lMap.applyLayers()

lMap.save(d)
Ejemplo n.º 34
0

if __name__ == '__main__':
    cam = Camera(0)
    img = Image()

    samples = 0
    coordinates = redCoords = (0,0)
    text = " "

    while True:
        img = cam.getImage()
        # Make image black and white
        tmp = findRedDot(img)
        if (tmp != None):
            coordinates= (coordinates[0]+tmp[0][0], coordinates[1]+tmp[0][1])
            samples+=1

        if samples == 10:
            samples = 0
            coordinates = (coordinates[0]/10, coordinates[1]/10)
            text = str(coordinates)
            redCoords = coordinates
            coordinates = (0,0)

        redcircle = DrawingLayer((img.width, img.height))
        redcircle.circle(redCoords, 5, filled=True, color=(0,255,0)) # draw the averaged red-dot position, radius 5
        img.addDrawingLayer(redcircle)
        img.applyLayers()
        img.drawText(text)
        img.show()
Ejemplo n.º 35
0
def loop():
    while True:
        try:
            setRGB(0, 128, 64)
            setRGB(0, 255, 0)
            # Read distance value from Ultrasonic
            distant = ultrasonicRead(ultrasonic_ranger)
            button_state = digitalRead(button)
            if (distant <= trigger) and (button_state):
                #            print 'Alarm ', distant,'cm', 'trigger', trigger
                flushLCD('+++ ' + str(distant) + ':' + str(trigger))

                analogWrite(led, 255)
                # count down for photo !
                flushLCD('SMILE!')
                beep(0.01)
                time.sleep(1)
                beep(0.02)
                time.sleep(1)
                beep(0.1)
                time.sleep(1)

                frame = myCamera.getImage()
                flushLCD('processing ...')
                faces = frame.findHaarFeatures('face')
                if faces:
                    print str(len(faces)) + " faces"
                    fct = 0
                    ts = time.time()
                    now = datetime.datetime.fromtimestamp(ts).strftime(
                        '%Y%m%d-%H%M%S')

                    for face in faces:
                        fct += 1
                        print "face " + str(fct) + " at: " + str(
                            face.coordinates())
                        myFace = face.crop()  # tweet all faces ...
                        photo = '/home/pi/tmp/' + now + 'F-' + str(
                            fct) + '.jpg'  # NOTE MUST use absolute path here!
                        psize = myFace.width * myFace.height
                        if psize > 20000:  #looks like smaller images are trash
                            print "Photo Size: " + photo + " " + str(psize)
                            myDL = DrawingLayer((myFace.width, myFace.height))
                            myDL.setFontSize(25)
                            myDL.text("I am " + str(distant) +
                                      " cm next to a PiCam!",
                                      (myFace.width / 2 - 140, 10),
                                      color=Color.WHITE)
                            myFace.addDrawingLayer(myDL)
                            myFace.applyLayers()

                            myFace.save(photo)
                            beep(0.05)
                            time.sleep(0.3)
                            beep(0.2)

                            time.sleep(1)  # wait for the save to complete ...
                            status = 'Look Ma, I did the #lnf16 @ #FHburgenland just now: ' + now
                            # tweet ...
                            api.update_with_media(photo, status=status)
                            logLCD('TWEETed!')

                        else:
                            print "Face skipped too small: " + str(psize)
                            logLCD("Face " + str(fct) +
                                   " skipped too small: " + str(psize))

    #                print 'Sleep before next watch cycle ...'
                else:
                    logLCD('NO faces detected!')

            else:
                #            print 'No Alarm ', distant,'cm' , 'trigger', trigger
                flushLCD('--- ' + str(distant) + ':' + str(trigger))
                analogWrite(led, 0)

            time.sleep(1)

        except TypeError:
            print "Error"
        except IOError:
            print "Error"
Ejemplo n.º 36
0
#Simply writing text
  #optional: change font by img.dl().selectFont('font_name')
  #to see all supported fonts, run img.dl().listFonts()
img.drawText("Hello, World");

#show the image
img.show()
#wait for input (keeps the image shown, hit enter in the terminal to continue the code)
raw_input()

#Drawing Shapes

#Rectangle
#First, we have to create a drawing layer to draw on
imgLayer = DrawingLayer((img.width,img.height))
sizeOfRect = (200,200)
point = (img.width/2 , img.height/2) #centering
rect = imgLayer.rectangle(point, sizeOfRect)
img.addDrawingLayer(imgLayer)
img.applyLayers()
img.show()
raw_input()

''' Shapes you can draw:
imgLayer.circle(point, radius)
imgLayer.centeredRectangle(centerPoint, (width, height))
imgLayer.rectangle(topLeftCornerPoint, (width, height))

polygons are a bit more complicated:
first, we need to define the points that the polygon will have
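
The snippet cuts off before showing the polygon case. A minimal sketch of what it was leading to, using DrawingLayer.polygon (the points are arbitrary examples):

# Sketch: drawing a polygon on a DrawingLayer (points are arbitrary).
polyLayer = DrawingLayer((img.width, img.height))
points = ((100, 100), (200, 80), (220, 180), (120, 200))
polyLayer.polygon(points, color=Color.RED, width=3)
img.addDrawingLayer(polyLayer)
img.applyLayers()
img.show()
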
Ejemplo n.º 37
0
servo_initial = 375
circle_x = 0
circle_y = 0
servo_min = 125  # Min pulse length out of 4096
servo_max = 625  # Max pulse length out of 4096

# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)

pwm.set_pwm(0, 0, servo_initial)
time.sleep(0.0166667)
i = servo_initial

# adjust the center here for calibration of the fresnel lens
center = (320, 240)
scope_layer = DrawingLayer(winsize)  # same as window size

#  (position/coordinates, radius, Color, thickness of the lines)
scope_layer.circle(center, 50, Color.BLACK, width=3)
scope_layer.circle(center, 100, Color.BLACK, width=2)
scope_layer.line((center[0], center[1] - 50), (center[0], 0),
                 Color.BLACK,
                 width=2)
scope_layer.line((center[0], center[1] + 50), (center[0], winsize[1]),
                 Color.BLACK,
                 width=2)
scope_layer.line((center[0] - 50, center[1]), (0, center[1]),
                 Color.BLACK,
                 width=2)
scope_layer.line((center[0] + 50, center[1]), (winsize[0], center[1]),
                 Color.BLACK,
Ejemplo n.º 38
0
def transform_blob_state_changes(processing_path, img, db_blobs_now,
                                 db_blobs_prev, max_offset):
    log("transform_blob_state_changes(processing_path=%s, img=%s, db_blobs_now=%s,\
         db_blobs_prev=%s, max_offset=%s)" %
        (processing_path, img, db_blobs_now, db_blobs_prev, max_offset))
    # create markup layer
    img_markups = DrawingLayer(img.size())
    # count the state totals
    blob_count_duplicate = 0
    blob_count_new = 0
    blob_count_removed = 0
    # traverse db_blobs
    for db_blob_now in db_blobs_now:
        found_in_prev = False
        bn = db_blob_now["bounds"]
        for i, db_blob_prev in enumerate(db_blobs_prev):
            bp = db_blob_prev["bounds"]
            if abs(bn["x"] - bp["x"]) <= max_offset \
                    and abs(bn["y"] - bp["y"]) <= max_offset \
                    and abs(bn["w"] - bp["w"]) <= max_offset \
                    and abs(bn["h"] - bp["h"]) <= max_offset \
                    and db_blob_prev["state"] != BLOB_STATE_REMOVED:
                found_in_prev = True
                # remove item and immediately break
                db_blobs_prev.pop(i)
                break
        # set the state and associated color
        if found_in_prev:
            state = BLOB_STATE_DUPLICATE
            color = Color.ORANGE
            blob_count_duplicate += 1
        else:
            state = BLOB_STATE_NEW
            color = Color.GREEN
            blob_count_new += 1
        db_blob_now["state"] = state
        # draw blob rectangle
        img_markups.rectangle(topLeft=(bn["x"], bn["y"]),
                              dimensions=(bn["w"], bn["h"]),
                              color=color,
                              width=3)
        # draw class text
        img_markups.text(text=state,
                         location=(bn["x"], bn["y"] - 15),
                         color=color)
    if SHOW_REMOVED_SNACKS:
        # draw removed items
        for db_blob_prev in db_blobs_prev:
            # ignore if prev was removed
            if db_blob_prev["state"] == BLOB_STATE_REMOVED:
                continue
            color = Color.RED
            state = BLOB_STATE_REMOVED
            blob_count_removed += 1
            bp = db_blob_prev["bounds"]
            # copy the prev blob images and add it to db_blobs_now
            i = len(db_blobs_now) + 1
            blob_img_name = "blob_" + str(i).zfill(2) + ".png"
            blob_img_path = processing_path + blob_img_name
            copyfile(db_blob_prev["img_path"], blob_img_path)
            db_blobs_now.append(
                db_blob(uid=ObjectId(),
                        title="blob " + str(i).zfill(2),
                        bounds=db_blob_prev["bounds"],
                        img_path=blob_img_path,
                        img_url=url_for("static",
                                        filename=os.path.relpath(
                                            blob_img_path, "static"),
                                        _external=True),
                        state=state))
            # draw blob rectangle
            img_markups.rectangle(topLeft=(bp["x"], bp["y"]),
                                  dimensions=(bp["w"], bp["h"]),
                                  color=color,
                                  width=3)
            # draw class text
            img_markups.text(text=state,
                             location=(bp["x"], bp["y"] - 15),
                             color=color)
    # if only duplicates, throw exception
    if blob_count_new == 0 and blob_count_removed == 0:
        raise NoBlobChangesDetectedException("No changes were detected.")
    # copy image and add markups
    img_marked = img.copy()
    img_marked.addDrawingLayer(img_markups)
    img_marked_name = TR_TYPE_BLOB_STATE_CHANGE + ".png"
    img_marked_path = processing_path + img_marked_name
    img_marked.save(img_marked_path)
    return db_transform(
        uid=ObjectId(),
        title="state changes",
        type=TR_TYPE_BLOB_STATE_CHANGE,
        description=
        "compare all of the blobs and determine which ones are new, duplicated and removed",
        img_path=img_marked_path,
        img_url=url_for("static",
                        filename=os.path.relpath(img_marked_path, "static"),
                        _external=True)), db_blobs_now
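
The matching rule above treats a current blob and a previous blob as the same object when all four bounding-box values (x, y, w, h) differ by at most max_offset pixels. A minimal sketch of that predicate with hypothetical bounds dicts:

# Same-blob test used above: every bound within max_offset pixels.
def is_same_blob(bn, bp, max_offset):
    return all(abs(bn[k] - bp[k]) <= max_offset for k in ("x", "y", "w", "h"))

now = {"x": 100, "y": 40, "w": 32, "h": 30}
prev = {"x": 104, "y": 42, "w": 30, "h": 31}
print(is_same_blob(now, prev, max_offset=8))  # True
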
Ejemplo n.º 39
0
#!/usr/bin/python

from SimpleCV import Image, Camera, Display,DrawingLayer,Color
from time import sleep, time
import random
import RPi.GPIO as GPIO

myCamera = Camera(prop_set={'width':320,'height':240})
myDisplay = Display(resolution=(320,240))
myDrawingLayer = DrawingLayer((320,240))

GPIO.setmode(GPIO.BCM)
GPIO.setup(2,GPIO.OUT)

while not myDisplay.isDone():
	frame = myCamera.getImage()
	faces = frame.findHaarFeatures('face')
	if faces:
		for face in faces:
			GPIO.output(2,GPIO.HIGH)
			print "Face at: " + str(face.coordinates())
			
			
			frame.clearLayers()
			myDrawingLayer.rectangle((0,0),(120,30),filled=True)
			myDrawingLayer.setFontSize(45)
			myDrawingLayer.text(str(random.randint(1,1000000)),(0,0),color=Color.RED)
			
			frame.addDrawingLayer(myDrawingLayer)
			frame.save("combat power" + str(time()) + ".jpg")
			sleep(.3)
Ejemplo n.º 40
0
class SolarCamera:
    def __init__(self, window_size=(640, 480), **kwargs):
        while True:  # Initialize the Camera
            try:
                cam = Camera()
                cam.getImage().flipHorizontal()
            except Exception:
                continue
            else:
                break
        self.cam = cam
        self.image = None
        self.window_size = window_size
        self.display = Display(self.window_size)
        self.__window_center = (
            338, 377)  # (self.window_size[0]/2, self.window_size[1]/2)
        self.__distance = None
        self.__blobs = None
        self.__segmented = None
        self.__circles = None
        self.__scope_layer = None
        self.initialize_scope_layer()

    @property
    def is_there_sun(self):
        self.__binarize_image()  # binarize image
        if self.__blobs:
            self.__circles = self.__blobs.filter(
                [b.isCircle(0.2) for b in self.__blobs])
            if self.__circles:
                return True
        return False

    def __binarize_image(self):
        #  dilate the image
        self.__distance = self.image.colorDistance(Color.BLACK).dilate(2)
        #  segment image with colors ranging from 250 to 255
        self.__segmented = self.__distance.stretch(250, 255)
        #  identify if there's a blob
        self.__blobs = self.__segmented.findBlobs(minsize=2000)

    def get_image(self):
        self.image = self.cam.getImage().flipHorizontal()

    def show_image(self):
        self.image.addDrawingLayer(self.__scope_layer)
        self.image.show()

    def mark_sun(self):
        self.image.drawCircle((self.__circles[-1].x, self.__circles[-1].y),
                              self.__circles[-1].radius(), Color.BLUE, 3)

    @property
    def get_sun_coordinates(self):
        return (self.__circles[-1].x, self.__circles[-1].y)

    def initialize_scope_layer(self):
        self.__scope_layer = DrawingLayer(
            self.window_size)  # same as window size
        #  (position/coordinates, radius, Color, thickness of the lines)
        self.__scope_layer.circle(self.__window_center,
                                  50,
                                  Color.BLUE,
                                  width=3)
        self.__scope_layer.circle(self.__window_center,
                                  100,
                                  Color.BLUE,
                                  width=2)
        self.__scope_layer.line(
            (self.__window_center[0], self.__window_center[1] - 50),
            (self.__window_center[0], 0),
            Color.BLACK,
            width=2)
        self.__scope_layer.line(
            (self.__window_center[0], self.__window_center[1] + 50),
            (self.__window_center[0], self.window_size[1]),
            Color.BLACK,
            width=2)
        self.__scope_layer.line(
            (self.__window_center[0] - 50, self.__window_center[1]),
            (0, self.__window_center[1]),
            Color.BLACK,
            width=2)
        self.__scope_layer.line(
            (self.__window_center[0] + 50, self.__window_center[1]),
            (self.window_size[0], self.__window_center[1]),
            Color.BLACK,
            width=2)

    def print_sun_coordinates(self):
        if self.__circles:
            print("x:", self.get_sun_coordinates[0], "y:",
                  self.get_sun_coordinates[1])

    @property
    def get_window_center(self):
        return self.__window_center
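
A possible driver loop for the class above, using only the methods and properties it defines (a sketch, not from the source):

# Hypothetical usage of SolarCamera: grab frames, mark the sun when found.
if __name__ == '__main__':
    solar_cam = SolarCamera()
    while True:
        solar_cam.get_image()
        if solar_cam.is_there_sun:
            solar_cam.mark_sun()
            solar_cam.print_sun_coordinates()
        solar_cam.show_image()
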
Ejemplo n.º 41
0
from SimpleCV import Display, Camera, Image, DrawingLayer, VirtualCamera

disp = Display((600, 800))
#cam = Camera()
cam = VirtualCamera(
    '../../Recording/Videos/kiteTest from Zenith Wind Power-fNMO3kAX0PE.mp4',
    'video')
isPaused = False
updateSelection = False
while (disp.isNotDone()):
    if not isPaused:
        img_flip = cam.getImage().flipHorizontal()
        img = img_flip.edges(150, 100).dilate()
    if disp.rightButtonDown:
        isPaused = not (isPaused)
    selectionLayer = DrawingLayer((img.width, img.height))
    if disp.leftButtonDown:
        corner1 = (disp.mouseX, disp.mouseY)
        updateSelection = True
    if updateSelection:
        corner2 = (disp.mouseX, disp.mouseY)
        bb = disp.pointsToBoundingBox(corner1, corner2)
        if disp.leftButtonUp:
            updateSelection = False
        if corner1 != corner2:
            selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]))
    img.addDrawingLayer(selectionLayer)
    img.save(disp)
    img.removeDrawingLayer(0)
Ejemplo n.º 42
0
from SimpleCV import VirtualCamera, Display, DrawingLayer, Color
import scipy as sp
import numpy as np

cam = VirtualCamera(
    '../Recording/Videos/Kite with leds in night-LgvpmMt-SA0.mp4', 'video')
img = cam.getImage()
disp = Display((810, 1080))
display = True
predictedTargetPosition = (img.size()[0] / 2, img.size()[1] / 2)
while (not (display) or disp.isNotDone()) and img.size() != (0, 0):
    img = cam.getImage()
    if img.size() != (0, 0):
        if img:
            if display:
                # Create a new layer to host information retrieved from video
                layer = DrawingLayer((img.width, img.height))
            maskred = img.colorDistance(color=(200, 50,
                                               70)).invert().threshold(170)
            imgred = (img * (maskred / 255)).dilate(3)
            targetred = imgred.findBlobs(maxsize=200)
            maskwhite = img.colorDistance(color=(200, 200,
                                                 200)).invert().threshold(230)
            imgwhite = (img * (maskwhite / 255)).dilate(3)
            targetwhite = imgwhite.findBlobs(maxsize=200)

            if targetred:
                targetred.draw()
                print targetred.meanColor()
                targetred = targetred.sortDistance(predictedTargetPosition)
                coord_px = np.array(targetred[0].centroid())
                layer.circle((int(coord_px[0]), int(coord_px[1])),
                             10, color=(255, 0, 0))
                # the radius and color above are assumptions: the original
                # snippet is cut off at this point
Ejemplo n.º 43
0
from SimpleCV import Image, Display, DrawingLayer, Color
from time import sleep

myDisplay = Display()

raspberryImage = Image("test.jpg")

myDrawingLayer = DrawingLayer((raspberryImage.width, raspberryImage.height))
myDrawingLayer.rectangle((50, 20), (250, 60), filled=True)
myDrawingLayer.setFontSize(45)
myDrawingLayer.text("Raspberries!", (50, 20), color=Color.WHITE)
raspberryImage.addDrawingLayer(myDrawingLayer)
raspberryImage.applyLayers()
raspberryImage.save(myDisplay)
while not myDisplay.isDone():
    sleep(0.1)
Ejemplo n.º 44
0
from SimpleCV import Image, Display, DrawingLayer, Color, Camera
import time
import numpy as np
img = Image('stenramchiffontest.jpg')
disp = Display()
img_blurred = img.gaussianBlur((101, 101))

# Make a mask
mask_size = 80
mask = Image((4 * mask_size, 4 * mask_size))
dl = DrawingLayer((4 * mask_size, 4 * mask_size))

# Draw a filled circle in the mask
dl.circle((2 * mask_size, 2 * mask_size),
          mask_size,
          filled=True,
          color=Color.WHITE)
mask.addDrawingLayer(dl)
mask = mask.applyLayers()
# Blur the mask to get a progressive (soft-edged) transition
blurred_mask = mask.gaussianBlur((101, 101))
t0 = time.time()

n = 3
img_ = img.gaussianBlur((n, n))
old_n = 3
isDown = False
mouseRawXOld = 0
x = 0
while not disp.isDone():
    t = time.time()
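
# A minimal sketch, not part of the original (which is truncated above), of
# how such a blurred mask is typically used: blend the sharp image with its
# blurred copy so that white mask areas stay sharp and black areas stay
# blurred. It assumes the mask has been positioned and resized to match img,
# e.g. sharp_out = blend_with_mask(img, img_blurred, blurred_mask).
def blend_with_mask(sharp, blurred, mask):
    # Normalize the mask to [0, 1] and use it as a per-pixel blend weight.
    m = mask.getGrayNumpy().astype(float) / 255.0
    s = sharp.getNumpy().astype(float)
    b = blurred.getNumpy().astype(float)
    out = s * m[..., None] + b * (1.0 - m[..., None])
    return Image(out.astype(np.uint8))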
Ejemplo n.º 45
0
    def encuentraYDibujaAngulos(self, img):
        """ Ademas de dibujar la estructura de los huesos del brazo
		devuelve los angulos de dichos huesos con la horizontal """

        # Draw the coordinate axes
        img.dl().line((20, img.height - 20), (20, img.height - 60),
                      Color.RED,
                      width=5)
        img.dl().line((20, img.height - 20), (60, img.height - 20),
                      Color.RED,
                      width=5)
        textLayer = DrawingLayer((img.width, img.height))
        textLayer.setFontSize(20)
        textLayer.text("90 grados", (20, img.height - 80), Color.RED)
        textLayer.text("0 grados", (70, img.height - 20), Color.RED)
        img.addDrawingLayer(textLayer)

        angulosHuesos = []
        if self.articulaciones != []:
            self.articulaciones = aux.ordenaListaPorDistanciaApunto(
                self.articulaciones, [0, 480])
            puntoInicial = self.articulaciones.pop()
            img.dl().circle(puntoInicial, 10, Color.BLUE, width=5)
            numAngulo = 1
            while self.articulaciones != []:
                p = self.articulaciones.pop()
                img.dl().line(puntoInicial, p, Color.BLUE, width=5)
                img.dl().circle(p, 10, Color.BLUE, width=5)
                textLayer = DrawingLayer((img.width, img.height))
                textLayer.setFontSize(24)
                textLayer.text(str(numAngulo), (p[0], p[1]), Color.RED)
                img.addDrawingLayer(textLayer)
                numAngulo += 1
                img.applyLayers()
                angulosHuesos.append(
                    aux.anguloLineaEntreDosPuntos(p, puntoInicial))
                puntoInicial = p

        if len(angulosHuesos) == 3:
            return angulosHuesos
        else:
            return []
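
# A hypothetical sketch of the aux.anguloLineaEntreDosPuntos helper used above
# (the aux module itself is not shown in this snippet): the angle, in degrees,
# of the line from p1 to p2 measured against the horizontal. The y difference
# is flipped because image coordinates grow downwards.
import math

def anguloLineaEntreDosPuntos(p1, p2):
    return math.degrees(math.atan2(p1[1] - p2[1], p2[0] - p1[0]))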
Ejemplo n.º 46
0

    def similarity(self, matrix):
        """ Calculate similarity between this pattern matrix and another
            matrix. This calculates the cosine distance between pattern
            vectors for all the regions. Similar shapes should have similar
            vector patterns in all regions.
        """ 
        my_print = self.fingerprint()
        their_print = matrix.fingerprint(regions=self.get_fingerprint_regions())
        if len(my_print) != len(their_print):
            raise Exception("Finger print lengths don't match")

        out = []
        for mine, theirs in zip(my_print, their_print):
            out.append(self.cosine_similarity(mine[1], theirs[1]))

        print "Similarity: %s" % (repr(out))
        return sum(out) / float(len(out))
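
    def cosine_similarity(self, a, b):
        # Hypothetical sketch of the helper called above (not shown in this
        # snippet): cosine of the angle between two equal-length vectors;
        # 1.0 means identical direction, 0.0 means orthogonal.
        dot = sum(x * y for x, y in zip(a, b))
        norm_a = sum(x * x for x in a) ** 0.5
        norm_b = sum(y * y for y in b) ** 0.5
        if norm_a == 0 or norm_b == 0:
            return 0.0
        return dot / (norm_a * norm_b)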


if __name__=="__main__":
    img = Image(sys.argv[-1])
    dl = DrawingLayer((img.width,img.height))
    img.addDrawingLayer(dl)
    patterns = match_patterns(img, dl)
    img.save('outline2.png')
    for row in patterns:
        print "\t".join(row)