def addText(fileName, text):
    # Stamp a yellow label box with blue text onto an image file, in place.
    image = Image(fileName)
    draw = DrawingLayer((IMAGE_WIDTH, IMAGE_HEIGHT))  # module-level size constants
    draw.rectangle((8, 8), (121, 18), filled=True, color=Color.YELLOW)
    draw.setFontSize(20)
    draw.text(text, (10, 9), color=Color.BLUE)
    image.addDrawingLayer(draw)
    image.applyLayers()  # flatten the layer so the markup is saved with the image
    image.save(fileName)
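# A minimal usage sketch for addText() above (the file name, text, and the
# IMAGE_WIDTH/IMAGE_HEIGHT values are illustrative; the constants should match
# the real size of the image on disk):
IMAGE_WIDTH, IMAGE_HEIGHT = 320, 240
addText('snapshot.jpg', 'motion detected')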
def loop():
    try:
        json = get_jsonparsed_data(quakeURL)
        setRGB(0, 128, 64)
        setRGB(0, 255, 0)
        # Read distance value from Ultrasonic
        # distant = ultrasonicRead(ultrasonic_ranger)
        # button_state = digitalRead(button)
        # flushLCD('+++ ' + str(distant) + ':' + str(trigger))
        analogWrite(led, 255)
        # count down for photo !
        flushLCD('SMILE!')
        beep(0.01)
        time.sleep(1)
        beep(0.02)
        time.sleep(1)
        beep(0.1)
        time.sleep(1)
        frame = myCamera.getImage()
        flushLCD('processing ...')
        ts = time.time()
        now = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d-%H%M%S')
        photo = '/home/pi/tmp/' + now + '.jpg'  # NOTE MUST use absolute path here!
        psize = frame.width * frame.height
        print "Photo Size: " + photo + " " + str(psize) + " w:" + str(frame.width) + " h: " + str(frame.height)
        myDL = DrawingLayer((frame.width, frame.height))
        myDL.setFontSize(80)
        myDL.text("Quake report from " + json["Ort"] + " with Strength of " +
                  json["LastStrengthString"] + " at: " + json["LastOccured"],
                  (10, 10), color=Color.RED)
        frame.addDrawingLayer(myDL)
        frame.applyLayers()
        frame.save(photo)
        analogWrite(led, 0)
        beep(0.05)
        time.sleep(0.3)
        beep(0.2)
        time.sleep(1)  # wait save complete ...
        status = (twitterText1 + ' ' + hashtag + ' ' + twitterText2 + ' ' +
                  json["Ort"] + ' @:' + now + ' ' + twitterText3 + ' ' + json["Name"])
        # tweet ...
        api.update_with_media(photo, status=status)
        logLCD('TWEETed!')
    except TypeError:
        print "Type Error"
    except IOError:
        print "IO Error"
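# The quake loop above relies on helpers defined elsewhere in the project.
# Plausible minimal versions, assuming the GrovePi libraries (grovepi,
# grove_rgb_lcd) that also provide the setRGB/analogWrite calls used above;
# the buzzer port number is an assumption:
import time
import json as jsonlib
import urllib2
from grovepi import digitalWrite
from grove_rgb_lcd import setText

buzzer = 8  # hypothetical GrovePi buzzer port

def beep(duration):
    # pulse the buzzer for 'duration' seconds
    digitalWrite(buzzer, 1)
    time.sleep(duration)
    digitalWrite(buzzer, 0)

def flushLCD(msg):
    # replace whatever is on the LCD with a single message
    setText(msg)

def logLCD(msg):
    # show a message and leave it visible for a moment
    setText(msg)
    time.sleep(2)

def get_jsonparsed_data(url):
    # fetch a URL and parse its JSON payload
    return jsonlib.loads(urllib2.urlopen(url).read())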
def draw_blobs(img, corners, boat):
    # Draw the playing-field rectangle, the corner markers, the boat marker,
    # and the boat's estimated (lat, lon) on top of the image.
    centers = [blob.centroid() for blob in corners]
    tl = topLeft(centers)
    br = bottomRight(centers)
    maplayer = DrawingLayer((img.width, img.height))
    maplayer.rectangle(tl, (br[0] - tl[0], br[1] - tl[1]))
    for blob in corners:
        maplayer.circle(blob.centroid(), radius=20)
    maplayer.circle(boat.centroid(), radius=10)
    pos = boat_lat_lon(boat, corners)
    maplayer.text("(%.2f, %.2f)" % (pos[0], pos[1]), boat.centroid(),
                  color=Color.WHITE)
    img.addDrawingLayer(maplayer)
    img.applyLayers()
    img.show()
def __init__(self, camara_id, retraso_video=10, framerate=4.0, color=False,
             size=(320, 240), ruido=True):
    self.buffer_size = int(retraso_video * framerate) + 1  # (make sure it is never zero)
    self.intervalo_refresco = float(1.0 / framerate)  # how often the video-buffer data is refreshed
    self.momento_refresco = time.time() + self.intervalo_refresco  # moment at which data must be removed from and added to the video buffer
    time.sleep(self.intervalo_refresco)  # safety pause for the buffer generation
    self.video_buffer = []  # holds the frames spanning the delay time
    self.imagen = None  # temporary storage of the camera capture, for doing operations on it
    self.camara = Camera(camara_id)  # create an instance of the opencv Camera() class
    self.Flag_color = color  # if False, the image is processed and returned in grayscale
    self.Flag_resize = False  # set to True if a valid resolution is passed, to allow rescaling
    self.size = size  # if it is a valid resolution, the image is rescaled
    if size[0] > 0 and size[1] > 0:
        self.Flag_resize = True
    # noise control
    self.duracion_interferencia = (2, 6)  # time in seconds that an interference can last
    self.tiempo_entre_interferencias = (5, 20)  # periods of interference-free signal (from 25 to 45 seconds)
    self.FLAG_ruido_activo = ruido  # if True, interference kicks in at random moments
    self.FLAG_aplicar_ruido_ahora = True  # when FLAG_ruido_activo = True, indicates whether it is time to inject an interference
    if self.Flag_color == False:
        # load the list of frames that make up the noise
        self.video_ruido = [self.resize(self.gris(Image("VideoBuffer_SimpleCV/c_ruido%d.png" % x))) for x in range(5)]
    else:
        # load the list of frames that make up the noise
        self.video_ruido = [self.resize(Image("VideoBuffer_SimpleCV/c_ruido%d.png" % x)) for x in range(5)]
    self.frame_ruido_index = 0  # noise frame that will be shown
    self.incremento_aleatorio = retraso_video + random.randrange(self.duracion_interferencia[0], self.duracion_interferencia[1])
    self.momento_cambio_bandera = time.time() + self.incremento_aleatorio
    self.nivel_ruido_maximo = 10  # intensity at which the interference will be shown
    # creation and initial fill of the video buffer
    self.video_buffer = []  # define the buffer as a list
    self.imagen = self.getImage()  # capture one frame from the camera
    self.imagen = self.resize(self.gris(self.imagen))  # rescale it and convert it to grayscale
    self.imagen = self.imagen.blur(45, 45)  # until the delay has elapsed, the image stays blurred
    # this does not work :( It does not get along with the noise
    textLayer = DrawingLayer((self.imagen.width, self.imagen.height))  # create an empty layer for writing text
    textLayer.text("CONECTANDO...", (40, 70), color=Color.RED)  # put the "CONECTANDO..." ("CONNECTING...") message on the image
    self.imagen.addDrawingLayer(textLayer)  # merge the image layer with the text layer
    self.video_buffer = [self.imagen for x in range(self.buffer_size)]  # fill the video buffer with a static image
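# The constructor above uses two small helpers defined elsewhere in the class.
# Plausible sketches, inferred from how they are called (grayscale conversion
# and optional rescaling to the requested resolution):
def gris(self, img):
    # return the frame converted to grayscale
    return img.grayscale()

def resize(self, img):
    # rescale only when a valid target resolution was supplied
    if self.Flag_resize:
        return img.resize(self.size[0], self.size[1])
    return img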
def transform_classify_blobs(processing_path, img, db_blobs):
    log("transform_classify_blobs(processing_path=%s, img=%s, db_blobs=%s)" %
        (processing_path, img, db_blobs))
    # create markup layer
    img_markups = DrawingLayer(img.size())
    # traverse blobs
    for db_blob in db_blobs:
        b = db_blob["bounds"]
        # call external service to classify a blob image as a class category
        c1ass = classify_blob(db_blob["img_url"])
        db_blob["c1ass"] = c1ass
        db_blob["c1ass_state"] = "auto"
        # get the state color
        state = db_blob["state"]
        color = Color.RED if state == BLOB_STATE_REMOVED else (
            Color.ORANGE if state == BLOB_STATE_DUPLICATE else Color.GREEN)
        # draw blob rectangle
        img_markups.rectangle(topLeft=(b["x"], b["y"]),
                              dimensions=(b["w"], b["h"]),
                              color=color, width=3)
        # draw class text
        img_markups.text(text=c1ass, location=(b["x"], b["y"] - 15), color=color)
    # copy image and add markups
    img_marked = img.copy()
    img_marked.addDrawingLayer(img_markups)
    img_marked_name = TR_TYPE_BLOBS_CLASSIFIED + ".png"
    img_marked_path = processing_path + img_marked_name
    img_marked.save(img_marked_path)
    return db_transform(uid=ObjectId(),
                        title="classify blobs",
                        type=TR_TYPE_BLOBS_CLASSIFIED,
                        description="use the classifier to classify the blob",
                        img_path=img_marked_path,
                        img_url=url_for("static",
                                        filename=os.path.relpath(img_marked_path, "static"),
                                        _external=True)), db_blobs
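# classify_blob() above delegates to an external classification service. A
# hypothetical stand-in that posts the blob image URL to a REST endpoint and
# returns the predicted label (the endpoint and response shape are assumptions):
import requests

CLASSIFIER_URL = "http://localhost:5000/classify"  # hypothetical endpoint

def classify_blob(img_url):
    # send the blob image URL to the classifier and return its label
    resp = requests.post(CLASSIFIER_URL, json={"img_url": img_url})
    resp.raise_for_status()
    return resp.json().get("class", "unknown")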
def encuentraYDibujaAngulos(self, img):
    """ Besides drawing the arm's bone structure, returns the angles
    of those bones with the horizontal """
    # Draw the coordinate axes
    img.dl().line((20, img.height - 20), (20, img.height - 60), Color.RED, width=5)
    img.dl().line((20, img.height - 20), (60, img.height - 20), Color.RED, width=5)
    textLayer = DrawingLayer((img.width, img.height))
    textLayer.setFontSize(20)
    textLayer.text("90 grados", (20, img.height - 80), Color.RED)
    textLayer.text("0 grados", (70, img.height - 20), Color.RED)
    img.addDrawingLayer(textLayer)
    angulosHuesos = []
    if self.articulaciones != []:
        self.articulaciones = aux.ordenaListaPorDistanciaApunto(
            self.articulaciones, [0, 480])
        puntoInicial = self.articulaciones.pop()
        img.dl().circle(puntoInicial, 10, Color.BLUE, width=5)
        numAngulo = 1
        while self.articulaciones != []:
            p = self.articulaciones.pop()
            img.dl().line(puntoInicial, p, Color.BLUE, width=5)
            img.dl().circle(p, 10, Color.BLUE, width=5)
            textLayer = DrawingLayer((img.width, img.height))
            textLayer.setFontSize(24)
            textLayer.text(str(numAngulo), (p[0], p[1]), Color.RED)
            img.addDrawingLayer(textLayer)
            numAngulo += 1
            img.applyLayers()
            angulosHuesos.append(aux.anguloLineaEntreDosPuntos(p, puntoInicial))
            puntoInicial = p
    if len(angulosHuesos) == 3:
        return angulosHuesos
    else:
        return []
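# The routine above leans on two helpers from the aux module. Plausible
# sketches, inferred from how they are called (pop() must yield the point
# closest to the reference, and angles are measured against the horizontal
# with image y growing downwards):
import math

def anguloLineaEntreDosPuntos(p1, p2):
    # angle, in degrees, of the line p1 -> p2 with the horizontal
    return math.degrees(math.atan2(-(p2[1] - p1[1]), p2[0] - p1[0]))

def ordenaListaPorDistanciaApunto(puntos, referencia):
    # sort points by distance to a reference point, farthest first
    return sorted(puntos,
                  key=lambda p: math.hypot(p[0] - referencia[0],
                                           p[1] - referencia[1]),
                  reverse=True)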
def track(self): print "Press right mouse button to pause or play" print "Use left mouse button to select target" print "Target color must be different from background" print "Target must have width larger than height" print "Target can be upside down" #Parameters isUDPConnection = False # Currently switched manually in the code display = True displayDebug = True useBasemap = False maxRelativeMotionPerFrame = 2 # How much the target can moved between two succesive frames pixelPerRadians = 320 radius = pixelPerRadians referenceImage = '../ObjectTracking/kite_detail.jpg' scaleFactor = 0.5 isVirtualCamera = True useHDF5 = False # Open reference image: this is used at initlalisation target_detail = Image(referenceImage) # Get RGB color palette of target (was found to work better than using hue) pal = target_detail.getPalette(bins = 2, hue = False) # Open video to analyse or live stream #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480 if isVirtualCamera: #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video') #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video') #cam = VirtualCamera('output.avi', 'video') cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv','video') virtualCameraFPS = 25 else: cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')#640 * 480 #cam = Camera() # Get a sample image to initialize the display at the same size img = cam.getImage().scale(scaleFactor) print img.width, img.height # Create a pygame display if display: if img.width>img.height: disp = Display((27*640/10,25*400/10))#(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor))) else: disp = Display((810,1080)) #js = JpegStreamer() # Initialize variables previous_angle = 0 # target has to be upright when starting. Target width has to be larger than target heigth. 
previous_coord_px = (0, 0) # Initialized to top left corner, which always exists previous_dCoord = previous_coord_px previous_dAngle = previous_angle angles = [] coords_px = [] coord_px = [0, 0] angle = 0 target_elevations = [] target_bearings = [] times = [] wasTargetFoundInPreviousFrame = False i_frame = 0 isPaused = False selectionInProgress = False th = [100, 100, 100] skycolor = Color.BLUE timeLastTarget = 0 # Prepare recording recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_")+ 'simpleTrack' if useHDF5: try: os.remove(recordFilename + '.hdf5') except: print('Creating file ' + recordFilename + '.hdf5') """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error) #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file major: File accessability minor: Unable to open file""" h5py._errors.silence_errors() recordFile = h5py.File(recordFilename + '.hdf5', 'a') hdfSize = 0 dset = recordFile.create_dataset('kite', (2,2), maxshape=(None,7)) imset = recordFile.create_dataset('image', (2,img.width,img.height,3 ), maxshape=(None, img.width, img.height, 3)) else: try: os.remove(recordFilename + '.csv') except: print('Creating file ' + recordFilename + '.csv') recordFile = file(recordFilename + '.csv', 'a') csv_writer = csv.writer(recordFile) csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)', 'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)']) # Launch a thread to get UDP message with orientation of the camera mobile = mobileState.mobileState() if isUDPConnection: a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,)) a.start() # Loop while not canceled by user t0 = time.time() previousTime = t0 while not(display) or disp.isNotDone(): t = time.time() deltaT = (t-previousTime) FPS = 1.0/deltaT #print 'FPS =', FPS if isVirtualCamera: deltaT = 1.0/virtualCameraFPS previousTime = t i_frame = i_frame + 1 timestamp = datetime.datetime.utcnow() # Receive orientation of the camera if isUDPConnection: mobile.computeRPY([2, 0, 1], [-1, 1, 1]) ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \ [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix if useBasemap: # Warning this really slows down the computation m = Basemap(width=img.width, height=img.height, projection='aeqd', lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw), rsphere = radius) # Get an image from camera if not isPaused: img = cam.getImage() img = img.resize(int(scaleFactor*img.width), int(scaleFactor*img.height)) if display: # Pause image when right button is pressed dwn = disp.rightButtonDownPosition() if dwn is not None: isPaused = not(isPaused) dwn = None if display: # Create a layer to enable user to make a selection of the target selectionLayer = DrawingLayer((img.width, img.height)) if img: if display: # Create a new layer to host information retrieved from video layer = DrawingLayer((img.width, img.height)) # Selection is a rectangle drawn while holding mouse left button down if disp.leftButtonDown: corner1 = (disp.mouseX, disp.mouseY) selectionInProgress = True if selectionInProgress: corner2 = (disp.mouseX, disp.mouseY) bb = disp.pointsToBoundingBox(corner1, corner2)# Display the temporary selection if disp.leftButtonUp: # User has finished is selection selectionInProgress = False selection = img.crop(bb[0], bb[1], bb[2], bb[3]) if selection != None: # The 3 main colors in the area selected are considered. 
# Note that the selection should be included in the target and not contain background try: selection.save('../ObjectTracking/'+ 'kite_detail_tmp.jpg') img0 = Image("kite_detail_tmp.jpg") # For unknown reason I have to reload the image... pal = img0.getPalette(bins = 2, hue = False) except: # getPalette is sometimes bugging and raising LinalgError because matrix not positive definite pal = pal wasTargetFoundInPreviousFrame = False previous_coord_px = (bb[0] + bb[2]/2, bb[1] + bb[3]/2) if corner1 != corner2: selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), width = 5, color = Color.YELLOW) # If the target was already found, we can save computation time by # reducing the Region Of Interest around predicted position if wasTargetFoundInPreviousFrame: ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \ max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2)) ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1], \ maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \ centered = False) if display : # Draw the rectangle corresponding to the ROI on the complete image layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width, \ previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \ (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \ color = Color.GREEN, width = 2) else: # Search on the whole image if no clue of where is the target ROITopLeftCorner = (0, 0) ROI = img '''#Option 1 target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150) target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150) target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150) target_raw_img = target_part0+target_part1+target_part2 target_img = target_raw_img.erode(5).dilate(5) #Option 2 target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)''' # Find sky color sky = (img-img.binarize()).findBlobs(minsize=10000) if sky: skycolor = sky[0].meanColor() # Option 3 target_img = ROI-ROI # Black image # Loop through palette of target colors if display and displayDebug: decomposition = [] i_col = 0 for col in pal: c = tuple([int(col[i]) for i in range(0,3)]) # Search the target based on color ROI.save('../ObjectTracking/'+ 'ROI_tmp.jpg') img1 = Image('../ObjectTracking/'+ 'ROI_tmp.jpg') filter_img = img1.colorDistance(color = c) h = filter_img.histogram(numbins=256) cs = np.cumsum(h) thmax = np.argmin(abs(cs- 0.02*img.width*img.height)) # find the threshold to have 10% of the pixel in the expected color thmin = np.argmin(abs(cs- 0.005*img.width*img.height)) # find the threshold to have 10% of the pixel in the expected color if thmin==thmax: newth = thmin else: newth = np.argmin(h[thmin:thmax]) + thmin alpha = 0.5 th[i_col] = alpha*th[i_col]+(1-alpha)*newth filter_img = filter_img.threshold(max(40,min(200,th[i_col]))).invert() target_img = target_img + filter_img #print th i_col = i_col + 1 if display and displayDebug: [R, G, B] = filter_img.splitChannels() white = (R-R).invert() r = R*1.0/255*c[0] g = G*1.0/255*c[1] b = B*1.0/255*c[2] tmp = white.mergeChannels(r, g, b) decomposition.append(tmp) # Get a black background with with white target foreground target_img = target_img.threshold(150) target_img = target_img - ROI.colorDistance(color = skycolor).threshold(80).invert() if display and displayDebug: small_ini = target_img.resize(int(img.width/(len(pal)+1)), int(img.height/(len(pal)+1))) for tmp in decomposition: small_ini = 
small_ini.sideBySide(tmp.resize(int(img.width/(len(pal)+1)), int(img.height/(len(pal)+1))), side = 'bottom') small_ini = small_ini.adaptiveScale((int(img.width), int(img.height))) toDisplay = img.sideBySide(small_ini) else: toDisplay = img #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert() # Search for binary large objects representing potential target target = target_img.findBlobs(minsize = 500) if target: # If a target was found if wasTargetFoundInPreviousFrame: predictedTargetPosition = (width*maxRelativeMotionPerFrame/2, height*maxRelativeMotionPerFrame/2) # Target will most likely be close to the center of the ROI else: predictedTargetPosition = previous_coord_px # If there are several targets in the image, take the one which is the closest of the predicted position target = target.sortDistance(predictedTargetPosition) # Get target coordinates according to minimal bounding rectangle or centroid. coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY())) coord_px = ROITopLeftCorner + np.array(target[0].centroid()) # Rotate the coordinates of roll angle around the middle of the screen rot_coord_px = np.dot(ctm, coord_px - np.array([img.width/2, img.height/2])) + np.array([img.width/2, img.height/2]) if useBasemap: coord = sp.deg2rad(m(rot_coord_px[0], img.height-rot_coord_px[1], inverse = True)) else: coord = localProjection(rot_coord_px[0]-img.width/2, img.height/2-rot_coord_px[1], radius, mobile.yaw, mobile.pitch, inverse = True) target_bearing, target_elevation = coord # Get minimum bounding rectangle for display purpose minR = ROITopLeftCorner + np.array(target[0].minRect()) contours = target[0].contour() contours = [ ROITopLeftCorner + np.array(contour) for contour in contours] # Get target features angle = sp.deg2rad(target[0].angle()) + mobile.roll angle = sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle))) width = target[0].width() height = target[0].height() # Check if the kite is upside down # First rotate the kite ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \ [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix rotated_contours = [np.dot(ctm2, contour-coordMinRect) for contour in contours] y = [-tmp[1] for tmp in rotated_contours] itop = np.argmax(y) # Then looks at the points at the top ibottom = np.argmin(y) # and the point at the bottom # The point the most excentered is at the bottom if abs(rotated_contours[itop][0])>abs(rotated_contours[ibottom][0]): isInverted = True else: isInverted = False if isInverted: angle = angle + sp.pi # Filter the data alpha = 1-sp.exp(-deltaT/self.filterTimeConstant) if not(isPaused): dCoord = np.array(previous_dCoord)*(1-alpha) + alpha*(np.array(coord_px) - previous_coord_px) # related to the speed only if cam is fixed dAngle = np.array(previous_dAngle)*(1-alpha) + alpha*(np.array(angle) - previous_angle) else : dCoord = np.array([0, 0]) dAngle = np.array([0]) #print coord_px, angle, width, height, dCoord # Record important data times.append(timestamp) coords_px.append(coord_px) angles.append(angle) target_elevations.append(target_elevation) target_bearings.append(target_bearing) # Export data to controller self.elevation = target_elevation self.bearing = target_bearing self.orientation = angle dt = time.time()-timeLastTarget self.ROT = dAngle/dt self.lastUpdateTime = t # Save for initialisation of next step previous_dCoord = dCoord previous_angle = angle previous_coord_px = (int(coord_px[0]), 
int(coord_px[1])) wasTargetFoundInPreviousFrame = True timeLastTarget = time.time() else: wasTargetFoundInPreviousFrame = False if useHDF5: hdfSize = hdfSize+1 dset.resize((hdfSize, 7)) imset.resize((hdfSize, img.width, img.height, 3)) dset[hdfSize-1,:] = [time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT] imset[hdfSize-1,:,:,:] = img.getNumpy() recordFile.flush() else: csv_writer.writerow([time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT]) if display : if target: # Add target features to layer # Minimal rectange and its center in RED layer.polygon(minR[(0, 1, 3, 2), :], color = Color.RED, width = 5) layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10, filled = True, color = Color.RED) # Target contour and centroid in BLUE layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled = True, color = Color.BLUE) layer.polygon(contours, color = Color.BLUE, width = 5) # Speed vector in BLACK layer.line((int(coord_px[0]), int(coord_px[1])), (int(coord_px[0]+20*dCoord[0]), int(coord_px[1]+20*dCoord[1])), width = 3) # Line giving angle layer.line((int(coord_px[0]+200*sp.cos(angle)), int(coord_px[1]+200*sp.sin(angle))), (int(coord_px[0]-200*sp.cos(angle)), int(coord_px[1]-200*sp.sin(angle))), color = Color.RED) # Line giving rate of turn #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10)))) # Add the layer to the raw image toDisplay.addDrawingLayer(layer) toDisplay.addDrawingLayer(selectionLayer) # Add time metadata toDisplay.drawText(str(i_frame)+" "+ str(timestamp), x=0, y=0, fontsize=20) # Add Line giving horizon #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED) # Plot parallels for lat in range(-90, 90, 15): r = range(0, 361, 10) if useBasemap: # \todo improve for high roll l = m (r, [lat]*len(r)) pix = [np.array(l[0]), img.height-np.array(l[1])] else: l = localProjection(sp.deg2rad(r), \ sp.deg2rad([lat]*len(r)), \ radius, \ lon_0 = mobile.yaw, \ lat_0 = mobile.pitch, \ inverse = False) l = np.dot(ctm, l) pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])] for i in range(len(r)-1): if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img): layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2) # Plot meridians for lon in range(0, 360, 15): r = range(-90, 91, 10) if useBasemap: # \todo improve for high roll l = m ([lon]*len(r), r) pix = [np.array(l[0]), img.height-np.array(l[1])] else: l= localProjection(sp.deg2rad([lon]*len(r)), \ sp.deg2rad(r), \ radius, \ lon_0 = mobile.yaw, \ lat_0 = mobile.pitch, \ inverse = False) l = np.dot(ctm, l) pix = [np.array(l[0])+img.width/2, img.height/2-np.array(l[1])] for i in range(len(r)-1): if isPixelInImage((pix[0][i],pix[1][i]), img) or isPixelInImage((pix[0][i+1],pix[1][i+1]), img): layer.line((pix[0][i],pix[1][i]), (pix[0][i+1], pix[1][i+1]), color=Color.WHITE, width = 2) # Text giving bearing # \todo improve for high roll for bearing_deg in range(0, 360, 30): l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False) l = np.dot(ctm, l) layer.text(str(bearing_deg), ( img.width/2+int(l[0]), img.height-20), color = Color.RED) # Text giving elevation # \todo improve for 
high roll for elevation_deg in range(-60, 91, 30): l = localProjection(0, sp.deg2rad(elevation_deg), radius, lon_0 = mobile.yaw, lat_0 = mobile.pitch, inverse = False) l = np.dot(ctm, l) layer.text(str(elevation_deg), ( img.width/2 ,img.height/2-int(l[1])), color = Color.RED) #toDisplay.save(js) toDisplay.save(disp) if display : toDisplay.removeDrawingLayer(1) toDisplay.removeDrawingLayer(0) recordFile.close()
from SimpleCV import Image, Display, DrawingLayer, Color
from time import sleep

myDisplay = Display()
raspberryImage = Image("test.jpg")
myDrawingLayer = DrawingLayer((raspberryImage.width, raspberryImage.height))
myDrawingLayer.rectangle((50, 20), (250, 60), filled=True)
myDrawingLayer.setFontSize(45)
myDrawingLayer.text("Raspberries!", (50, 20), color=Color.WHITE)
raspberryImage.addDrawingLayer(myDrawingLayer)
raspberryImage.applyLayers()
raspberryImage.save(myDisplay)
while not myDisplay.isDone():
    sleep(0.1)
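# A small variant of the example above that centers the caption instead of
# hard-coding its position; textDimensions() reports the rendered size of a
# string at the layer's current font size ("test.jpg" is again a placeholder):
from SimpleCV import Image, DrawingLayer, Color

img = Image("test.jpg")
layer = DrawingLayer((img.width, img.height))
layer.setFontSize(45)
tw, th = layer.textDimensions("Raspberries!")
layer.text("Raspberries!", ((img.width - tw) / 2, (img.height - th) / 2),
           color=Color.WHITE)
img.addDrawingLayer(layer)
img.applyLayers()
img.save("test_labeled.jpg")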
def track(self): print "Press right mouse button to pause or play" print "Use left mouse button to select target" print "Target color must be different from background" print "Target must have width larger than height" print "Target can be upside down" #Parameters isUDPConnection = False # Currently switched manually in the code display = True displayDebug = True useBasemap = False maxRelativeMotionPerFrame = 2 # How much the target can moved between two succesive frames pixelPerRadians = 320 radius = pixelPerRadians referenceImage = '../ObjectTracking/kite_detail.jpg' scaleFactor = 0.5 isVirtualCamera = True useHDF5 = False # Open reference image: this is used at initlalisation target_detail = Image(referenceImage) # Get RGB color palette of target (was found to work better than using hue) pal = target_detail.getPalette(bins=2, hue=False) # Open video to analyse or live stream #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480 if isVirtualCamera: #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video') #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video') #cam = VirtualCamera('output.avi', 'video') cam = VirtualCamera( '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4', 'video') virtualCameraFPS = 25 else: cam = JpegStreamCamera( 'http://192.168.43.1:8080/videofeed') #640 * 480 #cam = Camera() # Get a sample image to initialize the display at the same size img = cam.getImage().scale(scaleFactor) print img.width, img.height # Create a pygame display if display: if img.width > img.height: disp = Display( (27 * 640 / 10, 25 * 400 / 10) ) #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor))) else: disp = Display((810, 1080)) #js = JpegStreamer() # Initialize variables previous_angle = 0 # target has to be upright when starting. Target width has to be larger than target heigth. 
previous_coord_px = ( 0, 0) # Initialized to top left corner, which always exists previous_dCoord = previous_coord_px previous_dAngle = previous_angle angles = [] coords_px = [] coord_px = [0, 0] angle = 0 target_elevations = [] target_bearings = [] times = [] wasTargetFoundInPreviousFrame = False i_frame = 0 isPaused = False selectionInProgress = False th = [100, 100, 100] skycolor = Color.BLUE timeLastTarget = 0 # Prepare recording recordFilename = datetime.datetime.utcnow().strftime( "%Y%m%d_%Hh%M_") + 'simpleTrack' if useHDF5: try: os.remove(recordFilename + '.hdf5') except: print('Creating file ' + recordFilename + '.hdf5') """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error) #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file major: File accessability minor: Unable to open file""" h5py._errors.silence_errors() recordFile = h5py.File( os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'), 'a') hdfSize = 0 dset = recordFile.create_dataset('kite', (2, 2), maxshape=(None, 7)) imset = recordFile.create_dataset('image', (2, img.width, img.height, 3), maxshape=(None, img.width, img.height, 3)) else: try: os.remove(recordFilename + '.csv') except: print('Creating file ' + recordFilename + '.csv') recordFile = file( os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a') csv_writer = csv.writer(recordFile) csv_writer.writerow([ 'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)', 'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)' ]) # Launch a thread to get UDP message with orientation of the camera mobile = mobileState.mobileState() if isUDPConnection: mobile.open() # Loop while not canceled by user t0 = time.time() previousTime = t0 while not (display) or disp.isNotDone(): t = time.time() deltaT = (t - previousTime) FPS = 1.0 / deltaT #print 'FPS =', FPS if isVirtualCamera: deltaT = 1.0 / virtualCameraFPS previousTime = t i_frame = i_frame + 1 timestamp = datetime.datetime.utcnow() # Receive orientation of the camera if isUDPConnection: mobile.computeRPY([2, 0, 1], [-1, 1, 1]) ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \ [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix if useBasemap: # Warning this really slows down the computation m = Basemap(width=img.width, height=img.height, projection='aeqd', lat_0=sp.rad2deg(mobile.pitch), lon_0=sp.rad2deg(mobile.yaw), rsphere=radius) # Get an image from camera if not isPaused: img = cam.getImage() img = img.resize(int(scaleFactor * img.width), int(scaleFactor * img.height)) if display: # Pause image when right button is pressed dwn = disp.rightButtonDownPosition() if dwn is not None: isPaused = not (isPaused) dwn = None if display: # Create a layer to enable user to make a selection of the target selectionLayer = DrawingLayer((img.width, img.height)) if img: if display: # Create a new layer to host information retrieved from video layer = DrawingLayer((img.width, img.height)) # Selection is a rectangle drawn while holding mouse left button down if disp.leftButtonDown: corner1 = (disp.mouseX, disp.mouseY) selectionInProgress = True if selectionInProgress: corner2 = (disp.mouseX, disp.mouseY) bb = disp.pointsToBoundingBox( corner1, corner2) # Display the temporary selection if disp.leftButtonUp: # User has finished is selection selectionInProgress = False selection = img.crop(bb[0], bb[1], bb[2], bb[3]) if selection != None: # The 3 main colors in the area selected are 
considered. # Note that the selection should be included in the target and not contain background try: selection.save('../ObjectTracking/' + 'kite_detail_tmp.jpg') img0 = Image( "kite_detail_tmp.jpg" ) # For unknown reason I have to reload the image... pal = img0.getPalette(bins=2, hue=False) except: # getPalette is sometimes bugging and raising LinalgError because matrix not positive definite pal = pal wasTargetFoundInPreviousFrame = False previous_coord_px = (bb[0] + bb[2] / 2, bb[1] + bb[3] / 2) if corner1 != corner2: selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), width=5, color=Color.YELLOW) # If the target was already found, we can save computation time by # reducing the Region Of Interest around predicted position if wasTargetFoundInPreviousFrame: ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \ max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2)) ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1], \ maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \ centered = False) if display: # Draw the rectangle corresponding to the ROI on the complete image layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width, \ previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \ (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \ color = Color.GREEN, width = 2) else: # Search on the whole image if no clue of where is the target ROITopLeftCorner = (0, 0) ROI = img '''#Option 1 target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150) target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150) target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150) target_raw_img = target_part0+target_part1+target_part2 target_img = target_raw_img.erode(5).dilate(5) #Option 2 target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)''' # Find sky color sky = (img - img.binarize()).findBlobs(minsize=10000) if sky: skycolor = sky[0].meanColor() # Option 3 target_img = ROI - ROI # Black image # Loop through palette of target colors if display and displayDebug: decomposition = [] i_col = 0 for col in pal: c = tuple([int(col[i]) for i in range(0, 3)]) # Search the target based on color ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg') img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg') filter_img = img1.colorDistance(color=c) h = filter_img.histogram(numbins=256) cs = np.cumsum(h) thmax = np.argmin( abs(cs - 0.02 * img.width * img.height) ) # find the threshold to have 10% of the pixel in the expected color thmin = np.argmin( abs(cs - 0.005 * img.width * img.height) ) # find the threshold to have 10% of the pixel in the expected color if thmin == thmax: newth = thmin else: newth = np.argmin(h[thmin:thmax]) + thmin alpha = 0.5 th[i_col] = alpha * th[i_col] + (1 - alpha) * newth filter_img = filter_img.threshold( max(40, min(200, th[i_col]))).invert() target_img = target_img + filter_img #print th i_col = i_col + 1 if display and displayDebug: [R, G, B] = filter_img.splitChannels() white = (R - R).invert() r = R * 1.0 / 255 * c[0] g = G * 1.0 / 255 * c[1] b = B * 1.0 / 255 * c[2] tmp = white.mergeChannels(r, g, b) decomposition.append(tmp) # Get a black background with with white target foreground target_img = target_img.threshold(150) target_img = target_img - ROI.colorDistance( color=skycolor).threshold(80).invert() if display and displayDebug: small_ini = target_img.resize( int(img.width / (len(pal) + 1)), int(img.height / 
(len(pal) + 1))) for tmp in decomposition: small_ini = small_ini.sideBySide(tmp.resize( int(img.width / (len(pal) + 1)), int(img.height / (len(pal) + 1))), side='bottom') small_ini = small_ini.adaptiveScale( (int(img.width), int(img.height))) toDisplay = img.sideBySide(small_ini) else: toDisplay = img #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert() # Search for binary large objects representing potential target target = target_img.findBlobs(minsize=500) if target: # If a target was found if wasTargetFoundInPreviousFrame: predictedTargetPosition = ( width * maxRelativeMotionPerFrame / 2, height * maxRelativeMotionPerFrame / 2 ) # Target will most likely be close to the center of the ROI else: predictedTargetPosition = previous_coord_px # If there are several targets in the image, take the one which is the closest of the predicted position target = target.sortDistance(predictedTargetPosition) # Get target coordinates according to minimal bounding rectangle or centroid. coordMinRect = ROITopLeftCorner + np.array( (target[0].minRectX(), target[0].minRectY())) coord_px = ROITopLeftCorner + np.array( target[0].centroid()) # Rotate the coordinates of roll angle around the middle of the screen rot_coord_px = np.dot( ctm, coord_px - np.array([img.width / 2, img.height / 2])) + np.array( [img.width / 2, img.height / 2]) if useBasemap: coord = sp.deg2rad( m(rot_coord_px[0], img.height - rot_coord_px[1], inverse=True)) else: coord = localProjection( rot_coord_px[0] - img.width / 2, img.height / 2 - rot_coord_px[1], radius, mobile.yaw, mobile.pitch, inverse=True) target_bearing, target_elevation = coord # Get minimum bounding rectangle for display purpose minR = ROITopLeftCorner + np.array(target[0].minRect()) contours = target[0].contour() contours = [ ROITopLeftCorner + np.array(contour) for contour in contours ] # Get target features angle = sp.deg2rad(target[0].angle()) + mobile.roll angle = sp.deg2rad( unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle))) width = target[0].width() height = target[0].height() # Check if the kite is upside down # First rotate the kite ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \ [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix rotated_contours = [ np.dot(ctm2, contour - coordMinRect) for contour in contours ] y = [-tmp[1] for tmp in rotated_contours] itop = np.argmax(y) # Then looks at the points at the top ibottom = np.argmin(y) # and the point at the bottom # The point the most excentered is at the bottom if abs(rotated_contours[itop][0]) > abs( rotated_contours[ibottom][0]): isInverted = True else: isInverted = False if isInverted: angle = angle + sp.pi # Filter the data alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant) if not (isPaused): dCoord = np.array(previous_dCoord) * ( 1 - alpha) + alpha * ( np.array(coord_px) - previous_coord_px ) # related to the speed only if cam is fixed dAngle = np.array(previous_dAngle) * ( 1 - alpha) + alpha * (np.array(angle) - previous_angle) else: dCoord = np.array([0, 0]) dAngle = np.array([0]) #print coord_px, angle, width, height, dCoord # Record important data times.append(timestamp) coords_px.append(coord_px) angles.append(angle) target_elevations.append(target_elevation) target_bearings.append(target_bearing) # Export data to controller self.elevation = target_elevation self.bearing = target_bearing self.orientation = angle dt = time.time() - timeLastTarget self.ROT = dAngle / dt self.lastUpdateTime = t # Save for 
initialisation of next step previous_dCoord = dCoord previous_angle = angle previous_coord_px = (int(coord_px[0]), int(coord_px[1])) wasTargetFoundInPreviousFrame = True timeLastTarget = time.time() else: wasTargetFoundInPreviousFrame = False if useHDF5: hdfSize = hdfSize + 1 dset.resize((hdfSize, 7)) imset.resize((hdfSize, img.width, img.height, 3)) dset[hdfSize - 1, :] = [ time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT ] imset[hdfSize - 1, :, :, :] = img.getNumpy() recordFile.flush() else: csv_writer.writerow([ time.time(), coord_px[0], coord_px[1], angle, self.elevation, self.bearing, self.ROT ]) if display: if target: # Add target features to layer # Minimal rectange and its center in RED layer.polygon(minR[(0, 1, 3, 2), :], color=Color.RED, width=5) layer.circle( (int(coordMinRect[0]), int(coordMinRect[1])), 10, filled=True, color=Color.RED) # Target contour and centroid in BLUE layer.circle((int(coord_px[0]), int(coord_px[1])), 10, filled=True, color=Color.BLUE) layer.polygon(contours, color=Color.BLUE, width=5) # Speed vector in BLACK layer.line((int(coord_px[0]), int(coord_px[1])), (int(coord_px[0] + 20 * dCoord[0]), int(coord_px[1] + 20 * dCoord[1])), width=3) # Line giving angle layer.line((int(coord_px[0] + 200 * sp.cos(angle)), int(coord_px[1] + 200 * sp.sin(angle))), (int(coord_px[0] - 200 * sp.cos(angle)), int(coord_px[1] - 200 * sp.sin(angle))), color=Color.RED) # Line giving rate of turn #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10)))) # Add the layer to the raw image toDisplay.addDrawingLayer(layer) toDisplay.addDrawingLayer(selectionLayer) # Add time metadata toDisplay.drawText(str(i_frame) + " " + str(timestamp), x=0, y=0, fontsize=20) # Add Line giving horizon #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED) # Plot parallels for lat in range(-90, 90, 15): r = range(0, 361, 10) if useBasemap: # \todo improve for high roll l = m(r, [lat] * len(r)) pix = [np.array(l[0]), img.height - np.array(l[1])] else: l = localProjection(sp.deg2rad(r), \ sp.deg2rad([lat]*len(r)), \ radius, \ lon_0 = mobile.yaw, \ lat_0 = mobile.pitch, \ inverse = False) l = np.dot(ctm, l) pix = [ np.array(l[0]) + img.width / 2, img.height / 2 - np.array(l[1]) ] for i in range(len(r) - 1): if isPixelInImage( (pix[0][i], pix[1][i]), img) or isPixelInImage( (pix[0][i + 1], pix[1][i + 1]), img): layer.line((pix[0][i], pix[1][i]), (pix[0][i + 1], pix[1][i + 1]), color=Color.WHITE, width=2) # Plot meridians for lon in range(0, 360, 15): r = range(-90, 91, 10) if useBasemap: # \todo improve for high roll l = m([lon] * len(r), r) pix = [np.array(l[0]), img.height - np.array(l[1])] else: l= localProjection(sp.deg2rad([lon]*len(r)), \ sp.deg2rad(r), \ radius, \ lon_0 = mobile.yaw, \ lat_0 = mobile.pitch, \ inverse = False) l = np.dot(ctm, l) pix = [ np.array(l[0]) + img.width / 2, img.height / 2 - np.array(l[1]) ] for i in range(len(r) - 1): if isPixelInImage( (pix[0][i], pix[1][i]), img) or isPixelInImage( (pix[0][i + 1], pix[1][i + 1]), img): layer.line((pix[0][i], pix[1][i]), (pix[0][i + 1], pix[1][i + 1]), color=Color.WHITE, width=2) # Text giving bearing # \todo improve for high roll for bearing_deg in range(0, 360, 30): l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius, lon_0=mobile.yaw, 
lat_0=mobile.pitch, inverse=False) l = np.dot(ctm, l) layer.text( str(bearing_deg), (img.width / 2 + int(l[0]), img.height - 20), color=Color.RED) # Text giving elevation # \todo improve for high roll for elevation_deg in range(-60, 91, 30): l = localProjection(0, sp.deg2rad(elevation_deg), radius, lon_0=mobile.yaw, lat_0=mobile.pitch, inverse=False) l = np.dot(ctm, l) layer.text(str(elevation_deg), (img.width / 2, img.height / 2 - int(l[1])), color=Color.RED) #toDisplay.save(js) toDisplay.save(disp) if display: toDisplay.removeDrawingLayer(1) toDisplay.removeDrawingLayer(0) recordFile.close()
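# track() above calls a few utilities defined elsewhere in the robokite
# sources (localProjection, unwrap180, isPixelInImage). Sketches of the two
# simple ones, consistent with how they are called here; the real
# localProjection is an azimuthal projection and is not reproduced:
def unwrap180(angle_deg, previous_angle_deg):
    # shift angle_deg by multiples of 180 until it lies within +/-90 degrees
    # of previous_angle_deg (the target's orientation is defined modulo 180)
    while angle_deg - previous_angle_deg > 90:
        angle_deg = angle_deg - 180
    while angle_deg - previous_angle_deg < -90:
        angle_deg = angle_deg + 180
    return angle_deg

def isPixelInImage(pix, img):
    # True when an (x, y) pixel lies inside the image bounds
    return 0 <= pix[0] < img.width and 0 <= pix[1] < img.height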
def loop():
    while True:
        try:
            setRGB(0, 128, 64)
            setRGB(0, 255, 0)
            # Read distance value from Ultrasonic
            distant = ultrasonicRead(ultrasonic_ranger)
            button_state = digitalRead(button)
            if (distant <= trigger) and (button_state):
                # print 'Alarm ', distant, 'cm', 'trigger', trigger
                flushLCD('+++ ' + str(distant) + ':' + str(trigger))
                analogWrite(led, 255)
                # count down for photo !
                flushLCD('SMILE!')
                beep(0.01)
                time.sleep(1)
                beep(0.02)
                time.sleep(1)
                beep(0.1)
                time.sleep(1)
                frame = myCamera.getImage()
                flushLCD('processing ...')
                faces = frame.findHaarFeatures('face')
                if faces:
                    print str(len(faces)) + " faces"
                    fct = 0
                    ts = time.time()
                    now = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d-%H%M%S')
                    for face in faces:
                        fct += 1
                        print "face " + str(fct) + " at: " + str(face.coordinates())
                        myFace = face.crop()
                        # tweet all faces ...
                        photo = '/home/pi/tmp/' + now + 'F-' + str(fct) + '.jpg'  # NOTE MUST use absolute path here!
                        psize = myFace.width * myFace.height
                        if psize > 20000:  # looks like smaller images are trash
                            print "Photo Size: " + photo + " " + str(psize)
                            myDL = DrawingLayer((myFace.width, myFace.height))
                            myDL.setFontSize(25)
                            myDL.text("I am " + str(distant) + " cm next to a PiCam!",
                                      (myFace.width / 2 - 140, 10), color=Color.WHITE)
                            myFace.addDrawingLayer(myDL)
                            myFace.applyLayers()
                            myFace.save(photo)
                            beep(0.05)
                            time.sleep(0.3)
                            beep(0.2)
                            time.sleep(1)  # wait save complete ...
                            status = 'Look Ma, I did the #lnf16 @ #FHburgenland just now: ' + now
                            # tweet ...
                            api.update_with_media(photo, status=status)
                            logLCD('TWEETed!')
                        else:
                            print "Face skipped too small: " + str(psize)
                            logLCD("Face " + str(fct) + " skipped too small: " + str(psize))
                    # print 'Sleep before next watch cycle ...'
                else:
                    logLCD('NO faces detected!')
            else:
                # print 'No Alarm ', distant, 'cm', 'trigger', trigger
                flushLCD('--- ' + str(distant) + ':' + str(trigger))
                analogWrite(led, 0)
            time.sleep(1)
        except TypeError:
            print "Error"
        except IOError:
            print "Error"
# This example is modified from examples by Matt Richardson and Shawn Wallace,
# "Getting Started with Raspberry Pi"
from SimpleCV import Camera, Display, DrawingLayer, Color
from time import sleep

myCamera = Camera(prop_set={'width': 320, 'height': 240})
myDisplay = Display(resolution=(320, 240))
myDrawingLayer = DrawingLayer((320, 240))
while not myDisplay.isDone():
    frame = myCamera.getImage()
    faces = frame.findHaarFeatures('face')
    if faces:
        for face in faces:
            print "Face at: " + str(face.coordinates())
        myDrawingLayer.setFontSize(80)
        myDrawingLayer.rectangle((18, 148), (250, 60), filled=True)
        myDrawingLayer.text("Toadman", (20, 150), color=Color.GREEN)
        frame.addDrawingLayer(myDrawingLayer)
        frame.applyLayers()
        frame.save(myDisplay)
    else:
        print "No faces detected."
        frame.save(myDisplay)
    sleep(.1)
# Imports assumed by this snippet (SimpleCV, RPi.GPIO, random and time):
from SimpleCV import Camera, Display, DrawingLayer, Color
from time import time, sleep
import random
import RPi.GPIO as GPIO

myCamera = Camera(prop_set={'width': 320, 'height': 240})
myDisplay = Display(resolution=(320, 240))
myDrawingLayer = DrawingLayer((320, 240))
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.OUT)
while not myDisplay.isDone():
    frame = myCamera.getImage()
    faces = frame.findHaarFeatures('face')
    if faces:
        for face in faces:
            GPIO.output(2, GPIO.HIGH)
            print "Face at: " + str(face.coordinates())
            frame.clearLayers()
            myDrawingLayer.rectangle((0, 0), (120, 30), filled=True)
            myDrawingLayer.setFontSize(45)
            myDrawingLayer.text(str(random.randint(1, 1000000)), (0, 0), color=Color.RED)
            frame.addDrawingLayer(myDrawingLayer)
            frame.save("combat power" + str(time()) + ".jpg")
            sleep(.3)
    else:
        print "NO face"
        GPIO.output(2, GPIO.LOW)
    frame.save(myDisplay)
    sleep(.1)
def transform_blob_state_changes(processing_path, img, db_blobs_now, db_blobs_prev, max_offset):
    log("transform_blob_state_changes(processing_path=%s, img=%s, db_blobs_now=%s,\
db_blobs_prev=%s, max_offset=%s)" %
        (processing_path, img, db_blobs_now, db_blobs_prev, max_offset))
    # create markup layer
    img_markups = DrawingLayer(img.size())
    # count the state totals
    blob_count_duplicate = 0
    blob_count_new = 0
    blob_count_removed = 0
    # traverse db_blobs
    for db_blob_now in db_blobs_now:
        found_in_prev = False
        bn = db_blob_now["bounds"]
        for i, db_blob_prev in enumerate(db_blobs_prev):
            bp = db_blob_prev["bounds"]
            if abs(bn["x"] - bp["x"]) <= max_offset \
                    and abs(bn["y"] - bp["y"]) <= max_offset \
                    and abs(bn["w"] - bp["w"]) <= max_offset \
                    and abs(bn["h"] - bp["h"]) <= max_offset \
                    and db_blob_prev["state"] != BLOB_STATE_REMOVED:
                found_in_prev = True
                # remove item and immediately break
                db_blobs_prev.pop(i)
                break
        # set the state and associated color
        if found_in_prev:
            state = BLOB_STATE_DUPLICATE
            color = Color.ORANGE
            blob_count_duplicate += 1
        else:
            state = BLOB_STATE_NEW
            color = Color.GREEN
            blob_count_new += 1
        db_blob_now["state"] = state
        # draw blob rectangle
        img_markups.rectangle(topLeft=(bn["x"], bn["y"]),
                              dimensions=(bn["w"], bn["h"]),
                              color=color, width=3)
        # draw state text
        img_markups.text(text=state, location=(bn["x"], bn["y"] - 15), color=color)
    if SHOW_REMOVED_SNACKS:
        # draw removed items
        for db_blob_prev in db_blobs_prev:
            # ignore if prev was removed
            if db_blob_prev["state"] == BLOB_STATE_REMOVED:
                continue
            color = Color.RED
            state = BLOB_STATE_REMOVED
            blob_count_removed += 1
            bp = db_blob_prev["bounds"]
            # copy the prev blob image and add it to db_blobs_now
            i = len(db_blobs_now) + 1
            blob_img_name = "blob_" + str(i).zfill(2) + ".png"
            blob_img_path = processing_path + blob_img_name
            copyfile(db_blob_prev["img_path"], blob_img_path)
            db_blobs_now.append(
                db_blob(uid=ObjectId(),
                        title="blob " + str(i).zfill(2),
                        bounds=db_blob_prev["bounds"],
                        img_path=blob_img_path,
                        img_url=url_for("static",
                                        filename=os.path.relpath(blob_img_path, "static"),
                                        _external=True),
                        state=state))
            # draw blob rectangle
            img_markups.rectangle(topLeft=(bp["x"], bp["y"]),
                                  dimensions=(bp["w"], bp["h"]),
                                  color=color, width=3)
            # draw state text
            img_markups.text(text=state, location=(bp["x"], bp["y"] - 15), color=color)
    # if only duplicates, throw exception
    if blob_count_new == 0 and blob_count_removed == 0:
        raise NoBlobChangesDetectedException("No changes were detected.")
    # copy image and add markups
    img_marked = img.copy()
    img_marked.addDrawingLayer(img_markups)
    img_marked_name = TR_TYPE_BLOB_STATE_CHANGE + ".png"
    img_marked_path = processing_path + img_marked_name
    img_marked.save(img_marked_path)
    return db_transform(
        uid=ObjectId(),
        title="state changes",
        type=TR_TYPE_BLOB_STATE_CHANGE,
        description="compare all of the blobs and determine which ones are new, duplicated and removed",
        img_path=img_marked_path,
        img_url=url_for("static",
                        filename=os.path.relpath(img_marked_path, "static"),
                        _external=True)), db_blobs_now
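# db_blob() and db_transform() above act as record factories for the blob
# store. Hypothetical minimal versions that simply bundle their keyword
# arguments into dictionaries (the real project may return richer documents):
def db_blob(**fields):
    # one blob record: uid, title, bounds, img_path, img_url, state
    return dict(fields)

def db_transform(**fields):
    # one transform record describing a processing step and its output image
    return dict(fields)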
def get_bounding_box(keyword, url, filename):
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim / 800.0)
        print " resizing..."
        img = img.resize(w=int(w / ratio), h=int(h / ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))
    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb

        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
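# get_bounding_box() above assumes a scale() helper plus two control-flow
# exceptions tied to the keyboard commands. Plausible definitions:
def scale(ratio, point):
    # map a point from display coordinates back to original-image coordinates
    return (int(point[0] * ratio), int(point[1] * ratio))

class Skip(Exception):
    # raised when the operator presses 's' to skip the current image
    pass

class BadImage(Exception):
    # raised when the operator presses 'b' to flag an unusable picture
    pass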
img = util.tomaFoto("objeto.jpg", brillo = 55) #img = Image("lineaDe4.jpg") #img.live() # El truco esta en buscar el color azul del tablero en lugar de ir directamente a por las fichas img_tratada = img.binarize() #img_tratada.live() blobGrande = img_tratada.findBlobs().sortArea()[-1] if blobGrande: i_prima = blobGrande.length() / FACTOR_CONVERSION_PIXEL_A_MM d = (ALTURA_OBJETO * DISTANCIA_FOCAL / i_prima) / 10 textLayer = DrawingLayer((img.width,img.height)) textLayer.setFontSize(36) textLayer.text("Distancia = " + str(d) + " centimetros", (10,10), color=Color.RED) blobGrande.draw(width = 5, color = Color.RED) img.addDrawingLayer(img_tratada.dl()) img.addDrawingLayer(textLayer) img.show() util.pausa() else: print "No se han encontrado Blobs"