def scale(self, img_path, scale):
    """Resize the image at img_path by the given factor.

    The resized copy is written to a new 'resized' file, which is also
    recorded in self.transformations.  Returns the new file path.
    """
    destination = self.nfn('resized')
    resized = Image(img_path).scale(scale)
    resized.save(destination)
    self.transformations.append(destination)
    return destination
def encuentraYFiltraBlobs(self, areaMin, areaMax, toleranciaWH, desviacionD, toleranciaLP, tipoDibujo):
    """Find blobs in the phase-2 processed image, filter them by area and
    by shape, draw the requested overlay, and return the annotated image.

    areaMin/areaMax: bounds passed to the area filter.
    toleranciaWH/desviacionD/toleranciaLP: thresholds for the shape filter.
    tipoDibujo: 'blobs' draws the surviving blobs, 'estructura' draws the
    skeletal structure on the image.
    """
    imagenBlobs = Image(self.rutaImagenTratada_Fase2).copy()
    blobs = imagenBlobs.findBlobs()
    self.todosLosCandidatos = blobs
    # findBlobs() may return None when nothing is found; only filter/draw
    # when there is at least one candidate.
    if blobs:
        blobs.image = imagenBlobs
        self.areaBlobs = blobs.area()
        blobs = self.filtroPorArea(blobs, areaMin, areaMax)
        self.numBlobsCandidatosPorArea = len(blobs)
        # Looks for circular-shaped blobs; the blobs that pass the filter
        # are stored in the list self.articulaciones
        blobs = self.filtroPorForma(blobs, toleranciaWH, desviacionD, toleranciaLP)
        if tipoDibujo == 'blobs':
            self.dibujaBlobs(blobs)
        elif tipoDibujo == 'estructura':
            self.dibujaEstructura(imagenBlobs)
    # The processed image has to be saved to disk, otherwise the
    # integration with Tkinter does not work
    imagenBlobs.save(self.rutaImagenBlobs)
    return Image(self.rutaImagenBlobs)
def send_email(percentage): import smtplib from email.MIMEMultipart import MIMEMultipart from email.MIMEImage import MIMEImage from email.MIMEText import MIMEText # Prepare actual message msg = MIMEMultipart() msg['From'] = "*****@*****.**" # change to your mail msg['To'] = "*****@*****.**" # change to your mail msg['Subject'] = "RPi Camera Alarm!" imgcv = Image("image.jpg") imgcv.save("imagesend.jpg", quality=50) # reducing quality of the image for smaller size img1 = MIMEImage(open("imagesend.jpg", "rb").read(), _subtype="jpg") img1.add_header('Content-Disposition', 'attachment; filename="image.jpg"') msg.attach(img1) part = MIMEText('text', "plain") part.set_payload(("Raspberry Pi camera alarm activated with level {:f}" ).format(percentage)) msg.attach(part) try: server = smtplib.SMTP("mail.htnet.hr", 25) #change to your SMTP provider server.ehlo() server.starttls() server.sendmail(msg['From'], msg['To'], msg.as_string()) server.quit() print 'Successfully sent the mail' except smtplib.SMTPException as e: print(e)
def send_email(percentage): import smtplib from email.MIMEMultipart import MIMEMultipart from email.MIMEImage import MIMEImage from email.MIMEText import MIMEText # Prepare actual message msg = MIMEMultipart() msg['From'] = "*****@*****.**" # change to your mail msg['To'] = "*****@*****.**" # change to your mail msg['Subject'] = "RPi Camera Alarm!" imgcv = Image("image.jpg") imgcv.save("imagesend.jpg", quality=50) # reducing quality of the image for smaller size img1 = MIMEImage(open("imagesend.jpg","rb").read(), _subtype="jpg") img1.add_header('Content-Disposition', 'attachment; filename="image.jpg"') msg.attach(img1) part = MIMEText('text', "plain") part.set_payload(("Raspberry Pi camera alarm activated with level {:f}").format(percentage)) msg.attach(part) try: server = smtplib.SMTP("mail.htnet.hr", 25) #change to your SMTP provider server.ehlo() server.starttls() server.sendmail(msg['From'], msg['To'], msg.as_string()) server.quit() print 'Successfully sent the mail' except smtplib.SMTPException as e: print(e)
def encuentraYFiltraBlobs(self, tipoDibujo='estructura'):
    """Find blobs in the phase-2 processed image, filter them by area and
    shape, draw the requested overlay, and return the annotated image.

    tipoDibujo: 'blobs' draws the filtered blobs; 'estructura' (default)
    finds and draws the joint angles, stored in self.listaAngulos.
    """
    imagenBlobs = Image(self.rutaImagenTratada_Fase2).copy()
    blobs = imagenBlobs.findBlobs()
    self.todosLosCandidatos = blobs
    # BUG FIX: findBlobs() returns None when nothing is found, and the
    # old test `blobs != []` was True for None, crashing on blobs.image.
    # A plain truthiness test handles both None and an empty FeatureSet.
    if blobs:
        blobs.image = imagenBlobs
        self.areaBlobs = blobs.area()
        blobs = self.filtroPorArea(blobs)
        self.numBlobsCandidatosPorArea = len(blobs)
        # Looks for circular-shaped blobs; the blobs that pass the filter
        # are stored in the list self.articulaciones
        blobs = self.filtroPorForma(blobs)
        if tipoDibujo == 'blobs':
            self.dibujaBlobs(blobs)
        elif tipoDibujo == 'estructura':
            self.listaAngulos = self.encuentraYDibujaAngulos(imagenBlobs)
    # The processed image has to be saved to disk, otherwise the
    # integration with Tkinter does not work
    imagenBlobs.save(self.rutaImagenBlobs)
    return Image(self.rutaImagenBlobs)
def rotate(self, img_path, shoe_measurements):
    """Level the shoe image: rotate by the toe-heel angle around the
    cleat/length intersection point, record and return the new path."""
    source = Image(img_path)
    destination = self.nfn('rotated')
    rotated = source.rotate(
        shoe_measurements.toe_heel_angle(),
        point=shoe_measurements.cleat_length_intersection())
    self.transformations.append(destination)
    rotated.save(destination)
    return destination
def addText(fileName, text):
    """Stamp `text` in blue on a yellow banner near the top-left corner of
    the image stored at fileName, overwriting the file in place."""
    image = Image(fileName)
    banner = DrawingLayer((IMAGE_WIDTH, IMAGE_HEIGHT))
    banner.rectangle((8, 8), (121, 18), filled=True, color=Color.YELLOW)
    banner.setFontSize(20)
    banner.text(text, (10, 9), color=Color.BLUE)
    image.addDrawingLayer(banner)
    image.save(fileName)
def correct_alignment(si, image_path):
    """Make the image portrait-oriented (rotate landscape inputs by -90),
    save the result as a new step output and return its path."""
    destination = step_file_path(si, 'correct-alignment')
    picture = Image(image_path)
    if picture.width > picture.height:
        picture = picture.rotate(-90, fixed=False)
    picture.save(destination)
    si.step_outputs.append(destination)
    return destination
def correct_alignment(self, img_path):
    """Make the image portrait-oriented (rotate landscape inputs by -90),
    record the result in self.transformations and return the new path."""
    destination = self.nfn('correct-alignment')
    picture = Image(img_path)
    if picture.width > picture.height:
        picture = picture.rotate(-90, fixed=False)
    picture.save(destination)
    self.transformations.append(destination)
    return destination
def run(self):
    """Grab one frame from the camera selected by cam_mode, mark the last
    detected blob on it, push the frame to the framebuffer, and notify
    the websocket client with a "live" message.

    cam_mode 1: external camera frame side-by-side with a PiCamera shot;
    cam_mode 2: PiCamera only; cam_mode 3: secondary camera c2 only.
    """
    cam_mode = self.cam_mode
    wsh = self.wsh
    js = self.js
    wsh2 = self.wsh2
    d = "n"
    c2 = self.c2
    c = self.c
    sqx = self.sqx
    sqy = self.sqy
    x = 0
    y = 0
    stat = "live cam"
    if cam_mode == 3:
        img1 = c2.getImage()
    if cam_mode == 1:
        img1 = c.getImage()
        time.sleep(1)
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img2 = Image('imagesmall.jpg')
        time.sleep(.5)
        # Compose the two camera views into a single 544x288 frame.
        img1 = img1.sideBySide(img2)
        img1 = img1.scale(544, 288)
        time.sleep(.5)
    if cam_mode == 2:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    self.img1 = img1
    blobs = img1.findBlobs()
    if blobs:
        ##blobs.draw()
        # Mark the largest blob (last in the FeatureSet) and its centroid.
        img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
        img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
        blobx1 = blobs[-1].x
        bloby1 = blobs[-1].y
        print blobx1
        print bloby1
        img1.drawText("ogp: live cam", 10, 10, fontsize=50)
        img1.drawText(str(blobx1), blobx1, 250, color=(255, 255, 255), fontsize=20)
        img1.drawText(str(bloby1), 10, bloby1, color=(255, 255, 255), fontsize=20)
        img1.save(js.framebuffer)
        sqx2 = sqx + 20
        sqy2 = sqy + 20
        time.sleep(.5)
        wsh.write_message(wsh2, "live")
    else:
        wsh.write_message(wsh2, "live")
def build_bg():
    """Capture BG_PICS frames and average them into one background image.

    Relies on module globals: BG_PICS, bgname, bgdir, bgf, FRAME_DELAY,
    camera and save_picture().  The averaged image is saved to bgf and
    returned.
    """
    for n in range(BG_PICS):
        pn = bgname.format(n)
        save_picture(pn)
        sleep(FRAME_DELAY)
    camera.stop_preview()
    frames = ImageSet()
    frames.load(bgdir)
    # Start from a blank image of the same size and accumulate each frame
    # at 1/nframes weight: the result is the per-pixel mean of all frames.
    img = Image(frames[0].size())
    nframes = len(frames)
    for frame in frames:
        img = img + (frame / nframes)
    img.save(bgf)
    return img
def face_recognize(filename):
    """Detect faces in the image at filename, draw a rectangle around each
    one, overwrite the file, and report the count on stdout."""
    from SimpleCV import Image, Display, DrawingLayer

    image = Image(filename)
    faces = image.findHaarFeatures('face.xml')
    if not faces:
        print('沒有偵測到人臉')
        return
    for face in faces:
        overlay = DrawingLayer((image.width, image.height))
        overlay.centeredRectangle(face.coordinates(),
                                  (face.width(), face.height()))
        image.addDrawingLayer(overlay)
    image.applyLayers()
    image.save(filename)
    print('偵測到 {} 張人臉'.format(len(faces)))
def face_recognize(filename):
    """Find faces in the given image file and annotate them in place.

    Each detected face gets a centered rectangle on its own drawing layer;
    the annotated image replaces the original file and the face count is
    printed.
    """
    from SimpleCV import Image, Display, DrawingLayer

    picture = Image(filename)
    detections = picture.findHaarFeatures('face.xml')
    if detections:
        for detection in detections:
            layer = DrawingLayer((picture.width, picture.height))
            layer.centeredRectangle(detection.coordinates(),
                                    (detection.width(), detection.height()))
            picture.addDrawingLayer(layer)
        picture.applyLayers()
        picture.save(filename)
        print('偵測到 {} 張人臉'.format(len(detections)))
    else:
        print('沒有偵測到人臉')
def activaCamara():
    """Activate the camera to detect motion using SimpleCV's running
    segmentation algorithm.

    Takes a photo, feeds it to the running segmenter `rs`, and when a blob
    of plausible size is detected, saves an annotated alert image and
    emails the recipients.  Also takes a periodic snapshot when the
    `proximo` deadline passes.  Relies on module globals: resolucion,
    filename, brillo, modoExposicion, rs, util, camara, periodo,
    RUTA_BASE_LOCAL, RUTA_BASE_LOCAL_ALERTAS, mandaMensaje.
    """
    global proximo
    screenLength = resolucion[0]
    # A relevant blob must span between 15% and 75% of the screen width.
    min_blob_size = screenLength * 0.15
    max_blob_size = screenLength * 0.75
    ahora = datetime.datetime.now()
    img = util.tomaFoto(filename, brillo, resolucion, modoExposicion)
    rs.addImage(img)
    diffImg = rs.getSegmentedImage(False)
    if diffImg is not None:
        blobs = diffImg.dilate(3).findBlobs()
        if blobs is not None:
            print screenLength, min_blob_size, blobs[-1].length(), max_blob_size
            if blobs[-1].length() > min_blob_size and blobs[-1].length() < max_blob_size:
                print "El sensor ha detectado algo relevante"
                mensaje = 'Alerta ' + str(ahora) + ".jpg"
                imagen = Image(filename)
                # Save the annotated alert image into the alerts directory.
                os.chdir(RUTA_BASE_LOCAL_ALERTAS)
                blobs.image = imagen
                blobs[-1].draw(width=5, color=Color.GREEN)
                imagen.drawText(str(ahora), x=20, y=10, fontsize=25)
                imagen.save(str(ahora) + '.jpg')
                os.chdir(RUTA_BASE_LOCAL)
                destinatarios = ['*****@*****.**', '*****@*****.**']
                mensaje = 'Alerta de la alarma, visita https://www.dropbox.com para ver las fotos'
                mandaMensaje(mensaje, destinatarios)
    # ---------------------------------------------
    # Snapshot at regular intervals
    # ---------------------------------------------
    if ahora > proximo:
        print "tomada foto de cada hora"
        camara.tomaFoto(str(ahora) + ".jpg", brillo=50, resolucion=(640, 480), preview=False, modoExposicion='auto')
        proximo = datetime.datetime.now() + periodo
def drawImage():
    """Show the London map with a black marker drawn at pixel (191, 44).

    Opens a 1240x820 display window, overlays the marker on its own layer
    and renders the composed map into the window.
    """
    # Load map
    window = Display((1240, 820), title="London Map - Scotland Yard")
    london = Image("maps/map.jpg")
    # Check position from players; see corresponding pixel in list.
    # Draw the player's circle on a dedicated layer.
    markers = DrawingLayer((london.width, london.height))
    markers.circle((191, 44), 20, color=Color.BLACK, filled=True, alpha=255)
    london.addDrawingLayer(markers)
    # Display
    london.applyLayers()
    london.save(window)
    # TODO: later create a "draw possibilites" areas in map for thief
def Run(cmdPipe):
    """Play back timestamped frames from images/*.jpg at ~10 FPS,
    deleting each frame after display.

    cmdPipe: a multiprocessing-style pipe; receiving 'shutdown' stops the
    loop.  The playback speed self-regulates: when the buffer holds more
    than one minute of frames, it plays at full FPS, otherwise slightly
    slower so the producer can catch up.  Each frame is drawn with its
    age (in minutes, derived from the timestamp in its filename).
    """
    steadyStateFPS = 10
    desiredBuffer = 60*60  # 1 minute * 60 seconds
    numberOfFrames = steadyStateFPS*desiredBuffer;
    fmt = '%Y-%m-%d %H:%M:%S'
    disp = Display()
    filelist = []
    # Start above 100 so the first iteration refreshes the file list.
    frameCounter = 101
    sleepTime = .1
    while disp.isNotDone():
        # check command
        if cmdPipe.poll():
            cmd = cmdPipe.recv()
            if cmd == 'shutdown':
                print('player', 0, "Shutting down.")
                break
        # Re-scan the directory every 100 frames (or when drained).
        if frameCounter > 100 or len(filelist) == 0:
            frameCounter = 0
            filelist = glob("images/*.jpg")
            if len(filelist) > numberOfFrames:
                sleepTime = 1.0/steadyStateFPS
                print("player", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))
            else:
                sleepTime = (1.0/steadyStateFPS)+.01
                print("player", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))
        filename = filelist.pop(0)
        img = Image(filename)
        # The filename embeds a '%Y-%m-%d %H:%M:%S' timestamp; show how
        # many minutes behind "now" (UTC) this frame is.
        matchObj = re.search(r'[0-9- :]+', filename)
        d1_ts = time.mktime(datetime.strptime(matchObj.group(), fmt).timetuple())
        d2_ts = time.mktime(datetime.utcnow().timetuple())
        offset = int(d1_ts-d2_ts)/60
        img.drawText(str(offset), x=600, y=470)
        img.save(disp)
        # Frames are consumed destructively once displayed.
        os.remove(filename)
        frameCounter = frameCounter+1
        time.sleep(sleepTime)
class Scale:
    """OCR pipeline for a bathroom-scale photo: load -> crop -> partition
    into weight/fat/water panels -> read each panel's number via hunt()."""
    global db, Gd
    name = "Scale"

    def __init__(self, fname):
        # fname: path of the photo to read; a copy is kept as input.png
        # unless the source already is input.png.
        self.img = Image(fname)
        if not fname == "input.png":
            self.img.save("input.png")
        self.cropped = Cropx(self.img)  # crop image
        self.cropped.save("cropTest.png")  # save intermediate results
        # Partition the cropped image into the three display panels.
        [self.wt, self.fat, self.h2o] = Part(self, self.cropped)  # partition cropped image
        self.fat.save("fatTest.png")
        self.h2o.save("h2oTest.png")
        self.wt.save("wtTest.png")
        # hunt for the appropriate number in each partitioned image
        self.nwt = hunt(self.wt, "wt")
        self.nfat = hunt(self.fat, "fat")
        self.nh2o = hunt(self.h2o, "h2o")
        return None
def save_trained_image(train_as, img):
    """Copy `img` into the classifier train directory for class `train_as`
    under a unique sequence-numbered name.

    Returns a dict with the source path ("image"), the class ("class")
    and the destination path ("saved").
    """
    image = Image(img)
    trainPath = CLASSIFIER_TRAIN_PATH + '/' + train_as
    # make sure the train/{class} directory exists
    if not os.path.exists(trainPath):
        os.makedirs(trainPath)
    imageName = os.path.basename(img)
    (filename, fileext) = os.path.splitext(imageName)
    # Pick the first unused sequence number.  FIX: the original loop
    # recomputed the path with the stale counter before incrementing,
    # re-testing the same name twice per pass.
    uniq = 1
    imagePath = '%s/%s_%03d%s' % (trainPath, train_as, uniq, fileext)
    while os.path.exists(imagePath):
        uniq += 1
        imagePath = '%s/%s_%03d%s' % (trainPath, train_as, uniq, fileext)
    image.save(imagePath)
    log("save_trained_image(train_as=%s, img=%s, saved=%s)" % (train_as, img, imagePath))
    return {
        "image": img,
        "class": train_as,
        "saved": imagePath
    }
def get_image(url, file_name, dir='images/'): """Return PIL.Image Get 'file_name' at 'dir' if the file doesn't exists it download it and save it at dir+file_name otherwise if the files exists open the file and return the reference """ img = None if file_name: output_file = dir + file_name if dir else file_name if file_name in os.listdir(dir): img = Image(output_file) elif url: buff = StringIO(get_web_content(url)) img = Image(buff) img.save(output_file) else: print 'Error Impossible to get images files' return img
def generate_negative_examples(argv):
    """Generate N random grayscale negative-example crops for training.

    argv layout: [_, width, height, output_dir, image_dir, ...].  Collects
    all jpg/png files from the given directories, then writes N random
    width x height crops (one per source image) into output_dir as
    1.png..N.png.  Relies on module globals N and MAX_WIDTH.
    """
    image_dirs = argv[4:]
    images = []
    for image_dir in image_dirs:
        # grab all images
        images.extend(glob(path.join(image_dir, '*.jpg')))
        images.extend(glob(path.join(image_dir, '*.JPG')))
        images.extend(glob(path.join(image_dir, '*.png')))
        images.extend(glob(path.join(image_dir, '*.PNG')))
    # Deduplicate; pop() later draws an arbitrary image per iteration.
    images = set(images)
    if len(images) < N:
        print 'Not enough images! (got %d, need %d)' % (len(images), N)
        return
    width, height, output_dir = int(argv[1]), int(argv[2]), argv[3]
    if path.exists(output_dir) and (not path.isdir(output_dir)):
        print '%s is not a directory' % output_dir
        return
    elif not path.exists(output_dir):
        os.mkdir(output_dir)
    for i in xrange(N):
        print 'generating %3d/%d...' % ((i+1), N)
        img = Image(images.pop())
        img = img.grayscale()
        # Downscale very wide images, preserving the aspect ratio.
        if img.width > MAX_WIDTH:
            img = img.resize(MAX_WIDTH, int(1.0*img.height*MAX_WIDTH/img.width))
        x, y = random.randint(0, img.width-width), random.randint(0, img.height-height)
        img = img.crop(x, y, width, height)
        path_to_save = path.join(output_dir, '%d.png' % (i+1))
        img.save(path_to_save)
def detectCenter(image_file):
    """Locate the red center marker in the image and classify its position.

    Builds a binary mask from the distance to the reference red
    (155, 9, 49), picks the largest blob, saves an annotated align.png and
    returns align_center(...)'s classification.  Falls back to
    detectGreenLowQuality() when no red blob is found.
    """
    original = Image(image_file)
    # Amplify the color distance x8 to sharpen the red/non-red separation.
    center_only = original.colorDistance((155, 9, 49))*8
    mask = center_only.invert()
    #mask.save("center_mask.jpg")
    binarizedMask = mask.binarize().invert()
    #binarizedMask.save("binarized_mask_center.jpg")
    blobs = original.findBlobsFromMask(binarizedMask)
    if blobs == None:
        #print "No red found"
        return detectGreenLowQuality(image_file)
    # Largest blob (FeatureSets are sorted by size, smallest first).
    bestBlob = blobs[-1]
    bestBlob.drawMinRect(color=Color.RED, width=10)
    bestBlob.image = original
    original.save("align.png")
    centroidX = bestBlob.minRectX()
    centroidY = bestBlob.minRectY()
    # Have to find out which part of the screen centroid is in
    maxX = original.getNumpy().shape[0]
    maxY = original.getNumpy().shape[1]+100  # assume width of 150 pixels
    return align_center(maxX, maxY, centroidX, centroidY, 80, 80)
class Window:
    """Square SimpleCV canvas for visualizing particles; clicking the
    window clears the canvas and advances to the next generation."""
    # Width and height of the world, in pixels.
    world_size = 100
    img_size = (world_size, world_size)
    generation = 0

    def __init__(self):
        self.display = Display(self.img_size)
        self.img = Image(self.img_size)
        self.img.save(self.display)

    def dot(self, x, y, size=0, color=Color.WHITE):
        """Draw a filled circle at (x, y), rounded to whole pixels."""
        x = int(round(x))
        y = int(round(y))
        #print "Drawing robot particle at {}, {}".format(x, y)
        self.img.dl().circle((x, y), size, color, filled=True)

    def dot_red(self, x, y):
        """Draw a radius-2 red dot (used to mark the robot)."""
        self.dot(x, y, 2, Color.RED)

    def dots(self, coords, size=0, color=Color.WHITE):
        """Draw a dot for each (x, y) pair in coords."""
        for (x, y) in coords:
            self.dot(x, y, size, color)

    def clear(self):
        """Replace the canvas with a fresh blank image."""
        self.img = Image(self.img_size)
        #self.display.clear()
        self.img.save(self.display)

    def show(self):
        """Render the canvas, bump the generation counter and block until
        the user clicks."""
        self.img.save(self.display)
        self.generation += 1
        print "Generation = {}".format(self.generation)
        self.wait_for_mouse()

    def wait_for_mouse(self):
        """Busy-wait on pygame events until a mouse click, then clear."""
        while True:
            for event in pg.event.get():
                if event.type == pg.MOUSEBUTTONDOWN:
                    print event
                    self.clear()
                    return
def rotate_and_resize(si, left_sm, right_sm):
    """Rotate and scale the latest step output so the right shoe matches
    the left, then apply the same transform to the original image.

    si: step-info object whose step_outputs list tracks produced files;
    left_sm/right_sm: shoe measurements for the two shoes.
    Returns the path of the transformed original image.
    """
    new_file_path = step_file_path(si, 'rotate')
    # Start from the most recent step output.
    img = Image(si.step_outputs[-1:][0])
    # Combined correction angle from both shoes' toe-heel angles.
    rot = right_sm.toe_heel_angle() + left_sm.toe_heel_angle()
    img = img.rotate(rot, fixed=True)
    si.step_outputs.append(new_file_path)
    img.save(new_file_path)
    # Scale so the right shoe's length matches the left shoe's.
    scale = left_sm.shoe_length() / right_sm.shoe_length()
    new_file_path = step_file_path(si, 'scale-length')
    img = img.scale(scale)
    si.step_outputs.append(new_file_path)
    img.save(new_file_path)
    # We also resize the original file
    orig_img = Image(si.step_outputs[0])
    orig_img = orig_img.rotate(rot, fixed=True)
    orig_img = orig_img.scale(scale)
    new_file_path = step_file_path(si, 'transformed-original')
    si.step_outputs.append(new_file_path)
    orig_img.save(new_file_path)
    return new_file_path
def run(self):
    """Centering pass: grab a frame, find the largest blob, and issue
    serial movement commands until the blob sits inside the 20x20 target
    square at (sqx, sqy)..(sqx+20, sqy+20); progress is reported over the
    websocket.

    Serial protocol (global `s`): 's' stop, '4' right, '2' left,
    '9' down, '6' up; each move is executed through an acx() helper.
    """
    cam_mode = self.cam_mode
    s.write('s')
    wsh = self.wsh
    js = self.js
    wsh2 = self.wsh2
    ms = 50
    d = "n"
    acu = int(1)
    acd = int(1)
    acl = int(1)
    acr = int(1)
    c2 = self.c2
    sqx = self.sqx
    sqy = self.sqy
    x = 0
    y = 0
    stat = "centering"
    # Frame acquisition depends on the configured camera mode.
    if cam_mode == 3:
        img1 = c2.getImage()
    if cam_mode == 1:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    if cam_mode == 2:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    self.img1 = img1
    blobs = img1.findBlobs()
    if blobs:
        ##blobs.draw()
        # Annotate the largest blob and push the frame to the framebuffer.
        img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
        img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
        blobx1 = blobs[-1].x
        bloby1 = blobs[-1].y
        print blobx1
        print bloby1
        img1.drawText("ogp: centering", 10, 10, fontsize=50)
        img1.drawText(str(blobx1), 10, 200, color=(255, 255, 255), fontsize=50)
        ##img1.drawText(str(bloby1), 50, 200, color=(255,255,255), fontsize=50)
        img1.drawText(str(bloby1), 10, 250, color=(255, 255, 255), fontsize=50)
        img1.save(js.framebuffer)
        # Target square spans (sqx, sqy) to (sqx2, sqy2).
        sqx2 = sqx + 20
        sqy2 = sqy + 20
        if blobx1 > sqx2:
            d = '_r'
            s.write('4')
            mov = acx(s, d, ms, acu, acd, acl, acr)
            mov.run()
            wsh.write_message(wsh2, "g_" + str(d))
        if blobx1 < sqx:
            d = 'l'
            s.write('2')
            mov = acx(s, d, ms, acu, acd, acl, acr)
            mov.run()
            wsh.write_message(wsh2, "g_" + str(d))
        if bloby1 > sqy2:
            d = 'd'
            s.write('9')
            mov = acx(s, d, ms, acu, acd, acl, acr)
            mov.run()
            wsh.write_message(wsh2, "g_" + str(d))
        if bloby1 < sqy:
            d = 'u'
            s.write('6')
            mov = acx(s, d, ms, acu, acd, acl, acr)
            mov.run()
            wsh.write_message(wsh2, "g_" + str(d))
        wsh.write_message(wsh2, "c")
    else:
        # No blob found: report a null centering result.
        wsh.write_message(wsh2, "c_" + "null")
'This script is used to display the images saved in hdf5 file generated by simpleTrack.py script' ) print('Call the script with filename as argument, for example:') print(' python replay.py 20130805_14h03_simpleTrack.hdf5') print('') recordFilename = sys.argv[1] print('Opening file ' + recordFilename) recordFile = h5py.File(recordFilename, 'r') imgs = recordFile.get('image') kite = recordFile.get('kite') i_time = 0 #index of time column t = kite[0, i_time] img = imgs[0, :, :, :, ] s = img.shape disp = Display((s[0] * 4, s[1] * 4)) for i_img in range(len(imgs[:, 0, 0, 0])): dt = kite[i_img, 0] - t time.sleep(dt) t = kite[i_img, 0] time.sleep(0.05) img = imgs[i_img, :, :, :] print(dt) r = img[:, :, 0] g = img[:, :, 1] b = img[:, :, 2] im = Image(img) im.save(disp)
ret, frame = cap.read() blob = b.detect(frame) fcount = 0 for beest in blob: if fcount > 100: continue tmpImg = Image(frame, cv2image=True).crop(int(beest.pt[0]), int(beest.pt[1]), box_dim, box_dim, centered=True) if ((tmpImg.width + tmpImg.height) == 2 * box_dim): # cv2.imshow('classify',tmpImg.getNumpyCv2()) tmpImg.save(display) result = tkMessageBox.askquestion( "Wildebeest!", "Is this one? (no if you don't know)", icon='warning', type='yesnocancel') if result == 'yes': save_path = "yes/img-" + str(counter) + ".png" tmpImg.save(save_path) fcount += 1 counter += 1 if result == 'no': save_path = "no/img-" + str(counter) + ".png" tmpImg.save(save_path) counter += 1 if result == 'cancel':
##b=histograma(imgGray) b = imgGray.binarize(50) ainv = imgGray.binarize(100) a=ainv.__invert__() ##b.show() ##pylab.figure(1) ##pylab.subplot(311), a.show(), pylab.title('Imagen Original') ##a.show() ##histograma(imgGray) ##pylab.subplot(312), b.show(), pylab.title('Imagen Invertida') ##b.show() a.save("a.png") b.save("b.png") ainv.save("ainv.png") orig = cv2.imread("hola5Gray.png") ainv = cv2.imread("ainv.png") h=imgGray.histogram(255) a = cv2.imread("a.png") b = cv2.imread("b.png") c=cv2.add(a,b) pylab.subplot(2,2,1) pylab.imshow(orig) pylab.title('Imagen Original') pylab.subplot(2,2,2)
# Minimal paint program: hold the left mouse button to draw red dots;
# closing the window writes the drawing to art.png.
from SimpleCV import Display, Image, Color

size = (640, 480)
window = Display(size)
canvas = Image(size)
canvas.save(window)

while not window.isDone():
    # Only draw (and re-render) while the left button is held down.
    if window.mouseLeft:
        canvas.dl().circle((window.mouseX, window.mouseY), 5,
                           Color.RED, filled=True)
        canvas.save(window)

canvas.save("art.png")
def detectChargingStation(image_file):
    """Detect the blue charging station in the image and classify where it
    sits on screen.

    Returns 6 when the scene is too bright (mean hue distance > 190),
    2 when the best blob is too small (< 10000 px), otherwise the result
    of chargingStationLocation_New(...).  Tries a primary blue reference
    color first and falls back to a secondary one when no blobs match.
    """
    debug = False
    # Two candidate reference colors for the station's blue.
    myColor1 = (8, 33, 64)
    myColor2 = (70, 80, 100)
    original = Image(image_file)
    only_station = onlyBlueColor(original, myColor1)
    #Different findBlobs
    maskMean = original.hueDistance(color=(200, 160, 150))
    mask = only_station.hueDistance(color=myColor1).binarize()
    # Mean of the three channels, rounded to 4 decimal places.
    meanColor = (round(((maskMean.meanColor()[0]+maskMean.meanColor()[1]+maskMean.meanColor()[2])/3) * 10000)/10000)
    blobs = original.findBlobsFromMask(mask, minsize=400)
    if(meanColor > 190):
        return 6
    #print "Number of blobs found" , len(blobs)
    # If no blobs matched color 1 (blobs is None, so .image raises),
    # retry with the secondary reference color.
    try:
        blobs.image = original
    except Exception:
        only_station = onlyBlueColor(original, myColor2)
        mask = only_station.hueDistance(color=myColor2).binarize()
        blobs = original.findBlobsFromMask(mask, minsize=400)
        blobs.image = original
    station_blob = chooseBestBlobCosine(blobs)
    station_blob.drawMinRect(color=Color.RED)
    centroidX = station_blob.minRectX()
    centroidY = station_blob.minRectY()
    #Have to find out which part of the screen centroid is in
    maxX = original.getNumpy().shape[0]
    maxY = original.getNumpy().shape[1]+100
    if(debug):
        # Draw the screen thirds and the centroid, and dump intermediates.
        centroidLayer = DrawingLayer((maxX, maxY))
        centroidLayer.line((0, (1/3.0)*maxY), (maxX, (1/3.0)*maxY), color=Color.GREEN, width=2)
        centroidLayer.line((0, (2/3.0)*maxY), (maxX, (2/3.0)*maxY), color=Color.GREEN, width=2)
        centroidLayer.circle((int(centroidX), int(centroidY)), color=Color.GREEN, radius=5, filled=True)
        original.addDrawingLayer(centroidLayer)
        original.applyLayers()
        mask.save("binarizeMask.png")
        original.save("blobs.png")
        only_station.save("blueFilter.png")
        #print "Coordinates of centroid are "+str(centroidX)+", "+str(centroidY)
        #print "Coordinates of max are "+str(maxX)+", "+str(maxY)
    #if(station_blob.width() * station_blob.height() < 4000):
    #    return 2
    blobArea = station_blob.width() * station_blob.height()
    if(blobArea < 10000):
        return 2
    return chargingStationLocation_New(maxX, maxY, centroidX, centroidY, 200, station_blob.width() / float(station_blob.height()), blobArea)
# Receive one image over a TCP socket and display it with SimpleCV.
# The bytes are parsed by PIL from an in-memory buffer.
import socket
from PIL import Image as pil
from cStringIO import StringIO
from SimpleCV import Image, Display

display = Display()
HOST = "192.168.2.19"
PORT = 8000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
# Only a single connection is ever accepted.
conn, addr = s.accept()
print "Connected by", addr
while 1:
    b = ""
    # Read until the peer closes the connection (recv returns '').
    while 1:
        data = conn.recv(1024)
        if not data:
            break
        b += data
    if len(b) > 0:
        img = Image(pil.open(StringIO(b)))
        img.save(display)
    # NOTE(review): once the peer has closed, recv keeps returning ''
    # so this outer loop spins without accepting a new connection —
    # confirm whether a re-accept or break was intended.
# -*- coding: utf-8 -*-
# Paint program: hold the left mouse button to draw white dots;
# closing the window writes the drawing to ex7.png.
from SimpleCV import Display, Image, Color

size = (640, 480)
window = Display(size)
canvas = Image(size)
canvas.save(window)

while not window.isDone():
    # Only draw (and re-render) while the left button is held down.
    if window.mouseLeft:
        canvas.dl().circle((window.mouseX, window.mouseY), 4,
                           Color.WHITE, filled=True)
        canvas.save(window)

canvas.save("ex7.png")
# Stamp a "Raspberries!" title banner onto the photo and save the result
# as raspberries-titled.jpg.
from SimpleCV import Image, DrawingLayer, Color
from time import sleep

photo = Image("raspberries.jpg")
overlay = DrawingLayer((photo.width, photo.height))
overlay.rectangle((50, 20), (250, 60), filled=True)
overlay.setFontSize(45)
overlay.text("Raspberries!", (50, 20), color=Color.WHITE)
photo.addDrawingLayer(overlay)
photo.applyLayers()
photo.save("raspberries-titled.jpg")
# run this from X environment (rdp)
# Show test.jpg in a window and idle until the user closes it.
from SimpleCV import Image, Display
from time import sleep

viewer = Display()
Image("test.jpg").save(viewer)
while not viewer.isDone():
    sleep(0.1)
# -*- coding: utf-8 -*-
# Display img-ex1.png for five seconds, then quit.
from SimpleCV import Display, Image
import time

screen = Display()
Image("img-ex1.png").save(screen)
time.sleep(5)
exit()
# Sample one pixel of ex15.png, zero its red and green channels,
# preview the image and save it under a new name.
from SimpleCV import Image

picture = Image('ex15.png')
# Read the RGB triplet at pixel (100, 100).
red, green, blue = picture.getPixel(100, 100)
# Keep only the blue component of that pixel.
picture[100, 100] = (0, 0, blue)
# Show the image for a few seconds.
picture.show()
# Save the result under a different name.
picture.save('ex15-result.png')
# -*- coding: utf-8 -*-
# Convert ex8.jpg to PNG, then reload the PNG and preview it at 800x600.
from SimpleCV import Image

Image("ex8.jpg").save("ex8PNG.png")
Image("ex8PNG.png").resize(800, 600).show()
def doface(aa, f1, cc, f2, ee): camera = PiCamera() #imgg = Image('img1.jpg') #disp = Display(imgg.size()) dsize = (640, 480) disp = Display(dsize) #drawing = Image('mustache.png') #maskk = drawing.createAlphaMask() #camera.start_preview() #sleep(2) #['right_eye.xml', 'lefteye.xml', 'face3.xml', 'glasses.xml', # 'right_ear.xml', 'fullbody.xml', 'profile.xml', 'upper_body2.xml', # 'face.xml', 'face4.xml', 'two_eyes_big.xml', 'right_eye2.xml', # 'left_ear.xml', 'nose.xml', 'upper_body.xml', 'left_eye2.xml', # 'two_eyes_small.xml', 'face2.xml', 'eye.xml', 'face_cv2.xml', # 'mouth.xml', 'lower_body.xml'] while disp.isNotDone(): camera.capture('img2.png') img = Image('img2.png') img = img.resize(640, 480) #whatt = img.listHaarFeatures() faces = img.findHaarFeatures('face.xml') print 'faces:', faces if faces: #is not None: face = faces.sortArea()[-1] #print 'size:',face.size if aa == 'none': break elif aa == 'block': face.draw() else: f0draw = aa + '.png' draw0 = Image('use/' + f0draw) face = face.blit(draw0, pos=(100, 200)) #bigFace = face[-1] myface = face.crop() if f1 and cc is not None: feature01 = f1 + '.xml' f1draw = cc + '.png' draw1 = Image('/home/pi/cv/use/' + f1draw) feature1s = myface.findHaarFeatures(feature01) if feature1s is not None: feature1 = feature1s.sortArea()[-1] xpos1 = face.points[0][0] + feature1.x - (draw1.width / 2) ypos1 = face.points[0][ 1] + feature1.y #+ (2*draw1.height/3) #pos = (xmust,ymust) img = img.blit(draw1, pos=(xpos1, ypos1)) #mask=maskk) if f2 and ee is not None: feature02 = f2 + '.xml' f2draw = ee + '.png' draw2 = Image('/home/pi/cv/use/' + f2draw) feature2s = myface.findHaarFeatures(feature02) if feature2s is not None: feature2 = feature2s.sortArea()[-1] xpos2 = face.points[0][0] + feature2.x - (draw2.width / 2) ypos2 = face.points[0][ 1] + feature2.y #+ (2*draw2.height/3) #pos = (xmust,ymust) img = img.blit(draw2, pos=(xpos2, ypos2)) #mask=maskk) img.save(disp) else: print 'no face~~'
def run(self):
    """Auto-calibration pass: nudge the rig right, down, left, then up,
    re-locating the largest blob after each move, and report the four
    per-step displacements over the websocket as
    "x_<cal1>_<cal2>_<cal3>_<cal4>".

    Serial protocol (global `s`): '4' right, '9' down, '2' left, '6' up;
    each move runs through an acx() helper.  Every intermediate frame is
    annotated with the blob positions measured so far and pushed to the
    framebuffer.
    """
    wsh = self.wsh
    ## c = self.c
    js = self.js
    wsh2 = self.wsh2
    acu = int(1)
    acd = int(1)
    acl = int(1)
    acr = int(1)
    irpic = pinoir2(js)
    # Baseline position before any movement.
    img1 = Image('imagesmall.jpg')
    blobs = img1.findBlobs()
    img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
    img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
    acx1 = blobs[-1].x
    acy1 = blobs[-1].y
    img1.drawText("ogp: autocalibrating", 10, 10, fontsize=50)
    img1.drawText(str(acx1), 10, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy1), 10, 75, color=(255, 255, 255), fontsize=20)
    img1.save(js.framebuffer)
    # Step 1: move right, then re-measure.
    d = 'r'
    ms = 50
    s.write('4')
    mov = acx(s, d, ms, acu, acd, acl, acr)
    mov.run()
    time.sleep(1)
    # NOTE(review): `c = self.c` is commented out above, so c.getImage()
    # relies on a global camera object — confirm.
    img1 = c.getImage()
    blobs = img1.findBlobs()
    img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
    img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
    acx2 = blobs[-1].x
    acy2 = blobs[-1].y
    img1.drawText("ogp: autocalibrating", 10, 10, fontsize=50)
    img1.drawText(str(acx1), 10, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy1), 10, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx2), 40, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy2), 40, 75, color=(255, 255, 255), fontsize=20)
    img1.save(js.framebuffer)
    # Step 2: move down, then re-measure.
    d = 'd'
    ms = 50
    s.write('9')
    mov = acx(s, d, ms, acu, acd, acl, acr)
    mov.run()
    time.sleep(1)
    img1 = c.getImage()
    blobs = img1.findBlobs()
    img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
    img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
    acx3 = blobs[-1].x
    acy3 = blobs[-1].y
    img1.drawText("ogp: autocalibrating", 10, 10, fontsize=50)
    img1.drawText(str(acx1), 10, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy1), 10, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx2), 40, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy2), 40, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx3), 70, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy3), 70, 75, color=(255, 255, 255), fontsize=20)
    img1.save(js.framebuffer)
    # Step 3: move left, then re-measure.
    d = 'l'
    ms = 50
    s.write('2')
    mov = acx(s, d, ms, acu, acd, acl, acr)
    mov.run()
    time.sleep(1)
    img1 = c.getImage()
    blobs = img1.findBlobs()
    img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
    img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
    acx4 = blobs[-1].x
    acy4 = blobs[-1].y
    img1.drawText("ogp: autocalibrating", 10, 10, fontsize=50)
    img1.drawText(str(acx1), 10, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy1), 10, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx2), 40, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy2), 40, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx3), 70, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy3), 70, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx4), 100, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy4), 100, 75, color=(255, 255, 255), fontsize=20)
    img1.save(js.framebuffer)
    # Step 4: move up, then re-measure.
    d = 'u'
    ms = 50
    s.write('6')
    mov = acx(s, d, ms, acu, acd, acl, acr)
    mov.run()
    time.sleep(1)
    img1 = c.getImage()
    blobs = img1.findBlobs()
    img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
    img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
    acx5 = blobs[-1].x
    acy5 = blobs[-1].y
    img1.drawText("ogp: autocalibrating", 10, 10, fontsize=50)
    img1.drawText(str(acx1), 10, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy1), 10, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx2), 40, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy2), 40, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx3), 70, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy3), 70, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx4), 100, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy4), 100, 75, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acx5), 130, 50, color=(255, 255, 255), fontsize=20)
    img1.drawText(str(acy5), 130, 75, color=(255, 255, 255), fontsize=20)
    img1.save(js.framebuffer)
    # Per-axis displacements caused by each movement step.
    cal1 = acx1 - acx2
    cal2 = acy2 - acy3
    cal3 = acx3 - acx4
    # BUG FIX: this line read `cal4 = acy4 = acy5` — a chained assignment
    # that clobbered acy4 and set cal4 to the raw acy5 value.  The
    # cal1..cal3 pattern shows a difference was intended.
    cal4 = acy4 - acy5
    time.sleep(2)
    wsh.write_message(wsh2, "x_" + str(cal1) + "_" + str(cal2) + "_" + str(cal3) + "_" + str(cal4))
def histo(self):
    """Capture one frame, find blobs, and report the newest blob's
    position and mean colour to the websocket client.

    Side effects: saves an annotated PNG and a thumbnail under
    /var/www/images/, writes the frame to the framebuffer, and appends a
    record to ``mySet`` (defined elsewhere in the module).
    """
    cam_mode = self.cam_mode
    js = self.js
    ms = self.ms
    w = self.w
    cent = 0   # centroid placeholder until a blob is found
    rgb1 = 0   # mean-colour placeholder until a blob is found
    c2 = self.c2
    wsh = self.wsh
    wsh2 = self.wsh2
    # NOTE(review): 's' is a module-level handle — presumably a serial
    # port and 's' a stop command; confirm against the rest of the module.
    s.write('s')
    # Camera selection: 3 = existing camera object, 1/2 = Pi camera capture
    # to a temp file (modes 1 and 2 are currently identical).
    if cam_mode == 3:
        img1 = c2.getImage()
    if cam_mode == 1:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    if cam_mode == 2:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    # NOTE(review): if cam_mode is not 1, 2 or 3, img1 is unbound and the
    # next line raises NameError.
    self.img1 = img1
    blobs = img1.findBlobs()
    if blobs:
        print "blob"
        x = self.x
        y = self.y
        p = self.p
        p = p + 1  # picture counter, used to number the saved files
        # Ring the most recent blob with a large white circle...
        img1.drawCircle((blobs[-1].x,blobs[-1].y),30,color=(255,255,255))
        # ...and mark the FIRST blob's centroid with a small one.
        # NOTE(review): blobs[0] here vs blobs[-1] everywhere else —
        # confirm this mix is intentional.
        img1.drawCircle((blobs[0].centroid()),10,color=(255,100,100))
        print blobs[-1].meanColor()
        rgb1 = blobs[-1].meanColor()
        cent = blobs[-1].centroid()
        # Save the annotated frame to the web root, numbered by p.
        pth1 = "/var/www/images/image"
        pth3 = ".png"
        pth = pth1 + str(p) + pth3
        print pth
        img1.save(pth)
        # Crop a fixed region and scale it down for the thumbnail.
        thumbnail = img1.crop(150,25,250,250)
        thumbnail = thumbnail.scale(20,20)
        thumb1 = "/var/www/images/thumbs/thumb"
        thumb3 = ".png"
        thumbpath = thumb1 + str(p) + thumb3
        print thumbpath
        thumbnail.save(thumbpath)
        self.p = p
        # Record this capture in the shared history set.
        mySet.add((p,x,y,w,cent,rgb1))
        self.mySet = mySet
        wshx = str(self.x)
        wshy = str(self.y)
        centroidx = int(cent[0])
        centroidy=int(cent[1])
        rcolor=rgb1[0]
        gcolor=rgb1[1]
        bcolor=rgb1[2]
        rcolor=int(rcolor)
        gcolor=int(gcolor)
        bcolor=int(bcolor)
        # Report colour, centroid and picture metadata to the client.
        wsh.write_message(wsh2, "rgb_" + str(rcolor)+"_"+str(gcolor)+"_"+str(bcolor))
        wsh.write_message(wsh2, "x_" + str(centroidx)+"_"+str(centroidy))
        img1.save(js.framebuffer)
        wsh.write_message(wsh2, "d_" + wshx + "_" + wshy + "_" + str(p) )
    else:
        # No blobs found: tell the client the frame was too dark.
        wshx = str(self.x)
        wshy = str(self.y)
        wsh.write_message(wsh2, wshx + " " + wshy + "dark")
        print "dark"
#!/usr/bin/python2.7 # Programma test Haar Features import picamera from SimpleCV import Image import time with picamera.PiCamera() as camera: camera.resolution = (640, 480) camera.start_preview() time.sleep(10) camera.capture('foto.jpg') foto = Image("foto.jpg") print(foto.listHaarFeatures()) trovati = foto.findHaarFeatures('face.xml') if trovati: for trovato in trovati: print "Found all coordinate : " + str(trovato.coordinates()) trovato.draw() else: print "No found" camera.stop_preview() foto.save('foto1.jpg') foto.show() time.sleep(10)
from __future__ import print_function
from SimpleCV import Image, Color, VirtualCamera, Display
import SimpleCV as scv

# NOTE(review): a commented-out VirtualCamera/Display experiment (red
# color-distance per frame) was removed here; see version control if needed.

# Keep only the lower band of the photo where the curb is expected, then
# report its mean colour and save the cropped region.
frame = Image('img/curb.JPG')
frame = frame.crop(0, 2 * frame.height / 3, frame.width, 5 * frame.height / 6)
print(frame.meanColor())
frame.save('img/curb_processed.jpg')
from SimpleCV import Image, DrawingLayer, Color
from time import sleep

# Load the source photo.
photo = Image("raspberry.jpg")

# Overlay layer sized to match the photo.
overlay = DrawingLayer((photo.width, photo.height))

# Draw a filled banner rectangle with a caption on top of it.
overlay.rectangle((50, 20), (250, 60), filled=True)
overlay.setFontSize(45)
overlay.text("Raspberry!", (50, 20), color=Color.WHITE)

# Merge the overlay into the image and write the result to disk.
photo.addDrawingLayer(overlay)
photo.applyLayers()
photo.save("raspberry-tittled.jpg")
#!/usr/bin/env python from SimpleCV import Color,Display,Image display = Display() while(display.isNotDone()): img = Image('example.jpg') barcode = img.findBarcode() #finds barcode data from image if(barcode is not None): #if there is some data processed barcode = barcode[0] result = str(barcode.data) print result #prints result of barcode in python shell barcode = [] #reset barcode data to empty set img.save(display) #shows the image on the screen
#! /usr/bin/python2.7 # Program to Test Haar Features import picamera from SimpleCV import Image import time with picamera.PiCamera() as camera: camera.resolution = (640, 480) camera.start_preview() time.sleep(2) camera.capture('foto.jpg') foto = Image("foto.jpg") print(foto.listHaarFeatures()) trovati = foto.findHaarFeatures('face.xml') if trovati: for trovato in trovati: print "Face Found at Coordinates : " + str(trovato.coordinates()) trovato.draw() else: print "Not Found" camera.stop_preview() foto.save('foto1.jpg') foto.show() time.sleep(10)
from SimpleCV import Image, Display import time displayObject = Display() img = Image('starry_night.png') print 'Initial: %s' % (img.getPixel(25, 25),) img.save(displayObject) time.sleep(3) hsv = img.toHSV() print 'HSV: %s' % (hsv.getPixel(25, 25),) hsv.save(displayObject) time.sleep(3) rgb = hsv.toRGB() print 'RGB: %s' % (rgb.getPixel(25, 25),) rgb.save(displayObject) time.sleep(3) gray = img.grayscale()
def run(self):
    """Auto-calibration pass for the camera rig.

    Tracks the last detected blob in an initial frame, then after a move
    in each of four directions (right, down, left, up), annotating every
    frame onto the framebuffer.  Finally reports the four positional
    deltas to the websocket client as ``x_<cal1>_<cal2>_<cal3>_<cal4>``.

    NOTE(review): relies on module-level ``s`` (command channel, written
    before each move), ``c`` (camera), ``acx`` (movement runner) and
    ``pinoir2`` — confirm their semantics against the rest of the module.
    """
    wsh = self.wsh
    js = self.js
    wsh2 = self.wsh2
    acu = int(1)
    acd = int(1)
    acl = int(1)
    acr = int(1)
    # Presumably triggers the IR camera capture of 'imagesmall.jpg' — confirm.
    irpic = pinoir2(js)

    points = []  # (x, y) of the tracked blob after each step, in order

    def _track(img1):
        # Mark the most recent blob, record its position, and redraw the
        # running list of calibration coordinates onto the framebuffer.
        blobs = img1.findBlobs()
        img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
        img1.drawCircle((blobs[-1].centroid()), 10, color=(255, 100, 100))
        points.append((blobs[-1].x, blobs[-1].y))
        img1.drawText("ogp: autocalibrating", 10, 10, fontsize=50)
        for i, (px, py) in enumerate(points):
            col = 10 + 30 * i  # columns 10, 40, 70, 100, 130
            img1.drawText(str(px), col, 50, color=(255, 255, 255), fontsize=20)
            img1.drawText(str(py), col, 75, color=(255, 255, 255), fontsize=20)
        img1.save(js.framebuffer)

    def _move(direction, code):
        # Issue a movement command, run the mover, then let the rig settle.
        s.write(code)
        mov = acx(s, direction, 50, acu, acd, acl, acr)
        mov.run()
        time.sleep(1)

    # Baseline frame, then one tracked frame per movement direction.
    _track(Image('imagesmall.jpg'))
    for direction, code in (('r', '4'), ('d', '9'), ('l', '2'), ('u', '6')):
        _move(direction, code)
        _track(c.getImage())

    (acx1, acy1), (acx2, acy2), (acx3, acy3), (acx4, acy4), (acx5, acy5) = points
    cal1 = acx1 - acx2
    cal2 = acy2 - acy3
    cal3 = acx3 - acx4
    # BUG FIX: original read "cal4 = acy4 = acy5" (a chained assignment);
    # the pattern of cal1-cal3 shows this must be the subtraction.
    cal4 = acy4 - acy5
    time.sleep(2)
    wsh.write_message(
        wsh2,
        "x_" + str(cal1) + "_" + str(cal2) + "_" + str(cal3) + "_" + str(cal4))
from SimpleCV import Image, Display, DrawingLayer, Color
from time import sleep

screen = Display()
photo = Image("test.jpg")

# Overlay layer sized to the photo: filled banner plus caption.
banner = DrawingLayer((photo.width, photo.height))
banner.rectangle((50, 20), (250, 60), filled=True)
banner.setFontSize(45)
banner.text("Raspberries!", (50, 20), color=Color.WHITE)

# Composite the overlay and show the result until the window closes.
photo.addDrawingLayer(banner)
photo.applyLayers()
photo.save(screen)
while not screen.isDone():
    sleep(0.1)
def histo(self):
    """Capture one frame, find blobs, and report the newest blob's
    position and mean colour to the websocket client.

    Side effects: saves an annotated PNG and a thumbnail under
    /var/www/images/, writes the frame to the framebuffer, and appends a
    record to ``mySet`` (defined elsewhere in the module).
    """
    cam_mode = self.cam_mode
    js = self.js
    ms = self.ms
    w = self.w
    cent = 0   # centroid placeholder until a blob is found
    rgb1 = 0   # mean-colour placeholder until a blob is found
    c2 = self.c2
    wsh = self.wsh
    wsh2 = self.wsh2
    # NOTE(review): 's' is a module-level handle — presumably a serial
    # port and 's' a stop command; confirm against the rest of the module.
    s.write('s')
    # Camera selection: 3 = existing camera object, 1/2 = Pi camera capture
    # to a temp file (modes 1 and 2 are currently identical).
    if cam_mode == 3:
        img1 = c2.getImage()
    if cam_mode == 1:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    if cam_mode == 2:
        with picamera.PiCamera() as camera:
            camera.resolution = (544, 288)
            camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
    # NOTE(review): if cam_mode is not 1, 2 or 3, img1 is unbound and the
    # next line raises NameError.
    self.img1 = img1
    blobs = img1.findBlobs()
    if blobs:
        print "blob"
        x = self.x
        y = self.y
        p = self.p
        p = p + 1  # picture counter, used to number the saved files
        # Ring the most recent blob with a large white circle...
        img1.drawCircle((blobs[-1].x, blobs[-1].y), 30, color=(255, 255, 255))
        # ...and mark the FIRST blob's centroid with a small one.
        # NOTE(review): blobs[0] here vs blobs[-1] everywhere else —
        # confirm this mix is intentional.
        img1.drawCircle((blobs[0].centroid()), 10, color=(255, 100, 100))
        print blobs[-1].meanColor()
        rgb1 = blobs[-1].meanColor()
        cent = blobs[-1].centroid()
        # Save the annotated frame to the web root, numbered by p.
        pth1 = "/var/www/images/image"
        pth3 = ".png"
        pth = pth1 + str(p) + pth3
        print pth
        img1.save(pth)
        # Crop a fixed region and scale it down for the thumbnail.
        thumbnail = img1.crop(150, 25, 250, 250)
        thumbnail = thumbnail.scale(20, 20)
        thumb1 = "/var/www/images/thumbs/thumb"
        thumb3 = ".png"
        thumbpath = thumb1 + str(p) + thumb3
        print thumbpath
        thumbnail.save(thumbpath)
        self.p = p
        # Record this capture in the shared history set.
        mySet.add((p, x, y, w, cent, rgb1))
        self.mySet = mySet
        wshx = str(self.x)
        wshy = str(self.y)
        centroidx = int(cent[0])
        centroidy = int(cent[1])
        rcolor = rgb1[0]
        gcolor = rgb1[1]
        bcolor = rgb1[2]
        rcolor = int(rcolor)
        gcolor = int(gcolor)
        bcolor = int(bcolor)
        # Report colour, centroid and picture metadata to the client.
        wsh.write_message(
            wsh2, "rgb_" + str(rcolor) + "_" + str(gcolor) + "_" + str(bcolor))
        wsh.write_message(wsh2, "x_" + str(centroidx) + "_" + str(centroidy))
        img1.save(js.framebuffer)
        wsh.write_message(wsh2, "d_" + wshx + "_" + wshy + "_" + str(p))
    else:
        # No blobs found: tell the client the frame was too dark.
        wshx = str(self.x)
        wshy = str(self.y)
        wsh.write_message(wsh2, wshx + " " + wshy + "dark")
        print "dark"
def get_bounding_box(keyword, url, filename):
    """Interactively collect a bounding box for the image at *url*.

    Shows the image in a SimpleCV Display overlaid with *keyword*; the
    user left-clicks two corners, then presses Enter to accept (Enter
    with no points selects the whole image).  Returns the bounding box
    in ORIGINAL image coordinates.

    Raises ``Skip`` (key 's') or ``BadImage`` (key 'b') — both defined
    elsewhere in the module, as is the ``scale`` helper used below.
    NOTE(review): *filename* is accepted but never used here — confirm.
    """
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary;
    # ratio is kept so accepted points can be mapped back to full size.
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim/800.0)
        print " resizing..."
        img = img.resize(w=int(w/ratio), h=int(h/ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))

    # text overlay: draw the keyword with a black outline (offsets in a
    # 5x5 neighbourhood) under a white center for readability.
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            # third click starts a fresh box
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        # live preview: a fixed box once both corners exist, otherwise a
        # rubber-band box following the cursor.
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)
        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)
        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                # scale the accepted corners back to original coordinates
                bb = disp.pointsToBoundingBox(scale(ratio, point1),
                                              scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                # Enter with no selection means "the whole image"
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb
        # crosshair following the mouse cursor
        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
#!/usr/bin/env python
# author: Powen Ko
from SimpleCV import Image, Display
from time import sleep
import urllib

# Google Charts QR code encoding the text "Hello world".
qr_url = "https://chart.googleapis.com/chart?chs=150x150&cht=qr&chl=Hello%20world&choe=UTF-8"
out_file = "out.jpg"

# Download the QR image, then show it on screen until the window closes.
urllib.urlretrieve(qr_url, out_file)
screen = Display()
qr_image = Image(out_file)
qr_image.save(screen)
while not screen.isDone():
    sleep(1)
from SimpleCV import Image, Display
from time import sleep

# Show a static image until the display window is closed.
window = Display()
picture = Image("raspberrypi.png")
picture.save(window)
while not window.isDone():
    sleep(1)
result = Image(output, cv2image=True) # return result return result #disp = Display((800,600)) disp = Display((1296, 972)) vals = [] last = (0, 0) # Load the video from the rpi #vc = VirtualCamera("/var/www/html/vid_files/video.h264","video") # Sometimes there is crud at the begining, buffer it out #for i in range(0,10): # img = vc.getImage() img = Image("pi3.jpg") img.save(disp) # Show the user a frame let them left click the center # of the "donut" and the right inner and outer edge # in that order. Press esc to exit the display while not disp.isDone(): test = disp.leftButtonDownPosition() if (test != last and test is not None): last = test vals.append(test) # 0 = xc yc # 1 = r1 # 2 = r2 # center of the "donut" Cx = vals[0][0] Cy = vals[0][1]