def fneighbourdhood_area(x):
    # Trackbar callback: the neighbourhood area must be an odd number >= 3
    global na, thres, windowTitle, tipo
    if x % 2 == 0:
        na = x + 1
    else:
        na = x
    if na == 0 or na == 1:
        na = 3
    if tipo == 0:
        thres = img
    elif tipo == 1:
        thres = cv2.adaptiveThreshold(img, maxValue, cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, na, cons)
    elif tipo == 2:
        thres = cv2.adaptiveThreshold(img, maxValue, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY, na, cons)
    # Mark the detected blobs on the thresholded image
    blobImg = Image(thres)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, thres)
def thresholding(x):
    global thres, value, maxValue, img, tipo
    imgO = img
    if x == 0:
        thres = None
        filtro = img
        value = 0
        maxValue = 255
        cv2.createTrackbar('Value', windowTitle, value, maxValue, fValue)
        cv2.createTrackbar('MaxValue', windowTitle, maxValue, maxValue, fMaxValue)
    elif x == 1:
        thres = cv2.THRESH_BINARY + cv2.THRESH_OTSU
    elif x == 2:
        # Otsu thresholding after a Gaussian blur to reduce noise
        thres = cv2.THRESH_BINARY + cv2.THRESH_OTSU
        img = cv2.GaussianBlur(img, (5, 5), 0)
    if x != 0:
        ret, filtro = cv2.threshold(img, value, maxValue, thres)
    tipo = x
    img = imgO
    # Mark the detected blobs on the filtered image
    blobImg = Image(filtro)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, filtro)
def adaptative_thresholding(x):
    global thres, na, cons, maxValue, tipo, img, windowTitle
    if x == 0:
        thres = img
        maxValue = 255
        na = 11
        cons = 2
        cv2.createTrackbar('Neighbourhood area (odds)', windowTitle, na, maxValue, fneighbourdhood_area)
        cv2.createTrackbar('Constant', windowTitle, -maxValue, maxValue, fConstant)
        cv2.createTrackbar('MaxValue', windowTitle, maxValue, maxValue, fMaxValue)
    elif x == 1:
        thres = cv2.adaptiveThreshold(img, maxValue, cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, na, cons)
    elif x == 2:
        thres = cv2.adaptiveThreshold(img, maxValue, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY, na, cons)
    tipo = x
    # Mark the detected blobs on the thresholded image
    blobImg = Image(thres)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, thres)
def addText(fileName, text):
    image = Image(fileName)
    draw = DrawingLayer((IMAGE_WIDTH, IMAGE_HEIGHT))
    draw.rectangle((8, 8), (121, 18), filled=True, color=Color.YELLOW)
    draw.setFontSize(20)
    draw.text(text, (10, 9), color=Color.BLUE)
    image.addDrawingLayer(draw)
    image.save(fileName)
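# Usage sketch (assumption): IMAGE_WIDTH / IMAGE_HEIGHT are module-level globals that
# should match the size of the image being labelled; the file name and label below
# are hypothetical.
IMAGE_WIDTH, IMAGE_HEIGHT = 640, 480
addText("snapshot.png", "camera 1")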
def face_recognize(filename):
    from SimpleCV import Image, Display, DrawingLayer

    image = Image(filename)
    faces = image.findHaarFeatures('face.xml')
    if faces:
        for face in faces:
            face_layer = DrawingLayer((image.width, image.height))
            face_box = face_layer.centeredRectangle(face.coordinates(),
                                                    (face.width(), face.height()))
            image.addDrawingLayer(face_layer)
        image.applyLayers()
        image.save(filename)
        print('Detected {} face(s)'.format(len(faces)))
    else:
        print('No face detected')
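# Usage sketch (assumption): 'face.xml' refers to the frontal-face Haar cascade that
# ships with SimpleCV; the image path below is hypothetical. The annotated result is
# written back over the input file.
face_recognize('group_photo.jpg')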
def drawImage():
    # Load map
    d = Display((1240, 820), title="London Map - Scotland Yard")
    lMap = Image("maps/map.jpg")
    # Check position from players
    # See corresponding pixel in list
    # Draw circle for players
    circlesLayer = DrawingLayer((lMap.width, lMap.height))
    circlesLayer.circle((191, 44), 20, color=Color.BLACK, filled=True, alpha=255)
    lMap.addDrawingLayer(circlesLayer)
    # Display
    lMap.applyLayers()
    lMap.save(d)
    '''Later create "draw possibilities" areas on the map for the thief'''
def fMaxValue(x):
    global maxValue, value, thres, img, filtro
    maxValue = x
    if thres is None:
        filtro = img
    else:
        ret, filtro = cv2.threshold(img, value, maxValue, thres)
    blobImg = Image(filtro)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, filtro)
def fMaxValue(x):
    global maxValueAdaptative, windowTitle, thresAdaptative, img, naAdaptative, consAdaptative, filtro
    maxValueAdaptative = x
    if tipoAdaptative == 0:
        thresAdaptative = img
    elif tipoAdaptative == 1:
        thresAdaptative = cv2.adaptiveThreshold(filtro, maxValueAdaptative, cv2.ADAPTIVE_THRESH_MEAN_C,
                                                cv2.THRESH_BINARY, naAdaptative, consAdaptative)
    elif tipoAdaptative == 2:
        thresAdaptative = cv2.adaptiveThreshold(filtro, maxValueAdaptative, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                cv2.THRESH_BINARY, naAdaptative, consAdaptative)
    blobImg = Image(thresAdaptative)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, thresAdaptative)
def fConstant(x):
    global cons, thres, windowTitle, tipo, maxValue, na, img
    # A positive constant pushes the result toward white, a negative one toward black
    cons = x
    if tipo == 0:
        thres = img
    elif tipo == 1:
        thres = cv2.adaptiveThreshold(img, maxValue, cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, na, cons)
    elif tipo == 2:
        thres = cv2.adaptiveThreshold(img, maxValue, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY, na, cons)
    blobImg = Image(thres)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, thres)
def thresholding(x):
    global thres, value, maxValue, img, filtro
    if x == 0:
        thres = None
        filtro = img
        value = 127
        maxValue = 255
        cv2.createTrackbar('Value', windowThres, value, maxValue, fValue)
        cv2.createTrackbar('MaxValue', windowThres, maxValue, maxValue, fMaxValue)
    elif x == 1:
        thres = cv2.THRESH_BINARY
    elif x == 2:
        thres = cv2.THRESH_BINARY_INV
    elif x == 3:
        thres = cv2.THRESH_TRUNC
    elif x == 4:
        thres = cv2.THRESH_TOZERO
    elif x == 5:
        thres = cv2.THRESH_TOZERO_INV
    if x != 0:
        ret, filtro = cv2.threshold(img, value, maxValue, thres)
    blobImg = Image(filtro)
    blobImg = blobImg.rotate90()
    invImg = blobImg.invert()
    blobs = invImg.findBlobs()
    for blob in blobs:
        invImg.dl().circle(blob.coordinates(), 3, Color.RED, filled=True)
    blobImg.addDrawingLayer(invImg.dl())
    blobs.show(color=Color.GREEN, width=1)
    cv2.imshow(windowTitle, filtro)
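# Minimal wiring sketch (assumption): the trackbar callbacks above expect a greyscale
# image in the global `img`, window titles in `windowTitle` / `windowThres`, and a
# companion `fValue` callback that is not shown in this snippet. Roughly:
import cv2

img = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)  # hypothetical file name
windowTitle = 'Thresholding demo'
windowThres = windowTitle
thres = None
value, maxValue = 127, 255
filtro = img

cv2.namedWindow(windowTitle)
# 0 = pass-through, 1..5 = the cv2.THRESH_* modes handled by thresholding()
cv2.createTrackbar('Type', windowTitle, 0, 5, thresholding)
cv2.imshow(windowTitle, img)
cv2.waitKey(0)
cv2.destroyAllWindows()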
from SimpleCV import Image, Display, DrawingLayer, Color
from time import sleep

myDisplay = Display()
raspberryImage = Image("test.jpg")

myDrawingLayer = DrawingLayer((raspberryImage.width, raspberryImage.height))
myDrawingLayer.rectangle((50, 20), (250, 60), filled=True)
myDrawingLayer.setFontSize(45)
myDrawingLayer.text("Raspberries!", (50, 20), color=Color.WHITE)

raspberryImage.addDrawingLayer(myDrawingLayer)
raspberryImage.applyLayers()
raspberryImage.save(myDisplay)

while not myDisplay.isDone():
    sleep(0.1)
import time
from SimpleCV import Image

head = Image('head.png')
amgothic = Image('amgothic.png')
scream = Image('scream.png')

# Blit the head onto amgothic's default drawing layer
amgothic.dl().blit(head, (175, 110))
amgothic.show()
time.sleep(2)

# Attach the same drawing layer to the second image
layer = amgothic.getDrawingLayer()
scream.addDrawingLayer(layer)
scream.show()
time.sleep(2)

print amgothic._mLayers
print scream._mLayers

# Drawing on the shared layer shows up on both images
layer.blit(head, (75, 220))
amgothic.show()
time.sleep(2)
scream.show()
time.sleep(5)
def detectChargingStation(image_file):
    debug = False
    myColor1 = (8, 33, 64)
    myColor2 = (70, 80, 100)
    original = Image(image_file)
    only_station = onlyBlueColor(original, myColor1)

    # Different findBlobs
    maskMean = original.hueDistance(color=(200, 160, 150))
    mask = only_station.hueDistance(color=myColor1).binarize()
    meanColor = (round(((maskMean.meanColor()[0] + maskMean.meanColor()[1] + maskMean.meanColor()[2]) / 3) * 10000) / 10000)
    blobs = original.findBlobsFromMask(mask, minsize=400)
    if meanColor > 190:
        return 6

    # print "Number of blobs found", len(blobs)
    try:
        blobs.image = original
    except Exception:
        # Nothing found with the first colour: retry with the second one
        only_station = onlyBlueColor(original, myColor2)
        mask = only_station.hueDistance(color=myColor2).binarize()
        blobs = original.findBlobsFromMask(mask, minsize=400)
        blobs.image = original

    station_blob = chooseBestBlobCosine(blobs)
    station_blob.drawMinRect(color=Color.RED)
    centroidX = station_blob.minRectX()
    centroidY = station_blob.minRectY()

    # Have to find out which part of the screen the centroid is in
    maxX = original.getNumpy().shape[0]
    maxY = original.getNumpy().shape[1] + 100

    if debug:
        centroidLayer = DrawingLayer((maxX, maxY))
        centroidLayer.line((0, (1 / 3.0) * maxY), (maxX, (1 / 3.0) * maxY), color=Color.GREEN, width=2)
        centroidLayer.line((0, (2 / 3.0) * maxY), (maxX, (2 / 3.0) * maxY), color=Color.GREEN, width=2)
        centroidLayer.circle((int(centroidX), int(centroidY)), color=Color.GREEN, radius=5, filled=True)
        original.addDrawingLayer(centroidLayer)
        original.applyLayers()
        mask.save("binarizeMask.png")
        original.save("blobs.png")
        only_station.save("blueFilter.png")
        # print "Coordinates of centroid are " + str(centroidX) + ", " + str(centroidY)
        # print "Coordinates of max are " + str(maxX) + ", " + str(maxY)

    # if station_blob.width() * station_blob.height() < 4000:
    #     return 2
    blobArea = station_blob.width() * station_blob.height()
    if blobArea < 10000:
        return 2

    return chargingStationLocation_New(maxX, maxY, centroidX, centroidY, 200,
                                       station_blob.width() / float(station_blob.height()),
                                       blobArea)
(red, green, blue) = pruebalunar.splitChannels(False)  # split into RGB channels
red.save("fotoenrojo.png")
green.save("fotoenverde.png")
blue.save("fotoenazul.png")

# Blob (mole) detection is only done in greyscale
prueba69 = green.binarize()  # binarize: the blobs show up better this way
mancha = prueba69.findBlobs()  # find the blobs (moles)
mancha.show(Color.YELLOW)
prueba69.save("porfavorguardate3.png")

invertidos = green.invert()  # invert the image so the blobs appear dark in the photo
blob = invertidos.findBlobs()  # check whether the blobs are found in the inverted photo
blob.show(width=2)
pruebalunar.addDrawingLayer(invertidos.dl())
pruebalunar.show()
pruebalunar.save("porfavorguardate2.png")  # save the image

# Find blobs by a specific colour; here:
brown_distance = green.colorDistance(Color.BLACK).invert()  # searching for black, so use Color.BLACK
blobs2_ = brown_distance.findBlobs()
blobs2_.draw(color=Color.PUCE, width=3)  # same example as in the guide
brown_distance.show()
green.addDrawingLayer(brown_distance.dl())
green.show()
green.save("Porfavorguaradte5.png")

# lineas = pruebalunar.findLines()
# lineas.draw(width=3)
#!/usr/bin/env python
# coding=utf-8
#
# Author: Archer Reilly
# Desc: Find object blobs by colour
# File: FindBlobs.py
# Date: 30/July/2016
#
from SimpleCV import Color, Image

# img = Image('/home/archer/Downloads/Chapter 8/mandms-dark.png')
img = Image('/home/archer/Downloads/1185391864.jpg')

# blue_distance = img.colorDistance(Color.BLUE).invert()
blue_distance = img.colorDistance(Color.BLACK).invert()

blobs = blue_distance.findBlobs(minsize=15)
blobs.draw(color=Color.RED, width=3)

blue_distance.show()
img.addDrawingLayer(blue_distance.dl())
img.save('res.png')
img.show()
from SimpleCV import Color, Image img = Image("ex23b.png") #Open ex23b.png too :) colorDist = img.colorDistance(Color.BLUE).invert() blobs = colorDist.findBlobs() # Draw a BLACK border at blobs blobs.draw(color=Color.BLACK, width=3) # The thing is at this line before img.addDrawingLayer(colorDist.dl()) img.show()
# object.draw(color=Color.PUCE, width=2)
# blue_distance.show()
# blue_distance.save("/dev/shm/p3.png")

corners = img.findCorners()
statusWin.clear()
statusWin.addstr(1, 1, str(object.meanColor()))
num_corners = len(corners)
statusWin.addstr(2, 1, "Corners Found:" + str(num_corners))
corners.draw()
img.addDrawingLayer(object.dl())

# circle tracking
# dist = img.colorDistance(Color.BLACK).dilate(2)
# segmented = dist.stretch(200, 255)
blobs = img.findBlobs()
if blobs:
    circles = blobs.filter([b.isCircle(0.2) for b in blobs])
    if circles:
        img.drawCircle((circles[-1].x, circles[-1].y), circles[-1].radius(), Color.BLUE, 3)
def fancify():
    if request.method == 'POST':
        print request.data
        cur_request = json.loads(request.data)
    else:
        # cur_request = """{"url": "", "debug":true}"""
        cur_request = """{"url": "http://localhost/images/scrubs.jpg", "debug":true}"""
        # cur_request = """{"url": "http://www.newrichstrategies.com/wp-content/uploads/2012/03/How-to-Find-Good-People-in-Your-Life.jpg", "debug":false}"""
        # cur_request = """{"url": "http://greenobles.com/data_images/frank-lampard/frank-lampard-02.jpg", "debug":true}"""
        # cur_request = """{"url": "http://www.billslater.com/barack__obama.jpg"}"""
        # cur_request = """{"url": "http://celebrityroast.com/wp-content/uploads/2011/01/arnold-schwarzenegger-body-building.jpg", "debug":false}"""
        # cur_request = """{"url": "http://face2face.si.edu/.a/6a00e550199efb8833010536a5483e970c-800wi", "debug":true}"""
        # cur_request = """{"url": "http://collider.com/uploads/imageGallery/Scrubs/scrubs_cast_image__medium_.jpg", "debug":false}"""
        # cur_request = """{"url": "http://localhost/images/Kevin_Bacon_at_the_2010_SAG_Awards.jpg", "debug":false}"""
        # cur_request = """{"url": "http://cdn02.cdn.justjared.com/wp-content/uploads/headlines/2012/02/anna-faris-oscars-red-carpet-2012.jpg", "debug":true}"""
        # cur_request = """{"url": "http://www.viewzone.com/attractive.female.jpg", "debug":true}"""
        cur_request = json.loads(cur_request)

    print cur_request["url"]
    img = Image(str(cur_request["url"]))
    img = img.scale(2.0)
    debug = True
    # if "debug" in cur_request:
    #     debug = cur_request["debug"]

    chosen_faces = []
    faces = img.findHaarFeatures(face_cascade)
    if faces is not None:
        for face in faces:
            face_features = []
            invalid_face = False
            face_rect = Rect(face.x - (face.width() / 2), face.y - (face.height() / 2),
                             face.width(), face.height())
            for chosen_face in chosen_faces:
                if face_rect.colliderect(chosen_face):
                    invalid_face = True
                    break
            if invalid_face:
                break

            nose = None
            mouth = None
            left_eye = None
            right_eye = None

            cur_face = img.crop(face.x, face.y, face.width(), face.height(), centered=True)
            # cur_face = face.crop()
            noses = cur_face.findHaarFeatures(nose_cascade)
            mouths = cur_face.findHaarFeatures(mouth_cascade)
            eyes = cur_face.findHaarFeatures(eye_cascade)
            face_left_edge = face.x - (face.width() / 2)
            face_top_edge = face.y - (face.height() / 2)

            # Pick the nose closest to where a nose should sit within the face box
            if noses is not None:
                nose = noses[0]
                nose_dist = (abs(nose.x - (face.width() / 2)) +
                             abs(nose.y - (face.height() * 5 / 9)) +
                             abs(nose.width() - (face.width() / 4)))
                for cur_nose in noses:
                    cur_dist = (abs(cur_nose.x - (face.width() / 2)) +
                                abs(cur_nose.y - (face.height() * 5 / 9)) +
                                abs(cur_nose.width() - (face.width() / 4)))
                    if cur_dist < nose_dist:
                        nose = cur_nose
                        nose_dist = cur_dist
            if nose and (nose.y < (face.height() / 3)):
                nose = None

            if nose and mouths is not None:
                mouth = mouths[0]
                mouth_dist = abs(mouth.x - nose.x) + (abs(mouth.y - (face.height() * 4 / 5)) * 2)
                for cur_mouth in mouths:
                    cur_dist = abs(cur_mouth.x - nose.x) + (abs(cur_mouth.y - (face.height() * 4 / 5)) * 2)
                    if (cur_dist < mouth_dist) and (cur_mouth.y > nose.y):
                        mouth = cur_mouth
                        mouth_dist = cur_dist

            if nose and eyes:
                right_eye = eyes[0]
                right_eye_dist = (abs(right_eye.x - (3 * face.width() / 4)) * 2 +
                                  abs(right_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                  abs(right_eye.width() - (face.width() / 3)))
                for cur_eye in eyes:
                    cur_right_dist = (abs(cur_eye.x - (3 * face.width() / 4)) +
                                      abs(cur_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                      abs(cur_eye.width() - (face.width() / 3)))
                    if cur_right_dist <= right_eye_dist:  # and (cur_eye.y < nose.y):
                        right_eye = cur_eye
                        right_eye_dist = cur_right_dist

            if nose and right_eye and (((right_eye.y - (right_eye.height() / 2)) > nose.y) or
                                       (right_eye.x < nose.x)):
                print "Culling right_eye"
                right_eye = None

            if nose and mouth:
                chosen_faces.append(face_rect)
                x_face = face.x - (face.width() / 2)
                y_face = face.y - (face.height() / 2)
                x_nose = nose.x - (nose.width() / 2)
                y_nose = nose.y - (nose.height() / 2)

                # Setup TopHat Image
                scale_factor = face.width() / 175.0
                cur_hat = hat.copy()
                cur_hat = cur_hat.scale(scale_factor)
                cur_hat_mask = hat_mask.copy()
                cur_hat_mask = cur_hat_mask.scale(scale_factor)
                cur_hat_mask = cur_hat_mask.createAlphaMask(hue_lb=0, hue_ub=100)

                # Calculate the hat position
                if (face.y - face.height() / 2) > cur_hat.height:
                    x_hat = face.x - (cur_hat.width / 2)
                    y_hat = face.y - (face.height() * 7 / 10) - (cur_hat.height / 2)
                    img = img.blit(cur_hat, pos=(x_hat, y_hat), alphaMask=cur_hat_mask)

                if mouth:
                    x_mouth = mouth.x - (mouth.width() / 2)
                    y_mouth = mouth.y - (mouth.height() / 2)

                    # Setup Mustache Image
                    cur_stache = stache.copy()
                    scale_factor = ((nose.width() / 300.0) + (face.width() / 600.0)) / 2.0
                    cur_stache = cur_stache.scale(scale_factor)
                    stache_mask = cur_stache.createAlphaMask(hue_lb=0, hue_ub=10).invert()

                    # Calculate the mustache position
                    bottom_of_nose = y_nose + (nose.height() * 4 / 5)
                    top_of_mouth = y_mouth
                    # if top_of_mouth > bottom_of_nose:
                    #     top_of_mouth = bottom_of_nose
                    y_must = y_face + ((bottom_of_nose + top_of_mouth) / 2) - (cur_stache.height / 2)
                    middle_of_nose = nose.x
                    middle_of_mouth = mouth.x
                    x_must = x_face + ((middle_of_nose + middle_of_mouth) / 2) - (cur_stache.width / 2)

                if right_eye:
                    x_right_eye = right_eye.x - (right_eye.width() / 2)
                    y_right_eye = right_eye.y - (right_eye.height() / 2)

                    # Setup Monocle Image
                    cur_mono = monocle.copy()
                    scale_factor = ((right_eye.width() / 65.0) + (face.width() / 200.0)) / 2.0
                    cur_mono = cur_mono.scale(scale_factor)
                    mono_mask = cur_mono.createAlphaMask(hue_lb=0, hue_ub=100).invert()

                    # Calculate Monocle Position
                    x_mono = x_face + x_right_eye
                    y_mono = y_face + y_right_eye
                    img = img.blit(cur_mono, pos=(x_mono, y_mono), alphaMask=mono_mask)

                img = img.blit(cur_stache, pos=(x_must, y_must), alphaMask=stache_mask)

                if debug:
                    noselayer = DrawingLayer((img.width, img.height))
                    nosebox_dimensions = (nose.width(), nose.height())
                    center_point = (face.x - (face.width() / 2) + nose.x,
                                    face.y - (face.height() / 2) + nose.y)
                    nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions, width=3)
                    img.addDrawingLayer(noselayer)
                    img = img.applyLayers()
            else:
                print "Face culled:"
                if not nose:
                    print "  No Nose"
                if not mouth:
                    print "  No mouth"
                if not right_eye:
                    print "  No right eye"
                print

                if debug:
                    face_left_edge = face.x - (face.width() / 2)
                    face_top_edge = face.y - (face.height() / 2)

                    facelayer = DrawingLayer((img.width, img.height))
                    facebox_dimensions = (face.width(), face.height())
                    center_point = (face.x, face.y)
                    facebox = facelayer.centeredRectangle(center_point, facebox_dimensions, Color.BLUE)
                    img.addDrawingLayer(facelayer)

                    if noses:
                        for nose in noses:
                            noselayer = DrawingLayer((img.width, img.height))
                            nosebox_dimensions = (nose.width(), nose.height())
                            center_point = (face.x - (face.width() / 2) + nose.x,
                                            face.y - (face.height() / 2) + nose.y)
                            nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions)
                            img.addDrawingLayer(noselayer)

                    if mouths:
                        for mouth in mouths:
                            mouthlayer = DrawingLayer((img.width, img.height))
                            mouthbox_dimensions = (mouth.width(), mouth.height())
                            center_point = (face.x - (face.width() / 2) + mouth.x,
                                            face.y - (face.height() / 2) + mouth.y)
                            mouthbox = mouthlayer.centeredRectangle(center_point, mouthbox_dimensions, Color.GREEN)
                            img.addDrawingLayer(mouthlayer)

                    if eyes:
                        for right_eye in eyes:
                            right_eyelayer = DrawingLayer((img.width, img.height))
                            right_eyebox_dimensions = (right_eye.width(), right_eye.height())
                            right_eye_center_point = (face_left_edge + right_eye.x,
                                                      face_top_edge + right_eye.y)
                            right_eyebox = right_eyelayer.centeredRectangle(right_eye_center_point,
                                                                            right_eyebox_dimensions)
                            img.addDrawingLayer(right_eyelayer)

                    img = img.applyLayers()

    img = img.scale(0.5)
    w_ratio = img.width / 800.0
    h_ratio = img.height / 600.0
    if h_ratio > 1.0 or w_ratio > 1.0:
        if h_ratio > w_ratio:
            img = img.resize(h=600)
        else:
            img = img.resize(w=800)

    output = StringIO.StringIO()
    img.getPIL().save(output, format="JPEG")  # , quality=85, optimize=True)
    img_contents = output.getvalue()
    mimetype = "image/jpeg"

    return app.response_class(img_contents, mimetype=mimetype, direct_passthrough=False)
import time
import numpy as np
from SimpleCV import Image, Display, DrawingLayer, Color

img = Image('stenramchiffontest.jpg')
disp = Display()
img_blurred = img.gaussianBlur((101, 101))

# Make a mask
mask_size = 80
mask = Image((4 * mask_size, 4 * mask_size))
dl = DrawingLayer((4 * mask_size, 4 * mask_size))

# Draw a filled circle in the mask
dl.circle((2 * mask_size, 2 * mask_size), mask_size, filled=True, color=Color.WHITE)
mask.addDrawingLayer(dl)
mask = mask.applyLayers()

# Blur the mask to get progressive blur
blurred_mask = mask.gaussianBlur((101, 101))

t0 = time.time()

n = 3
img_ = img.gaussianBlur((n, n))
old_n = 3
isDown = False
mouseRawXOld = 0
x = 0

while not disp.isDone():
    t = time.time()
    dt = t - t0
    t0 = t
from SimpleCV import Image, DrawingLayer, Color, Display

d = Display((1240, 820), title="London Map - Scotland Yard")
lMap = Image("C:\\Users\\flavio\\Documents\\Python\\Scotland Yard\\maps\\map.jpg")

circlesLayer = DrawingLayer((lMap.width, lMap.height))
circlesLayer.circle((191, 44), 20, color=Color.ORANGE, filled=True, alpha=255)

lMap.addDrawingLayer(circlesLayer)
lMap.applyLayers()
lMap.save(d)
def get_bounding_box(keyword, url, filename):
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim / 800.0)
        print " resizing..."
        img = img.resize(w=int(w / ratio), h=int(h / ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))

    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()

        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb

        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        # drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
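# Usage sketch (assumption): Skip and BadImage are small control-flow exceptions raised
# by the keyboard shortcuts above, and scale() maps display coordinates back to the
# original image size; minimal hypothetical versions are shown so the call is runnable
# in spirit. The keyword, URL, and file name below are hypothetical.
class Skip(Exception):
    pass

class BadImage(Exception):
    pass

def scale(ratio, point):
    # map a point from the resized display back to original image coordinates
    return (int(point[0] * ratio), int(point[1] * ratio))

try:
    bb = get_bounding_box('cat', 'http://example.com/cat.jpg', 'cat_0001.jpg')
    print 'bounding box:', bb
except Skip:
    print 'skipped'
except BadImage:
    print 'marked as bad image'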
if __name__ == '__main__':
    cam = Camera(0)
    img = Image()
    samples = 0
    coordinates = redCoords = (0, 0)
    text = " "
    while True:
        img = cam.getImage()
        # Look for the red dot in the current frame
        tmp = findRedDot(img)
        if tmp != None:
            coordinates = (coordinates[0] + tmp[0][0], coordinates[1] + tmp[0][1])
            samples += 1
        if samples == 10:
            # Average the last 10 detections to smooth the position
            samples = 0
            coordinates = (coordinates[0] / 10, coordinates[1] / 10)
            text = str(coordinates)
            redCoords = coordinates
            coordinates = (0, 0)
        # Draw a small green circle at the averaged red-dot position
        redcircle = DrawingLayer((img.width, img.height))
        redcircle.circle(redCoords, 5, filled=True, color=(0, 255, 0))
        img.addDrawingLayer(redcircle)
        img.applyLayers()
        img.drawText(text)
        img.show()
from SimpleCV import Color, Image
import time

img = Image("mandms.jpg")
blue_distance = img.colorDistance(Color.BLUE).invert()

blobs = blue_distance.findBlobs()
blobs.draw(color=Color.PUCE, width=3)
blue_distance.show()

img.addDrawingLayer(blue_distance.dl())
img.show()
time.sleep(10)
def similarity(self, matrix):
    """
    Calculate similarity between this pattern matrix and another matrix.

    This calculates the cosine distance between pattern vectors for all
    the regions. Similar shapes should have similar vector patterns in
    all regions.
    """
    my_print = self.fingerprint()
    their_print = matrix.fingerprint(regions=self.get_fingerprint_regions())
    if len(my_print) != len(their_print):
        raise Exception("Fingerprint lengths don't match")
    out = []
    for i in range(0, len(my_print)):
        out.append(self.cosine_similarity(my_print[i][1], their_print[i][1]))
    print "Similarity: %s" % (repr(out))
    return sum(out) / float(len(out))


if __name__ == "__main__":
    img = Image(sys.argv[-1])
    dl = DrawingLayer((img.width, img.height))
    img.addDrawingLayer(dl)
    patterns = match_patterns(img, dl)
    img.save('outline2.png')
    for row in patterns:
        print "\t".join(row)
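# Sketch (assumption): cosine_similarity is not shown in this snippet; a minimal
# implementation consistent with the docstring above would compare two equal-length
# pattern vectors like this (in the class it would be a method rather than a free
# function).
import math

def cosine_similarity(u, v):
    # cosine of the angle between vectors u and v
    dot = sum(a * b for a, b in zip(u, v))
    norm_u = math.sqrt(sum(a * a for a in u))
    norm_v = math.sqrt(sum(b * b for b in v))
    if norm_u == 0 or norm_v == 0:
        return 0.0
    return dot / (norm_u * norm_v)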