def query_blob(id):
    """Crop a rectangle (taken from the request form's x/y/width/height)
    out of the stored image static/img/<id>.jpg and classify it with the
    configured scikit-learn model.

    Returns a JSON response with a `prediction` key; the prediction is the
    empty string when the crop fails or no model is configured.
    """
    # NOTE(review): `id` shadows the builtin, but it is part of the route
    # signature, so it is kept unchanged.
    blob = Image("static\\img\\%d.jpg" % id).crop(
        int(request.form["x"]),
        int(request.form["y"]),
        int(request.form["width"]),
        int(request.form["height"])
    )
    if blob and "SK_MODEL" in app.config:
        # Scale the longer side down to PATCH_SIZE, then pad the shorter side
        # so the model always sees a square PATCH_SIZE x PATCH_SIZE patch.
        if blob.height > blob.width:
            blob = blob.resize(h=app.config["PATCH_SIZE"])
        else:
            blob = blob.resize(w=app.config["PATCH_SIZE"])
        blob = blob.embiggen((app.config["PATCH_SIZE"], app.config["PATCH_SIZE"]))
        np_img = blob.getGrayNumpy().transpose().reshape(-1)
        # scikit-learn estimators expect a 2-D (n_samples, n_features) array;
        # wrap the flattened patch as a single-row matrix instead of passing
        # a bare 1-D vector.
        pred = labels.inverse_transform(sk_model.predict(np_img.reshape(1, -1)))[0]
        return jsonify(prediction=pred)
    else:
        return jsonify(prediction="")
class Fish():
    """A drawable fish sprite: keeps a pristine image/mask pair and derives a
    scaled, possibly horizontally flipped copy that is blitted onto a parent
    canvas."""

    def __init__(self, image_url, mask_url):
        # Positions are (x, y) tuples in the parent canvas' coordinate system.
        self.position = (10, 10)
        self.last_position = (0, 0)
        self.draw_position = (0, 0)
        self.direction = "left"
        # Pristine originals; the drawable copies below are derived from them.
        # The mask is inverted so the fish pixels (not the background) survive the blit.
        self.orig_image = Image(image_url)
        self.orig_mask = Image(mask_url).invert()
        self.draw_image = self.orig_image.copy()
        self.draw_mask = self.orig_mask.copy()

    def update(self, x, y, w, h):
        # Refresh facing direction, then move and rescale the sprite to w x h.
        self.update_dir()
        self.position = (x, y)
        # NOTE(review): these resizes start from orig_image/orig_mask, which
        # discards the horizontal flip update_dir() may have just applied to
        # draw_image/draw_mask — confirm whether that is intended.
        self.draw_image = self.orig_image.resize(w=w, h=h)
        self.draw_mask = self.orig_mask.resize(w=w, h=h)
        if self.direction == "left":
            self.draw_position = self.position
        elif self.direction == "right":
            # TODO: setup offsets here for image when facing another direction
            self.draw_position = (self.position[0] + 0, self.position[1] + 0)
        pass

    def update_dir(self):
        # Toggle the facing direction when horizontal movement exceeds the threshold.
        hor_change = self.position[0] - self.last_position[0]
        # if the difference is more than 10 pixels
        if abs(hor_change) > 5:
            # TODO: can optimise this slightly by checking if == left && horchange is positive
            # (so doesnt flip it every time over 10 pixels in the same direction)
            if self.direction == "left":
                self.direction = "right"
                self.draw_image = self.orig_image.flipHorizontal().copy()
                self.draw_mask = self.orig_mask.flipHorizontal().copy()
            elif self.direction == "right":
                self.direction = "left"
                self.draw_image = self.orig_image.copy()
                self.draw_mask = self.orig_mask.copy()
        self.last_position = self.position

    def draw(self, parent):
        # Blit the current sprite onto the parent's canvas at draw_position.
        #self.draw_image.save(canvas)
        print self.draw_image
        print self.draw_position
        parent.canvas = parent.canvas.blit(self.draw_image, pos=self.draw_position, alphaMask=self.draw_mask)
def query_blob(id):
    """Crop a region (given by the request form's x/y/width/height) from the
    stored image static/img/<id>.jpg and classify it with the configured
    scikit-learn model; returns a JSON {"prediction": ...} response, with an
    empty prediction when the crop fails or no model is configured."""
    blob = Image("static\\img\\%d.jpg" % id).crop(int(request.form['x']),
                                                  int(request.form['y']),
                                                  int(request.form['width']),
                                                  int(request.form['height']))
    if blob and 'SK_MODEL' in app.config:
        # Shrink the longer side to PATCH_SIZE, then pad out to a square
        # patch so the model's input size is constant.
        if blob.height > blob.width:
            blob = blob.resize(h=app.config['PATCH_SIZE'])
        else:
            blob = blob.resize(w=app.config['PATCH_SIZE'])
        blob = blob.embiggen(
            (app.config['PATCH_SIZE'], app.config['PATCH_SIZE']))
        # Flatten the grayscale patch into a 1-D feature vector.
        np_img = blob.getGrayNumpy().transpose().reshape(-1)
        pred = labels.inverse_transform(sk_model.predict(np_img))[0]
        return jsonify(prediction=pred)
    else:
        return jsonify(prediction='')
def parse_frame(img):
    """
    Parses a SimpleCV image object of a frame from Super Hexagon.
    Returns a ParsedFrame object containing selected features, or None when
    the player cursor cannot be located.
    """
    # helper image size variables
    w, h = img.size()
    midx, midy = w/2, h/2

    # Create normalized images for targeting objects in the foreground or background.
    # (This normalization is handy since Super Hexagon's colors are inverted for some parts of the game)
    # fg_img = foreground image (bright walls, black when binarized)
    # bg_img = background image (bright space, black when binarized)
    fg_img = img
    if sum(img.binarize().getPixel(midx, midy)) == 0:
        fg_img = img.invert()
    bg_img = fg_img.invert()

    # We need to close any gaps around the center wall so we can detect its containing blob.
    # The gaps are resulting artifacts from video encoding.
    # The 'erode' function does this by expanding the dark parts of the image.
    bimg = bg_img.binarize()
    bimg = black_out_GUI(bimg)
    blobs = bimg.findBlobs()

    cursor_blob = get_cursor_blob(blobs, h, midx, midy)
    if cursor_blob:
        # Cursor position in polar coordinates around the screen center,
        # with the angle converted to integer degrees in [0, 360).
        cursor_point = map(int, cursor_blob.centroid())
        cursor_r, cursor_angle = cart_to_polar(cursor_point[0] - midx, midy - cursor_point[1])
        cursor_angle = int(cursor_angle * 360 / (2 * pi))
        cursor_angle = 180 - cursor_angle
        # BUGFIX: the original did `a += 360` here — `a` is undefined, so any
        # negative angle raised NameError instead of being wrapped.
        if cursor_angle < 0:
            cursor_angle += 360
        bimg = black_out_center(bimg, cursor_r).applyLayers()
        # Threshold to a boolean array, unroll to polar space, then dilate to
        # close small gaps before re-thresholding.
        arr = bimg.resize(100).getGrayNumpy() > 100
        rot_arr = arr_to_polar(arr)
        rot_img = Image(PIL.Image.fromarray(np.uint8(np.transpose(rot_arr)*255))).dilate(iterations=3)
        rot_arr = rot_img.getGrayNumpy() > 100
        rot_img = rot_img.resize(400).flipVertical()
        return ParsedFrame(img, bimg, arr, rot_arr, rot_img, cursor_r, cursor_angle)
    else:
        return None
def generate_negative_examples(argv): image_dirs = argv[4:] images = [] for image_dir in image_dirs: # grab all images images.extend(glob(path.join(image_dir, '*.jpg'))) images.extend(glob(path.join(image_dir, '*.JPG'))) images.extend(glob(path.join(image_dir, '*.png'))) images.extend(glob(path.join(image_dir, '*.PNG'))) images = set(images) if len(images) < N: print 'Not enough images! (got %d, need %d)' % (len(images), N) return width, height, output_dir = int(argv[1]), int(argv[2]), argv[3] if path.exists(output_dir) and (not path.isdir(output_dir)): print '%s is not a directory' % output_dir return elif not path.exists(output_dir): os.mkdir(output_dir) for i in xrange(N): print 'generating %3d/%d...' % ((i+1), N) img = Image(images.pop()) img = img.grayscale() if img.width > MAX_WIDTH: img = img.resize(MAX_WIDTH, int(1.0*img.height*MAX_WIDTH/img.width)) x, y = random.randint(0, img.width-width), random.randint(0, img.height-height) img = img.crop(x, y, width, height) path_to_save = path.join(output_dir, '%d.png' % (i+1)) img.save(path_to_save)
# Train the temporal color tracker on a small centered ROI, then plot the
# captured per-channel data with its detected peaks and valleys.
roi = ROI(img.width*0.45, img.height*0.45, img.width*0.1, img.height*0.1, img)
tct.train(cam, roi=roi, maxFrames=250, pkWndw=20)

# Matplot Lib example plotting
plotc = {'r': 'r', 'g': 'g', 'b': 'b', 'i': 'm', 'h': 'y'}
for key in tct.data.keys():
    plt.plot(tct.data[key], plotc[key])
    # Peaks as red stars, valleys as blue stars.
    for pt in tct.peaks[key]:
        plt.plot(pt[0], pt[1], 'r*')
    for pt in tct.valleys[key]:
        plt.plot(pt[0], pt[1], 'b*')
plt.grid()
plt.show()

disp = Display((800, 600))
while disp.isNotDone():
    img = cam.getImage()
    result = tct.recognize(img)
    # Render the live signal to a temp PNG, reload it, and alpha-blend it
    # over the current camera frame.
    plt.plot(tct._rtData, 'r-')
    plt.grid()
    plt.savefig('temp.png')
    plt.clf()
    plotImg = Image('temp.png')
    roi = ROI(img.width*0.45, img.height*0.45, img.width*0.1, img.height*0.1, img)
    roi.draw(width=3)
    img.drawText(str(result), 20, 20, color=Color.RED, fontsize=32)
    img = img.applyLayers()
    img = img.blit(plotImg.resize(w=img.width, h=img.height), pos=(0, 0), alpha=0.5)
    img.save(disp)
# Matplotlib example plotting of the tracker's trained channel data:
# each channel's signal in its own color, peaks as red stars, valleys as blue.
channel_styles = {'r': 'r', 'g': 'g', 'b': 'b', 'i': 'm', 'h': 'y'}
for channel in list(tct.data.keys()):
    plt.plot(tct.data[channel], channel_styles[channel])
    for peak in tct.peaks[channel]:
        plt.plot(peak[0], peak[1], 'r*')
    for valley in tct.valleys[channel]:
        plt.plot(valley[0], valley[1], 'b*')
plt.grid()
plt.show()

disp = Display((800, 600))
while disp.isNotDone():
    img = cam.getImage()
    result = tct.recognize(img)

    # Dump the live-signal plot to disk, reload it as an image, and
    # alpha-blend it over the current camera frame.
    plt.plot(tct._rtData, 'r-')
    plt.grid()
    plt.savefig('temp.png')
    plt.clf()
    overlay = Image('temp.png')

    roi = ROI(img.width * 0.45, img.height * 0.45,
              img.width * 0.1, img.height * 0.1, img)
    roi.draw(width=3)
    img.drawText(str(result), 20, 20, color=Color.RED, fontsize=32)
    img = img.applyLayers()
    img = img.blit(overlay.resize(w=img.width, h=img.height),
                   pos=(0, 0), alpha=0.5)
    img.save(disp)
from SimpleCV import Image

# Open ex22a.jpg (ex22b.jpg and ex22c.jpg work too :)
coins = Image("ex22a.jpg")

# A simple resize, purely so the display looks better.
coins = coins.resize(500, 500)

# Binarizing first makes the coins easy to separate from the background.
binarized = coins.binarize()

# findBlobs() returns a feature-set object we can manipulate and display directly.
detected = binarized.findBlobs()
detected.show(width=5)
from SimpleCV import Image import time img = Image('ladies.jpg') #img.live() # Gets the information for the pixel located at # x coordinate = 120, and y coordinate = 150 pixel = img[120, 150] # or pixelll = img.getPixel(120, 150) print pixel print pixelll print img.getGrayPixel(120, 150) print img.height print img.width # Retrieve the RGB triplet from (120, 150) (red, green, blue) = img.getPixel(223, 82) # Change the color of the pixel+ img[215:230, 82:85] = (0, 0, 0) # Resize the image so it is 5 times bigger then it's original size bigImg = img.resize(img.width*1, img.height*1) bigImg.show() #img.show() time.sleep(10)
def rotateAntiClockWise( image, min_angle, max_angle ): x = image.width / 2 y = image.height / 2 index = os.path.basename( image.filename ).split( "." )[ 0 ][ -1 ] for angle in xrange( min_angle, max_angle + 1 ): rotated_image = image.rotate( angle, ( x, y ) ) rotated_image.resize( 64, 64 ) rotated_image.save( str( index ) + str( angle ) + "l.jpg" ) print "rotated %s through %s angle,anticlockwise." % ( index, angle ) size = 10 filename = "" target = "dataset" image_list = [] for index in xrange( 0, size ): filename = "../" + str( index ) + ".jpg" image = Image( filename ) image.resize( 64, 64 ).save( str( index ) + ".jpg" ) image_list.append( image.filename ) print "%s thumbnail created." % ( image.filename ) # create rotated images. # functions will create images at the current directory labeled by number and angle.format: [ angle ][ number ].jpg os.chdir( target ) for image in image_list: rotateClockWise( Image( image ), min_angle = 0, max_angle = 45 ) rotateAntiClockWise( Image( image ), min_angle = 0, max_angle = 45 )
faces = image.findHaarFeatures("face.xml") if faces: if not password: faces.draw() face = faces[-1] password = face.crop().save("password.jpg") print "First Run Application" print "Exit Program" break else: faces.draw() face = faces[-1] template = face.crop() template.save("passwordmatch.jpg") template = Image("passwordmatch.jpg") diff = template.resize(password.width, password.height) - password; print(diff) #keypoints = password.findKeypointMatch(template) if diff.findBlobs(minsize = 20000): print "Face not matched" print "Danger" GPIO.output(GreenLED, 0) GPIO.output(RedLED, 1) GPIO.output(YellowLED, 0) else: print "your face detected and matched" print "Welcome to Application" GPIO.output(GreenLED, 1) GPIO.output(RedLED, 0)
class Window2:
    """SimpleCV/pygame window that renders a map image and draws particles,
    robots and Bresenham lines on top of it, with collision lookups against a
    boolean occupancy grid built from the image's white pixels."""

    # Frame counter (class attribute, incremented per show()).
    generation = 0

    def __init__(self, filename, scale=1):
        filename = os.path.expanduser(filename)
        self.img = Image(filename)
        self.max_x, self.max_y = self.img.width, self.img.height
        self.scale = scale
        # Occupancy grid: truthy where the source pixel is pure white.
        self.array_map = np.array([[0 for y in range(self.max_y)] for x in range(self.max_x)])
        for x in range(self.max_x):
            for y in range(self.max_y):
                pixel = self.img.getPixel(x, y)
                self.array_map[x][y] = (pixel == (255, 255, 255))
        # scale image (the occupancy grid stays in unscaled coordinates)
        self.img = self.img.resize(self.img.width*scale, self.img.height*scale)
        self.img_size = self.img.width, self.img.height
        self.display = Display(self.img_size)
        self.img.save(self.display)

    def dot(self, p, color=Color.WHITE, size=0):
        # Draw a filled circle at map coordinate p, scaled for display.
        # Raises RuntimeError when p lies outside the map.
        x, y = p[0], p[1]
        #print "Drawing robot particle at {}, {}".format(x, y)
        if x < 0 or x >= self.max_x:
            print "Oh my god! x=", x
            raise RuntimeError
        if y < 0 or y >= self.max_y:
            print "Oh shit! y=", y
            raise RuntimeError
        else:
            self.img.dl().circle(center=(x*self.scale, y*self.scale), radius=size, color=color, width=1, filled=True)

    def dot_red(self, p, color=Color.RED):
        # Convenience wrapper: red dot of radius 2.
        self.dot(p, color, 2)

    def dots(self, coords, color=Color.WHITE, size=0):
        # Draw a dot for every (x, y) pair in coords.
        for (x, y) in coords:
            self.dot((x, y), color, size)

    def clear(self):
        # NOTE(review): Image(self.img_size) creates a blank image, discarding
        # the loaded map graphic (array_map is untouched) — confirm intended.
        self.img = Image(self.img_size)
        #self.display.clear()
        self.img.save(self.display)

    def clear_dl(self):
        # Drop all drawing layers but keep the underlying map image.
        self.img.clearLayers()
        self.img.save(self.display)

    def show(self):
        # Push the current frame to the display and block until a click.
        self.img.save(self.display)
        self.generation += 1
        print "Generation = {}".format(self.generation)
        self.wait_for_mouse()
        print "Mouse pressed!"

    def draw_robot(self, position, orientation):
        # Draw the robot as a dot plus an arrow: one shaft along the heading
        # and two short barbs angled 3*pi/4 back from the tip.
        color = Color.RED
        #self.img.drawRectangle(p[0], p[1], 20, 40, color, 1)
        self.dot(position, color, 2)
        length = 20
        bx = int(round(position[0] + cos(orientation) * length))
        by = int(round(position[1] + sin(orientation) * length))
        self.vector(position, orientation, length, detect_collision=False, color=color)
        self.vector((bx, by), orientation - 3*pi/4, length=8, detect_collision=False, color=color)
        self.vector((bx, by), orientation + 3*pi/4, length=8, detect_collision=False, color=color)

    def vector(self, x, orientation, length, detect_collision=True, color=Color.FORESTGREEN):
        # Trace a ray of `length` from point x along `orientation`;
        # returns the endpoint reached (possibly short of the full length
        # when collision detection stops it).
        bx = int(round(x[0] + cos(orientation) * length))
        by = int(round(x[1] + sin(orientation) * length))
        #self.dot_red((bx, by))
        return self.line(x, (bx, by), detect_collision=detect_collision, color=color)
        #return bx, by

    # a = start point, b = end point (original comment in German:
    # "a = startpunkt, b = endpunkt")
    #@profile
    def line(self, a, b, detect_collision=True, color=Color.BLUE):
        """http://en.wikipedia.org/wiki/Bresenham's_line_algorithm"""
        # performance => use local vars
        max_x = self.max_x
        max_y = self.max_y
        array_map = self.array_map
        x0, y0 = a
        x1, y1 = b
        dx = abs(x1-x0)
        dy = -abs(y1-y0)
        if x0 < x1:
            sx = 1
        else:
            sx = -1
        if y0 < y1:
            sy = 1
        else:
            sy = -1
        err = dx+dy
        while True:
            # Stop at the map border, at a collision (white pixel in the
            # occupancy grid) or at the target point; return where we stopped.
            if x0 <= 0 or x0 >= max_x or y0 <= 0 or y0 >= max_y:
                break
            if color:
                self.dot((x0, y0), color, 0)
            #if detect_collision and self.img.getPixel(x0, y0) == (255, 255, 255):
            if detect_collision and array_map[x0][y0]:
                break
            if x0 == x1 and y0 == y1:
                break
            e2 = 2*err
            if e2 > dy:
                err += dy
                x0 += sx
                if x0 == x1 and y0 == y1:
                    #if color:
                    #    self.dot((x0, y0), color, 0)
                    break
            if e2 < dx:
                err = err + dx
                y0 += sy
        return x0, y0

    def wait_for_mouse(self):
        # Busy-poll the pygame event queue until a mouse button goes down.
        while True:
            for event in pg.event.get():
                if event.type == pg.MOUSEBUTTONDOWN:
                    print event
                    #self.clear()
                    return
from SimpleCV import Image, Camera, Display from time import sleep camera = Camera(prop_set={'width':320, 'height':240}) display = Display(resolution=(320, 240)) mustacheImage = Image("/Users/jharms/Desktop/Mustache.jpg") mustacheImage = mustacheImage.resize(w=120, h=80) stacheMask = mustacheImage.createBinaryMask(color1=(10,10,10), color2=(255,255,255)) stacheMask = stacheMask.invert() #i.save(myDisplay) def mustachify(frame): faces = None print frame.listHaarFeatures() faces = frame.findHaarFeatures('face') if faces: for face in faces: print "Gesicht bei " + str(face.coordinates()) frame = frame.blit(mustacheImage, pos=face.coordinates(), mask=stacheMask) return frame while not display.isDone(): frame = camera.getImage() frame = mustachify(frame) frame.save(display) sleep(.1)
def scale_down(si, image_path):
    """Shrink the image at `image_path` to a height of 1024 px and write it
    to the next 'scale-down' step file; returns that file's path."""
    target_path = step_file_path(si, 'scale-down')
    scaled = Image(image_path).resize(h=1024)
    scaled.save(target_path)
    return target_path
# -*- coding: utf-8 -*-
from SimpleCV import Image

# Convert the JPG to PNG on disk, then reload the PNG and show a resized copy.
jpg_image = Image("ex8.jpg")
jpg_image.save("ex8PNG.png")

png_image = Image("ex8PNG.png")
png_image.resize(800, 600).show()
# -*- coding: utf-8 -*-
from SimpleCV import Image

# Ask for a file name, then resize that image to 1200x300 and overwrite it.
nomeImagem = raw_input("Digite o nome da imagem que deseja redimensionar: ")
Image(nomeImagem).resize(1200, 300).save(nomeImagem)
print ("Convertendo e redimensionando...")
print ("Done!")
def scale_down(self, img_path):
    """Resize the image at `img_path` to a height of 1024 px, save it as the
    next 'scale-down' step file and return that file's path."""
    out_path = self.nfn('scale-down')
    Image(img_path).resize(h=1024).save(out_path)
    return out_path
# -*- coding: utf-8 -*-
from SimpleCV import Image

# Round-trip ex8.jpg through PNG on disk, then display it at 800x600.
source = Image("ex8.jpg")
source.save("ex8PNG.png")
roundtripped = Image("ex8PNG.png")
roundtripped.resize(800, 600).show()
def doface(aa, f1, cc, f2, ee): camera = PiCamera() #imgg = Image('img1.jpg') #disp = Display(imgg.size()) dsize = (640, 480) disp = Display(dsize) #drawing = Image('mustache.png') #maskk = drawing.createAlphaMask() #camera.start_preview() #sleep(2) #['right_eye.xml', 'lefteye.xml', 'face3.xml', 'glasses.xml', # 'right_ear.xml', 'fullbody.xml', 'profile.xml', 'upper_body2.xml', # 'face.xml', 'face4.xml', 'two_eyes_big.xml', 'right_eye2.xml', # 'left_ear.xml', 'nose.xml', 'upper_body.xml', 'left_eye2.xml', # 'two_eyes_small.xml', 'face2.xml', 'eye.xml', 'face_cv2.xml', # 'mouth.xml', 'lower_body.xml'] while disp.isNotDone(): camera.capture('img2.png') img = Image('img2.png') img = img.resize(640, 480) #whatt = img.listHaarFeatures() faces = img.findHaarFeatures('face.xml') print 'faces:', faces if faces: #is not None: face = faces.sortArea()[-1] #print 'size:',face.size if aa == 'none': break elif aa == 'block': face.draw() else: f0draw = aa + '.png' draw0 = Image('use/' + f0draw) face = face.blit(draw0, pos=(100, 200)) #bigFace = face[-1] myface = face.crop() if f1 and cc is not None: feature01 = f1 + '.xml' f1draw = cc + '.png' draw1 = Image('/home/pi/cv/use/' + f1draw) feature1s = myface.findHaarFeatures(feature01) if feature1s is not None: feature1 = feature1s.sortArea()[-1] xpos1 = face.points[0][0] + feature1.x - (draw1.width / 2) ypos1 = face.points[0][ 1] + feature1.y #+ (2*draw1.height/3) #pos = (xmust,ymust) img = img.blit(draw1, pos=(xpos1, ypos1)) #mask=maskk) if f2 and ee is not None: feature02 = f2 + '.xml' f2draw = ee + '.png' draw2 = Image('/home/pi/cv/use/' + f2draw) feature2s = myface.findHaarFeatures(feature02) if feature2s is not None: feature2 = feature2s.sortArea()[-1] xpos2 = face.points[0][0] + feature2.x - (draw2.width / 2) ypos2 = face.points[0][ 1] + feature2.y #+ (2*draw2.height/3) #pos = (xmust,ymust) img = img.blit(draw2, pos=(xpos2, ypos2)) #mask=maskk) img.save(disp) else: print 'no face~~'
def rotateAntiClockWise(image, min_angle, max_angle): x = image.width / 2 y = image.height / 2 index = os.path.basename(image.filename).split(".")[0][-1] for angle in xrange(min_angle, max_angle + 1): rotated_image = image.rotate(angle, (x, y)) rotated_image.resize(64, 64) rotated_image.save(str(index) + str(angle) + "l.jpg") print "rotated %s through %s angle,anticlockwise." % (index, angle) size = 10 filename = "" target = "dataset" image_list = [] for index in xrange(0, size): filename = "../" + str(index) + ".jpg" image = Image(filename) image.resize(64, 64).save(str(index) + ".jpg") image_list.append(image.filename) print "%s thumbnail created." % (image.filename) # create rotated images. # functions will create images at the current directory labeled by number and angle.format: [ angle ][ number ].jpg os.chdir(target) for image in image_list: rotateClockWise(Image(image), min_angle=0, max_angle=45) rotateAntiClockWise(Image(image), min_angle=0, max_angle=45)
from SimpleCV import Image

# Stretch the photo to double its width while keeping the original height.
source = Image('ahmedabad.jpg')
stretched = source.resize(source.width * 2, source.height)
stretched.show()

# Keep the window open until the user presses Enter.
raw_input()
def get_bounding_box(keyword, url, filename):
    """Interactively pick a bounding box for `keyword` on the image at `url`.

    Two left-clicks set the box corners; Enter confirms (whole image when no
    corners were set), 's' raises Skip, 'b' raises BadImage. The returned box
    is in the ORIGINAL image's coordinates even when the display was scaled.
    """
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        # integer-ish scale factor; clicks are scaled back up by this later
        ratio = math.ceil(maxdim/800.0)
        print "  resizing..."
        img = img.resize(w=int(w/ratio), h=int(h/ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))
    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    # Poor man's outline: draw the keyword in black at every small offset,
    # then once in white on top.
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            # A third click restarts the selection.
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            # Preview the box from the first corner to the live cursor.
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                # Scale the clicked corners back into original-image coords.
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                # No selection: the whole (original-size) image.
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb

        # Crosshair that follows the mouse.
        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
# -*- coding: utf-8 -*-
from SimpleCV import Image

prompt = "Digite o nome da imagem que deseja redimensionar: "
nomeImagem = raw_input(prompt)

# Overwrite the chosen file with a 1200x300 version of itself.
resized = Image(nomeImagem).resize(1200, 300)
resized.save(nomeImagem)

print("Convertendo e redimensionando...")
print("Done!")
from SimpleCV import Image
import time

img = Image('ladies.jpg')

# Three binarizations: automatic threshold (Otsu) plus two fixed thresholds.
otsu = img.binarize()
low = img.binarize(75)
high = img.binarize(125)


def _half(image):
    # Shrink to 50% so the 2x2 comparison grid fits on screen.
    return image.resize(int(image.width*.5), int(image.height*.5))


img = _half(img)
otsu = _half(otsu)
low = _half(low)
high = _half(high)

# 2x2 montage: [original | otsu] above [low | high].
top = img.sideBySide(otsu)
bottom = low.sideBySide(high)
combined = top.sideBySide(bottom, side="bottom")
combined.show()
time.sleep(20)
#se prueba distintos metodos para ver si detecta los bellos en la piel ed=imagen.edges() edg=imagen+ed edg.save('bordes_edge.png') grad = imagen.morphGradient() grd = imagen+grad grd.save('bordes_gradiente.png') lineas=imagen.findLines() lineas.draw(Color.RED,width=3) #imagen.show() imagen.save("linbeas.png") resu = imagen.resize(320,240) #se redefine la imagen para que tenga un menor tiempo #de procesamiento gray=resu.grayscale() inv=gray.invert() sumimg=resu+inv res=(resu*1.5)-(gray/2) res.save('muestras/imagen_tratada.png') [red, green, blue]=res.splitChannels(False) def Rho(A,B): # A es el color del lunar y B es el color de la piel Ar=A[0]; Ag=A[1]; Ab=A[2] Br=B[0]; Bg=B[1]; Bb=B[2] r=np.array([Ar/Br,Ar/Bg,Ar/Bb,Ag/Br,Ag/Bg,Ag/Bb,Ab/Br,Ab/Bg,Ab/Bb]) return r
def fancify():
    """Flask endpoint: fetch the image named in the request JSON, find faces
    and their nose/mouth/eye sub-features, blit a top hat, mustache and
    monocle onto each accepted face, and return the result as a JPEG response.

    POST: reads JSON from request.data. Otherwise a hard-coded test request
    is used. When debug is on, detection boxes are drawn as overlays.
    """
    if request.method == 'POST':
        print request.data
        cur_request = json.loads(request.data)
    else:
        # Hard-coded test inputs; all but one are kept commented out.
        #cur_request = """{"url": "", "debug":true}"""
        #cur_request = """{"url": "", "debug":true}"""
        #cur_request = """{"url": "", "debug":true}"""
        cur_request = """{"url": "http://localhost/images/scrubs.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.newrichstrategies.com/wp-content/uploads/2012/03/How-to-Find-Good-People-in-Your-Life.jpg", "debug":false}"""
        #cur_request = """{"url": "http://greenobles.com/data_images/frank-lampard/frank-lampard-02.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.billslater.com/barack__obama.jpg"}"""
        #cur_request = """{"url": "http://celebrityroast.com/wp-content/uploads/2011/01/arnold-schwarzenegger-body-building.jpg", "debug":false}"""
        #cur_request = """{"url": "http://face2face.si.edu/.a/6a00e550199efb8833010536a5483e970c-800wi", "debug":true}"""
        #cur_request = """{"url": "http://collider.com/uploads/imageGallery/Scrubs/scrubs_cast_image__medium_.jpg", "debug":false}"""
        #cur_request = """{"url": "http://localhost/images/Kevin_Bacon_at_the_2010_SAG_Awards.jpg", "debug":false}"""
        #cur_request = """{"url": "http://cdn02.cdn.justjared.com/wp-content/uploads/headlines/2012/02/anna-faris-oscars-red-carpet-2012.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.viewzone.com/attractive.female.jpg", "debug":true}"""
        cur_request = json.loads(cur_request)

    print cur_request["url"]
    img = Image(str(cur_request["url"]))
    # Work at 2x; the result is scaled back down before responding.
    img = img.scale(2.0)
    debug = True
    #if "debug" in cur_request:
    #    debug = cur_request["debug"]
    chosen_faces = []
    faces = img.findHaarFeatures(face_cascade)
    if faces is not None:
        for face in faces:
            face_features = []
            invalid_face = False
            face_rect = Rect(face.x - (face.width() / 2), face.y - (face.height() / 2), face.width(), face.height())
            # Skip faces overlapping one that was already decorated.
            for chosen_face in chosen_faces:
                if face_rect.colliderect(chosen_face):
                    invalid_face = True
                    break
            if invalid_face:
                # NOTE(review): this `break` abandons ALL remaining faces;
                # `continue` may have been intended — confirm.
                break
            nose = None
            mouth = None
            left_eye = None
            right_eye = None
            cur_face = img.crop(face.x, face.y, face.width(), face.height(), centered=True)
            #cur_face = face.crop()
            noses = cur_face.findHaarFeatures(nose_cascade)
            mouths = cur_face.findHaarFeatures(mouth_cascade)
            eyes = cur_face.findHaarFeatures(eye_cascade)
            face_left_edge = face.x - (face.width() / 2)
            face_top_edge = face.y - (face.height() / 2)
            if noses is not None:
                # Pick the nose closest to the expected spot (centered,
                # about 5/9 down the face, about a quarter of its width).
                nose = noses[0]
                nose_dist = (abs(nose.x - (face.width() / 2)) +
                             abs(nose.y - (face.height() * 5 / 9)) +
                             abs(nose.width() - (face.width() / 4)))
                for cur_nose in noses:
                    cur_dist = (abs(cur_nose.x - (face.width() / 2)) +
                                abs(cur_nose.y - (face.height() * 5 / 9)) +
                                abs(cur_nose.width() - (face.width() / 4)))
                    if cur_dist < nose_dist:
                        nose = cur_nose
                        # NOTE(review): `nost_dist` is a typo for `nose_dist`,
                        # so the best-distance is never updated here.
                        nost_dist = cur_dist
            # A "nose" in the top third of the face is a false positive.
            if nose and (nose.y < (face.height() / 3)):
                nose = None
            if nose and mouths is not None:
                # Mouth: horizontally aligned with the nose, ~4/5 down the
                # face (vertical error double-weighted), and below the nose.
                mouth = mouths[0]
                mouth_dist = abs(mouth.x - nose.x) + (abs(mouth.y - (face.height() * 4 / 5)) * 2)
                for cur_mouth in mouths:
                    cur_dist = abs(cur_mouth.x - nose.x) + (abs(cur_mouth.y - (face.height() * 4/ 5)) * 2)
                    if (cur_dist < mouth_dist) and (cur_mouth.y > nose.y):
                        mouth = cur_mouth
                        mouth_dist = cur_dist
            if nose and eyes:
                # Right eye: expected around 3/4 across the face, above the nose.
                right_eye = eyes[0]
                right_eye_dist = (abs(right_eye.x - (3 * face.width() / 4)) * 2 +
                                  abs(right_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                  abs(right_eye.width() - (face.width() / 3)))
                for cur_eye in eyes:
                    cur_right_dist = (abs(cur_eye.x - (3 * face.width() / 4)) +
                                      abs(cur_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                      abs(cur_eye.width() - (face.width() / 3)))
                    if (cur_right_dist <= right_eye_dist):  # and (cur_eye.y < nose.y):
                        right_eye = cur_eye
                        right_eye_dist = cur_right_dist
            # Cull an eye that sits below the nose or left of it.
            if nose and right_eye and (((right_eye.y - (right_eye.height() / 2)) > nose.y) or (right_eye.x < nose.x)):
                print "Culling right_eye"
                right_eye = None
            if nose and mouth:
                chosen_faces.append(face_rect)
                # Top-left corners of the face and nose boxes (centroid-based
                # features store centers, hence the half-size offsets).
                x_face = face.x - (face.width() / 2)
                y_face = face.y - (face.height() / 2)
                x_nose = nose.x - (nose.width() / 2)
                y_nose = nose.y - (nose.height() / 2)

                # Setup TopHat Image
                scale_factor = face.width() / 175.0
                cur_hat = hat.copy()
                cur_hat = cur_hat.scale(scale_factor)
                cur_hat_mask = hat_mask.copy()
                cur_hat_mask = cur_hat_mask.scale(scale_factor)
                cur_hat_mask = cur_hat_mask.createAlphaMask(hue_lb=0, hue_ub=100)

                # Calculate the hat position (only when it fits above the face).
                if (face.y - face.height() / 2) > cur_hat.height:
                    x_hat = face.x - (cur_hat.width / 2)
                    y_hat = face.y - (face.height() * 7 / 10) - (cur_hat.height / 2)
                    img = img.blit(cur_hat, pos=(x_hat, y_hat), alphaMask=cur_hat_mask)

                if mouth:
                    x_mouth = mouth.x - (mouth.width() / 2)
                    y_mouth = mouth.y - (mouth.height() / 2)

                    # Setup Mustache Image
                    cur_stache = stache.copy()
                    scale_factor = ((nose.width() / 300.0) + (face.width() / 600.0)) / 2.0
                    cur_stache = cur_stache.scale(scale_factor)
                    stache_mask = cur_stache.createAlphaMask(hue_lb=0, hue_ub=10).invert()

                    # Calculate the mustache position: centered between the
                    # bottom of the nose and the top of the mouth.
                    bottom_of_nose = y_nose + (nose.height() * 4 / 5)
                    top_of_mouth = y_mouth
                    # if top_of_mouth > bottom_of_nose:
                    #     top_of_mouth = bottom_of_nose
                    y_must = y_face + ((bottom_of_nose + top_of_mouth) / 2) - (cur_stache.height / 2)
                    middle_of_nose = nose.x
                    middle_of_mouth = mouth.x
                    x_must = x_face + ((middle_of_nose + middle_of_mouth) / 2) - (cur_stache.width / 2)

                if right_eye:
                    x_right_eye = right_eye.x - (right_eye.width() / 2)
                    y_right_eye = right_eye.y - (right_eye.height() / 2)

                    # Setup Monocle Image
                    cur_mono = monocle.copy()
                    scale_factor = ((right_eye.width() / 65.0) + (face.width() / 200.0)) / 2.0
                    cur_mono = cur_mono.scale(scale_factor)
                    mono_mask = cur_mono.createAlphaMask(hue_lb=0, hue_ub=100).invert()

                    # Calculate Monocle Position
                    x_mono = x_face + x_right_eye
                    y_mono = y_face + y_right_eye
                    img = img.blit(cur_mono, pos=(x_mono, y_mono), alphaMask=mono_mask)

                img = img.blit(cur_stache, pos=(x_must, y_must), alphaMask=stache_mask)

                if debug:
                    # Draw the accepted nose's box.
                    noselayer = DrawingLayer((img.width, img.height))
                    nosebox_dimensions = (nose.width(), nose.height())
                    center_point = (face.x - (face.width() / 2) + nose.x,
                                    face.y - (face.height() / 2) + nose.y)
                    nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions, width=3)
                    img.addDrawingLayer(noselayer)
                    img = img.applyLayers()
            else:
                print "Face culled:"
                if not nose:
                    print "  No Nose"
                if not mouth:
                    print "  No mouth"
                if not right_eye:
                    print "  No right eye"
                print

            if debug:
                # Draw boxes for the face and for EVERY candidate feature.
                face_left_edge = face.x - (face.width() / 2)
                face_top_edge = face.y - (face.height() / 2)
                facelayer = DrawingLayer((img.width, img.height))
                facebox_dimensions = (face.width(), face.height())
                center_point = (face.x, face.y)
                facebox = facelayer.centeredRectangle(center_point, facebox_dimensions, Color.BLUE)
                img.addDrawingLayer(facelayer)
                if noses:
                    for nose in noses:
                        noselayer = DrawingLayer((img.width, img.height))
                        nosebox_dimensions = (nose.width(), nose.height())
                        center_point = (face.x - (face.width() / 2) + nose.x,
                                        face.y - (face.height() / 2) + nose.y)
                        nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions)
                        img.addDrawingLayer(noselayer)
                if mouths:
                    for mouth in mouths:
                        mouthlayer = DrawingLayer((img.width, img.height))
                        mouthbox_dimensions = (mouth.width(), mouth.height())
                        center_point = (face.x - (face.width() / 2) + mouth.x,
                                        face.y - (face.height() / 2) + mouth.y)
                        mouthbox = mouthlayer.centeredRectangle(center_point, mouthbox_dimensions, Color.GREEN)
                        img.addDrawingLayer(mouthlayer)
                if eyes:
                    for right_eye in eyes:
                        right_eyelayer = DrawingLayer((img.width, img.height))
                        right_eyebox_dimensions = (right_eye.width(), right_eye.height())
                        right_eye_center_point = (face_left_edge + right_eye.x, face_top_edge + right_eye.y)
                        right_eyebox = right_eyelayer.centeredRectangle(right_eye_center_point, right_eyebox_dimensions)
                        img.addDrawingLayer(right_eyelayer)
                img = img.applyLayers()

    # Undo the 2x working scale, then cap the output at 800x600.
    img = img.scale(0.5)
    w_ratio = img.width / 800.0
    h_ratio = img.height / 600.0
    if h_ratio > 1.0 or w_ratio > 1.0:
        if h_ratio > w_ratio:
            img = img.resize(h=600)
        else:
            img = img.resize(w=800)

    # Serialize to JPEG in memory and hand it back as an HTTP response.
    output = StringIO.StringIO()
    img.getPIL().save(output, format="JPEG")  #, quality=85, optimize=True)
    img_contents = output.getvalue()
    mimetype = "image/jpeg"
    return app.response_class(img_contents, mimetype=mimetype, direct_passthrough=False)
faces = image.findHaarFeatures("face.xml") if faces: if not password: faces.draw() face = faces[-1] password = face.crop().save("password.jpg") print "First Run Application" print "Exit Program" break else: faces.draw() face = faces[-1] template = face.crop() template.save("passwordmatch.jpg") template = Image("passwordmatch.jpg") diff = template.resize(password.width, password.height) - password print(diff) #keypoints = password.findKeypointMatch(template) if diff.findBlobs(minsize=20000): print "Face not matched" print "Danger" GPIO.output(GreenLED, 0) GPIO.output(RedLED, 1) GPIO.output(YellowLED, 0) else: print "your face detected and matched" print "Welcome to Application" GPIO.output(GreenLED, 1) GPIO.output(RedLED, 0)