Example #1
def get_faces(image_path, api_mode=False, rescale_face_crop=0):
    """
    Return a list of cropped faces given an image path

    :param image_path: Path to image
    :type image_path: str
    :param api_mode: If api_mode is True, get_faces returns the list of found HaarFeatures instead of cropped images
    :type api_mode: bool
    :param rescale_face_crop: If non-zero, each face's bounding box is scaled by this factor before cropping
    :type rescale_face_crop: float
    :returns: list of images
    """

    original_image = Image(image_path)
    faces = original_image.findHaarFeatures(segment_face)

    if api_mode:
        return faces
    else:
        if rescale_face_crop:
            return [
                original_image.crop(
                    scale_bounding_box(face.boundingBox(), rescale_face_crop))
                for face in faces
            ]
        else:
            return [face.crop() for face in faces]
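The scale_bounding_box helper is not part of this example. A minimal sketch of what it might look like, assuming boundingBox() yields an (x, y, w, h) tuple and that the crop should grow symmetrically around the detected face:

# Hypothetical helper matching the call above; both the (x, y, w, h) layout
# and the symmetric growth are assumptions, not taken from the original project.
def scale_bounding_box(bbox, scale):
    x, y, w, h = bbox
    new_w, new_h = w * scale, h * scale
    # Keep the box centred on the original face while enlarging it.
    return (int(x - (new_w - w) / 2),
            int(y - (new_h - h) / 2),
            int(new_w),
            int(new_h))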
Example #2
def line_blobs(id):
    cur = g.db.execute(
        'SELECT id, x, y, width, height FROM blobs WHERE image=?', [id])
    blobs = cur.fetchall()
    entries = []
    img = Image("static\\img\\%d.jpg" % id)
    for i, entry in enumerate(blobs):
        blob = img.crop(entry[1], entry[2], entry[3], entry[4])
        if blob and 'SK_MODEL' in app.config:
            if blob.height > blob.width:
                blob = blob.resize(h=app.config['PATCH_SIZE'])
            else:
                blob = blob.resize(w=app.config['PATCH_SIZE'])
            blob = blob.embiggen(
                (app.config['PATCH_SIZE'], app.config['PATCH_SIZE']))
            np_img = blob.getGrayNumpy().transpose().reshape(-1)
            pred = labels.inverse_transform(sk_model.predict(np_img))[0]
            if app.config['DEBUG']:
                blob.save("tmp\\pic%d %s.jpg" % (i, pred))
            entries.append(
                dict(x=entry[1],
                     y=entry[2],
                     width=entry[3],
                     height=entry[4],
                     pred=pred))
        else:
            entries.append(
                dict(x=entry[1], y=entry[2], width=entry[3], height=entry[4]))
    return jsonify(blobs=entries)
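sk_model and labels come from the application's setup code, which is not shown here. A rough sketch of how they could be produced with scikit-learn, using dummy data in place of the real training patches (recent scikit-learn versions also expect a 2-D array for predict, i.e. np_img.reshape(1, -1)):

# Hypothetical training setup matching the usage above; PATCH_SIZE and the
# training data are placeholders, not values from the original application.
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC

PATCH_SIZE = 32
X = np.random.rand(10, PATCH_SIZE * PATCH_SIZE)   # flattened grayscale patches
y_strings = ["char"] * 5 + ["background"] * 5     # string class labels

labels = LabelEncoder()
y = labels.fit_transform(y_strings)
sk_model = LinearSVC().fit(X, y)

# Predicting one flattened patch and mapping the result back to its string label.
pred = labels.inverse_transform(sk_model.predict(X[:1]))[0]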
Example #3
    def capture(self):
        count = 0
        currentframes = []
        self.framecount = self.framecount + 1

        for c in self.cameras:
            img = ""
            if c.__class__.__name__ == "Kinect" and c._usedepth == 1: 
                img = c.getDepth()
            elif c.__class__.__name__ == "Kinect" and c._usematrix == 1:
                mat = c.getDepthMatrix().transpose()
                img = Image(np.clip(mat - np.min(mat), 0, 255))
            else:
                img = c.getImage()
            if self.config.cameras[0].has_key('crop'):
                img = img.crop(*self.config.cameras[0]['crop'])
            frame = M.Frame(capturetime = datetime.utcnow(), 
                camera= self.config.cameras[count]['name'])
            frame.image = img
            currentframes.append(frame)
            
            while len(self.lastframes) > self.config.max_frames:
                self.lastframes.pop(0)
            # log.info('framecount is %s', len(self.lastframes))
                            
#            Session().redis.set("framecount", self.framecount)
            count = count + 1

        self.lastframes.append(currentframes)

        return currentframes
Example #4
File: wmr.py  Project: nthrockmorton/wmr
def crop():
    #Begin Processing image
    print ('Begin processing image...')
    fullImage = Image('test/image/expected/fullImage.jpg')
    hundreds = fullImage.crop(602, 239, 28, 63)
    hundreds = hundreds.binarize(65).invert()
    hundreds.save('test/image/actual/hundredsImage.jpg')
    print ('Hundreds place cropped, binarized, and saved')
Example #5
def get_pan_info(img_path):
    if img_path is None:
        img_path = sys.argv[1]
    print "Reading from file: " + img_path
    img = Image(img_path).toRGB()

    min_pan_area = img.width * img.height / 10
    pan_rects = img.invert().findBlobs((25, 0, 0), min_pan_area)

    if pan_rects and len(pan_rects) > 0:
        img = img.crop(pan_rects[0])

    if img.width < 350 or img.height < 200:
        print "Error too small PAN image"
        exit(1)

    cropped_img = img.crop(0, img.height / 4.60, img.width / 1.4,
                           img.height / 4 * 2.3)
    if cropped_img.width < 550:
        cropped_img = cropped_img.resize(
            550, 550 * cropped_img.height / cropped_img.width)

    t_img = cropped_img.threshold(100)
    img_text = pytesseract.image_to_string(t_img.getPIL(), lang='eng')
    context = {}
    learn_text(context, img_text)

    t_img = cropped_img
    img_text = pytesseract.image_to_string(t_img.getPIL(), lang='eng')
    learn_text(context, img_text)

    t_img = cropped_img.threshold(90)
    img_text = pytesseract.image_to_string(t_img.getPIL(), lang='eng')
    learn_text(context, img_text)

    t_img = cropped_img.threshold(110)
    img_text = pytesseract.image_to_string(t_img.getPIL(), lang='eng')
    learn_text(context, img_text)

    #print context['best_list']
    pan_info = get_fields(context['best_list'])
    #print json.dumps(pan_info, indent=2)
    return pan_info
Example #6
def blob(id):
    cur = g.db.execute("SELECT id, image, x, y, width, height FROM annotations WHERE annotations.id=?", [id])
    line = cur.fetchone()

    img = Image("static\\img\\%d.jpg" % int(line[1]))
    blob = img.crop(line[2], line[3], line[4], line[5])
    io = StringIO()
    blob.save(io)
    data = io.getvalue()
    resp = make_response(data)
    resp.content_type = "image/jpeg"
    return resp
Example #7
    def getImage(self):
        if isinstance(self._scv_cam, Kinect):
            if self._scv_cam._usedepth == 1:
                img = self._scv_cam.getDepth()
            elif self._scv_cam._usematrix == 1:
                mat = self._scv_cam.getDepthMatrix().transpose()
                img = Image(np.clip(mat - np.min(mat), 0, 255))
            else:
                img = self._scv_cam.getImage()
        else:
            img = self._scv_cam.getImage()
        if self.crop:
            img = img.crop(self.crop)
        return img
Example #8
def blob(id):
    cur = g.db.execute(
        'SELECT id, image, x, y, width, height FROM annotations WHERE annotations.id=?',
        [id])
    line = cur.fetchone()

    img = Image("static\\img\\%d.jpg" % int(line[1]))
    blob = img.crop(line[2], line[3], line[4], line[5])
    io = StringIO()
    blob.save(io)
    data = io.getvalue()
    resp = make_response(data)
    resp.content_type = "image/jpeg"
    return resp
Example #9
def capture_camera_image():
    log("capture_camera_image()")
    image = None
    if USE_MOTION_CAMERA:
        image = Image(MOTION_CAMERA_SNAPSHOT)
    else:  # default to USE_WEB_CAMERA
        if cam is None:
            image = Camera().getImage()
        else:
            image = cam.getImage()

    size = image.size()

    # dynamic background mean color calculation
    if (BACKGROUND_AUTO_CALIBRATE):
        x = CROP_IMAGE_BORDERS[0]
        y = CROP_IMAGE_BORDERS[1]
        if (x > 0 and y > 0):
            background = image.crop(0, 0, x, y)
            mean_color = background.meanColor()
            log("background as (%d, %d, %d)" % (mean_color[0], mean_color[1], mean_color[2]))
            # save it
            db = db_open()
            db_backgrounds_write(db, db_background(date_created=datetime.now(), mean_color=mean_color))
            db_close(db)

    # if image border needs to be cropped
    x = CROP_IMAGE_BORDERS[0]
    y = CROP_IMAGE_BORDERS[1]
    if (x > 0 and y > 0):
        width = size[0] - 2 * x
        height = size[1] - 2 * y
        cropped = image.crop(x, y, width, height)
    else:
        cropped = image
    return cropped
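The border crop removes x pixels from the left and right edges and y pixels from the top and bottom. A quick worked example of the arithmetic, with assumed values for the frame size and CROP_IMAGE_BORDERS:

# Illustration only; the real values come from the script's configuration.
CROP_IMAGE_BORDERS = (20, 10)
size = (640, 480)
x, y = CROP_IMAGE_BORDERS
width = size[0] - 2 * x     # 600
height = size[1] - 2 * y    # 460
# image.crop(x, y, width, height) would keep the region from (20, 10) to (620, 470)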
Example #10
def line_blobs(id):
    cur = g.db.execute("SELECT id, x, y, width, height FROM blobs WHERE image=?", [id])
    blobs = cur.fetchall()
    entries = []
    img = Image("static\\img\\%d.jpg" % id)
    for i, entry in enumerate(blobs):
        blob = img.crop(entry[1], entry[2], entry[3], entry[4])
        if blob and "SK_MODEL" in app.config:
            if blob.height > blob.width:
                blob = blob.resize(h=app.config["PATCH_SIZE"])
            else:
                blob = blob.resize(w=app.config["PATCH_SIZE"])
            blob = blob.embiggen((app.config["PATCH_SIZE"], app.config["PATCH_SIZE"]))
            np_img = blob.getGrayNumpy().transpose().reshape(-1)
            pred = labels.inverse_transform(sk_model.predict(np_img))[0]
            if app.config["DEBUG"]:
                blob.save("tmp\\pic%d %s.jpg" % (i, pred))
            entries.append(dict(x=entry[1], y=entry[2], width=entry[3], height=entry[4], pred=pred))
        else:
            entries.append(dict(x=entry[1], y=entry[2], width=entry[3], height=entry[4]))
    return jsonify(blobs=entries)
Example #11
def get_faces(image_path, api_mode=False, rescale_face_crop=0):
    """
    Return a list of cropped faces given an image path

    :param image_path: Path to image
    :type image_path: str
    :param api_mode: If api_mode is True, get_faces returns the list of found HaarFeatures instead of cropped images
    :type api_mode: bool
    :param rescale_face_crop: If non-zero, each face's bounding box is scaled by this factor before cropping
    :type rescale_face_crop: float
    :returns: list of images
    """

    original_image = Image(image_path)
    faces = original_image.findHaarFeatures(segment_face)

    if api_mode:
        return faces
    else:
        if rescale_face_crop:
            return [original_image.crop(scale_bounding_box(face.boundingBox(), rescale_face_crop)) for face in faces]
        else:
            return [face.crop() for face in faces]
Example #12
def generate_negative_examples(argv):
    image_dirs = argv[4:]

    images = []
    for image_dir in image_dirs:
        # grab all images
        images.extend(glob(path.join(image_dir, '*.jpg')))
        images.extend(glob(path.join(image_dir, '*.JPG')))
        images.extend(glob(path.join(image_dir, '*.png')))
        images.extend(glob(path.join(image_dir, '*.PNG')))

    images = set(images)

    if len(images) < N:
        print 'Not enough images! (got %d, need %d)' % (len(images), N)
        return

    width, height, output_dir = int(argv[1]), int(argv[2]), argv[3]

    if path.exists(output_dir) and (not path.isdir(output_dir)):
        print '%s is not a directory' % output_dir
        return
    elif not path.exists(output_dir):
        os.mkdir(output_dir)

    for i in xrange(N):
        print 'generating %3d/%d...' % ((i+1), N)
        img = Image(images.pop())
        img = img.grayscale()
        if img.width > MAX_WIDTH:
            img = img.resize(MAX_WIDTH, int(1.0*img.height*MAX_WIDTH/img.width))

        x, y = random.randint(0, img.width-width), random.randint(0, img.height-height)
        img = img.crop(x, y, width, height)

        path_to_save = path.join(output_dir, '%d.png' % (i+1))
        img.save(path_to_save)
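generate_negative_examples reads its parameters from an argv-style list: argv[1] and argv[2] are the patch width and height, argv[3] is the output directory, and argv[4:] are the directories scanned for JPG/PNG images (N and MAX_WIDTH are module-level constants not shown). A hedged example of how it might be called:

# Hypothetical invocation; argv[0] is not used by the function, and the
# directory names are placeholders.
generate_negative_examples([
    'negatives',        # argv[0], unused here
    '24', '24',         # patch width and height in pixels
    'out/negatives',    # output directory (created if missing)
    'photos/set1',      # one or more image directories
    'photos/set2',
])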
Example #13
File: ogplab.py  Project: juoni/ogp
    def histo(self):
        cam_mode = self.cam_mode
        js = self.js
        ms = self.ms
        w = self.w
        cent = 0
        rgb1 = 0
        c2 = self.c2
        wsh = self.wsh 
        wsh2 = self.wsh2
        s.write('s')
        
        if cam_mode == 3:
            img1 = c2.getImage()
        if cam_mode==1:
            with picamera.PiCamera() as camera:
                camera.resolution = (544, 288)
                camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
        if cam_mode==2:
            with picamera.PiCamera() as camera:
                camera.resolution = (544, 288)
                camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
        self.img1 = img1
        blobs = img1.findBlobs()  
        
        if blobs:
            print "blob"
            x = self.x
            y = self.y
            p = self.p
            p = p + 1
            img1.drawCircle((blobs[-1].x,blobs[-1].y),30,color=(255,255,255))
            img1.drawCircle((blobs[0].centroid()),10,color=(255,100,100))
            print blobs[-1].meanColor()
            rgb1 = blobs[-1].meanColor()
            cent = blobs[-1].centroid()

            pth1 = "/var/www/images/image"
            pth3 = ".png"
            pth = pth1 + str(p) + pth3
            print pth
            img1.save(pth)
            
            thumbnail = img1.crop(150,25,250,250)
            thumbnail = thumbnail.scale(20,20)
            thumb1 = "/var/www/images/thumbs/thumb"
            thumb3 = ".png"
            thumbpath = thumb1 + str(p) + thumb3
            print thumbpath
            thumbnail.save(thumbpath)
            
            self.p = p
            
            mySet.add((p,x,y,w,cent,rgb1))
            self.mySet = mySet
            
            wshx = str(self.x)
            wshy = str(self.y)
            centroidx = int(cent[0])
            centroidy=int(cent[1])
            rcolor=rgb1[0]
            gcolor=rgb1[1]
            bcolor=rgb1[2]
            rcolor=int(rcolor)
            gcolor=int(gcolor)
            bcolor=int(bcolor)
            wsh.write_message(wsh2, "rgb_" + str(rcolor)+"_"+str(gcolor)+"_"+str(bcolor))
            wsh.write_message(wsh2, "x_" + str(centroidx)+"_"+str(centroidy))
            img1.save(js.framebuffer)
            wsh.write_message(wsh2, "d_" + wshx + "_" + wshy + "_" + str(p) )

        else:
            wshx = str(self.x)
            wshy = str(self.y)
            wsh.write_message(wsh2, wshx + " " + wshy + "dark")
 
            print "dark"
Example #14
from SimpleCV import Image
import time
img = Image('ladies.jpg')
# Crop starting at (80, 5) for an area 280 pixels wide by 500 pixels tall
cropImg = img.crop(80, 5, 280, 500)
cropImg.show()
time.sleep(10)
Example #15
#import the modules
import time
from SimpleCV import Image
from SimpleCV import Color

car_in_lot = Image("parking-car.png")
car_not_in_lot = Image("parking-no-car.png")

car = car_in_lot.crop(470, 200, 200, 200)
win = car.show()
time.sleep(1)
win.quit()

yellow_car = car.colorDistance(Color.YELLOW)
win = yellow_car.show()
time.sleep(1)
win.quit()

only_car = car - yellow_car
win = only_car.show()
time.sleep(1)
win.quit()

print('resultados primera comparacion:')
R = only_car.meanColor()[2]
print('R: ' + str(R))
G = only_car.meanColor()[1]
print('G: ' + str(G))
B = only_car.meanColor()[0]
print('B: ' + str(B))
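The crop / colorDistance / meanColor sequence above can be folded into a small predicate. A rough sketch, reusing the spot rectangle and the r/g thresholds that the parking-spot examples later on (#20 and #21) apply:

# Sketch only; the spot rectangle and thresholds are taken from the related
# parking-spot examples, not from a general calibration.
from SimpleCV import Image, Color

def car_in_spot(image, spot=(470, 200, 200, 200), r_min=15, g_min=10):
    patch = image.crop(*spot)
    only_car = (patch - patch.colorDistance(Color.YELLOW)).toRGB()
    r, g, b = only_car.meanColor()
    return r > r_min and g > g_min

print(car_in_spot(Image("parking-car.png")))
print(car_in_spot(Image("parking-no-car.png")))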
Example #16
    def histo(self):
        cam_mode = self.cam_mode
        js = self.js
        ms = self.ms
        w = self.w
        cent = 0
        rgb1 = 0
        c2 = self.c2
        wsh = self.wsh
        wsh2 = self.wsh2
        s.write('s')

        if cam_mode == 3:
            img1 = c2.getImage()
        if cam_mode == 1:
            with picamera.PiCamera() as camera:
                camera.resolution = (544, 288)
                camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
        if cam_mode == 2:
            with picamera.PiCamera() as camera:
                camera.resolution = (544, 288)
                camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')
        self.img1 = img1
        blobs = img1.findBlobs()

        if blobs:
            print "blob"
            x = self.x
            y = self.y
            p = self.p
            p = p + 1
            img1.drawCircle((blobs[-1].x, blobs[-1].y),
                            30,
                            color=(255, 255, 255))
            img1.drawCircle((blobs[0].centroid()), 10, color=(255, 100, 100))
            print blobs[-1].meanColor()
            rgb1 = blobs[-1].meanColor()
            cent = blobs[-1].centroid()

            pth1 = "/var/www/images/image"
            pth3 = ".png"
            pth = pth1 + str(p) + pth3
            print pth
            img1.save(pth)

            thumbnail = img1.crop(150, 25, 250, 250)
            thumbnail = thumbnail.scale(20, 20)
            thumb1 = "/var/www/images/thumbs/thumb"
            thumb3 = ".png"
            thumbpath = thumb1 + str(p) + thumb3
            print thumbpath
            thumbnail.save(thumbpath)

            self.p = p

            mySet.add((p, x, y, w, cent, rgb1))
            self.mySet = mySet

            wshx = str(self.x)
            wshy = str(self.y)
            centroidx = int(cent[0])
            centroidy = int(cent[1])
            rcolor = rgb1[0]
            gcolor = rgb1[1]
            bcolor = rgb1[2]
            rcolor = int(rcolor)
            gcolor = int(gcolor)
            bcolor = int(bcolor)
            wsh.write_message(
                wsh2,
                "rgb_" + str(rcolor) + "_" + str(gcolor) + "_" + str(bcolor))
            wsh.write_message(wsh2,
                              "x_" + str(centroidx) + "_" + str(centroidy))
            img1.save(js.framebuffer)
            wsh.write_message(wsh2, "d_" + wshx + "_" + wshy + "_" + str(p))

        else:
            wshx = str(self.x)
            wshy = str(self.y)
            wsh.write_message(wsh2, wshx + " " + wshy + "dark")

            print "dark"
Example #17
from SimpleCV import Image
img = Image('ahmedabad.jpg')

cropImg = img.crop(50, 5, 200, 200)
cropImg.show()
raw_input()

Example #18
from __future__ import print_function
from SimpleCV import Image, Color

im = Image("img/ardrone.jpg")
# im = im.erode()
im = im.crop(2000, 1500, 2000, 2000, centered=True)
# im = im.erode()
# blobs = im.findBlobs()
# blobs.draw()

only_orange = im - im.colorDistance(Color.ORANGE)
only_black 	= im - im.colorDistance(Color.BLACK)
only_drone  = only_orange + only_black


body = only_drone
body.save("img/ardrone2.jpg")
# raw_input("Hit enter to quit: ")
# window.quit()
Example #19
def cuteImg(pack_blob_zoom, pack_blob_size):
    oriImg = Image("./img/original.jpg")
    cropImg = oriImg.crop(pack_blob_zoom[0] * 2,
                          pack_blob_zoom[1] * 2.05,
                          pack_blob_size[0] * 0.85,
                          pack_blob_size[1] * 0.58,
                          centered=True)
    cropImg.save("./img/dest5.jpg")
Example #20
from SimpleCV import Image, Color, Display
import argparse

parser = argparse.ArgumentParser(description='Check for car in handicap spot.')
parser.add_argument('img')
args = parser.parse_args()
img = args.img

image = Image(img)

cropped_image = image.crop(470, 200, 200, 200)

color_distance = cropped_image.colorDistance(Color.YELLOW)

spot = cropped_image - color_distance
spot = spot.toRGB()

(r, g, b) = spot.meanColor()

if ((r > 15) and (g > 10)):
    print "The car is in the lot. Call the attendant."
else:
    print "The car is not in the lot."
Example #21
from SimpleCV import Image, Color, Display
import argparse

parser = argparse.ArgumentParser(description='Check for car in handicap spot.')
parser.add_argument('img')
args = parser.parse_args()
img = args.img

image = Image(img)

cropped_image = image.crop(470,200,200,200)

color_distance = cropped_image.colorDistance(Color.YELLOW)

spot = cropped_image - color_distance
spot = spot.toRGB()

(r, g, b) =spot.meanColor()

if ((r>15) and (g>10)):
    print "The car is in the lot. Call the attendant."
else:
    print "The car is not in the lot."
Example #22
#import the modules
import time
from SimpleCV import Image
from SimpleCV import Color

car_in_lot = Image("parking-car.png")
car_not_in_lot = Image("parking-no-car.png")

car = car_in_lot.crop(470,200,200,200)
win = car.show()
time.sleep(1)
win.quit()

yellow_car = car.colorDistance(Color.YELLOW)
win = yellow_car.show()
time.sleep(1)
win.quit()

only_car = car - yellow_car
win = only_car.show()
time.sleep(1)
win.quit()

print('resultados primera comparacion:')
R = only_car.meanColor()[2]
print('R: ' + str(R))
G = only_car.meanColor()[1]
print('G: ' + str(G))
B = only_car.meanColor()[0]
print('B: ' + str(B))
Example #23
File: session7.py  Project: oOo0oOo/Pytut
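This snippet is taken from the middle of session7.py, so the droplets image and img_size are already defined when it starts. A plausible stand-in for that setup, assumed rather than taken from the project:

# Hypothetical setup for the variables used below; the filename is a placeholder.
from SimpleCV import Image

droplets = Image('droplets.jpg')
img_size = droplets.size()    # (width, height)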
# time.sleep(5)


## Cropping the image into four horizontal pieces
## (each crop's x extent covers the whole image width)
x = 55
segment_height = img_size[1]/4
y_coords = [i * segment_height for i in range(4)]
# print segment_height, y_coords

avg_sizes = []

for y in y_coords:
	
	## Crop the image (x, y, width, height)
	channel = droplets.crop(x, y, img_size[0], segment_height)
	# channel.show()

	## Find droplets (blobs of color)
	blobs = channel.findBlobs()
	
	## If blobs found
	if blobs:
		## Keep only reasonably sized blobs
		blobs = [b for b in blobs if b.area() < 1000]

		## Get the coordinates of each blob if it is reasonably sized
		points = [b.coordinates() for b in blobs]

		## Draw the center point
		channel.drawPoints(points, color=(255, 0, 0), sz=3, width=-1)
Example #24
def fancify():
    if request.method == 'POST':
        print request.data
        cur_request = json.loads(request.data)
    else:
        #cur_request = """{"url": "", "debug":true}"""
        #cur_request = """{"url": "", "debug":true}"""
        #cur_request = """{"url": "", "debug":true}"""
        cur_request = """{"url": "http://localhost/images/scrubs.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.newrichstrategies.com/wp-content/uploads/2012/03/How-to-Find-Good-People-in-Your-Life.jpg", "debug":false}"""
        #cur_request = """{"url": "http://greenobles.com/data_images/frank-lampard/frank-lampard-02.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.billslater.com/barack__obama.jpg"}"""
        #cur_request = """{"url": "http://celebrityroast.com/wp-content/uploads/2011/01/arnold-schwarzenegger-body-building.jpg", "debug":false}"""
        #cur_request = """{"url": "http://face2face.si.edu/.a/6a00e550199efb8833010536a5483e970c-800wi", "debug":true}"""
        #cur_request = """{"url": "http://collider.com/uploads/imageGallery/Scrubs/scrubs_cast_image__medium_.jpg", "debug":false}"""
        #cur_request = """{"url": "http://localhost/images/Kevin_Bacon_at_the_2010_SAG_Awards.jpg", "debug":false}"""
        #cur_request = """{"url": "http://cdn02.cdn.justjared.com/wp-content/uploads/headlines/2012/02/anna-faris-oscars-red-carpet-2012.jpg", "debug":true}"""
        #cur_request = """{"url": "http://www.viewzone.com/attractive.female.jpg", "debug":true}"""
        cur_request = json.loads(cur_request)

    print cur_request["url"]
    img = Image(str(cur_request["url"]))
    img = img.scale(2.0)

    debug = True
    #if "debug" in cur_request:
    #    debug = cur_request["debug"]

    chosen_faces = []
    faces = img.findHaarFeatures(face_cascade)
    if faces is not None:
        for face in faces:
            face_features = []
            invalid_face = False
            face_rect = Rect(face.x - (face.width() / 2), face.y - (face.height() / 2), face.width(), face.height())
            for chosen_face in chosen_faces:
                if face_rect.colliderect(chosen_face):
                    invalid_face = True
                    break
            if invalid_face:
                break

            nose = None
            mouth = None
            left_eye = None
            right_eye = None
            cur_face = img.crop(face.x, face.y, face.width(), face.height(), centered=True)
            #cur_face = face.crop()

            noses = cur_face.findHaarFeatures(nose_cascade)
            mouths = cur_face.findHaarFeatures(mouth_cascade)
            eyes = cur_face.findHaarFeatures(eye_cascade)

            face_left_edge = face.x - (face.width() / 2)
            face_top_edge = face.y - (face.height() / 2)

            if noses is not None:
                nose = noses[0]
                nose_dist = (abs(nose.x - (face.width() / 2)) +
                             abs(nose.y - (face.height() * 5 / 9)) +
                             abs(nose.width() - (face.width() / 4)))
                for cur_nose in noses:
                    cur_dist = (abs(cur_nose.x - (face.width() / 2)) +
                                abs(cur_nose.y - (face.height() * 5 / 9)) +
                                abs(cur_nose.width() - (face.width() / 4)))
                    if cur_dist < nose_dist:
                        nose = cur_nose
                        nose_dist = cur_dist

            if nose and (nose.y < (face.height() / 3)):
                nose = None

            if nose and mouths is not None:
                mouth = mouths[0]
                mouth_dist = abs(mouth.x - nose.x) + (abs(mouth.y - (face.height() * 4 / 5)) * 2)

                for cur_mouth in mouths:
                    cur_dist = abs(cur_mouth.x - nose.x) + (abs(cur_mouth.y - (face.height() * 4/ 5)) * 2)
                    if (cur_dist < mouth_dist) and (cur_mouth.y > nose.y):
                        mouth = cur_mouth
                        mouth_dist = cur_dist

            if nose and eyes:
                right_eye = eyes[0]
                right_eye_dist = (abs(right_eye.x - (3 * face.width() / 4)) * 2 +
                                  abs(right_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                  abs(right_eye.width() - (face.width() / 3)))
                for cur_eye in eyes:
                    cur_right_dist = (abs(cur_eye.x - (3 * face.width() / 4)) +
                                      abs(cur_eye.y - (nose.y - (nose.height() / 2)) / 2) +
                                      abs(cur_eye.width() - (face.width() / 3)))

                    if (cur_right_dist <= right_eye_dist): # and (cur_eye.y < nose.y):
                        right_eye = cur_eye
                        right_eye_dist = cur_right_dist

            if nose and right_eye and (((right_eye.y - (right_eye.height() / 2)) > nose.y) or (right_eye.x < nose.x)):
                print "Culling right_eye"
                right_eye = None

            if nose and mouth:
                chosen_faces.append(face_rect)
                x_face = face.x - (face.width() / 2)
                y_face = face.y - (face.height() / 2)

                x_nose = nose.x - (nose.width() / 2)
                y_nose = nose.y - (nose.height() / 2)

                # Setup TopHat Image
                scale_factor = face.width() / 175.0
                cur_hat = hat.copy()
                cur_hat = cur_hat.scale(scale_factor)
                cur_hat_mask = hat_mask.copy()
                cur_hat_mask = cur_hat_mask.scale(scale_factor)
                cur_hat_mask = cur_hat_mask.createAlphaMask(hue_lb=0, hue_ub=100)

                # Calculate the hat position
                if (face.y - face.height() / 2) > cur_hat.height:
                    x_hat = face.x - (cur_hat.width / 2)
                    y_hat = face.y - (face.height() * 7 / 10) - (cur_hat.height / 2)
                    img = img.blit(cur_hat, pos=(x_hat, y_hat), alphaMask=cur_hat_mask)

                if mouth:
                    x_mouth = mouth.x - (mouth.width() / 2)
                    y_mouth = mouth.y - (mouth.height() / 2)
                    # Setup Mustache Image
                    cur_stache = stache.copy()
                    scale_factor = ((nose.width() / 300.0) + (face.width() / 600.0)) / 2.0
                    cur_stache = cur_stache.scale(scale_factor)
                    stache_mask = cur_stache.createAlphaMask(hue_lb=0, hue_ub=10).invert()

                    # Calculate the mustache position
                    bottom_of_nose = y_nose + (nose.height() * 4 / 5)
                    top_of_mouth = y_mouth
                    # if top_of_mouth > bottom_of_nose:
                    #    top_of_mouth = bottom_of_nose
                    y_must = y_face + ((bottom_of_nose + top_of_mouth) / 2) - (cur_stache.height / 2)

                    middle_of_nose = nose.x
                    middle_of_mouth = mouth.x
                    x_must = x_face + ((middle_of_nose + middle_of_mouth) / 2) - (cur_stache.width / 2)

                if right_eye:
                    x_right_eye = right_eye.x - (right_eye.width() / 2)
                    y_right_eye = right_eye.y - (right_eye.height() / 2)

                    # Setup Monocle Image
                    cur_mono = monocle.copy()
                    scale_factor = ((right_eye.width() / 65.0) + (face.width() / 200.0)) / 2.0
                    cur_mono = cur_mono.scale(scale_factor)
                    mono_mask = cur_mono.createAlphaMask(hue_lb=0, hue_ub=100).invert()

                    # Calculate Monocle Position
                    x_mono = x_face + x_right_eye
                    y_mono = y_face + y_right_eye
                    img = img.blit(cur_mono, pos=(x_mono, y_mono), alphaMask=mono_mask)

                img = img.blit(cur_stache, pos=(x_must, y_must), alphaMask=stache_mask)

                if debug:
                    noselayer = DrawingLayer((img.width, img.height))
                    nosebox_dimensions = (nose.width(), nose.height())
                    center_point = (face.x - (face.width() / 2) + nose.x,
                                    face.y - (face.height() / 2) + nose.y)
                    nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions, width=3)
                    img.addDrawingLayer(noselayer)
                    img = img.applyLayers()

            else:
                print "Face culled:"
                if not nose:
                    print "  No Nose"
                if not mouth:
                    print "  No mouth"
                if not right_eye:
                    print "  No right eye"
                    print

            if debug:
                face_left_edge = face.x - (face.width() / 2)
                face_top_edge = face.y - (face.height() / 2)

                facelayer = DrawingLayer((img.width, img.height))
                facebox_dimensions = (face.width(), face.height())
                center_point = (face.x, face.y)
                facebox = facelayer.centeredRectangle(center_point, facebox_dimensions, Color.BLUE)
                img.addDrawingLayer(facelayer)

                if noses:
                    for nose in noses:
                        noselayer = DrawingLayer((img.width, img.height))
                        nosebox_dimensions = (nose.width(), nose.height())
                        center_point = (face.x - (face.width() / 2) + nose.x,
                                    face.y - (face.height() / 2) + nose.y)
                        nosebox = noselayer.centeredRectangle(center_point, nosebox_dimensions)
                        img.addDrawingLayer(noselayer)

                if mouths:
                    for mouth in mouths:
                        mouthlayer = DrawingLayer((img.width, img.height))
                        mouthbox_dimensions = (mouth.width(), mouth.height())
                        center_point = (face.x - (face.width() / 2) + mouth.x,
                                face.y - (face.height() / 2) + mouth.y)
                        mouthbox = mouthlayer.centeredRectangle(center_point, mouthbox_dimensions, Color.GREEN)
                        img.addDrawingLayer(mouthlayer)

                if eyes:
                    for right_eye in eyes:
                        right_eyelayer = DrawingLayer((img.width, img.height))
                        right_eyebox_dimensions = (right_eye.width(), right_eye.height())
                        right_eye_center_point = (face_left_edge + right_eye.x, face_top_edge + right_eye.y)
                        right_eyebox = right_eyelayer.centeredRectangle(right_eye_center_point, right_eyebox_dimensions)
                        img.addDrawingLayer(right_eyelayer)

                img = img.applyLayers()

    img = img.scale(0.5)
    w_ratio = img.width / 800.0
    h_ratio = img.height / 600.0

    if h_ratio > 1.0 or w_ratio > 1.0:
        if h_ratio > w_ratio:
            img = img.resize(h=600)
        else:
            img = img.resize(w=800)

    output = StringIO.StringIO()
    img.getPIL().save(output, format="JPEG") #, quality=85, optimize=True)
    img_contents = output.getvalue()

    mimetype = "image/jpeg"
    return app.response_class(img_contents, mimetype=mimetype, direct_passthrough=False)
Example #25
from __future__ import print_function
from SimpleCV import Image, Color, VirtualCamera, Display
import SimpleCV as scv

# # video = VirtualCamera('', 'video')
# display = Display()

# while display.isNotDone():
# 	img = video.getImage()
# 	try:
# 		dist = img - img.colorDistance(Color.RED)
# 		dist.show()
# 	except KeyboardInterrupt:
# 		display.done = True
# 	if display.mouseRight:
# 		display.done = True
# display.quit()

img = Image('img/curb.JPG')
img = img.crop(0, 2*img.height/3, img.width, 5*img.height/6)
print(img.meanColor())
# img = img.binarize()
# img.findBlobs().draw()
# ls = img.findLines()
# for l in ls:
	# l.draw()
# img.findBlobs().draw()
# img = img.colorDistance(Color.RED)

img.save('img/curb_processed.jpg')
Example #26
from SimpleCV import Image, Color, Display
import time

car_in_lot = Image("parking-car.png")

car = car_in_lot.crop(470, 200, 200, 200)

yellow_car = car.colorDistance(Color.YELLOW)

only_car = car - yellow_car
only_car = only_car.toRGB()

displayObject = Display()

print only_car.meanColor()

# Show the results
only_car.save(displayObject)

while displayObject.isNotDone():
    time.sleep(0.5)
Example #27
from SimpleCV import Image,Color
car_in_lot = Image("parking-car.png")
car_not_in_lot = Image("parking-no-car.png")
car = car_in_lot.crop(470,200,200,200)
yellow_car = car.colorDistance(Color.YELLOW)

yellow_car.show()
Example #28
# Assumed setup for this snippet: SimpleCV and numpy imports; rubikColor is a
# dict mapping standard Rubik's Cube color names to RGB tuples, defined
# elsewhere in the original project.
from SimpleCV import Image
import numpy as np

img = Image("SquareCube1.jpg") # Obtain image with cube within the red square
test = {}			# Dictionary containing difference images of a particular sticker and standard colors 
pixValues = []		# List containing the pixel values from (20,50) to (80,50) of each image present in test
averagesDict = {}	# Dictionary containing the average of pixValues corresponding to each color
averagesList = []	# List containing all the average of pixValues corresponding to each color
stickerColors = [[],[],[]] # Contains first letter of color of all the stickers in a picture

# Coordinates corresponding to the 'origin' of the cube
squareCornerX = 170	
squareCornerY = 90

for i in range(0,3):
	for j in range(0,3):
		croppedImg = img.crop(squareCornerX + j*100, squareCornerY + i*100, 100, 100) # Obtain just the sticker
		
		# Find the difference image of the sticker and all the standard colors and store it in test 
		for color in rubikColor:
			test[color] = croppedImg.colorDistance(color = rubikColor[color])
		
			# Obtain values of a line in the middle of the difference image
			# From observation, we see best results with pixels from (20,50) to (80,50)
			for j1 in range(20,81):
				pixValues.append(test[color].getPixel(j1,50)[0])
		
			average = np.average(pixValues)	# Find average of pixel values
			averagesList.append(average)	# And add the value to averagesList
			averagesDict[color] = average   # And also add the value to averagesDict
			pixValues = []					# Clear pixValues
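The snippet ends before the per-sticker decision. A minimal sketch of the step that would plausibly follow inside the same loop, assuming the closest standard color is the one with the lowest average distance in averagesDict:

		# Hypothetical continuation: the color whose difference image was darkest
		# on average is taken as this sticker's color, and its first letter is
		# recorded, matching the stickerColors comment above.
		bestColor = min(averagesDict, key=averagesDict.get)
		stickerColors[i].append(bestColor[0])
		averagesDict = {}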
		
Example #29
from SimpleCV import Image

img = Image('ex16.png')

# Crop starting at (50, 5) for an area 400 pixels wide by 400 pixels tall
cropImg = img.crop(50, 5, 400, 400)
cropImg.show()
Example #30
File: ogplab.py  Project: opengimbal/ogp
    def histo(self): ## this def is the "light meter" part---
        cam_mode = self.cam_mode ## the pic gets cataloged if true ---
        js = self.js
        w = self.w
        cent = 0
        rgb1 = 0
        c2 = self.c2
        wsh = self.wsh
        wsh2 = self.wsh2
        i=0
        brightpixels=0
        darkpixels=0
        blobs = 0

        if cam_mode == 3: ## sort out the confusing cam modes
            img1 = c2.getImage()
            time.sleep(.25)

        if cam_mode==1:
            with picamera.PiCamera() as camera:
                camera.resolution = (544, 288)
                camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')

        if cam_mode==2:
            with picamera.PiCamera() as camera:
                camera.resolution = (544, 288)
                camera.capture('imagesmall.jpg')
            img1 = Image('imagesmall.jpg')

        blobs = img1.findBlobs()
        time.sleep(0.5)

        if blobs:
##find the blob centroid and cut it out 20x20
            crop1 = blobs[-1].x      
            crop2 = blobs[-1].y
            crop3 = crop1 - 10
            crop4 = crop2 - 10
            thumbnail = img1.crop(crop3,crop4,20,20)
            img2 = thumbnail
            hist = img2.histogram(20)
            ## split the thumb into 20 levels of darkness
            brightpixels = hist[10]
            ## 10 is where the darkest of the light pixels accumulate
            print brightpixels

##while i < 20:
##old code for if you want to split the histogram in two
##if (i < 10):
##darkpixels = darkpixels + hist[i]
##self.darkpixels = darkpixels
##print hist[i]
##else:
##brightpixels = brightpixels + hist[i]
##self.brightpixels = brightpixels
##print hist[i]
##i = i + 1

            if (brightpixels<400):   ## here's where it decides whether to catalog the pic or not
                wsh.write_message(wsh2, "histo_" + str(darkpixels) + "_" + str(brightpixels))
                print "blob"
                x = self.x
                y = self.y
                p = self.p
                p = p + 1
                thumb1 = "/var/www/html/images/thumbs/thumb"
                thumb3 = ".png"
                thumbpath = thumb1 + str(p) + thumb3
                print thumbpath
                thumbnail.save(thumbpath)
                img1.drawText("blob = True", 10, 35, color=(255,255,255),fontsize=30)
                img1.drawText("search_mode", 10, 5, color=(0,0,255),fontsize=40)
                img1.drawText("blob centroid", blobs[-1].x,blobs[-1].y, color=(255,255,255),fontsize=20)
                img1.drawCircle((blobs[-1].x,blobs[-1].y),30,color=(255,255,0))
                img1.drawCircle((blobs[0].centroid()),10,color=(255,255,255))
                print blobs[-1].meanColor()
                rgb1 = blobs[-1].meanColor()
                cent = blobs[-1].centroid()

                pth1 = "/var/www/html/images/image"
                pth3 = ".png"
                pth = pth1 + str(p) + pth3
                print pth
                img1.save(pth)
                time.sleep(0.5)

                self.p = p

                mySet.add((p,x,y,w,cent,rgb1))
                self.mySet = mySet

                wshx = str(self.x)
                wshy = str(self.y)

                centroidx = int(cent[0])
                centroidy=int(cent[1])
                rcolor=rgb1[0]
                gcolor=rgb1[1]
                bcolor=rgb1[2]
                rcolor=int(rcolor)
                gcolor=int(gcolor)
                bcolor=int(bcolor)
                wsh.write_message(wsh2, "rgb_" + str(rcolor)+"_"+str(gcolor)+"_"+str(bcolor))
                wsh.write_message(wsh2, "x_" + str(centroidx)+"_"+str(centroidy))
                img1.save(js.framebuffer)
                time.sleep(0.5)

                wsh.write_message(wsh2, "d_" + wshx + "_" + wshy + "_" + str(p) )

            else:
                print "senosor dark"
                print darkpixels,'_', brightpixels
                blobs = 0
                wsh.write_message(wsh2, "histo_" + str(darkpixels) + "_" + str(brightpixels))

        else:
            wshx = str(self.x)
            wshy = str(self.y)
            wsh.write_message(wsh2, wshx + " " + wshy + "dark")
            img1.save(js.framebuffer)

            print "no blob"