Esempio n. 1
0
def clarity_contrast(img, beta=0.2):
	"""Compute the clarity-contrast feature of an image.

	Compares the spectral energy density of the region of interest (ROI)
	against that of the whole image: energy above ``beta`` times the
	spectrum maximum is summed for each, normalized by area, and the
	ratio ROI/image is returned.

	Parameters:
		img: input image (legacy OpenCV image/array).
		beta: fraction of the spectrum maximum below which coefficients
			are ignored (default 0.2).

	Returns:
		dict with keys "name" and "value"; re-raises on any failure.
	"""
	try:
		img2, out_dict = region_of_interest(img)
		# NOTE(review): GetSubRect/GetImageROI/cvDFT are legacy cv-API
		# names — presumably this ran against an old OpenCV build.
		roi = cv2.GetSubRect(img, GetImageROI(img2))
		fourier_img = cv2.cvDFT(img)
		fourier_roi = cv2.cvDFT(roi)
		img_area = len(img) * len(img[0])
		(x, y, roi_width, roi_height) = GetImageROI(img2)
		roi_area = roi_width * roi_height
		img_max = max(fourier_img)
		roi_max = max(fourier_roi)
		img_set = 0.0
		roi_set = 0.0
		for item in list(fourier_img):
			if item >= beta * img_max:
				img_set += item * item  # fixed: was `image_set`, a NameError
		for item in list(fourier_roi):
			if item >= beta * roi_max:
				roi_set += item * item
		m_i = np.sqrt(img_set)  # fixed: was `image_set`, a NameError
		m_r = np.sqrt(roi_set)
		clarity_feature = (m_r / roi_area) / (m_i / img_area)
		out_dict = {"name": "clarity_contrast", "value": clarity_feature}
	except:
		out_dict = {}
		raise
	return out_dict
Esempio n. 2
0
def scanner_procces(frame, set_zbar):
    """Scan one video frame for barcodes with zbar and display it.

    Crops a centered sub-region of ``frame`` (100% width, 90% height),
    draws a blue rectangle around it, converts the crop to 8-bit
    grayscale, feeds it to the zbar scanner, prints any decoded symbols,
    and shows the annotated frame in the "webcame" window.

    Parameters:
        frame: legacy OpenCV (cv) image from the capture device.
        set_zbar: a zbar ImageScanner instance used to decode symbols.
    """
    # Fractions of the frame to keep in the scan region.
    set_width = 100.0 / 100
    set_height = 90.0 / 100

    # Top-left corner so the scan region stays centered in the frame.
    coord_x = int(frame.width * (1 - set_width) / 2)
    coord_y = int(frame.height * (1 - set_height) / 2)
    width = int(frame.width * set_width)
    height = int(frame.height * set_height)

    # Crop one pixel inside the drawn rectangle so the border isn't scanned.
    get_sub = cv.GetSubRect(frame,
                            (coord_x + 1, coord_y + 1, width - 1, height - 1))

    # Blue rectangle marks the scan region on the displayed frame.
    cv.Rectangle(frame, (coord_x, coord_y),
                 (coord_x + width, coord_y + height), (255, 0, 0))

    # zbar wants single-channel 8-bit ("Y800") raw bytes.
    cm_im = cv.CreateImage((get_sub.width, get_sub.height), cv.IPL_DEPTH_8U, 1)
    cv.ConvertImage(get_sub, cm_im)
    image = zbar.Image(cm_im.width, cm_im.height, 'Y800', cm_im.tostring())

    set_zbar.scan(image)
    # Print every decoded symbol in green (ANSI escape codes).
    for symbol in image:
        print '\033[1;32mResult : %s symbol "%s" \033[1;m' % (symbol.type,
                                                              symbol.data)

    cv.ShowImage("webcame", frame)
    #cv.ShowImage("webcame2", get_sub)
    cv.WaitKey(10)
Esempio n. 3
0
    def crop_face(self, image, coordinates, image_filename):
        """ Crops all faces from a list of images and coordinates
        Returns a list with all faces"""

        logging.debug(
            'Start method "crop_face" for file %s (face-detector.py)' %
            image_filename)

        cropped_faces = []  # ROI-defined crops, one per detected face
        for idx, entry in enumerate(coordinates):
            # First element of each coordinate entry is the face rectangle.
            face = cv.GetSubRect(image, entry[0])

            # Downsize any face that exceeds the configured maximum size.
            too_tall = face.height > parameter.max_facesize[0]
            too_wide = face.width > parameter.max_facesize[1]
            if too_tall or too_wide:
                face, downsize_factor = tools.downsize_image(face)
                logging.debug(
                    'Face in image %s has been downsized with factor %d (face-detector.py)'
                    % (image_filename, downsize_factor))

            cropped_faces.append(face)

        logging.debug('%d faces successfully cropped (face-detector.py)',
                      len(cropped_faces))
        return cropped_faces  # faces are defined with ROI
Esempio n. 4
0
def lighting_quality(img):
	"""Compute a lighting-quality measure for an image.

	Converts both the full ROI image and the ROI sub-rect to LAB color
	space, subtracts the ROI from the image to get the background, and
	returns the absolute log-ratio of the mean L channel of ROI vs
	background.

	Parameters:
		img: input image (BGR, legacy OpenCV image/array).

	Returns:
		dict with keys "name" and "value"; re-raises on any failure.
	"""
	try:
		img2, out_dict = region_of_interest(img)
		# NOTE(review): GetSubRect/GetImageROI/Sub are legacy cv-API names.
		img3 = cv2.GetSubRect(img, GetImageROI(img2))
		img4 = cv2.cvtColor(img2, cv2.COLOR_BGR2LAB)
		roi = cv2.cvtColor(img3, cv2.COLOR_BGR2LAB)
		background = cv2.Sub(img2, roi)  # There may be an issue with the arrays not being of the same size.
		# Mean over channel 0 (L in LAB); zeros from the subtraction are
		# still counted, which may bias the background mean.
		b_background = np.mean(background[:, :, 0])
		b_roi = np.mean(roi[:, :, 0])  # fixed: was `ROI`, a NameError
		lighting_measure = abs(np.log(b_roi / b_background))
		out_dict = {"name": "lighting_quality", "value": lighting_measure}
	except:
		out_dict = {}
		raise
	return out_dict
Esempio n. 5
0
def simplicity(img, gamma=0.1):
	"""Compute a color-simplicity measure of the image background.

	Subtracts the region of interest from the image, histograms the
	remaining background into 4096 bins, sums the squares of all bins
	holding at least ``gamma`` times the largest bin, and returns the
	square root scaled by 1/4.096.

	Parameters:
		img: input image (legacy OpenCV image/array).
		gamma: fraction of the largest histogram bin below which bins
			are ignored (default 0.1).

	Returns:
		dict with keys "name" and "value"; re-raises on any failure.
	"""
	try:
		img2, out_dict = region_of_interest(img)
		# NOTE(review): GetSubRect/GetImageROI/Sub are legacy cv-API names.
		img3 = cv2.GetSubRect(img, cv2.GetImageROI(img2))
		# fixed: was `ROI`, a NameError — the ROI sub-rect is `img3`.
		background = cv2.Sub(img2, img3)  # There may be an issue with the arrays not being of the same size.
		# Quantize the background into 4096 color bins.
		hist, _binedges = np.histogram(background, bins=4096)
		max_bound = max(list(hist))
		color_div = 0.0
		for item in list(hist):
			if item >= gamma * max_bound:
				color_div += item * item
		simplicity_measure = np.sqrt(color_div) / 4.096
		out_dict = {"name": "simplicity", "value": simplicity_measure}
	except:
		out_dict = {}
		raise
	return out_dict
    def getSegments(self, image):
        """
        segmentize the region of interest into segments, each containing a mean value
        """

        left, top, width, height = self.regionOfInterest

        # Allocate a scratch image matching the ROI size and the source's
        # depth/channel layout, then copy the ROI distances into it.
        regionDistances = cv.CreateImage(
            (width, height), image.depth, image.nChannels)
        roi_view = cv.GetSubRect(image, (left, top, width, height))
        cv.Copy(roi_view, regionDistances)

        # Delegate the actual segmentation of the copied distances.
        return self.segmentize(regionDistances)
Esempio n. 7
0
import cv2
import sys
# Detect the largest face in an image and save it as a cropped file.
#
# Rewritten for the modern cv2 API: the original called legacy cv-only
# names (CreateMemStorage, Load, HaarDetectObjects, GetSubRect, SaveImage)
# through the cv2 module, which raises AttributeError at runtime.
image_path = "yusei.jpg"
img = cv2.imread(image_path)

# CascadeClassifier.detectMultiScale supersedes Load + HaarDetectObjects;
# the same scale factor (1.1) and neighbor count (3) are kept.
cascade = cv2.CascadeClassifier(
    "../data/haarcascades/haarcascade_frontalface_default.xml")
faces = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3)

# Track the largest detection by area (renamed from `max`, which
# shadowed the builtin).
best_area = 0
best_rect = None
for (x, y, w, h) in faces:
    if w * h > best_area:
        best_area = w * h
        best_rect = (x, y, w, h)

# NumPy slicing replaces GetSubRect; imwrite replaces SaveImage.
# Guard against no detections, which previously produced an empty crop.
if best_rect is not None:
    x, y, w, h = best_rect
    cv2.imwrite("face_" + sys.argv[1], img[y:y + h, x:x + w])