Example #1
def desaturate(image,method=0):
    '''Method:
        0: Average
        1: Luminosity (from gimp)
        2: Lightness (from gimp)'''
    mock = image.copy()
    imap = mock.unmap_rgb
    pxarray = PixelArray(mock)
    
    w,h = mock.get_size()
    for y in range(h):
        for x in range(w):        
            r,g,b,a = imap(pxarray[x,y])
            if method == 0:
                p = (r+g+b)//3         
            elif method == 1:
                p = int(0.21 * r + 0.72 * g + 0.07 * b)
            elif method == 2:
                p = int(0.5 + (max(r, g, b) + min(r, g, b)) / 2)  # lightness = (max + min) / 2
            
            pxarray[x,y] = p,p,p,a

    render = pxarray.surface
    del pxarray
    return render
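A minimal usage sketch (it assumes the snippet's module does `from pygame import PixelArray`; "photo.png" is a placeholder file name):

import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
photo = pygame.image.load("photo.png").convert_alpha()   # placeholder path
gray = desaturate(photo, method=1)                       # luminosity weighting
screen.blit(gray, (0, 0))
pygame.display.flip()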
Example #2
def transformSurface(image):
    '''
    Apply transform to make query image
    '''
    screenCnt = None
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    # show the original image and the edge detected image
    #print "STEP 1: Edge Detection"
    # cv2.imshow("Image", image)
    #cv2.imwrite("imgs/edged.jpg", edged)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # find the contours in the edged image, keeping only the
    # largest ones, and initialize the screen contour
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                 cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if our approximated contour has four points, then we
        # can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            print(screenCnt)
            break

    # show the contour (outline) of the piece of paper
    #print "STEP 2: Find contours of paper"
    # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    # cv2.imwrite("imgs/outline.jpg", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    if screenCnt is not None:
        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # show the original and scanned images
        #print "STEP 3: Apply perspective transform"
        # cv2.imshow("Original", imutils.resize(orig, height = 650))
        # cv2.imshow("Scanned", imutils.resize(warped, height = 650))
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        cv2.imwrite("imgs/query.jpg", warped)
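A minimal call sketch (it assumes the snippet's module imports cv2, imutils and four_point_transform from imutils.perspective; "imgs/receipt.jpg" is a placeholder path; note that the two-value findContours unpacking above matches the OpenCV 2.4/4.x API):

import cv2

doc = cv2.imread("imgs/receipt.jpg")      # placeholder input path
if doc is not None:
    # writes imgs/query.jpg when a four-point contour is found
    transformSurface(doc)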
Example #3
def vscroll(scrollval, image):
    offs = image.get_height()
    newimage = image.copy()
    newimage.fill((0, 0, 0, 0))
    newimage.blit(image, (0, scrollval))
    if (str(scrollval))[0] == "-":
        newimage.blit(image, (0, (scrollval + offs)))
    else:
        newimage.blit(image, (0, (scrollval - offs)))
    return newimage
Example #4
def hscroll(scrollval, image):
    offs = image.get_width()
    newimage = image.copy()
    newimage.fill((0, 0, 0, 0))
    newimage.blit(image, (scrollval, 0))
    if (str(scrollval))[0] == "-":
        newimage.blit(image, ((scrollval + offs), 0))
    else:
        newimage.blit(image, ((scrollval - offs), 0))
    return newimage
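A small driving-loop sketch for the two wrap-around scrollers above (it assumes a pygame Surface with per-pixel alpha; "background.png" is a placeholder path):

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()
background = pygame.image.load("background.png").convert_alpha()   # placeholder path

offset = 0
for _ in range(300):                                  # roughly 5 seconds at 60 fps
    offset = (offset + 2) % background.get_height()   # scroll speed: 2 px per frame
    screen.blit(vscroll(offset, background), (0, 0))  # hscroll is driven the same way
    pygame.display.flip()
    clock.tick(60)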
Example #5
def colorize(image,color):
    mock = image.copy()
    unmap = mock.unmap_rgb
    pxarray = PixelArray(mock)
    if type(color) is list:
        color = Color(*color)
    hi = color.hsva[0]/360  # target hue, normalised to the 0-1 range
    
    w,h = mock.get_size()
    for y in range(h):
        for x in range(w):
            s,v,alpha = Color(*unmap(pxarray[x,y])).hsva[1:]
            
            # hsv2rgb is an external helper expected to return [r, g, b] ints (0-255)
            r,g,b,a = hsv2rgb(hi,s/100,v/100)+[int(alpha)]

            pxarray[x,y] = Color(r,g,b,a)

    render = pxarray.surface
    del pxarray
    return render
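A short usage sketch (it assumes the snippet's module imports PixelArray and Color from pygame and defines the hsv2rgb helper; "sprite.png" is a placeholder):

import pygame
from pygame import Color

pygame.init()
screen = pygame.display.set_mode((320, 240))
sprite = pygame.image.load("sprite.png").convert_alpha()   # placeholder path

red_tint = colorize(sprite, Color(255, 0, 0))   # a [r, g, b] list is accepted too
screen.blit(red_tint, (0, 0))
pygame.display.flip()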
Example #6
def detect_eye(
        image,
        cords):  # draws the eye outlines on the whole image for every detected face

    (x, y, w, h) = cords

    face = image.copy()
    # whiten everything except a horizontal band across the eye region
    face[y:y + int(w / 3.3), x:x + h] = (255, 255, 255)
    face[y + int(w / 2.1):y + w, x:x + h] = (255, 255, 255)
    face[y:y + w, x:x + int(h / 5)] = (255, 255, 255)
    face[y:y + w, x + (h - int(h / 5)):x + h] = (255, 255, 255)

    imgray = cv2.cvtColor(face[y:y + w, x:x + h], cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    # OpenCV 3.x API: findContours returns (image, contours, hierarchy);
    # on OpenCV 4.x drop the first return value
    image2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

    image[y:y + w, x:x + h] = cv2.drawContours(image[y:y + w,
                                                     x:x + h], contours, -1,
                                               (100, 255, 100), 1)
    return image
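A possible way to feed detect_eye, assuming the face boxes come from a Haar cascade (the file names are placeholders; as written the function expects the three-value findContours of OpenCV 3.x):

import cv2

img = cv2.imread("people.jpg")                       # placeholder path
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for (x, y, w, h) in cascade.detectMultiScale(gray, 1.3, 5):
    img = detect_eye(img, (x, y, w, h))

cv2.imwrite("eyes_out.jpg", img)                     # placeholder output path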
Example #7
last_pred = 0

while True:
	img = cam.get_image()
	pygame.image.save(img, path)
	img = io.imread(path)


# construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required = True, help = "Path to the image")
#args = vars(ap.parse_args())

# load the image, clone it for output, and then convert it to grayscale
	image = cv2.imread("test.bmp")
	output_sv = image.copy()
	output = image.copy()
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR order


	# detect circles in the image
	circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 75)
	r_max = 0
	x_max = 0
	y_max = 0

# ensure at least some circles were found
	if circles is not None:
		# convert the (x, y) coordinates and radius of the circles to integers
		circles = np.round(circles[0, :]).astype("int")
		print(circles)
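The example is cut off at this point. A typical continuation for this Hough-circle step (a sketch, not code from the original) draws each detected circle on the output copy and keeps track of the largest radius:

for (x, y, r) in circles:
    cv2.circle(output, (x, y), r, (0, 255, 0), 2)    # circle outline
    cv2.circle(output, (x, y), 2, (0, 0, 255), 3)    # centre marker
    if r > r_max:
        r_max, x_max, y_max = r, x, y
cv2.imwrite("circles_out.bmp", output)               # placeholder output path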