Code Example #1
def createClare(channelB, channelG, channelR):

    # create a CLAHE object (Contrast Limited Adaptive Histogram Equalization)

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cl1 = clahe.apply(channelB)
    cv2.imwrite('bluechannel.jpg', cl1)

    cl2 = clahe.apply(channelG)
    cv2.imwrite('greenchannel.jpg', cl2)

    cl3 = clahe.apply(channelR)
    cv2.imwrite('redChannel.jpg', cl3)

    result = cv2.merge((cl1, cl2, cl3))

    dirname = "afterEqualization"
    os.mkdir(dirname)


    cv2.imwrite(os.path.join(dirname, "image after equalization.jpg"), result)

    return result
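Equalizing the B, G and R channels independently, as above, can shift colors. A common alternative is to equalize only the lightness channel of the Lab representation; a minimal sketch (not from the original project; the file names are placeholders):

import cv2

def clahe_lab(bgr_img, clip_limit=2.0, grid=(8, 8)):
    # equalize only the L (lightness) plane so hue and saturation stay untouched
    lab = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid)
    l = clahe.apply(l)
    return cv2.cvtColor(cv2.merge((l, a, b)), cv2.COLOR_LAB2BGR)

result = clahe_lab(cv2.imread('input.jpg'))  # 'input.jpg' is a placeholder
cv2.imwrite('lab_clahe.jpg', result)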
Code Example #2
File: preprocess.py Project: nPellejero/deepNet
def detect(img, cascade, cascade1, cascade_eyes):
  clahe = cv2.createCLAHE(clipLimit=5, tileGridSize=(2,2))
  img = clahe.apply(img)
  clahe = cv2.createCLAHE(clipLimit=5, tileGridSize=(2,2))
  img = clahe.apply(img)
  best_diff = 1000
  imgF = None
  for scale in [float(i)/10 for i in range(11, 15)]:
     face = cascade.detectMultiScale(img, scaleFactor=scale, minNeighbors=6, minSize=(30, 30))
     if len(face) != 1:
      continue
     x,y,w,h = face[0]
     m = max(w,h)
     y1 = max(y-m/3, 0)
     y2 = min(y + m + m/3, len(img))
     x1 = max(x-m/3, 0)
     x2 = min(x + m + m/3, len(img[0]))
     image = img[y1:y2, x1:x2]
     eyes = cascade_eyes.detectMultiScale(image, minNeighbors=3, scaleFactor=scale)
     if len(eyes) != 2:
      continue
     my_eye_right = max(eyes, key=lambda x:x[0])
     my_eye_left  = min(eyes, key=lambda x:x[0])
     dim_r = my_eye_right[2:]
     dim_l = my_eye_left[2:]
     cel = (my_eye_left[0]+dim_l[0]/2,my_eye_left[1]+dim_l[1]/2)
     cer = (my_eye_right[0]+dim_r[0]/2, my_eye_right[1]+dim_r[1]/2)
     #cv2.rectangle(image,cer,(cer[0]+5,cer[1]+5),3)
     #cv2.rectangle(image,cel,(cel[0]+5,cel[1]+5),3)
     prom = (cer[0] + cel[0])/2.0
     width = x2-x1
     height = y2-y1
     w1 = width*(1.0/3.0)
     w2 = width*(2.0/3.0)
     diff = abs(cer[0]-w2) + abs(cel[0]-w1)
     if diff/float(width) > 0.2 or cel[1] > height/2 or cer[1] > height/2:
      continue
     print diff, width, w1, w2, abs(cel[0]-w1), abs(cer[0]-w2)
     if best_diff > diff:
      best_diff = diff
      cerF = cer
      celF = cel
      imgF = image
  if imgF is None:
   return []
  im = Image.fromarray(imgF)
  im = CropFace(im, eye_left=celF, eye_right=cerF, offset_pct=(0.2,0.3), dest_sz=(128,128))
  image = np.array(im)
  return [image]
Code Example #3
File: .py Project: ecerrillo/PyDRA
def inicio(path):
    datalist=[('Component',[0,'1st','2nd','3rd']),]
    met=fedit(datalist, title="Select Method")
    if met==[0]:
        name='comp1.tif'
    if met==[1]:
        name='comp2.tif'
    if met==[2]:
        name='comp3.tif'
    image=np.array(Image.open(path+'/'+name))
    clahe(image)
    
    datalist=[('Clip Limit',[0,'None','2','4','6','8','10','12']),]
    met=fedit(datalist, title="Select Clip Limit")
    cl=2.0  # default, so cl is defined even when 'None' is selected
    if met==[1]: cl=2.0
    if met==[2]: cl=4.0
    if met==[3]: cl=6.0
    if met==[4]: cl=8.0
    if met==[5]: cl=10.0
    if met==[6]: cl=12.0
    
    image=cv2.createCLAHE(clipLimit=cl, tileGridSize=(4,4)).apply(image.astype(np.uint8))
    plt.close()
    
    maps=[m for m in plt.cm.datad if not m.endswith("_r")]
    maps=[0]+maps
    maps.sort()
    print maps
    
    """colormap(image)
Code Example #4
File: AC3D.py Project: UASLab/ImageAnalysis
def make_textures_opencv(src_dir, project_dir, image_list, resolution=256):
    dst_dir = project_dir + '/Textures/'
    if not os.path.exists(dst_dir):
        print "Notice: creating texture directory =", dst_dir
        os.makedirs(dst_dir)
    for image in image_list:
        src = src_dir + "/" + image.name
        dst = dst_dir + image.name
        if not os.path.exists(dst):
            src = cv2.imread(src)
            height, width = src.shape[:2]
            # downscale image first
            method = cv2.INTER_AREA
            scale = cv2.resize(src, (0,0),
                               fx=resolution/float(width),
                               fy=resolution/float(height),
                               interpolation=method)
            # convert to hsv color space
            hsv = cv2.cvtColor(scale, cv2.COLOR_BGR2HSV)
            hue,sat,val = cv2.split(hsv)
            # adaptive histogram equalization on 'value' channel
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
            aeq = clahe.apply(val)
            # recombine
            hsv = cv2.merge((hue,sat,aeq))
            # convert back to bgr
            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            cv2.imwrite(dst, result)
            print "Texture %dx%d %s" % (resolution, resolution, dst)
Code Example #5
def CLAHE():
    img = cv2.imread('test.jpg',0)
    # create a CLAHE object (Arguments are optional).
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    cl1 = clahe.apply(img)

    cv2.imwrite('clahe_2.jpg',cl1)
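To get a feel for the two arguments, a small parameter sweep around the same call can help (a sketch under the same 'test.jpg' assumption; the output names are made up):

import cv2

img = cv2.imread('test.jpg', 0)
for clip in (1.0, 2.0, 4.0):
    for tiles in (4, 8, 16):
        # higher clipLimit -> stronger local contrast; more tiles -> smaller local regions
        clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(tiles, tiles))
        cv2.imwrite('clahe_c%.0f_t%d.jpg' % (clip, tiles), clahe.apply(img))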
Code Example #6
File: find_signs_8020.py Project: uf-mil/PropaGator
def adaptive_histogram_eq(img):
    b,g,r = cv2.split(img)
    clahe = cv2.createCLAHE(clipLimit=.1, tileGridSize=(8,8))
    b = clahe.apply(b)
    g = clahe.apply(g)
    r = clahe.apply(r)
    return cv2.merge([b,g,r])
Code Example #7
def find_hottest_points(cv_image):
  
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3,3))
  #gray = clahe.apply(img)
  gray = clahe.apply(cv_image)
  gray = cv2.GaussianBlur (gray, (21,21), 0)

  min_thresh = cv2.threshold(gray, min_th, 255, cv2.THRESH_BINARY)[1]
  max_thresh = cv2.threshold(gray, max_th, 255, cv2.THRESH_BINARY_INV)[1]

  thresh = cv2.bitwise_and(min_thresh, max_thresh)

  thresh = cv2.dilate(thresh, None, iterations = 2)
  (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)

  for c in cnts:
    if cv2.contourArea(c) > min_area and cv2.contourArea(c) < max_area:
      
      (x,y,w,h) = cv2.boundingRect(c)
#      cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
      cv2.rectangle(cv_image, (x, y), (x+w, y+h), 0, 2)
      continue


  cv2.imshow("region_detector", cv_image)
  cv2.moveWindow("region_detector",900,0)
  cv2.imshow("band_threshold_image", thresh)
  cv2.moveWindow("band_threshold_image",900,400)
  cv2.waitKey(1)
Code Example #8
File: Scope_Script.py Project: alexwal/old_research
	def adjust_zstack(self):
		#Removes bckgd fluor from ea. img.
		#trying hist eq

		self.zsum()
		zsum = self.My_Zsum
		zstk = np.copy(self.My_Zstack)

		####new:####
		for frame in range(zstk.shape[2]):
			clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
			cl1 = clahe.apply(zstk[:,:,frame].astype('uint8'))
			self.My_Zstack[:,:,frame] = cl1
		###end new###

		bckgd_fluors=[]
		for frame in range(zstk.shape[2]):
			if self.weights is None:
				self.bckgd_weights(np.copy(zstk[:,:,frame]), zsum, preview=False)
			fluor=np.average(zstk[:,:,frame], weights=self.weights)
			bckgd_fluors.append(fluor)
		bckgd_fluors=np.array(bckgd_fluors)

		for frame in range(zstk.shape[2]):
			sbd = zstk[:,:,frame]-bckgd_fluors[frame]*np.ones((zstk[:,:,frame].shape)) #subtracted img
			clip_im = np.clip(sbd, 0 , sbd)
			self.My_Zstack[:,:,frame] = clip_im
Code Example #9
File: rpotter.py Project: sean-obrien/rpotter
def FindWand():
    global rval,old_frame,old_gray,p0,mask,color,ig,img,frame
    try:
        rval, old_frame = cam.read()
        cv2.flip(old_frame,1,old_frame)
        old_gray = cv2.cvtColor(old_frame,cv2.COLOR_BGR2GRAY)
        old_gray = cv2.equalizeHist(old_gray)
        old_gray = cv2.GaussianBlur(old_gray,(9,9),1.5)
        dilate_kernel = np.ones(dilation_params, np.uint8)
        old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
        old_gray = clahe.apply(old_gray)
        #TODO: trained image recognition
        p0 = cv2.HoughCircles(old_gray,cv2.HOUGH_GRADIENT,3,50,param1=240,param2=8,minRadius=4,maxRadius=15)
        if p0 is not None:
            p0.shape = (p0.shape[1], 1, p0.shape[2])
            p0 = p0[:,:,0:2]
            mask = np.zeros_like(old_frame)
            ig = [[0] for x in range(20)]
        print "finding..."
        threading.Timer(3, FindWand).start()
    except:
        e = sys.exc_info()[1]
        print "Error: %s" % e
        sys.exit(1)
Code Example #10
File: mosaic.py Project: larssbr/mosaicProgram
    def claheAdjustImages(self):
        #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        clahe = cv2.createCLAHE(clipLimit=6.0,tileGridSize=(8, 8))
        #self.logger.info("The clahe type {}".format(type(clahe.type)))
        for i, bgr_img, in enumerate(self.images):
            self.logger.info("color adjusting image {}".format(i))
            # convert the BGR image to Lab (the legacy CV_RGB2Lab flag assumes RGB
            # order, so the a/b planes are computed with red and blue swapped)
            lab_image = cv2.cvtColor(bgr_img, cv2.cv.CV_RGB2Lab)
            #cv2.imshow("lab_image before clahe", lab_image);

            # Extract the L channel
            lab_planes = cv2.split(lab_image)

            # then apply CLAHE algorithm to L channel
            #dst = []  #creating empty array to store L value
            #dst = clahe.apply(lab_planes[0])  # L channel = lab_planes[0]
            lab_planes[0] = clahe.apply(lab_planes[0])

            # Merge the color planes back into an Lab image
            lab_image = cv2.merge(lab_planes)

            #cv2.imshow("lab_image after clahe", lab_image);

            # convert back to RGB space and store the color corrected image
            self.images[i] = cv2.cvtColor(lab_image, cv2.cv.CV_Lab2RGB)

            # overwrite old image
            #self.images[i] = cv2.imwrite(cl1)
            #cv2.imshow("image original", bgr_img);
            #cv2.imshow("image CLAHE", self.images[i]);
            cv2.waitKey()
Code Example #11
File: facial_alignment.py Project: 121onto/noaa
def experiement_clahe_and_entropy(file=None):
    images = image_generator()
    if file is not None:
        images = add_image_to_image_generator(images, file)

    claheizer = cv2.createCLAHE()

    for fn, im in images:
        image_arrays = []
        titles = []

        # plot original image
        titles.append("Original")
        image_arrays.append(im)

        # clahe
        clahe = cv2.cvtColor(im.astype("uint8"), cv2.COLOR_RGB2Lab)
        tmp = clahe.copy()
        tmp[:, :, 0] = claheizer.apply(clahe[:, :, 0])
        clahe = cv2.cvtColor(tmp.astype("uint8"), cv2.COLOR_Lab2RGB)

        titles.append("CLAHE")
        image_arrays.append(clahe)

        tmp = cv2.cvtColor(clahe, cv2.COLOR_RGB2Lab)
        ent = skimage.filters.rank.entropy(tmp[:, :, 0], skimage.morphology.disk(5))
        titles.append("Entropy L")
        image_arrays.append(ent)

        # plot
        subplot_images(image_arrays, titles=titles, show_plot=True, suptitle=fn.split("/")[-1])
Code Example #12
File: facedect.py Project: GordonCai/500c1face
def colorchange(pic):
    img = cv2.imread(pic)
    for k in range(n):  # n and the colors (b,g,r)/(B,G,R) below are module-level globals
        i = int(numpy.random.random() * img.shape[1])  
        j = int(numpy.random.random() * img.shape[0]) 
        if img.ndim == 2:   
            img[j,i] = 255        
        elif img.ndim == 3:     
            img[j,i,0]= 255    
            img[j,i,1]= 255    
            img[j,i,2]= 255  
    #cv2.imwrite("th3.png", img, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])       
    dst=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    cl1 = clahe.apply(dst)
    #cv2.imwrite("cl1.png", cl1, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
    th4 = cv2.adaptiveThreshold(cl1,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)

    #th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            #cv2.THRESH_BINARY,11,2)

    cv2.imwrite("im2.png", th4, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
    cha=cv2.imread('im2.png')
    ori = cv2.imread(pic)

    
    cha[numpy.where((cha == [255,255,255]).all(axis = 2))] = [b,g,r]
    cha[numpy.where((cha == [0,0,0]).all(axis = 2))] =[B,G,R]

    dst= cv2.addWeighted(cha,0.7,ori,0.3,0) 
    new= cv2.bilateralFilter(dst,7,75,75) 
    return new
Code Example #13
    def convert_hsv(self, img_file):
        pix = cv2.imread(os.path.dirname(__file__) + img_file)

        r_size = 5
        c_size = r_size - 1

        # using HLS space (channel 1 is the lightness plane)
        hls = cv2.cvtColor(pix, cv2.COLOR_BGR2HLS)
        pix_s = hls[:, :, 1]
        cv2.imshow('pix-s', pix_s)
        # Adaptive Threshold -> CLAHE
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(c_size, c_size))
        pix_clahe = clahe.apply(pix_s)
        pix_bin = cv2.adaptiveThreshold(pix_clahe, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,
                                        blockSize=r_size*2 + 1, C=r_size)
        cv2.imshow('pix-bin', pix_bin)
        pix_threshold = histogram_equalization.ostu_algorithm(pix_bin, r_size - 2)
        cv2.imshow('pix-ostu', pix_threshold)

        # fe = FingerprintEnhance()
        # pix_enhance = fe.gabor_filter(pix_threshold)
        # cv2.imshow("pix-enhance", np.array(pix_enhance))

        gabor = GaborFilter()
        pix_gabor = gabor.process(pix_threshold)
        cv2.imshow("pix-gabor", np.array(pix_gabor))

        cv2.waitKey()
Code Example #14
File: main2.py Project: roramas/javasensei
def read_images():
    X,y = [], []
    cascade_face = cv2.CascadeClassifier()
    cascade_face.load("classifiers/lbpcascade_frontalface.xml")
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    emociones = ("enojado", "feliz", "neutral", "sorpresa", "triste")  # needed by the loop below
    #carpeta_emociones = "D:/Respaldo Jose Luis/proyecto RVERK/RafD_Ordenado/"
    carpeta_emociones = "D:/Francisco/Pictures/Camera Roll/"
    indice = -1
    for emocion in emociones:
        imagenes = glob.glob(carpeta_emociones + emocion + "\\*.jpg")
        indice += 1
        for imagen in imagenes:
            try:
                im = cv2.imread(imagen, cv2.IMREAD_GRAYSCALE)
                faces = cascade_face.detectMultiScale(im)
                if len(faces) > 0:
                    for (corX, corY, w, h) in faces[:1]:
                        im = im[corY:corY + h, corX:corX + w]
                        #im = cv2.resize(im ,(100, 100))
                        X.append(np.asarray(im, dtype=np.uint8))
                        y.append(indice)
            except IOError, (errno, strerror):
                print "I/O error({0}): {1}".format(errno, strerror)
            except:
Code Example #15
def CLAHE(filename, clip_limit=2.0):
	img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)

	clahe = cv2.createCLAHE(clipLimit=clip_limit)
	equalized = clahe.apply(img)

	return [img, equalized]
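Note that only clipLimit is passed here; tileGridSize then falls back to OpenCV's default of (8, 8). The object's accessors can confirm the effective settings (a quick check, not part of the original snippet):

import cv2

clahe = cv2.createCLAHE(clipLimit=2.0)
print(clahe.getClipLimit())      # 2.0
print(clahe.getTilesGridSize())  # (8, 8), the default tile grid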
Code Example #16
def hist_eq(image_dir = 'test_hist/', target_dir = 'test_result_hist/', method = 'CLAHE'):
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    #pic_list = os.listdir(image_dir)
    pic_list = glob.glob(image_dir+'/*.jpeg')
    list_length = len(pic_list)
    
    util.update_progress(0)
    for j, image_path in enumerate(pic_list):
        
        img = cv2.imread(image_path,1)
        # Use file name only, without .jpeg
        image_name = image_path.split('/')[-1][:-5] 
        
        b,g,r = cv2.split(img)        
        
        if method == 'HE':
            cv2.equalizeHist(b,b)
            cv2.equalizeHist(g,g)
            cv2.equalizeHist(r,r)
        else:
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            clahe.apply(g,g)
            if not method =='CLAHE_G':
                clahe.apply(b,b)
                clahe.apply(r,r)
            
        recombined = cv2.merge((b,g,r))
        cv2.imwrite(target_dir + image_name + method +'.jpeg', recombined)
        util.update_progress(float(j)/list_length)
        
    util.update_progress(1)
Code Example #17
def interpolate_bg(img):
     bg = img

     op = np.random.randint(0,2)
     if op: # flip the image
        bg = cv2.flip(bg,np.random.randint(-1,2))

     op = np.random.randint(0,2)
     if op: # perform a rotation
         rows, cols = bg.shape
         degree = [90,180,360]  # note: 360 is a full turn, which leaves the image unchanged
         d = np.random.randint(0,3)
         M = cv2.getRotationMatrix2D((cols/2, rows/2), degree[d], 1)
         bg = cv2.warpAffine(bg, M, (cols, rows))

     op = np.random.randint(0,4)
     if op == 0: # lighten with equalizer
        bg = cv2.equalizeHist(bg)
     elif op == 1: # lighten with CLAHE
         clahe = cv2.createCLAHE(clipLimit=1.2, tileGridSize=(20,20))
         bg = clahe.apply(bg)
     elif op ==2: # darken the image
         bg = contrast_wire(bg, 0.9, 1.10)
     else: # do nothing
         bg = bg

     #cv2.imshow('new image',res)
     # cv2.imshow('clahe', bg)
     # cv2.waitKey(0)
     # cv2.destroyAllWindows()
     return bg
Code Example #18
File: background.py Project: huang475/HouseKeeper
def capture():
    global servo_move_flag
    print 'start collecting frame'
    faces_detected = []
    stream = io.BytesIO()
    cam.capture(stream, format='jpeg')
    data = np.fromstring(stream.getvalue(), dtype=np.uint8)
    frame = cv2.imdecode(data, 1)
    #frame = cv2.imread('test/yuchen.jpg')
    gray= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    clear_gray = clahe.apply(gray)
    small_gray = cv2.resize(clear_gray,(320,240))
    
    print 'trying to detect faces'
    faces = face_cascade.detectMultiScale(small_gray, 1.3, 5)
    
    if len(faces) > 0:
        face_cnt = 1
        servo_move_flag  = 0
        for face in os.listdir('faces'):
            os.remove('faces/'+face)
        for x,y,w,h in faces:
            x,y,w,h = wider_area(divider*x,divider*y,divider*w,divider*h)
            face_filename = 'face_'+str(face_cnt)
            cv2.imwrite('face_buff.jpg',frame[y:y+h,x:x+w])
            face_cnt =face_cnt + 1
            face = gray[y:y+h,x:x+w]
            faces_detected.append(np.asarray(resize(face), dtype=np.uint8))
            print 'face detected has been appended'
    else:
        servo_move_flag  = 1
        print 'no face detected'
    return faces_detected
Code Example #19
def ProcessImageHistogram(paramterers):
    # Get process-related information
    current = multiprocessing.current_process()
    proc_name = current.name
    # We are starting
    logging.info('[%s] Starting ...' % (proc_name))
    # Parameters
    input_file = paramterers[0]
    output_file = paramterers[1]
    contrast_limit = paramterers[2]
    # Opening file
    logging.info('[%s] Opening: %s file ...' % (proc_name, input_file))
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    # There was a problem opening the file
    if img is None:
        logging.error('[%s] Error opening: %s file!' % (proc_name, input_file))
        return -1
    else:
        # A color image should be split into its BGR channels
        logging.info('[%s] Split BGR image color channels of: %s file ...' % (proc_name, input_file))
        b, g, r = cv2.split(img)
        logging.info('[%s] Processing file: %s ... Contrast limit is: %s.' % (proc_name, input_file, contrast_limit))
        # create a CLAHE object (Contrast Limited Adaptive Histogram Equalization) and apply it to every channel
        clahe = cv2.createCLAHE(clipLimit=contrast_limit, tileGridSize=(8, 8))
        b = clahe.apply(b)
        g = clahe.apply(g)
        r = clahe.apply(r)
        # Merge image color channels
        logging.info('[%s] Merge BGR image color channels of: %s file ...' % (proc_name, input_file))
        cl1 = cv2.merge((b, g, r))
        logging.info('[%s] File has been processed: %s.' % (proc_name, input_file))
        # Write image, file type decided based on its extension
        cv2.imwrite(output_file, cl1)
        logging.info('[%s] File has been written: %s.' % (proc_name, output_file))
    return 0
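Since the worker takes a single parameter tuple and returns a status code, it drops straight into a multiprocessing pool; a usage sketch (the file names are hypothetical, and the cv2/logging imports of the surrounding module are assumed):

import multiprocessing

if __name__ == '__main__':
    jobs = [('in1.jpg', 'out1.jpg', 2.0),   # (input_file, output_file, contrast_limit)
            ('in2.jpg', 'out2.jpg', 3.0)]
    pool = multiprocessing.Pool(processes=2)
    statuses = pool.map(ProcessImageHistogram, jobs)
    pool.close()
    pool.join()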
Code Example #20
File: features.py Project: gzuidhof/cad
def histogram_equalization(image, adaptive=False):
    if adaptive:
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        image = clahe.apply(image)
    else:
        image = cv2.equalizeHist(image)
    return image
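Both branches expect a single-channel 8-bit image (cv2.equalizeHist and CLAHE.apply are defined for grayscale input); a usage sketch with a placeholder file name:

import cv2

gray = cv2.imread('scan.png', cv2.IMREAD_GRAYSCALE)  # placeholder file
global_eq = histogram_equalization(gray, adaptive=False)
local_eq = histogram_equalization(gray, adaptive=True)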
Code Example #21
File: face-tracker.py Project: ganeshredcobra/OpenCV
def get_landmarks(lena_gray, x, y, width, height):
    keypoints = None
    p0 = None

    face = face_classifier.detectMultiScale(lena_gray, 1.2, 5)
    if len(face) == 0:
      return p0,False
      
    # Histogram equalization to improve contrast (CLAHE)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    lena_gray = clahe.apply(lena_gray)
    localizer = Flandmark()
    keypoints = localizer.locate(lena_gray, y, x, height, width)
    keypoints = np.fliplr(keypoints)

    i = 0
    p0_len = keypoints.shape[0]
    main = np.array([[[]]])
    while i < p0_len:
      to_add = keypoints[i]
      if i == 0:
        main = to_add[None,:][None,:]
      else:
        main = np.concatenate((main, to_add[None,:][None,:]))
        
      i = i + 1
    main = np.array(main, dtype='f')
    p0 = main

    return p0, True
Code Example #22
 def __init__(self, features=150, gauss_blur=3, med_blur=5, history=1, clip_limit=100.0, tile_grid=(8, 8)):
     self.num_features = features
     self.orb = cv2.ORB_create(features)
     self.bsb = cv2.createBackgroundSubtractorMOG2(history)
     self.clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid)
     self.gaussian_blur_factor = gauss_blur
     self.median_blur_factor = med_blur
Code Example #23
      def GenericFilter(self,infile,outfile):
	      #print infile,":",outfile
	      clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	      kernel1 = np.ones((5,5),np.uint8)
	      kernel2 = np.ones((3,3),np.uint8)
	      img = cv2.imread(str(infile),0)
	      cv2.imshow("img",img)
	      blur=cv2.medianBlur(img,5)
	      cl1 = clahe.apply(blur)
	      circles_mask = cv2.dilate(cl1,kernel1,iterations = 6)
	      circles_mask = (255-circles_mask)
	      circles_mask = cv2.threshold(circles_mask, 0, 255, cv2.THRESH_BINARY)[1]
	      edges = cv2.Canny(cl1,100,200)
	      dilation = cv2.dilate(edges,kernel1,iterations = 1)
	      display = cv2.bitwise_and(img,img,mask=dilation) 
	      cl2 = clahe.apply(display)
	      cl2 = clahe.apply(cl2)
	      ret,th = cv2.threshold(cl2,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	      th = 255 - th
	      thg = cv2.adaptiveThreshold(display,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
			                  cv2.THRESH_BINARY,11,2)

	      saveimgfile=outfile.split(".")
	      print saveimgfile[1]

	      final = cv2.bitwise_and(dilation,dilation,mask=th) 
	      cv2.imwrite("Filtered-images/"+str(saveimgfile[0])+"1."+str(saveimgfile[1]),final)
	      finalg = cv2.bitwise_and(dilation,dilation,mask=thg) 
	      cv2.imwrite("Filtered-images/"+str(saveimgfile[0])+"2."+str(saveimgfile[1]),finalg)
	      finalg = 255 - finalg
	      abso = cv2.bitwise_and(dilation,dilation,mask=finalg) 
	      cv2.imwrite("Filtered-images/"+str(saveimgfile[0])+"orig."+str(saveimgfile[1]),abso)

	      cv2.waitKey(0)
Code Example #24
File: cv_tk.py Project: carolchang/ece568_project
def capture(root):
    frame = queue.get()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4))
    clear_grey = clahe.apply(gray)

    faces = face_cascade.detectMultiScale(clear_grey, 1.3, 5)
    if len(faces) > 0:
		for x,y,w,h in faces:
			#print x,y,w,h
			x,y,w,h = wider_area(x,y,w,h)
			#print x,y,w,h
			frame = frame[y:y+h,x:x+w]
			clear_grey = clear_grey[y:y+h,x:x+w]
			im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
			a = Image.fromarray(im)
			b = ImageTk.PhotoImage(image=a)
			image_label.configure(image=b)
			image_label._image_cache = b  # avoid garbage collection
			root.update()

			cv2.imwrite('temp.jpg',frame)
			#save_image_process.terminate
			#save_image_process.start()
			save_image(root)
Code Example #25
def face_detection(cv_image):

	cv_image = cv2.flip(cv_image, 1)
#	gray_vid = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
	
	clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	#gray_vid = clahe.apply(gray_vid)
	gray_vid = clahe.apply(cv_image)

	faces = face_cascade.detectMultiScale(gray_vid, 1.2, 5)

	print "entro a funcion"
	print faces

	for (x, y, w, h) in faces:
		cv2.rectangle(cv_image, (x,y), (x+w, y+h), (255, 0, 0), 2)
		roi_gray = gray_vid[y:y+h, x:x+w]
		roi_color = cv_image[y:y+h, x:x+w]

#		eyes = eye_cascade.detectMultiScale(roi_gray)

#		print "entro a caras"

#		for(ex, ey, ew, eh) in eyes:
#			cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
#			print "entro a ojos"	  	

	#cv2.imshow("video_stream", cv_image)
  	#cv2.waitKey(1)

	return cv_image
Code Example #26
File: Image.py Project: UASLab/ImageAnalysis
    def load_gray(self):
        # print "Loading " + self.image_file
        try:
            rgb = cv2.imread(self.image_file)
            if self.height == 0 or self.width == 0:
                self.height, self.width, self.fulld = rgb.shape
            gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)

            # cv2.imshow('rgb', rgb)
            # cv2.imshow('grayscale', gray)

            # histogram equalization
            # eq = cv2.equalizeHist(gray)
            # cv2.imshow('history equalization', eq)

            # adaptive histogram equilization (block by block)
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
            aeq = clahe.apply(gray)
            # cv2.imshow('adaptive history equalization', aeq)

            # print 'waiting for keyboard input...'
            # key = cv2.waitKey() & 0xff

            return aeq

        except:
            print self.image_file + ":\n" + "  load error: " + str(sys.exc_info()[1])
Code Example #27
def claheAdjustImagesBW(img):
    # --> This variant converts to grayscale, applies CLAHE there, and expands the
    # result back to 3 channels (unlike the Lab version, color information is discarded)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    clahe = cv2.createCLAHE(clipLimit=6.0,tileGridSize=(8, 8))


    #print 'converted image to Lab space'
    #lab_planes = cv2.split(lab_image)


    #lab_planes[0] = clahe.apply(lab_planes[0])
    gray = clahe.apply(gray)
    print 'applied clahe to the grayscale image'

    cv2.imshow("grayCLAHE", gray)

    img =  cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    cv2.imshow("imgCLAHE", img)
    # Merge the the color planes back into an Lab image
    #lab_image = cv2.merge(lab_planes, lab_planes[0])
    print 'expand the grayscale result back to 3 channels'
    return img
Code Example #28
File: xxx.py Project: chnsbs/ipt1_triangulation
def CenterThreshold(img):
    clahe = cv2.createCLAHE(clipLimit=4, tileGridSize=(4,4))
    dst = clahe.apply(img[650:800, 1125:1275])
    re, dst = cv2.threshold(dst, 253, 255, cv2.THRESH_BINARY_INV)
    dst = cv2.GaussianBlur(dst, (11,11),0 )
    re, dst = cv2.threshold(dst, 253, 255, cv2.THRESH_BINARY)
    return dst
Code Example #29
File: util.py Project: gzuidhof/cad
def histogram_equalization(images, adaptive=True):

    _images = np.array(images * 255, dtype = np.uint8)

    pool = ThreadPool(4)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

    def process_image(image):
        #print image.shape, image.dtype
        image = image.transpose(1,2,0)

        if adaptive:
            image[:,:,0] = clahe.apply(image[:,:,0])
            image[:,:,1] = clahe.apply(image[:,:,1])
            image[:,:,2] = clahe.apply(image[:,:,2])
        else:
            image[:,:,0] = cv2.equalizeHist(image[:,:,0])
            image[:,:,1] = cv2.equalizeHist(image[:,:,1])
            image[:,:,2] = cv2.equalizeHist(image[:,:,2])

        image = image.transpose(2,0,1)
        return image

    equalized = pool.map(process_image, _images)
    equalized = np.array(equalized, dtype=np.float32)/255.

    #visualize_data(np.append(images[:8],equalized[:8],axis=0).transpose(0,2,3,1))
    return equalized
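The snippet assumes ThreadPool is already imported and that images arrive as float arrays in (N, C, H, W) layout scaled to [0, 1]; a hedged call with synthetic data:

import numpy as np
from multiprocessing.pool import ThreadPool  # the ThreadPool used above

batch = np.random.rand(4, 3, 64, 64).astype(np.float32)  # 4 fake RGB images
equalized = histogram_equalization(batch, adaptive=True)
print(equalized.shape, equalized.dtype)  # (4, 3, 64, 64) float32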
Code Example #30
    def _queryFrame(self):
        
        frame_1 = self.mmc.getLastImage()
        #self.npFrame = copy.deepcopy(frame_1);

        self.rawFrame = copy.deepcopy(frame_1)
        if self.equalizationOn:
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            cl1 = clahe.apply(frame_1)
            frame_1 = cl1
        else:
            brightness_temp = np.array(frame_1, dtype = np.int32)
            brightness_temp = brightness_temp + self.BrightnessOffset
            clipped = np.clip(brightness_temp, 0, 255)
            frame_1 = np.array(clipped, dtype = np.uint8)

        frame_flip = cv2.flip(frame_1, 0)
        
        frame_1 = frame_flip
        self.npFrame = frame_1;
        #frame = [frame_1, frame_1, frame_1]
        #print np.var(frame_1)
        frame = cv2.cvtColor(frame_1, cv2.COLOR_GRAY2RGB)
        #self.npFrame = frame
        bitmap = cv2.cv.CreateImageHeader((frame.shape[1], frame.shape[0]), cv2.cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(bitmap, frame.tostring(), frame.dtype.itemsize * 3 * frame.shape[1])
        #frame = OpenCVQImage(frame_3)
        self.newFrame.emit(bitmap)
Code Example #31
def clahe(image):
    gray = cv.cvtColor(image,cv.COLOR_BGR2GRAY)
    clahe = cv.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
    dst = clahe.apply(gray)
    cv.imshow("clash",dst)
Code Example #32
import cv2
import numpy as np
import dlib
from sklearn.svm import SVC
import glob
import random
import math
import itertools
from sklearn.externals import joblib
import os

img_path = '/var/www/html/emorecog/data/test_images/test1.jpg'  # THE PATH OF THE IMAGE TO BE ANALYZED

font = cv2.FONT_HERSHEY_DUPLEX
emotions = ["anger", "happy", "sadness"]  #Emotion list
clahe = cv2.createCLAHE(clipLimit=2.0,
                        tileGridSize=(8, 8))  # Histogram equalization object
face_det = dlib.get_frontal_face_detector()
land_pred = dlib.shape_predictor(
    "/var/www/html/emorecog/data/DlibPredictor/shape_predictor_68_face_landmarks.dat"
)


def crop_face(i_path):

    image = cv2.imread(i_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    dest_i = '/var/www/html/emorecog/data/cap_image/test.png'
    # Loading all the HAAR Cascade classifiers
    face1 = cv2.CascadeClassifier(
        "/var/www/html/emorecog/data/HAARCascades/haarcascade_frontalface_default.xml"
    )
Code Example #33
no_of_eyes = 0

clean_up()

os.chdir('All_images')

files = glob.glob('*')

for file in files:

    try:

        img = cv2.imread(file)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        clahe = cv2.createCLAHE(clipLimit=1.6, tileGridSize=(3, 3))
        gray = clahe.apply(gray)

        faces = face_cascade.detectMultiScale(gray, 1.3, 5)

        # print file
        smile_found = 0
        face_found = 0
        eyes_found = 0

        img = cv2.imread(file)

        for (x1, y1, w1, h1) in faces:

            face_found = 1
            cv2.rectangle(img, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 2)
Code Example #34
def extract_bv(image):
    b, green_fundus, r = cv2.split(image)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(green_fundus)

    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus,
                          cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)),
                          iterations=1)
    R1 = cv2.morphologyEx(r1,
                          cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)),
                          iterations=1)
    r2 = cv2.morphologyEx(R1,
                          cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (11, 11)),
                          iterations=1)
    R2 = cv2.morphologyEx(r2,
                          cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (11, 11)),
                          iterations=1)
    r3 = cv2.morphologyEx(R2,
                          cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (23, 23)),
                          iterations=1)
    R3 = cv2.morphologyEx(r3,
                          cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (23, 23)),
                          iterations=1)
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST,
                                                cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin,
                       cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                       iterations=1)

    # removing blobs of unwanted bigger chunks, taking into consideration that they
    # are not straight lines like blood vessels, and restricting to an area interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(fundus_eroded.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),
                                                 cv2.RETR_LIST,
                                                 cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(
                cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"
        else:
            shape = "veins"
        if (shape == "circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)

    finimage = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
    # blood_vessels = cv2.bitwise_not(finimage)
    blood_vessels = finimage
    return blood_vessels
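extract_bv expects a BGR fundus photograph whose green plane carries the vessel contrast; a usage sketch (the file name is hypothetical):

import cv2
import numpy as np  # required by the function above

fundus_img = cv2.imread('retina.jpg')
vessels = extract_bv(fundus_img)
cv2.imwrite('vessels.png', vessels)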
Code Example #35
def Clahe(frame):
    a = cv2.split(cv2.cvtColor(frame, cv2.COLOR_BGR2LAB))
    clahe = cv2.createCLAHE(clipLimit=4.5, tileGridSize=(7, 7))
    a[0] = clahe.apply(a[0])
    return cv2.cvtColor(cv2.merge(a), cv2.COLOR_LAB2BGR)
Code Example #36
File: core.py Project: FrancescElies/image-to-scan
def convert_object(file_path, screen_size=None, new_file_suffix="scanned"):
    """ Identifies 4 corners and does four point transformation """
    debug = True if log.level == logging.DEBUG else False
    image = cv2.imread(str(file_path))

    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(
        gray, 11, 17, 17
    )  # TODO: 11 works for offline; may need tuning down to 5 for online

    gray = cv2.medianBlur(gray, 5)
    edged = cv2.Canny(gray, 30, 400)

    if debug:
        previewImage("Edged Image", edged)

    # find contours in the edged image, keep only the largest
    # ones, and initialize our screen contour

    contours, hierarchy = cv2.findContours(
        edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
    )

    log.debug("Contours found: %s", len(contours))


    # approximate the contour
    ContourArea = namedtuple('ContourArea', ['curve', 'area'])
    contourAreas = [ContourArea(curve=x, area=cv2.contourArea(x))
                    for x in contours]
    contourAreas = sorted(contourAreas, key=attrgetter('area'))

    if debug:
        previewContours(image, [x.curve for x in contourAreas])

    screens = []  # 4-point polygons representing possible screens (rectangles)
    for contour in contourAreas:
        peri = cv2.arcLength(contour.curve, True)
        polygon_less_vertices = cv2.approxPolyDP(contour.curve,
                                                 epsilon=0.02 * peri,  # approximation accuracy
                                                 closed=True)

        num_vertices = len(polygon_less_vertices)
        if num_vertices == 4:
            (x, y, width, height) = cv2.boundingRect(contour.curve)
            log.debug(f'x={x} y={y} width={width} height={height}')
            screens.append(Screen(fourpoints=polygon_less_vertices, width=width))

    if debug:
        log.debug(f"Screens found {len(screens)}: {screens}")
        previewContours(image, [x.fourpoints for x in screens])

    # find largest screen
    largest_screen = max(screens, key=attrgetter('width'))

    if debug:
        previewContours(image, [largest_screen.fourpoints])

    # now that we have our screen contour, we need to determine
    # the top-left, top-right, bottom-right, and bottom-left
    # points so that we can later warp the image -- we'll start
    # by reshaping our contour to be our finals and initializing
    # our output rectangle in top-left, top-right, bottom-right,
    # and bottom-left order
    pts = largest_screen.fourpoints.reshape(4, 2)
    log.debug("Found bill rectagle at %s", pts)
    rect = order_points(pts)
    log.debug(rect)

    warped = transform_to_four_points(image, pts)

    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    # Replacement for `skimage.exposure.rescale_intensity`
    # Contrast Limited Adaptive Histogram Equalization
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8, 8))
    warp = clahe.apply(warp)

    # show the original and warped images
    if debug:
        previewImage("Original", image)
        previewImage("warp", warp)

    warp_file = str(file_path.parent / f"{file_path.stem}-{new_file_suffix}.jpg")
    cv2.imwrite(warp_file, warp)
    log.debug(f"Result: {warp_file}")

    if screen_size:
        return cv2.cvtColor(
            cv2.resize(warp, screen_size), cv2.COLOR_GRAY2RGB
        )
    else:
        return cv2.cvtColor(warp, cv2.COLOR_GRAY2RGB)
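As the inline comment notes, CLAHE stands in here for skimage.exposure.rescale_intensity; if a plain min-max stretch (what rescale_intensity does by default) is wanted instead, cv2.normalize is the closer drop-in (a sketch, not part of the project):

import cv2

# linear stretch of `warp` to the full 0..255 range, analogous to
# skimage.exposure.rescale_intensity with default arguments
warp = cv2.normalize(warp, None, 0, 255, cv2.NORM_MINMAX)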
Code Example #37
File: datasets.py Project: IssacCyj/yolov5
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first
import albumentations as A

# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff',
               'dng']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv',
               'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)

CLAHE = False
clahe2 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
clahe4 = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break

train_transforms = A.Compose([
    A.OneOf([
        A.CLAHE(clip_limit=2),
        A.IAASharpen(),
        A.IAAEmboss(),
        A.RandomBrightnessContrast(),
    ],
            p=0.35),
Code Example #38
        gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # perform contrast limited adaptive equalization
        # based on example at:
        # http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.html

        # get parameters from track bars

        clip_limit = cv2.getTrackbarPos("clip limit", window_name4)
        tile_size = cv2.getTrackbarPos("tile size", window_name4)

        # perform filtering

        clahe = cv2.createCLAHE(clipLimit=clip_limit,
                                tileGridSize=(tile_size,
                                              tile_size))  # create filter
        output = clahe.apply(gray_img)  # apply filter

        # display image

        cv2.imshow(window_name1, gray_img)
        cv2.imshow(
            window_name2,
            hist_lines(cv2.calcHist([gray_img], [0], None, [256], [0, 256])))
        cv2.imshow(window_name3, output)
        cv2.imshow(
            window_name4,
            hist_lines(cv2.calcHist([output], [0], None, [256], [0, 256])))

        # start the event loop - essential
Code Example #39
def main():
	cv2.namedWindow('Gamma Correction',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Correction',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Auto White Balance',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Clahe',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Clahe LAB',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Gaussian',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Anisotropic Diffusion',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Median Blur',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Median Blur 2',cv2.WINDOW_NORMAL)
	cv2.namedWindow('YCrCb Median Blur',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Output',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Thresh Output',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
	
	cv2.resizeWindow('Video',500,500)
	cv2.resizeWindow('Clahe',500,500)
	cv2.resizeWindow('Clahe LAB',500,500)
	cv2.resizeWindow('Gaussian',500,500)
	cv2.resizeWindow('Anisotropic Diffusion',500,500)
	cv2.resizeWindow('Median Blur',500,500)
	cv2.resizeWindow('Median Blur 2',500,500)
	cv2.resizeWindow('YCrCb Median Blur',500,500)
	cv2.resizeWindow('Output',500,500)
	cv2.resizeWindow('Thresh Output',500,500)
	
	imgpath = "/home/charmi/Desktop/Trident_Work/OpenCV/Output/SavedVideos2/Output15.avi"
	'''
	a ='/home/charmi/Desktop/Trident_Work/OpenCV/Output/SavedVideos/OutputThresh'
	b =1
	c = '.avi'
	filename = a+str(b)+c
	#d ='/home/charmi/Desktop/Trident_Work/OpenCV/Output/SavedVideos/Output'
	#filename2=d+str(b)+c
	while(os.path.exists(filename)):
		b+=1
		filename = a+str(b)+c
		#filename2=d+str(b)+c
	codec = cv2.VideoWriter_fourcc('W', 'M', 'V', '2')
	framerate = 10
	resolution = (640, 480)
	VideoFileOutput = cv2.VideoWriter(filename, codec, framerate, resolution)
	#VideoFileOutput2 = cv2.VideoWriter(filename2, codec, framerate, resolution)
	'''
	
	cv2.createTrackbar('+ve Gamma','Gamma Correction',1,20,emptyFunction)
	cv2.createTrackbar('-ve Gamma','Gamma Correction',0,20,emptyFunction)
	cv2.createTrackbar('Hue','Correction',1,200,emptyFunction)
	cv2.createTrackbar('Saturation','Correction',1,200,emptyFunction)
	cv2.createTrackbar('Value','Correction',1,200,emptyFunction)
	cv2.createTrackbar('Low H','Thresh Output',0,180,emptyFunction)
	cv2.createTrackbar('Low S','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('Low V','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('High H','Thresh Output',0,360,emptyFunction)
	cv2.createTrackbar('High S','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('High V','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('Kernel Size','YCrCb Median Blur',3,15,emptyFunction)
	cv2.createTrackbar('Laplacian ksize','Output',3,15,emptyFunction)
	cv2.createTrackbar('Clip Limit (Blue)','Clahe',0,100,emptyFunction)
	cv2.createTrackbar('Tile Grid Size (Blue)','Clahe',1,20,emptyFunction)
	cv2.createTrackbar('Clip Limit (Green)','Clahe',0,100,emptyFunction)
	cv2.createTrackbar('Tile Grid Size (Green)','Clahe',1,20,emptyFunction)
	cv2.createTrackbar('Clip Limit (Red)','Clahe',0,100,emptyFunction)
	cv2.createTrackbar('Tile Grid Size (Red)','Clahe',1,20,emptyFunction)
	cv2.createTrackbar('Pair','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('Kernel Size','Gaussian',3,15,emptyFunction)
	cv2.createTrackbar('Standard Deviation','Gaussian',0,50,emptyFunction)
	cv2.createTrackbar('+ve Alpha','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('+ve Beta','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('-ve Alpha','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('-ve Beta','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('Alpha','Anisotropic Diffusion',0,10,emptyFunction)
	cv2.createTrackbar('Sensitivity','Anisotropic Diffusion',0,500,emptyFunction)
	cv2.createTrackbar('Iterations','Anisotropic Diffusion',1,500,emptyFunction)
	cv2.createTrackbar('Kernel','Median Blur',3,15,emptyFunction)
	cv2.createTrackbar('Kernel','Median Blur 2',3,25,emptyFunction)
	cv2.createTrackbar('FGauss','Gaussian',0,1,emptyFunction)
	cv2.createTrackbar('FGamma','Gamma Correction',0,1,emptyFunction)
	cv2.createTrackbar('FAWBal','Auto White Balance',0,1,emptyFunction)
	cv2.createTrackbar('FAni','Anisotropic Diffusion',0,1,emptyFunction)
	cv2.createTrackbar('FB','Median Blur',0,1,emptyFunction)
	cv2.createTrackbar('FG','Median Blur',0,1,emptyFunction)
	cv2.createTrackbar('FR','Median Blur',0,1,emptyFunction)
	cv2.createTrackbar('FMed','Median Blur 2',0,1,emptyFunction)
	cv2.createTrackbar('FL','Clahe LAB',0,1,emptyFunction)
	cv2.createTrackbar('FA','Clahe LAB',0,1,emptyFunction)
	cv2.createTrackbar('FB','Clahe LAB',0,1,emptyFunction)
	cv2.createTrackbar('FB','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('FG','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('FR','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('FL','Output',0,1,emptyFunction)
	cv2.createTrackbar('FM','YCrCb Median Blur',0,1,emptyFunction)
	
	cap = cv2.VideoCapture(imgpath)
	ret, image=cap.read()
	(h, w, c) = image.shape
	cv2.circle(image, (w//2, h//2), 7, (255, 255, 255), -1) 
	width2 = float(w)/2
	
	cv2.setTrackbarPos('+ve Gamma','Gamma Correction',16)
	cv2.setTrackbarPos('-ve Gamma','Gamma Correction',0)
	cv2.setTrackbarPos('Hue','Correction',100)
	cv2.setTrackbarPos('Saturation','Correction',100)
	cv2.setTrackbarPos('Value','Correction',100)
	cv2.setTrackbarPos('Low H','Thresh Output',8)
	cv2.setTrackbarPos('Low S','Thresh Output',149)
	cv2.setTrackbarPos('Low V','Thresh Output',170)
	cv2.setTrackbarPos('High H','Thresh Output',34)
	cv2.setTrackbarPos('High S','Thresh Output',255)
	cv2.setTrackbarPos('High V','Thresh Output',255)
	
	cv2.setTrackbarPos('Kernel Size','YCrCb Median Blur',3)
	cv2.setTrackbarPos('Laplacian ksize','Output',3)
	
	cv2.setTrackbarPos('Clip Limit (Blue)','Clahe',0)
	cv2.setTrackbarPos('Tile Grid Size (Blue)','Clahe',4)
	cv2.setTrackbarPos('Clip Limit (Green)','Clahe',0)
	cv2.setTrackbarPos('Tile Grid Size (Green)','Clahe',4)
	cv2.setTrackbarPos('Clip Limit (Red)','Clahe',0)
	cv2.setTrackbarPos('Tile Grid Size (Red)','Clahe',4)
	cv2.setTrackbarPos('Pair','Clahe',1)
	
	cv2.setTrackbarPos('Kernel Size','Gaussian',3)
	cv2.setTrackbarPos('Standard Deviation','Gaussian',5)
	cv2.setTrackbarPos('+ve Alpha','Gaussian',11)
	cv2.setTrackbarPos('+ve Beta','Gaussian',2)
	cv2.setTrackbarPos('-ve Alpha','Gaussian',2)
	cv2.setTrackbarPos('-ve Beta','Gaussian',0)
	cv2.setTrackbarPos('Alpha','Anisotropic Diffusion',1)
	cv2.setTrackbarPos('Sensitivity','Anisotropic Diffusion',20)
	cv2.setTrackbarPos('Iterations','Anisotropic Diffusion',2)
	cv2.setTrackbarPos('Kernel','Median Blur',5)
	cv2.setTrackbarPos('Kernel','Median Blur 2',3)
	cv2.setTrackbarPos('FGauss','Gaussian',0)
	cv2.setTrackbarPos('FGamma','Gamma Correction',1)
	cv2.setTrackbarPos('FAWBal','Auto White Balance',0)
	cv2.setTrackbarPos('FAni','Anisotropic Diffusion',1)
	cv2.setTrackbarPos('FB','Median Blur',1)
	cv2.setTrackbarPos('FG','Median Blur',1)
	cv2.setTrackbarPos('FR','Median Blur',1)
	cv2.setTrackbarPos('FMed','Median Blur 2',0)
	cv2.setTrackbarPos('FL','Clahe LAB',0)
	cv2.setTrackbarPos('FA','Clahe LAB',0)
	cv2.setTrackbarPos('FB','Clahe LAB',0)
	
	cv2.setTrackbarPos('FB','Clahe',1)
	cv2.setTrackbarPos('FG','Clahe',1)
	cv2.setTrackbarPos('FR','Clahe',1)
	
	cv2.setTrackbarPos('FL','Output',0)
	cv2.setTrackbarPos('FM','YCrCb Median Blur',0)
	
	
	ret = True
	flag=1
	xdiff=0
	txdiff=0
	cX=0
	cY=0
	maxArea=0
	while (1):
		maxArea=0
		if flag==1:
			ret,img = cap.read()
		
		fgs = cv2.getTrackbarPos('FGauss','Gaussian')
		fgm = cv2.getTrackbarPos('FGamma','Gamma Correction')
		fab = cv2.getTrackbarPos('FAWBal', 'Auto White Balance')
		fad = cv2.getTrackbarPos('FAni','Anisotropic Diffusion')
		fmb = cv2.getTrackbarPos('FB','Median Blur')
		fmg = cv2.getTrackbarPos('FG','Median Blur')
		fmr = cv2.getTrackbarPos('FR','Median Blur')
		fmed = cv2.getTrackbarPos('FMed','Median Blur 2')
		fcll = cv2.getTrackbarPos('FL','Clahe LAB')
		fcla = cv2.getTrackbarPos('FA','Clahe LAB')
		fclb = cv2.getTrackbarPos('FB','Clahe LAB')
		
		fcb = cv2.getTrackbarPos('FB','Clahe')
		fcg = cv2.getTrackbarPos('FG','Clahe')
		fcr = cv2.getTrackbarPos('FR','Clahe')
		
		fyl = cv2.getTrackbarPos('FL','Output')
		fym = cv2.getTrackbarPos('FM','YCrCb Median Blur')
		
		hl = cv2.getTrackbarPos('Low H','Thresh Output')
		hh = cv2.getTrackbarPos('High H','Thresh Output')
		sl = cv2.getTrackbarPos('Low S','Thresh Output')
		sh = cv2.getTrackbarPos('High S','Thresh Output')
		vl = cv2.getTrackbarPos('Low V','Thresh Output')
		vh = cv2.getTrackbarPos('High V','Thresh Output')
		clipLim1=(cv2.getTrackbarPos('Clip Limit (Blue)','Clahe'))
		clipLim1=float(clipLim1)/1000
		tgs1=cv2.getTrackbarPos('Tile Grid Size (Blue)','Clahe')
		clipLim2=(cv2.getTrackbarPos('Clip Limit (Green)','Clahe'))
		clipLim2=float(clipLim2)/1000
		tgs2=cv2.getTrackbarPos('Tile Grid Size (Green)','Clahe')
		clipLim3=(cv2.getTrackbarPos('Clip Limit (Red)','Clahe'))
		clipLim3=float(clipLim3)/1000
		tgs3=cv2.getTrackbarPos('Tile Grid Size (Red)','Clahe')
		
		pgamma=cv2.getTrackbarPos('+ve Gamma','Gamma Correction')
		ngamma=cv2.getTrackbarPos('-ve Gamma','Gamma Correction')
		hc=cv2.getTrackbarPos('Hue','Correction')
		sc=cv2.getTrackbarPos('Saturation','Correction')
		vc=cv2.getTrackbarPos('Value','Correction')
		pgamma=float(pgamma)/10
		ngamma=float(ngamma)/10
		hc=float(hc)/100
		sc=float(sc)/100
		vc=float(vc)/100
		ks=cv2.getTrackbarPos('Kernel Size','Gaussian')
		sd=(cv2.getTrackbarPos('Standard Deviation','Gaussian'))
		sd=float(sd)/10
		alpha=(cv2.getTrackbarPos('+ve Alpha','Gaussian'))
		beta=(cv2.getTrackbarPos('+ve Beta','Gaussian'))
		nalpha=(cv2.getTrackbarPos('-ve Alpha','Gaussian'))
		nbeta=(cv2.getTrackbarPos('-ve Beta','Gaussian'))
		alpha=float(alpha)/10
		beta=float(beta)/10
		nalpha=float(nalpha)/10
		nbeta=float(nbeta)/10
		alph=(cv2.getTrackbarPos('Alpha','Anisotropic Diffusion'))
		alph=float(alph)/10
		sens=cv2.getTrackbarPos('Sensitivity','Anisotropic Diffusion')
		itern=cv2.getTrackbarPos('Iterations','Anisotropic Diffusion')
		mker=cv2.getTrackbarPos('Kernel','Median Blur')
		medker=cv2.getTrackbarPos('Kernel','Median Blur 2')
		ymker=cv2.getTrackbarPos('Kernel Size','YCrCb Median Blur')
		ylksize=cv2.getTrackbarPos('Laplacian ksize','Output')
		
		
		if ks%2==0:
			ks+=1
		if mker%2==0:
			mker+=1
		if medker%2==0:
			medker+=1
		if ymker%2==0:
			ymker+=1
		if ylksize%2==0:
			ylksize+=1
		
		fg=cv2.getTrackbarPos('Pair','Clahe')
		if ret:
			if cv2.waitKey(2) == 27:
				break
			if cv2.waitKey(2) == 97:
				flag = 1
			if cv2.waitKey(2) == 32:
				flag=0
			low = np.array([hl,sl,vl])
			high = np.array([hh,sh,vh])
			
			#Gamma Correction
			t1=time.time()
			gc=adjust_gamma(img,pgamma-ngamma)
			t2=time.time()-t1
			#print(t2)
			cv2.imshow('Gamma Correction',gc)
			if(fgm==0):
				gc=img
			
			
			#Gaussian
			gaussian = cv2.GaussianBlur(gc ,(ks,ks), sd)
			gauss = cv2.addWeighted(img, alpha-nalpha, gaussian, beta-nbeta, 0)
			cv2.imshow('Gaussian',gauss)
			if(fgs==0):
				gauss=gc
			
			
			#Correcting HSV Values
			st1=cv2.cvtColor(gauss,cv2.COLOR_BGR2HSV)
			st1[:, :, 0]=st1[:, :, 0]*hc
			st1[:, :, 1]=st1[:, :, 1]*sc
			st1[:, :, 2]=st1[:, :, 2]*vc
			st3=cv2.cvtColor(st1,cv2.COLOR_HSV2BGR)
			cv2.imshow('Correction',st3)
			
			
			#Auto White Balance
			result1 = cv2.cvtColor(st3, cv2.COLOR_BGR2LAB)
			avg_a = np.average(result1[:, :, 1])
			avg_b = np.average(result1[:, :, 2])
			result1[:, :, 1] = result1[:, :, 1] - ((avg_a - 128) * (result1[:, :, 0] / 255.0) * 1.5)
			result1[:, :, 2] = result1[:, :, 2] - ((avg_b - 128) * (result1[:, :, 0] / 255.0) * 1.5)
			result1 = cv2.cvtColor(result1, cv2.COLOR_LAB2BGR)
			cv2.imshow('Auto White Balance',result1)
			if (fab==0):
				result1=st3
			
			#Anisotropic Diffusion
			adf = cv2.ximgproc.anisotropicDiffusion(result1, alph, sens, itern)
			cv2.imshow('Anisotropic Diffusion',adf)
			if(fad==0):
				adf=result1
				
			#Median Blur
			b, g, r = cv2.split(adf)
			b1 = cv2.medianBlur(b,mker)
			if(fmb==0):
				b1=b
			g1 = cv2.medianBlur(g,mker)
			if(fmg==0):
				g1=g
			r1 = cv2.medianBlur(r,mker)
			if(fmr==0):
				r1=r
			medfil = cv2.merge((b1, g1, r1))
			cv2.imshow('Median Blur',medfil)
			
			#Clahe LAB
			clahe1 = cv2.createCLAHE(clipLimit=clipLim1,tileGridSize=(tgs1,tgs1))
			clahe2 = cv2.createCLAHE(clipLimit=clipLim2,tileGridSize=(tgs2,tgs2))
			clahe3 = cv2.createCLAHE(clipLimit=clipLim3,tileGridSize=(tgs3,tgs3))
			lab=cv2.cvtColor(medfil,cv2.COLOR_BGR2LAB)
			l, a, b = cv2.split(lab)
			l1 = clahe1.apply(l)
			if(fcll==0):
				l1=l
			a1 = clahe2.apply(a)
			if(fcla==0):
				a1=a
			b1 = clahe3.apply(b)
			if(fclb==0):
				b1=b
			cmer=cv2.merge((l1,a1,b1))
			cllab=cv2.cvtColor(cmer,cv2.COLOR_LAB2BGR)
			cv2.imshow('Clahe LAB',cllab)
			
			#Clahe BGR
			if(fg==1):
				cv2.setTrackbarPos('Clip Limit (Green)','Clahe',int(clipLim1*1000))
				cv2.setTrackbarPos('Tile Grid Size (Green)','Clahe',tgs1)
				cv2.setTrackbarPos('Clip Limit (Red)','Clahe',int(clipLim1*1000))
				cv2.setTrackbarPos('Tile Grid Size (Red)','Clahe',tgs1)
			clahe1 = cv2.createCLAHE(clipLimit=clipLim1,tileGridSize=(tgs1,tgs1))
			clahe2 = cv2.createCLAHE(clipLimit=clipLim2,tileGridSize=(tgs2,tgs2))
			clahe3 = cv2.createCLAHE(clipLimit=clipLim3,tileGridSize=(tgs3,tgs3))
			b, g, r = cv2.split(cllab)
			b1 = clahe1.apply(b)
			if(fcb==0):
				b1=b
			g1 = clahe2.apply(g)
			if(fcg==0):
				g1=g
			r1 = clahe3.apply(r)
			if(fcr==0):
				r1=r
			cl=cv2.merge((b1,g1,r1))
			cv2.imshow('Clahe',cl)
			
			#Median Blur
			medblur = cv2.medianBlur(cl,medker)
			cv2.imshow('Median Blur 2',medblur)
			if(fmed==0):
				medblur=cl
				
			#YCrCb  Laplacian
			ycrcb=cv2.cvtColor(medblur, cv2.COLOR_BGR2YCR_CB)
			y,cr,cb=cv2.split(ycrcb)
			dst = cv2.Laplacian( y, cv2.CV_16S, ksize=ylksize)
			absDst = cv2.convertScaleAbs( dst )
			out=cv2.merge((absDst,cr,cb))
			output=cv2.cvtColor(out, cv2.COLOR_YCR_CB2BGR)
			cv2.imshow('Output',output)
			if(fyl==0):
				output=medblur
				
			#YCrCb Median 
			med=cv2.medianBlur(absDst,ymker)
			tempOut=cv2.merge((med,cr,cb))
			tempOutput=cv2.cvtColor(tempOut, cv2.COLOR_YCR_CB2BGR)
			cv2.imshow('YCrCb Median Blur',tempOutput)
			if(fym==0):
				tempOutput=output
			
			
			
			#Masking
			hsv1= cv2.cvtColor(output, cv2.COLOR_BGR2HSV)
			obj1 = cv2.inRange(hsv1, low, high)
			res1 = cv2.bitwise_and(output, output, mask=obj1)
			
			#DETECTION
			gray=cv2.cvtColor(res1, cv2.COLOR_BGR2GRAY)
			ret1,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
			(contours,hierarchy) = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
			for pic, contour in enumerate(contours):
				area1 = cv2.contourArea(contour)
				if(area1>700):
					x,y,w,h = cv2.boundingRect(contour)
					if(w*1.5<h and h>120):
						cv2.rectangle(res1,(x,y),(x+w,y+h),(0,165,255),2)
						M = cv2.moments(contour)
						cX = int(M["m10"] / M["m00"])
						cY = int(M["m01"] / M["m00"])
						cv2.circle(res1, (cX, cY), 7, (255, 255, 255), -1)
						if(area1>maxArea):
							maxArea=area1
							txdiff=width2-cX
			if(txdiff!=xdiff):
				xdiff=txdiff
				print('Pixel Difference:', xdiff)	
			#Display
			cv2.imshow('Thresh Output',res1)
			cv2.imshow('Video',img)	
			#VideoFileOutput.write(res1)
			#VideoFileOutput2.write(img)
			
		else:
			break
	cv2.destroyAllWindows()
	#VideoFileOutput.release()
	#VideoFileOutput2.release()
	cap.release()
Code Example #40
def get_landmarksDIST(pic):
    landmarks_vectorised1 = []
    #detector = dlib.get_frontal_face_detector()
    frame = cv2.imread(pic)

    frame = cv2.resize(frame, (300, 300))

    #frame = cv2.resize(frame, (256, 256))

    #image = imutils.resize(frame, width=40)  # , length = 40)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    '''equ = cv2.equalizeHist(gray)
    res = np.hstack((gray, equ))  # stacking images side-by-side'''

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
    clahe_image = clahe.apply(gray)
    ####
    clahe_image = gray
    ####
    rects = detector(clahe_image, 1)

    # loop over the face detections
    #print(pic)
    #print("Number of faces detected: {}".format(len(rects)))
    winSize = (64, 64)
    blockSize = (16, 16)  #16
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0
    L2HysThreshold = 2.0000000000000001e-01
    gammaCorrection = 0
    nlevels = 64
    #hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, derivAperture, winSigma,
    #                        histogramNormType, L2HysThreshold, gammaCorrection, nlevels)
    hog = cv2.HOGDescriptor()  # default descriptor; the parameters defined above are left unused
    ########
    desc = LocalBinaryPatterns(250, 8)
    radius = 16
    # Number of points to be considered as neighbourers
    no_points = 16  #8 * radius
    # Uniform LBP is used

    counter = 0
    #########
    for rect in rects:
        face = 1
        # extract the ROI of the *original* face, then align the face
        # using facial landmarks
        (x, y, w, h) = rect_to_bb(rect)
        d = rect

        img = clahe_image  # np.float32(frame) / 255.0;
        crop = img[d.top():d.bottom(), d.left():d.right()]

        faceAligned = clahe_image  # fa.align(frame, gray, rect)

        #faceAligned = fa.align(frame, gray, rect)

        #faceAligned = fa.align(frame, gray, rect)

        winStride = (8, 8)
        padding = (8, 8)
        locations = ((10, 20), )
        #detections = rects #detector(image, 1)
        #d = rect
        #test =0
        #if test == 0: #for k, d in enumerate(detections):  # For all detected face instances individually
        shape = predictor(
            clahe_image,
            rect)  # Draw Facial Landmarks with the predictor class
        #landmarksPoints = np.matrix([[p.x, p.y] for p in predictor(image, detections[0]).parts()])
        landmarksPoints = np.matrix(
            [[p.x, p.y] for p in predictor(faceAligned, rect).parts()])
        ##Get centre of mass

        #####hog features scickit
        from skimage.feature import hog as hog1
        from skimage import data, color, exposure

        #crop = cv2.resize(crop, (150, 150))
        '''hog_image = hog1(faceAligned, orientations=8, pixels_per_cell=(32, 32), # 32
                            cells_per_block=(1, 1)) #, visualize=True)'''

        features, hog_image = hog1(clahe_image,
                                   orientations=8,
                                   pixels_per_cell=(32, 32),
                                   cells_per_block=(1, 1),
                                   visualise=True)  # pre-0.15 scikit-image spelling; newer releases use visualize=True

        #fd, hog_image = hog(image, orientations=8, pixels_per_cell=(32, 32),
        #                    cells_per_block=(1, 1), visualize=True)
        hog_features = features
        hog_images = hog_image
        #h = hog.compute(faceAligned, winStride, padding, locations)
        # note: it is the rendered HOG visualization image, not the feature
        # vector, that gets flattened and ultimately returned below
        h = hog_image.flatten()

        image = clahe_image
        '''fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')

        # Rescale histogram for better display
        #hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
        hog_image_rescaled = hog_image
        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax1.set_adjustable('box-forced')
        plt.show()'''

        ######get local binary pattern

        #histLBP = desc.describe(crop)

        # Calculate the histogram

        lbp = local_binary_pattern(crop, no_points, radius,
                                   method='uniform')  #'default')
        lbparray = np.array(lbp)
        import scipy
        # scipy.stats.itemfreq is deprecated (removed in SciPy 1.3+);
        # `histogram` and `x` below are computed but never used
        histogram = scipy.stats.itemfreq(lbp)

        lbparray = lbparray.flatten()  #histogram.flatten()
        x = itemfreq(lbp.ravel())
        # Normalize the histogram
        histLBP = lbparray  # x[:, 1] / sum(x[:, 1])
        # Append image path in X_name

        #######################

        xlist = []
        ylist = []
        allcoords = []

        for i in range(1, 68):  # Store X and Y coordinates in two lists (note: landmark 0 is skipped)
            xlist.append(float(shape.part(i).x))
            ylist.append(float(shape.part(i).y))
            coords = [float(shape.part(i).x), float(shape.part(i).y)]
            allcoords.append(coords)

        ###### TODO write ANIMA inspired code here (a hedged sketch of these distances follows this list)
        # get centre of left brows
        # get centre of left eye
        #distance between brows and eyes centres (add to features array)
        #get centre right brows
        #get centre right eyes
        #distance between brows and eyes centres (add this to feature array )
        #distance between centre left brows and right brows
        #get centre of upper lip
        #get centre of lower lips
        #get distance between centres upper and lower lips

        #################################

        xmean = np.mean(
            xlist)  # Get the mean of both axes to determine centre of gravity
        ymean = np.mean(ylist)
        xcentral = [
            (x - xmean) for x in xlist
        ]  # get distance between each point and the central point in both axes
        ycentral = [(y - ymean) for y in ylist]

        if xlist[26] == xlist[29]:  # if the x-coordinates are equal the angle is 0; guards the division below against divide-by-zero
            anglenose = 0
        else:
            anglenose = int(
                math.atan((ylist[26] - ylist[29]) / (xlist[26] - xlist[29])) *
                180 / math.pi)

        if anglenose < 0:
            anglenose += 90
        else:
            anglenose -= 90

        landmarks_vectorised = []

        centreMass = ndimage.measurements.center_of_mass(np.array(allcoords))
        '''p1, p2 = HeadPoseEstimation(faceAligned, landmarksPoints)
        p1 = np.asarray(p1)
        p2 = np.asarray(p2)'''
        for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):
            landmarks_vectorised.append(w)
            landmarks_vectorised.append(z)
            meannp = np.asarray((ymean, xmean))
            coornp = np.asarray((w, z))
            #TODO : DISTANCE FROM CENTRE GRAVITY TO EACH POINT

            #DistCentreMass = np.linalg.norm(coornp - centreMass)
            DistCentreMass = np.linalg.norm(coornp - meannp)
            #add points for head pose and distance for each point
            '''distp1 = np.linalg.norm(coornp - p1)
            distp1X = p1[0]
            distp1Y = p1[1]
            distp2 = np.linalg.norm(coornp - p2)
            distp2X = p2[0]
            distp2Y = p2[1]'''
            #sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
            dist = np.linalg.norm(coornp - meannp)
            anglerelative = (math.atan(
                (z - ymean) / (w - xmean)) * 180 / math.pi) - anglenose
            #landmarks_vectorised.append(dist)
            '''landmarks_vectorised.append(distp1)
            landmarks_vectorised.append(distp2)'''
            '''landmarks_vectorised.append(distp1X)
            landmarks_vectorised.append(distp1Y)

            landmarks_vectorised.append(distp2X)
            landmarks_vectorised.append(distp2Y)'''

            #landmarks_vectorised.append(DistCentreMass)

            #landmarks_vectorised.append(dist)
            #landmarks_vectorised.append(anglerelative)
            '''cv2.circle(frame, (int(coornp[0]), int(coornp[1])), 1, (255, 0, 0), -1)

            p1 = (int(coornp[0]), int(coornp[1]))
            #p2 = (int(centreMass[0]), int(centreMass[1]))
            p2 = (int(xmean), int(ymean))
            if counter < 2:
                cv2.line(frame, p1, p2, (255,0, 0), 1)
            counter +=1'''
    ###cv2.imwrite('CentreMassCK.png', frame)
    if len(rects) < 1:
        landmarks_vectorised2 = "error"  # sentinel string returned when no face is detected
    else:

        #landmarks_vectorised2 = np.array(landmarks_vectorised)
        landmarks_vectorised2 = h  # np.array(h) #np.concatenate((landmarks_vectorised2, h), axis=0)
        #landmarks_vectorised2 = np.array(histLBP)
        #landmarks_vectorised2 =  np.concatenate((landmarks_vectorised2, h), axis=0)
    #landmarks_vectorised2 = landmarks_vectorised2.reshape(67,6,1)
    #landmarks_vectorised1.append(landmarks_vectorised2)
    #landmarks_vectorised1 = np.array(landmarks_vectorised1)
    #landmarks_vectorised1 = np.array(landmarks_vectorised2)
    return landmarks_vectorised2
Code example #41
0
def clahe_demo(img):
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    clahe = cv.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
    dst = clahe.apply(gray)
    cv.imshow('CLAHE', dst)
    cv.waitKey(0)  # imshow only renders once the GUI event loop is pumped
Code example #42
0
def CLAHE(image, clipLimit=2.0, tileGridSize=(8,8)):
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    cl_img = clahe.apply(image)
    return cl_img
Code example #43
0
#  Image enhancement code using CLAHE
import cv2

#  Read the image
file_name = input('Enter the file name that you want to enhance: ')
#  img = cv2.imread('kids-room.jpg')
img = cv2.imread(file_name)

#  Preparation for applying CLAHE (with no arguments, OpenCV defaults to
#  clipLimit=40.0 and tileGridSize=(8, 8))
clahe = cv2.createCLAHE()

#  Converting the image into Grey-scale image
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#  Apply enhancement process
enh_image = clahe.apply(gray_image)

#  Save it in a file
#  cv2.imwrite('kids-room-enhanced.jpg', enh_image)
cv2.imwrite(file_name[:-4] + '-enhanced' + file_name[-4:], enh_image)  # assumes a three-character extension such as .jpg

print('Done enhancing')
Code example #44
0
File: Tools_002.py Project: sankalpav/OpSeF-IV
def clahe_augment2(im, tg, cl):
    # applies the OpenCV implementation of Contrast-Limited Adaptive Histogram Equalization (CLAHE)
    # see https://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html
    clahe_low = cv.createCLAHE(clipLimit=cl, tileGridSize=tg)
    img_low = clahe_low.apply(im)
    return img_low
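A minimal usage sketch (not from the original file; the image path is a placeholder, and CLAHE expects a single-channel 8-bit input):

import cv2 as cv
im = cv.imread('cells.png', cv.IMREAD_GRAYSCALE)
im_eq = clahe_augment2(im, (8, 8), 2.0)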
Code example #45
0
# (snippet assumes earlier lines with: import cv2, numpy as np,
#  matplotlib.pyplot as plt, and a grayscale image loaded into `gray`)
height = gray.shape[0]
width = gray.shape[1]

gray_counter = np.zeros(256, dtype = int) #256 bins, frequencies of values 0 to 255

for i in range(height):
    for j in range(width):
        gray_counter[gray[i][j]] += 1
        
equ_image=cv2.equalizeHist(gray)
        
equ_counter = np.zeros(256, dtype = int) 
for i in range(height):
    for j in range(width):
        equ_counter[equ_image[i][j]] += 1
        
clahe = cv2.createCLAHE(clipLimit = 8, tileGridSize=(4,4)) #Default = clip size 40, tile grid size = 8x8   
clahe_image = clahe.apply(gray)

clahe_counter = np.zeros(256, dtype = int) 
for i in range(height):
    for j in range(width):
        clahe_counter[clahe_image[i][j]] += 1
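# The three counting loops above can each be collapsed to a single numpy call;
# this equivalent (and much faster) formulation is an aside, not in the original:
gray_counter = np.bincount(gray.ravel(), minlength=256)
equ_counter = np.bincount(equ_image.ravel(), minlength=256)
clahe_counter = np.bincount(clahe_image.ravel(), minlength=256)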

#visualization
plt.plot(clahe_counter,'r',label="Histogram")
plt.title('CLAHE Histogram')
plt.ylabel('Pixel count'),plt.xlabel('Intensity')
plt.show()

plt.plot(equ_counter,'g',label="Histogram")
plt.title('Histogram Equalization')
plt.ylabel('Pixel count'),plt.xlabel('Intensity')
plt.show()
Code example #46
0
File: HistEQ.py Project: danielorf/opencv_toolbox
# (`img`, `image_width`, and `image_height` are defined earlier in the original file)
print(image_width)
print(image_height)

img = cv2.resize(img, (image_width, image_height),
                 interpolation=cv2.INTER_CUBIC)

img2 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

#img2 = cv2.medianBlur(img2,3)
#img2 = cv2.bilateralFilter(img2,3,25,25)
#img2[:, :, 0] = cv2.equalizeHist(img2[:, :, 0])
#img2[:, :, 1] = cv2.equalizeHist(img2[:, :, 1])
#img2[:, :, 0] = cv2.GaussianBlur(img2[:, :, 0],(9,9),0)

#img2[:, :, 2] = cv2.equalizeHist(img2[:, :, 2])

clahe1 = cv2.createCLAHE(clipLimit=1, tileGridSize=(5, 5))
clahe2 = cv2.createCLAHE(clipLimit=1.250, tileGridSize=(25, 25))
img2[:, :, 1] = clahe1.apply(img2[:, :, 1])
img2[:, :, 2] = clahe2.apply(img2[:, :, 2])
#img2[:, :, 1] = cv2.medianBlur(img2[:, :, 1],9)
#img2[:, :, 2] = cv2.bilateralFilter(img2[:, :, 2],3,25,25)
#img2[:, :, 1] = cv2.GaussianBlur(img2[:, :, 1],(9,9),0)

img2 = cv2.cvtColor(img2, cv2.COLOR_HSV2BGR)

img3 = np.hstack((img, img2))

cv2.imshow('img3', img3)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
Code example #47
0
def hist_eq(frame):
    lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=4)
    # equalize only the L (lightness) channel so hue and saturation are preserved
    lab[..., 0] = clahe.apply(lab[..., 0])
    frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return frame
Code example #48
0
File: test_photometric.py Project: sennnnn/mmcv
 def _clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
     clahe = cv2.createCLAHE(clip_limit, tile_grid_size)
     return clahe.apply(np.array(img, dtype=np.uint8))
Code example #49
0
File: image.py Project: nalinahuja22/spectra
    def process_images(self):
        if (self.image_list):
            # Initialize Process Counter
            curr = 0

            # Initialize Hash List
            self.hashes = []

            # Initialize Blurred Array
            self.blurred = []

            # Load Image Data Map
            image_data = load_image_data(self.image_path)

            # Error Check Image Data Map
            if (image_data is None):
                image_data = {}

            # Calculate Hash Values
            for image in self.image_list:
                # Create Data Object
                if (not (image in image_data)):
                    image_data[image] = data(lmod=util.get_lmod(image))

                # Calculate Imagehash
                self.hashes.append(imagehash.average_hash(Image.open(image)))

                # End Imagehash Calculation----------------------------------------------------------------------------------------------------------------------------

                # Store Image Name
                input_image = image

                # Store Recent Modification Time
                curr_lmod = util.get_lmod(image)

                # Calculate Blur Coefficient
                if ((image_data[image].vari is None)
                        or (image_data[image].nmax is None)
                        or (image_data[image].rmsv is None)
                        or (image_data[image].lmod < curr_lmod)):
                    # Compute RMS Value
                    loaded_image = Image.open(image).convert('L')
                    image_stats = ImageStat.Stat(loaded_image)
                    image_rms = image_stats.rms[0]

                    # Determine RMS Deficiency
                    if (image_rms < self.rms_threshold):
                        # Create Cache Folder
                        try:
                            util.mkdir(
                                util.form_path([self.image_path, TEMP_FOLD]))
                        except FileExistsError:
                            pass

                        # Create Cache File
                        input_image = util.form_path([
                            util.dirname(util.absolute(image)), TEMP_FOLD,
                            EQ_IMAGE.format(util.basename(image))
                        ])

                        # Equalize Image Histogram
                        image_file = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                        clahe = cv2.createCLAHE(clipLimit=1.125,
                                                tileGridSize=(4, 4))
                        eq_image = clahe.apply(image_file)
                        cv2.imwrite(input_image, eq_image)

                    # Ignore Future Warnings
                    with warnings.catch_warnings():
                        warnings.filterwarnings("ignore")

                        # Compute Laplace Matrix
                        loaded_image = rgb2gray(io.imread(input_image))
                        laplace_data = laplace(loaded_image, ksize=10)

                    # Store Image Data
                    image_data[image].vari = variance(laplace_data)
                    image_data[image].nmax = np.amax(laplace_data)
                    image_data[image].rmsv = image_rms
                    image_data[image].lmod = curr_lmod

                # Group Blurry Images
                if ((image_data[image].vari < self.var_threshold)
                        and (image_data[image].nmax < self.max_threshold)):
                    self.blurred.append(image)

                # Update Prompt
                print("\rProcessing Images - {}% ".format(
                    int(curr * 100 / len(self.image_list))),
                      end="")
                curr += 1

            # End Variance Computation---------------------------------------------------------------------------------------------------------------------------------

            # Write Computed Data To Data File
            with open(util.form_path([self.image_path, DATA_FILE]),
                      'w') as data_file:
                for image in image_data:
                    if (image in self.image_list):
                        data_file.write("{},{},{},{},{}\n".format(
                            image, image_data[image].vari,
                            image_data[image].nmax, image_data[image].rmsv,
                            image_data[image].lmod))
            # (the file is closed automatically when the `with` block above exits)

            # End Write Operation--------------------------------------------------------------------------------------------------------------------------------------

            # Initialize Diff List
            self.hash_diffs = []

            # Calculate Hash Differences
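            # (subtracting two imagehash objects yields the Hamming distance
            # between their underlying hash bit arrays)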
            for i in range(len(self.hashes) - 1):
                self.hash_diffs.append(
                    (self.hashes[i + 1] - self.hashes[i]) * self.precision)

            # End Hash Difference Computation--------------------------------------------------------------------------------------------------------------------------

            # Update Prompt
            print("\rProcessed All Images   ")
        else:
            util.perror("spectra: Found no images to process")
Code example #50
0
def grab_webcamframe():
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_image = clahe.apply(gray)
    return clahe_image
Code example #51
0
def clahe(img, clipLimit=2.0, tileGridSize=(5, 5)):
    img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_lab[:, :, 0] = clahe.apply(img_lab[:, :, 0])  # CLAHE on the L channel only
    img_output = cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB)
    return img_output
Code example #52
0
def process_img(img_path):
    global avg_error, max_error, img_maxerr

    # open the image and extract its size, extension, and file name
    img = cv2.imread(img_path)
    img_name = img_path.split(".")[0]
    img_h, img_w, img_channels = img.shape  # images may come in different resolutions

    # define crop and masks depending on the resolution (use only in the Fresh Up version?)
    if(img_w == 1024):
        crop_y = img_h - 145
        crop_x = img_w - 131
        size = 108
        mask_img = '_mask1024.png'
        imask_img = '_imask1024.png'
        magic_x = 102
        magic_y = 42

    if(img_w == 640):
        crop_y = img_h - 91
        crop_x = img_w - 83
        size = 68
        mask_img = '_mask640.png'
        imask_img = '_imask640.png'
        magic_x = 64
        magic_y = 26

    # define some important paths - improves code readability
    crop_path = output_dir + img_name + '_1_crop.png'
    output_imgpath = output_dir + img_name + "_"

    printstep("processing %s..." % img_name)

    # extract a 68x68 square from the bottom-right corner - works for all resolutions
    crop_img = img[crop_y:crop_y+size, crop_x:crop_x+size]
    cv2.imwrite(crop_path, crop_img)

    # apply the mask
    mask_img = cv2.imread(mask_img, 0)
    proc_img = cv2.bitwise_and(crop_img, crop_img, mask=mask_img)
    color_value = proc_img[magic_y, magic_x]  # sample point whose color is used to paint over the mask
    proc_img[numpy.where((proc_img==[0,0,0]).all(axis=2))] = color_value
    cv2.imwrite(output_imgpath + "2_mask.png", proc_img)


    # convert to another color space
    proc_img = cv2.cvtColor(proc_img, cv2.COLOR_BGR2Lab)
    proc_img = proc_img[:,:,2]  # extract the b channel
    cv2.imwrite(output_imgpath + "3_colorspace.png", proc_img)


    # inpainting
    inmask_img = cv2.imread(imask_img, 0)
    proc_img = cv2.inpaint(proc_img, inmask_img, 1, cv2.INPAINT_NS)
    cv2.imwrite(output_imgpath + "4_inpaint.png", proc_img)


    # normalize with CLAHE to improve the inpainting result
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(1,1))
    proc_img = clahe.apply(proc_img)
    cv2.imwrite(output_imgpath + "5_clahe.png", proc_img)


    # binarize the image using Otsu's method
    threshold_value, proc_img = cv2.threshold(proc_img, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    cv2.imwrite(output_imgpath + "6_binary.png", proc_img)


    # apply morphology (opening)
    kernel = numpy.ones((3,3), numpy.uint8)
    proc_img = cv2.morphologyEx(proc_img, cv2.MORPH_OPEN, kernel, iterations=2)
    cv2.imwrite(output_imgpath + "7_morph.png", proc_img)


    # detect and draw the contour, for visualization purposes only
    contours, hierarchy = cv2.findContours(proc_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contorno_img = cv2.imread(crop_path)
    cv2.drawContours(contorno_img, contours, -1, (0,255,0), 1)
    cv2.imwrite(output_imgpath + "8_contorno.png", contorno_img)


    # draw the ellipse, for visualization purposes only
    contours = contours[0]
    ellipse = cv2.fitEllipse(contours)
    cv2.ellipse(crop_img, ellipse, (0,255,0), 1)
    cv2.imwrite(output_imgpath + "9_elipse.png", crop_img)


    # result
    printresult("calculating %s..." % img_name)
    ang_obtido = ellipse[2]
    if(ang_obtido > 90):  # fitEllipse returns an angle in [0, 180); fold it to the acute equivalent
        ang_obtido = 180 - ang_obtido
    print(ang_obtido)

    # bookkeeping for the test dataset
    if(glob_testmode):
        ang_esperado = int(img_name.split("_")[0])
        ang_esperado /= 10
        error = abs(ang_esperado - ang_obtido)
        printresult("expected angle: %.1f" % ang_esperado)
        printresult("obtained angle: %.1f" % ang_obtido)

        printresult("error: %.1f" % error)
        avg_error += error
        quad_imgs[int(img_name.split("_")[2])-1] += 1
        quad_err[int(img_name.split("_")[2])-1] += error
        if(error > max_error):
            max_error = error
            img_maxerr = img_name

        if(glob_printstep or glob_printresult): # print a newline between loop iterations when debugging
            print()
Code example #53
0
    top_w = top_w + tw
    left_w = left_w + lw
    cv2.namedWindow('protus', cv2.WINDOW_NORMAL)
    cv2.moveWindow('protus', top_w, left_w)
    cv2.resizeWindow('protus', (int(newiw * sc), int(ih * sc)))

    top_w = top_w + tw
    left_w = left_w + lw
    cv2.namedWindow('clahe', cv2.WINDOW_NORMAL)
    cv2.moveWindow('clahe', top_w, left_w)
    cv2.resizeWindow('clahe', (int(newiw * sc), int(ih * sc)))

    # create a CLAHE object (Arguments are optional)
    #clahe = cv2.createCLAHE(clipLimit=0.8, tileGridSize=(5,5))
    clahe = cv2.createCLAHE(clipLimit=0.8, tileGridSize=(2, 2))
    cl1 = clahe.apply(frame)

    # lightly thresholded image
    frame1 = np.copy(frame)
    Seuil_bas = np.percentile(frame, 25)        # low threshold: 25th percentile
    Seuil_haut = np.percentile(frame, 99.9999)  # high threshold: 99.9999th percentile
    frame1[frame1 > Seuil_haut] = Seuil_haut
    #print('low threshold', Seuil_bas)
    #print('high threshold', Seuil_haut)
    fc = (frame1 - Seuil_bas) * (65000 / (Seuil_haut - Seuil_bas))
    fc[fc < 0] = 0
    frame_contrasted = np.array(fc, dtype='uint16')
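    # The stretch above collapses to a single clipped rescale (an equivalent
    # aside, not in the original source):
    # frame_contrasted = np.clip((frame - Seuil_bas) * 65000.0 /
    #                            (Seuil_haut - Seuil_bas), 0, 65000).astype('uint16')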
    cv2.imshow('sun', frame_contrasted)
    #cv2.waitKey(0)
Code example #54
0
File: a.py Project: Uncle-Justice/hello-world
def transform(imagePath,show=False):
    # read the input image
    image = cv2.imread(imagePath)

    ###### preprocessing ##########
    # grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(5,5)) 
    #res_clahe = clahe.apply(gray.copy())
    # gray = cv2.GaussianBlur(gray, (5, 5), 0)
    #gray = cutHist(gray,gray[6][6] - 100, gray[6][6] - 55, 80)

    if drawHist:
        cv2.imshow("gray", gray)
        cv2.waitKey(time)
        cv2.destroyAllWindows()
        drawHistoGram(gray)
        return

    # sharpen: also known as edge enhancement
    kernel = np.array([[-0.7 * sharpen, -sharpen, -0.7 * sharpen], [-sharpen, 7 * sharpen  , -sharpen], [-0.7 * sharpen, -sharpen, -0.7 * sharpen]], np.float32)
    #np.array([[0, -1.4, 0], [-1.4, 6.2, -1.4], [0, -1.4, 0]], np.float32) 
    dst = cv2.filter2D(gray, -1, kernel=kernel)

    
    bg = dst[bgx][bgy]
    
    # binarize
    ret, thresh = cv2.threshold(dst, bg, 255, cv2.THRESH_BINARY)
    
    if debugID:
        cv2.imshow("threshID", thresh)
        cv2.waitKey(time)
        cv2.destroyAllWindows()

    # watermark preprocessing, gray range 64 - 145 (background 170, peak 164)
    wp = cv2.imread(path + 'waterprint/thresh2.jpg',cv2.IMREAD_GRAYSCALE)
    r, wp = cv2.threshold(wp, 127, 255, cv2.THRESH_BINARY_INV)
    wp = wp // 255
    if getWP:
        retW, wp = cv2.threshold(wp, 178, 255, cv2.THRESH_BINARY)
        cv2.imshow("wp",wp)
        cv2.imwrite(path + 'waterprint/thresh2.jpg',wp)
        wContour = cv2.findContours(wp, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
        wContour = sorted(wContour, key=cv2.contourArea, reverse=True)[:2]
        rect = cv2.minAreaRect(wContour[0])
        box = cv2.boxPoints(rect)
        pts = np.int0(box)
        print(pts)
        cv2.waitKey(time)

    
    #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4)) 
    #res_clahe = clahe.apply(gray.copy())
    
    
    
    ### edges (unused)
    #edged = cv2.Canny(thresh, 120, 170)
    
    # show the preprocessing result
    if show:
        print("STEP 1: 边缘检测")
        result = np.hstack((dst,thresh))
        cv2.imshow("Result", result)
        # cv2.imshow("Edged", edged)
        cv2.waitKey(time)
        cv2.destroyAllWindows()

    ## card contour detection #####################################################################################################################
    # collect the coordinate sets of all contours here
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:12]
    
    screenCnt = []
    # iterate over the contours and sort them
    # compute a rectangular approximation of the ID-card contour
    cnts = rectBound(cnts,screenCnt,True)
    screenCnt.clear()
    # select the two card contours by area and centroid
    findContWithArea(image,cnts,stdArea,areaOffSet,screenCnt,debugID)
    

    ### watermark contours #########################################################################################################################
    
    wpCnt = []
    for sc in screenCnt:
        #sc = np.array(sc)
        waterCnt = []
        wraped = four_point_transform(gray, sc)
        #print(wraped.shape)
        #wraped = np.rollaxis(wraped,1)
        #print(wraped.shape)

        # edge enhancement in the horizontal and vertical (cross) directions
        clahe = cv2.createCLAHE(clipLimit = 3.0, tileGridSize=(5,5)) 
        res_clahe = clahe.apply(wraped.copy())
        threshR = cv2.filter2D(res_clahe, -1, kernel = np.array([[0, -1.3, 0], [-1.3, 6, -1.3], [0, -1.3, 0]], np.float32))
        if debugWP:    
            cv2.imshow("sharpen", threshR)
            cv2.waitKey(time)
            cv2.destroyAllWindows()
        # binarize
        #retR, threshR = cv2.threshold(threshR, 20 , 255, cv2.THRESH_TOZERO)
        kernel_hat = np.ones((3,3),np.uint8)
        alpha = getBgG(threshR)
            
        retR, threshR = cv2.threshold(threshR, alpha - 50, 255, cv2.THRESH_BINARY)
        #threshR = cv2.dilate(threshR,kernel_hat,iterations = 1)
        #threshR = cv2.erode(threshR,kernel_hat,iterations = 1)
        
        if debugWP:    
            cv2.imshow("binary", threshR)
            cv2.waitKey(time)
            cv2.destroyAllWindows()

        # morphological dilation and gradient operations
        kernel_v = np.array([[0,1,0],[0,1,0],[0,1,0]],np.uint8)  
        kernel_h = np.array([[0,0,0],[1,1,1],[0,0,0]],np.uint8) 
        threshR = cv2.dilate(threshR,kernel_hat,iterations = 1)
        
        cntsR = cv2.findContours(threshR.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
        cntsR = sorted(cntsR, key=cv2.contourArea, reverse=True)[:8]
    
        # compute a rectangular approximation of the watermark contour
        cntsR = rectBound(cntsR,waterCnt)
        waterCnt.clear()
        
        for pts in cntsR:
            # compute area and centroid
            m1 = cv2.moments(pts)
            a1 = cv2.contourArea(pts)

            if debugWP:
                print(a1)
                print(cv2.matchShapes(wpCntIn, pts, 1, 0.0))
                copy = wraped.copy()
                #cv2.drawContours(copy, [wpCntIn], 0, (0, 200, 255), 2)
                #cv2.drawContours(copy, [pts], 0, (0, 255, 0), 2)
                wpDebug = four_point_transform(copy, pts)
                cv2.imshow("WaterPrintOutline", wpDebug)  
                cv2.waitKey(time)
                cv2.destroyAllWindows()

            # select the watermark contour by area
            findWP(a1,m1,pts,waterCnt)
        
        # fallback
        if len(waterCnt) < 1 :
            threshR2 = cv2.morphologyEx(threshR, cv2.MORPH_GRADIENT, kernel_hat)
            if debugWP:    
                cv2.imshow("erode_dilate", threshR2)
                cv2.waitKey(time)
                cv2.destroyAllWindows()
            #threshR = cv2.dilate(threshR,kernel_h,iterations = 3)
            #threshR = cv2.erode(threshR,kernel_h,iterations = 2)

            cntsR = cv2.findContours(threshR2.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
            cntsR = sorted(cntsR, key=cv2.contourArea, reverse=True)[:8]
            cntsR = rectBound(cntsR,waterCnt)
            waterCnt.clear()
            for pts in cntsR:
                m1 = cv2.moments(pts)
                a1 = cv2.contourArea(pts)
                if debugWP:
                    print(a1)
                    print(cv2.matchShapes(wpCntIn, pts, 1, 0.0))
                    copy = wraped.copy()
                    #cv2.drawContours(copy, [wpCntIn], 0, (0, 200, 255), 2)
                    #cv2.drawContours(copy, [pts], 0, (0, 255, 0), 2)
                    wpDebug = four_point_transform(copy, pts)
                    cv2.imshow("WaterPrintOutline2", wpDebug)  
                    cv2.waitKey(time)
                    cv2.destroyAllWindows()
                findWP(a1,m1,pts,waterCnt)
        # TODO: determine the outer edge of the watermark
        if len(waterCnt) > 0:
            alpha = getBgG(wraped)
            # output preprocessing
            # ratio of (255 - background gray level) to 255
            bgc = (255 - alpha) / 255
            #np.ones(gray.shape,np.uint8)
            # exposure boost
            wraped = cv2.add(np.array(wraped * bgc * exposure, np.uint8), wraped)
            #afterProcess = cv2.addWeighted(gray, 1, gray * bgc , exposure / 255, 0)
            #cv2.imshow("exposure", wraped)
            #cv2.waitKey(time)
            # sharpen
            wraped = cv2.filter2D(wraped, -1, kernel=np.array([[-0.7 * sharpen, -sharpen, -0.7 * sharpen], [-sharpen, 7.9 * sharpen  , -sharpen], [-0.7 * sharpen, -sharpen, -0.7 * sharpen]], np.float32))
            #cv2.imshow("sharpen", wraped)
            #cv2.waitKey(time)
            #r, threshWP = cv2.threshold(wraped,alpha - 1, 255, cv2.THRESH_BINARY)
            if debugWP:    
                cv2.imshow("wraped", wraped)
                cv2.waitKey(time)
                cv2.destroyAllWindows()
            
            alpha = getBgG(wraped)
            ### four points moving inside-out, then four points moving outside-in; build a ring mask from them
            rect = order_points(waterCnt[0])
            (tl, tr, br, bl) = rect
            (TLO, TRO, BRO, BLO) = rect
            (TLI, TRI, BRI, BLI) = rect
            for i in range(-5, 5):
                TLO = tl + np.array([ i,  i])
                # outer gray level minus inner gray level
                deltaTL = int(wraped[int(TLO[1])][int(TLO[0])]) - int(wraped[int(TLO[1]) + 1][int(TLO[0]) + 1])
                if deltaTL > wpGrad * alpha or deltaTL < - wpTxDt * alpha and int(wraped[int(TLO[1])][int(TLO[0])]) > wpBgRatio * alpha:
                    TLO = TLO + np.array([ -1, -1])
                    break
                elif i == 4:
                    TLO = tl + np.array([- 3, - 3])
            for i in range(-5, 5):
                TRO = tr + np.array([  -i,  i])
                deltaTR = int(wraped[int(TRO[1])][int(TRO[0])]) - int(wraped[int(TRO[1]) - 1][int(TRO[0]) + 1])
                if deltaTR > wpGrad * alpha or deltaTR < - wpTxDt * alpha and int(wraped[int(TRO[1])][int(TRO[0])]) > wpBgRatio * alpha:
                    TRO = TRO + np.array([ 1, -1])
                    break
                elif i == 4:
                    TRO = tr + np.array([ 3, - 3])
            for i in range(-5, 5):
                BRO = br + np.array([ -i,  -i])
                deltaBR = int(wraped[int(BRO[1])][int(BRO[0])]) - int(wraped[int(BRO[1]) - 1][int(BRO[0]) - 1])
                if deltaBR > wpGrad * alpha or deltaBR < - wpTxDt * alpha and int(wraped[int(BRO[1])][int(BRO[0]) ]) > wpBgRatio * alpha:
                    BRO = BRO + np.array([ 1,  1])
                    break
                elif i == 4:
                    BRO = br + np.array([ 3,  3])
            for i in range(-5, 5):
                BLO = bl + np.array([ i, - i])
                deltaBL = int(wraped[int(BLO[1])][int(BLO[0])]) - int(wraped[int(BLO[1]) + 1][int(BLO[0]) - 1])
                if deltaBL > wpGrad * alpha or deltaBL < - wpTxDt * alpha and int(wraped[int(BLO[1])][int(BLO[0]) ]) > wpBgRatio * alpha:
                    BLO = BLO + np.array([ -1,  1])
                    break
                elif i == 4:
                    BLO = bl + np.array([ -3, 3])


            for i in range(-5, 5):
                TLI = tl + np.array([ -i,  -i])
                # inner gray level minus outer gray level
                deltaTL = int(wraped[int(TLI[1])][int(TLI[0])]) - int(wraped[int(TLI[1]) - 1][int(TLI[0]) - 1])
                if deltaTL > wpGrad * alpha or deltaTL < -wpTxDt * alpha and int(wraped[int(TLI[1])][int(TLI[0])]) > wpBgRatio * alpha:
                    #TLI = TLI + np.array([ 1, 1])
                    break
                elif i == 4:
                    TLI = tl + np.array([1, 1])
            for i in range(-5, 5):
                TRI = tr + np.array([ i,  -i])
                deltaTR = int(wraped[int(TRI[1])][int(TRI[0])]) - int(wraped[int(TRI[1]) + 1][int(TRI[0]) - 1])
                if deltaTR > wpGrad * alpha or deltaTR < -wpTxDt * alpha and int(wraped[int(TRI[1])][int(TRI[0])]) > wpBgRatio * alpha:
                    #TRI = TRI + np.array([ -1, 1])
                    break
                elif i == 4:
                    TRI = tr + np.array([-1, 1])
            for i in range(-5, 5):
                BRI = br + np.array([ i,  i])
                # inner gray level minus outer gray level
                deltaBR = int(wraped[int(BRI[1])][int(BRI[0])]) - int(wraped[int(BRI[1]) + 1][int(BRI[0]) + 1])
                if deltaBR > wpGrad * alpha or deltaBR < -wpTxDt * alpha and int(wraped[int(BRI[1])][int(BRI[0])]) > wpBgRatio * alpha:
                    #BRI = BRI + np.array([ -1, -1])
                    break
                elif i == 4:
                    BRI = br + np.array([-1, -1])
            for i in range(-5, 5):
                BLI = bl + np.array([ -i,  i])
                # inner gray level minus outer gray level
                deltaBL = int(wraped[int(BLI[1])][int(BLI[0])]) - int(wraped[int(BLI[1]) - 1][int(BLI[0]) + 1])
                if deltaBL > wpGrad * alpha or deltaBL < -wpTxDt * alpha and int(wraped[int(BLI[1])][int(BLI[0])]) > wpBgRatio * alpha:
                    #BLI = BLI + np.array([ 1, -1])
                    break
                elif i == 4:
                    BLI = bl + np.array([1, -1])
            #if abs(int(wraped[int(BLO[1])][int(BLO[0])]) - int(wraped[int(bl[1])][int(bl[0])])) < 0.4 * wraped[int(bl[1])][int(bl[0])]:
            cntO = np.array([TLO, TRO, BRO, BLO],np.int32)
            cntI = np.array([TLI, TRI, BRI, BLI],np.int32)
            mask = np.zeros(wraped.shape,np.uint8)
            maskIn = cv2.bitwise_not(cv2.fillConvexPoly(mask.copy(), cntI, 255))
            maskOut = cv2.fillConvexPoly(mask.copy(), cntO,  255)
            cv2.bitwise_and(maskIn,maskOut.copy(),mask)
            cv2.imshow("mask", cv2.bitwise_and(wraped,wraped,mask = mask))  
            cv2.waitKey(time)
            cv2.destroyAllWindows()
            #cv2.imshow("maskOut", maskOut)  
            #cv2.waitKey(time)
            #cv2.destroyAllWindows()
            if debugWP:
                print("tl " + str(wraped[int(TLO[1])][int(TLO[0])]) \
                + " tr " + str(wraped[int(TRO[1])][int(TRO[0])]) \
                + " br " + str(wraped[int(BRO[1])][int(BRO[0])]) \
                + " bl " + str(wraped[int(BLO[1])][int(BLO[0])]))
                copy = wraped.copy()
                #wpDebug = cv2.circle(copy,(int(TLO[0]),int(TLO[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(TRO[0]),int(TRO[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(BRO[0]),int(BRO[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(BLO[0]),int(BLO[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(TLI[0]),int(TLI[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(TRI[0]),int(TRI[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(BRI[0]),int(BRI[1])),1, color = (0,0,0))
                #wpDebug = cv2.circle(wpDebug,(int(BLI[0]),int(BLI[1])),1, color = (0,0,0))
                #cv2.imshow("WaterPrintOutline", wpDebug)  
                #cv2.waitKey(time)
                #cv2.destroyAllWindows()

        hst = cv2.calcHist([wraped],[0],mask,[256],[0,256])[:int(0.4 * alpha)]  # note: mask and maskOut are only defined when a watermark contour was found above
        indexes, _ = scipy.signal.find_peaks(np.array(hst).ravel(), height=20, distance=28)
        if len(indexes) == 1:
            indexes2, _ = scipy.signal.find_peaks(np.array(hst[:80]).ravel(), height=6, distance=10)
            indexes = np.concatenate((indexes, indexes2))
        print(indexes)
        Cut = cutHist(wraped,int(max(indexes) * 0.5 + min(indexes) * 0.5),int(max(indexes) * 2.7), alpha,1,maskOut)
        cv2.imshow("Cut", Cut)  
        cv2.waitKey(time)
        cv2.destroyAllWindows()
        plt.plot(hst,'r')
        plt.xlim([0,int(0.4 * alpha)])
        plt.show()
        wpCnt.extend(waterCnt)

    

    #################### error handling ################################################################################################################
    if len(screenCnt) < 2 :
        raise Exception("WARNING: IDcard PROCESS FAILED")
    if len(wpCnt) < 1 :
        print("WARNING: WaterPrint PROCESS FAILED: " + imagePath)
        #raise Exception("WARNING: WaterPrint PROCESS FAILED")
    ##=======================================================================================================================================##

    # show the results
    if show:
        print("STEP 2: 获取轮廓")
        for sc in screenCnt:
            copy = image.copy()
            cv2.drawContours(copy, [sc], 0, (0, 255, 0), 2)
            cv2.imshow("Outline", copy)
            cv2.waitKey(time)
            cv2.destroyAllWindows()

    # TODO: watermark removal: 1. deskew the ID card 2. find the watermark's outer frame 3. take the histogram of the watermark mask (equalized) 4. threshold between the two peaks 5. pull the original image to the background color ####################################
        
    #copy = gray.copy()
    #icopy = image.copy()
    #if len(wpCnt) > 0 :
    #    for wc in wpCnt:
    #        waterPrint = four_point_transform(copy, wc)
            #drawHistoGram(waterPrint)
            #Hist = cv2.calcHist([waterPrint],[0],None,[256],[0,256])
            #alpha = 0
            #for i in range(1,256):
            #    m = max(Hist)
            #    if Hist[i] == m:
            #        alpha = i -140
            #        break
            #wpMask = wp * alpha
            #wpWrap = four_point_transform(wpMask, wc, 1, 1000, 1000)
            #backGround = np.ones((1000,1000),np.uint8) * alpha
            #outPut = cv2.add(copy,wpWrap)
            # (this preview belongs to the commented-out loop above and is
            # disabled with it, since `waterPrint` is never defined otherwise)
            #if showprint:
            #    #print(alpha)
            #    cv2.imshow("WaterPrint", waterPrint)
            #    cv2.waitKey(time)
            #    cv2.destroyAllWindows()
    #wpres = four_point_transform(icopy, waterCnt[1])
    #cv2.imwrite(path + '/waterprint/waterprint.jpg', wpres)


    ############## perspective transform ###########################################################################################

    
    
    screenCnt = np.array(screenCnt)
    # note: `afterProcess` is only assigned inside commented-out code above; `gray` is likely the intended source image
    warped1 = four_point_transform(afterProcess, screenCnt[0])
    warped2 = four_point_transform(afterProcess, screenCnt[1])

    # warped = resize(warped, height=600, width=600)
    #warped = cv2.resize(warped, (600, 600))
    if showID:
        cv2.imshow("Scanned1", warped1)
        cv2.waitKey(time)
        cv2.imshow("Scanned2", warped2)
        cv2.waitKey(time)
        cv2.destroyAllWindows()


    #image = resize(orig, height=500)

    return (warped1,warped2)
Code example #55
0
File: client.py Project: AthanatiusC/V-CORE
def createCLAHE(frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))
        res = clahe.apply(frame)
        return res
Code example #56
0
import os
import cv2
from matplotlib import pyplot as plt

imgPath = '/home/dell/downloads/Skype/blur1.jpeg'

imgStatus = os.path.isfile(imgPath)
print(imgStatus)

if imgStatus:
    img = cv2.imread(imgPath, 0)
    h, w = img.shape
    print(h, w)

    equalImg = cv2.equalizeHist(img)  # Histogram Equalization (HE)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
    claheImg = clahe.apply(
        img)  # Contrast Limited Adaptive Histogram Equalization (CLAHE)

    plt.figure(figsize=(20, 20))

    plt.subplot(3, 2, 1)
    plt.axis('off')
    plt.imshow(img, cmap='gray')
    plt.title('Snow Dog')

    plt.subplot(3, 2, 2)
    plt.axis('off')
    plt.hist(img.ravel(), bins=256, range=(0, 256))  # flatten so the histogram is over pixel intensities
    plt.title('Histogram of Snow Dog')
Code example #57
0
 def __init__(self, clipLimit=2, tileSize=(8, 8)):
     self.trans = cv2.createCLAHE(clipLimit=clipLimit,
                                  tileGridSize=tileSize)
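A plausible completion (assumed, not part of the original snippet), making the wrapper usable as a callable transform:

 def __call__(self, img):
     # apply CLAHE to a single-channel uint8 image
     return self.trans.apply(img)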
Code example #58
0
def MA(base_path, image_id):
    eye = cv2.imread(base_path + image_id)

    image = cv2.cvtColor(eye, cv2.COLOR_BGR2RGB)
    median = cv2.medianBlur(image, 1)  # ksize=1 is a no-op; use 3 or 5 for actual smoothing

    green_image = median.copy()  # Make a copy
    green_image[:, :, 0] = 0
    green_image[:, :, 2] = 0
    gPixels = np.array(green_image)

    # -----Converting image to LAB Color model-----------------------------------
    lab = cv2.cvtColor(gPixels, cv2.COLOR_BGR2LAB)  # note: gPixels is RGB-ordered, so this flag reads the channels swapped

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB Color model to RGB model--------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    #############################################################################
    edges = cv2.Canny(final, 70, 35)
    final_gray = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)

    edge_test = final_gray + edges

    eye_final = edge_test

    # Perform closing to find individual objects
    # (cv2.dilate/erode expect an ndarray structuring element, so build one
    # explicitly instead of passing a bare (3, 3) tuple)
    kernel = np.ones((3, 3), np.uint8)
    eye_final = cv2.dilate(eye_final, kernel, iterations=2)  # eye_final.dilate(2)
    eye_final = cv2.erode(eye_final, kernel, iterations=1)

    eye_final = cv2.dilate(eye_final, kernel, iterations=4)
    eye_final = cv2.erode(eye_final, kernel, iterations=3)

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Set Area filtering parameters
    params.filterByArea = True
    params.minArea = 10  # lesser the area more the number of keypoints generated

    # Detect blobs.
    detector = cv2.SimpleBlobDetector_create(params)
    big_blobs = detector.detect(eye_final)

    # create a blank image to mask
    blank = np.zeros(eye.shape, np.uint8)
    blobs = cv2.drawKeypoints(image, big_blobs, blank, (255, 255, 255),
                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    blobs_gray = cv2.cvtColor(blobs, cv2.COLOR_RGB2GRAY)
    eye_final = eye_final - blobs_gray
    eye_final = cv2.erode(eye_final, kernel, iterations=1)

    print(eye_final[100, 200])
    print(eye_final[200, 300])
    print(eye_final[299, 300])

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds
    params.minThreshold = 1
    params.maxThreshold = 500

    # Detect blobs.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    small_blobs = detector.detect(eye_final)
    #print(small_blobs)
    if small_blobs:

        small_blobs_detected = cv2.drawKeypoints(
            image, big_blobs, blank, (255, 255, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        eye_final_small = eye_final - cv2.cvtColor(small_blobs_detected,
                                                   cv2.COLOR_RGB2GRAY)
        # print("secondtime")
        print(eye_final[100, 200])
        print(eye_final[200, 300])
        print(eye_final[299, 300])

        return eye_final_small, blobs

    return 'np'  # sentinel string returned when no small blobs are detected
Code example #59
0
import numpy as np
import cv2

img = cv2.imread('test2.jpg', 0)

# create a CLAHE object (Arguments are optional).
clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)

cv2.imwrite('eq_test2_adaptive_4.jpg', cl1)
Code example #60
0
File: rpotter.py Project: cdwilliams32/rpotter
def TrackWand():
    global rval, old_frame, old_gray, p0, mask, color, ig, img, frame
    try:
        color = (0, 0, 255)
        rval, old_frame = cam.read()
        cv2.flip(old_frame, 1, old_frame)
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        old_gray = cv2.equalizeHist(old_gray)
        old_gray = cv2.GaussianBlur(old_gray, (9, 9), 1.5)
        dilate_kernel = np.ones(dilation_params, np.uint8)
        old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
        old_gray = clahe.apply(old_gray)

        # Take first frame and find circles in it
        p0 = cv2.HoughCircles(old_gray,
                              cv2.HOUGH_GRADIENT,
                              3,
                              50,
                              param1=240,
                              param2=8,
                              minRadius=4,
                              maxRadius=15)
        if p0 is not None:
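            # HoughCircles returns a (1, N, 3) float32 array of (x, y, r) triples;
            # the reshape below produces the (N, 1, 2) point layout that
            # cv2.calcOpticalFlowPyrLK expects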
            p0.shape = (p0.shape[1], 1, p0.shape[2])
            p0 = p0[:, :, 0:2]
            mask = np.zeros_like(old_frame)
    except:
        print("No points found")
    # Create a mask image for drawing purposes
    while True:
        try:
            rval, frame = cam.read()
            cv2.flip(frame, 1, frame)
            if p0 is not None:
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                frame_gray = cv2.equalizeHist(frame_gray)
                frame_gray = cv2.GaussianBlur(frame_gray, (9, 9), 1.5)
                dilate_kernel = np.ones(dilation_params, np.uint8)
                frame_gray = cv2.dilate(frame_gray,
                                        dilate_kernel,
                                        iterations=1)
                frame_clahe = cv2.createCLAHE(clipLimit=3.0,
                                              tileGridSize=(8, 8))
                frame_gray = frame_clahe.apply(frame_gray)

                # calculate optical flow
                p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                       p0, None, **lk_params)

                # Select good points
                good_new = p1[st == 1]
                good_old = p0[st == 1]

                # draw the tracks
                for i, (new, old) in enumerate(zip(good_new, good_old)):
                    a, b = new.ravel()
                    c, d = old.ravel()
                    # only try to detect gesture on highly-rated points (index below 15)
                    if (i < 15):
                        IsGesture(a, b, c, d, i)
                    dist = math.hypot(a - c, b - d)
                    if (dist < movment_threshold):
                        cv2.line(mask, (int(a), int(b)), (int(c), int(d)), (0, 255, 0), 2)
                    cv2.circle(frame, (int(a), int(b)), 5, color, -1)
                    cv2.putText(frame, str(i), (int(a), int(b)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
                img = cv2.add(frame, mask)

                cv2.putText(img, "Press ESC to close.", (5, 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
            cv2.imshow("Raspberry Potter", frame)

            # get next frame
            rval, frame = cam.read()

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
        except IndexError:
            print("Index error - Tracking")
        except:
            e = sys.exc_info()[0]
            print("Tracking Error: %s" % e)
        key = cv2.waitKey(20)
        if key in [27, ord('Q'), ord('q')]:  # exit on ESC
            cv2.destroyAllWindows()
            cam.release()
            break