def work():
    global result
    while goon:
        if frame is not None:
            t0 = time.time()
            result = detect(frame)
            t1 = time.time()
            putText(result, '{:.0f}ms'.format(1000 * (t1 - t0)))
def work():
    global win, lab
    while goon:
        if frame is not None:
            t0 = time.time()
            win, lab = classify(frame)
            t1 = time.time()
            putText(win, '{:.0f}ms {}'.format(1000 * (t1 - t0), lab))
# reset the tracking start points
if n == 0 or key == ord('c'):
    corners = cv.goodFeaturesToTrack(gray, **corners_params).reshape(-1, 2)
    nextPts = corners
    prevgray = gray

t0 = time.time()
# find the next position of each point starting from the previous one
nextPts, status, err = cv.calcOpticalFlowPyrLK(prevgray, gray, nextPts, None, **lk_params)
prevgray = gray
t1 = time.time()

# join the first and the last position of each trajectory
for (x0, y0), (x, y), ok in zip(corners, nextPts, status):
    if ok:
        cv.circle(frame, (int(x), int(y)), radius=3, color=(0, 0, 255), thickness=-1, lineType=cv.LINE_AA)
        cv.line(frame, (int(x0), int(y0)), (int(x), int(y)), color=(0, 0, 255), thickness=1, lineType=cv.LINE_AA)

putText(frame, f'{len(corners)} corners, {(t1-t0)*1000:.0f}ms')
cv.imshow('input', frame)
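# The two parameter dictionaries unpacked above are defined outside this
# excerpt. A sketch of typical values (an assumption mirroring the usual
# OpenCV Lucas-Kanade settings, not necessarily the originals):
corners_params = dict(maxCorners=500, qualityLevel=0.1, minDistance=10)
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))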
for key, frame in autoStream():
    g = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    cs = extractContours(g, minarea=5)
    els = detectEllipses(cs, tol=param.err)

    # for each detected ellipse
    for e in els:
        cv.ellipse(frame, e, color=(0, 0, 255))
        # take its centre
        cx, cy = int(e[0][0]), int(e[0][1])
        # and print the grey level at the centre of the ellipse;
        # we need it to detect the special unfilled circle placed
        # at the origin of coordinates
        info = '{}'.format(g[cy, cx])
        putText(frame, info, (cx, cy), color=(255, 255, 255))

    # Once we have found 4 ellipses we must order them properly, starting with
    # the special circle and going clockwise (the detector may return them in
    # any order, and the homography would be computed wrongly otherwise).
    # To do this we compute the convex hull of the 4 centres: the shape is the
    # same, but the function returns a well-ordered polyline whose diagonals
    # do not cross.
    # Finally we rotate the polygon so that the hollow circle comes first.
    if len(els) == 4:
        hull = cv.convexHull(np.array([(e[0][0], e[0][1]) for e in els]).astype(np.float32)).reshape(-1, 2)
        # extract the grey value at the centre of each ellipse
        vals = np.array([g[int(y), int(x)] for x, y in hull])
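# A minimal sketch of the reordering step the comments above describe (an
# assumption about how the snippet continues): the unfilled circle shows the
# paper colour, so it has the brightest centre, and we rotate the hull until
# it comes first.
import numpy as np

def order_from_hollow(hull, vals):
    first = int(np.argmax(vals))          # index of the hollow (brightest) circle
    return np.roll(hull, -first, axis=0)  # same polygon, hollow circle first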
import numpy as np
import cv2 as cv
from umucv.util import ROI, putText
from umucv.stream import autoStream

cv.namedWindow("input")
cv.moveWindow('input', 0, 0)
region = ROI("input")

for key, frame in autoStream():
    if region.roi:
        [x1, y1, x2, y2] = region.roi
        if key == ord('c'):
            trozo = frame[y1:y2 + 1, x1:x2 + 1]
            cv.imshow("trozo", trozo)
        if key == ord('x'):
            region.roi = []
        cv.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2)
        putText(frame, f'{x2-x1+1}x{y2-y1+1}', orig=(x1, y1 - 8))

    h, w, _ = frame.shape
    putText(frame, f'{w}x{h}')
    cv.imshow('input', frame)
import cv2 as cv
from umucv.stream import autoStream
from umucv.util import putText

n = 0
for key, frame in autoStream():
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    if n == 0 or key == ord('c'):
        corners = cv.goodFeaturesToTrack(gray, 50, 0.1, 10).reshape(-1, 2)
        nextPts = corners
        prevgray = gray
    n += 1

    nextPts, status, err = cv.calcOpticalFlowPyrLK(prevgray, gray, nextPts, None)
    prevgray = gray

    # drawing functions need integer coordinates
    for (x, y), ok, (x0, y0) in zip(nextPts, status, corners):
        if ok:
            cv.circle(frame, (int(x0), int(y0)), 2, (0, 0, 128), -1, cv.LINE_AA)
            cv.circle(frame, (int(x), int(y)), 3, (0, 0, 255), -1, cv.LINE_AA)
            cv.line(frame, (int(x0), int(y0)), (int(x), int(y)), (0, 0, 255), 1, cv.LINE_AA)
        else:
            cv.circle(frame, (int(x0), int(y0)), 3, (128, 128, 128), -1, cv.LINE_AA)

    putText(frame, '{}'.format(len(corners)), (5, 20), color=(0, 255, 255))
    cv.imshow('input', frame)

cv.destroyAllWindows()
        t1 = time.time()
        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []
        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            if len(tr) > track_len:
                del tr[0]
            new_tracks.append(tr)
            cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
        tracks = new_tracks
        cv.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
        putText(vis, 'tracks: {}, {:.0f}ms'.format(len(tracks), 1000 * (t1 - t0)))
        #for t in tracks:
        #    print(t[0], t[-1])

    if frame_idx % detect_interval == 0:
        mask = np.zeros_like(frame_gray)
        mask[:] = 255
        for x, y in [np.int32(tr[-1]) for tr in tracks]:
            cv.circle(mask, (x, y), 5, 0, -1)
        p = cv.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
        if p is not None:
            for x, y in np.float32(p).reshape(-1, 2):
                tracks.append([(x, y)])

    frame_idx += 1
#print(names)
#print(encodings)
print(encodings[0].shape)

for key, frame in autoStream():
    t0 = time.time()
    face_locations = face_recognition.face_locations(frame)
    t1 = time.time()
    face_encodings = face_recognition.face_encodings(frame, face_locations)
    t2 = time.time()

    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        match = face_recognition.compare_faces(encodings, face_encoding)
        #print(match)
        name = "Unknown"
        for n, m in zip(names, match):
            if m:
                name = n
        cv.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 1)
        putText(frame, name, orig=(left + 3, bottom + 16))

    putText(frame, f'{(t1-t0)*1000:.0f} ms {(t2-t1)*1000:.0f} ms')
    cv.imshow('Video', frame)
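# compare_faces returns one boolean per known face (distance below a default
# tolerance of 0.6), and the loop above keeps the last name that matches.
# A sketch of an alternative (not the original code) that picks the single
# closest encoding instead:
import numpy as np
import face_recognition

def best_match(known_encodings, known_names, face_encoding, tol=0.6):
    d = face_recognition.face_distance(known_encodings, face_encoding)
    i = int(np.argmin(d))
    return known_names[i] if d[i] <= tol else "Unknown"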
        result = np.zeros_like(frame)
        cp = [c for c in contours if orientation(c)]
        cn = [c for c in contours if not orientation(c)]
        cv.drawContours(result, cp, contourIdx=-1, color=(255, 128, 128), thickness=1, lineType=cv.LINE_AA)
        cv.drawContours(result, cn, -1, (128, 128, 255), 1)
    else:
        result = frame
        # in this display mode we show only the detected shapes
        cv.drawContours(result, found, -1, (0, 255, 0), cv.FILLED)

    # in both modes we show the similarity (and optionally the area)
    for c in found:
        s = np.linalg.norm(invar(c) - invmodel)
        a = cv.contourArea(c)
        #info = f'{s:.2f} {a}'
        info = f'{s:.2f}'
        putText(result, info, c.mean(axis=0).astype(int))

    cv.imshow('shape recognition', result)

cv.destroyAllWindows()

# possible improvements: add a trackbar to control the detection threshold,
# and avoid repeating operations inside putText
def cleanModelWindows():
    for window in range(0, len(models) + 1):
        cv.destroyWindow("Model" + str(window))

for key, x in autoStream():
    # empty the list of models
    if key == ord('x'):
        cleanModelWindows()
        models = []

    t0 = time.time()
    keypoints, descriptors = sift.detectAndCompute(x, mask=None)
    t1 = time.time()
    putText(x, '{} {:.0f}ms'.format(len(keypoints), 1000 * (t1 - t0)))

    # add a reference image, with its keypoints and descriptors
    if key == ord('c'):
        models.append((keypoints, descriptors, x))

    # show the camera on screen
    flag = cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    cv.drawKeypoints(x, keypoints, x, color=(100, 150, 255), flags=flag)
    cv.imshow('CAM', x)

    if key == ord('v'):
        cleanModelWindows()

    # if there are models to compare against
    if models:
human_choice = "R" elif category == 1: human_choice = "P" elif category == 2: human_choice = "S" else: human_choice = "N" swing_counter = 0 bot_choice = very_smart_bot.output # update the text on screen text = fight(human_choice, bot_choice) # only update the bot if a category was detected if human_choice != "N": very_smart_bot.update(human_choice) # Draw the ROI cv2.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2) t1 = time.time() putText(frame, f'{(t1 - t0) * 1000:.0f} ms') putText(frame, text, orig=(5, 36)) putText(frame, f'Human: {human_wins}, Machine: {bot_wins}', orig=(5, 56)) cv2.imshow("input", frame) cv2.destroyAllWindows()
import cv2 as cv
from umucv.stream import autoStream
from collections import deque
import numpy as np
from umucv.util import putText

points = deque(maxlen=2)

def fun(event, x, y, flags, param):
    if event == cv.EVENT_LBUTTONDOWN:
        points.append((x, y))

cv.namedWindow("webcam")
cv.setMouseCallback("webcam", fun)

for key, frame in autoStream():
    for p in points:
        cv.circle(frame, p, 3, (0, 0, 255), -1)
    if len(points) == 2:
        cv.line(frame, points[0], points[1], (0, 0, 255))
        c = np.mean(points, axis=0).astype(int)
        d = np.linalg.norm(np.array(points[1]) - points[0])
        putText(frame, f'{d:.1f} pix', c)
    cv.imshow('webcam', frame)

cv.destroyAllWindows()
    for (x, y), ok, (x0, y0) in zip(nextPts, status, corners):
        if ok:
            sX += x - x0
            sY += y - y0
        else:
            lost += 1

    # compute the mean displacement (negated, so the arrow points
    # opposite to the feature motion)
    tam = len(corners)
    distMedia = (int(-sX / tam), int(sY * -1 / tam))
    # tip of the arrow to draw
    finArrow = (distMedia[0] + centroImg[0], distMedia[1] + centroImg[1])
    # circle at the image centre
    cv.circle(frame, centroImg, 2, COLOR_LINEAS, -1, cv.LINE_AA)
    # arrow showing the direction of the image motion
    cv.arrowedLine(frame, centroImg, finArrow, COLOR_LINEAS, 1, cv.LINE_AA)
    # reset the accumulated displacements
    sX = 0
    sY = 0

    putText(frame, 'Corners: {}'.format(len(corners)), (5, 20), color=COLOR_LINEAS)
    cv.imshow('Rotacion', frame)

cv.destroyAllWindows()
            continue
        tr.append((x, y))
        if len(tr) > track_len:
            del tr[0]
        new_tracks.append(tr)
        cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
    tracks = new_tracks

    for tr in tracks:
        actual = np.int32(tr)
        cv.polylines(vis, [actual], False, (0, 255, 0))
        # change in the x and y coordinates of the track,
        # from its start to its end
        diffX = actual[0][0] - actual[-1][0]
        diffY = actual[0][1] - actual[-1][1]
        # report the dominant motion in the window
        if diffX < 0 and abs(diffX) > sensibilidad:
            putText(vis, 'mov: right')
        elif diffX >= 0 and abs(diffX) > sensibilidad:
            putText(vis, 'mov: left')
        if diffY < 0 and abs(diffY) > sensibilidad:
            putText(vis, 'mov: up')
        elif diffY >= 0 and abs(diffY) > sensibilidad:
            putText(vis, 'mov: down')

    if frame_idx % detect_interval == 0:
        mask = np.zeros_like(frame_gray)
        mask[:] = 255
        for x, y in [np.int32(tr[-1]) for tr in tracks]:
            cv.circle(mask, (x, y), 5, 0, -1)
        p = cv.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
        if p is not None:
            for x, y in np.float32(p).reshape(-1, 2):
frame = None
goon = True
win = None

def GUI():
    global frame, goon, win
    for key, frame in autoStream():
        cv.imshow('cam', frame)
        if win is not None:
            cv.imshow('inception', win)
            win = None
    goon = False

t = Thread(target=GUI, args=())
t.start()

while frame is None:
    pass

while goon:
    t0 = time.time()
    win, lab = classify(frame)
    t1 = time.time()
    putText(win, '{:.0f}ms {}'.format(1000 * (t1 - t0), lab))
    key = cv.waitKey(1) & 0xFF
    help.show_if(key, ord('h'))
    if key == 27:
        break
    if key == ord('x'):
        MODEL = None
    if key == ord('g'):
        SHOW = not SHOW

    x = cam.frame.copy()

    if SHOW:
        h, sh = feat(x)
        putText(sh, str(h.shape))
        cv.imshow('hog', sh)
    else:
        h = feat(x)

    if MODEL is not None:
        h1, w1 = h.shape[:2]
        h2, w2 = MODEL.shape[:2]
        detected = []
        for j in range(h1 - h2):
            for k in range(w1 - w2):
                vr = h[j:j + h2, k:k + w2].flatten()
                v = MODEL.flatten()
                detected.append((dist(vr, v), j, k))
        dmin, jmin, kmin = min(detected)
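# A small aside on the exhaustive scan above: the template is flattened once
# per window. Hoisting it out of the double loop (a behaviour-preserving
# sketch) removes that repeated work:
v = MODEL.flatten()
detected = [(dist(h[j:j + h2, k:k + w2].flatten(), v), j, k)
            for j in range(h1 - h2) for k in range(w1 - w2)]
dmin, jmin, kmin = min(detected)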
    p0.append(0)

if len(x0) > 0:
    print("Images loaded")
else:
    print("No images loaded")
    exit()

imgMatcheada = None

for key, frame in autoStream():
    t0 = time.time()
    keypoints, descriptors = sift.detectAndCompute(frame, mask=None)
    t1 = time.time()
    putText(frame, f'{len(keypoints)} pts {1000*(t1-t0):.0f} ms')

    if key == ord('c'):
        for i in range(len(x0)):
            t2 = time.time()
            # request the two best matches of each point, not just the best one
            matches = matcher.knnMatch(descriptors, d0[i], k=2)
            t3 = time.time()

            # ratio test:
            # keep only the matches that are much better than the "second
            # option"; that is, if a point looks roughly as similar to two
            # different model points, we discard it
            good = []
            for m in matches:
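# A typical completion of the ratio test (a sketch, not necessarily the
# original continuation): keep a match only when the best candidate is
# clearly better than the runner-up.
good = []
for m in matches:
    if len(m) == 2 and m[0].distance < 0.75 * m[1].distance:
        good.append(m[0])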
    model = VGG16(weights='imagenet')
    S = 224

if MODEL == 2:
    from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
    model = ResNet50(weights='imagenet')
    S = 224

def classify(img):
    arr = preprocess_input(np.expand_dims(img.astype(np.float32), axis=0))
    preds = model.predict(arr)
    _, lab, p = decode_predictions(preds, top=3)[0][0]
    if p < 0.5:
        lab = ''
    return lab

for key, frame in autoStream():
    h, w, _ = frame.shape
    dw = (w - S) // 2
    dh = (h - S) // 2
    trozo = frame[dh:S + dh, dw:S + dw]

    t0 = time.time()
    lab = classify(trozo)
    t1 = time.time()

    putText(frame, '{:.0f}ms {}'.format(1000 * (t1 - t0), lab))
    cv.rectangle(frame, (dw, dh), (dw + S, dh + S), (0, 0, 0), 3)
    cv.imshow('inception', frame)
for key, frame in autoStream():
    if roi.roi:
        [x1, y1, x2, y2] = roi.roi

        # if the region is too small we do nothing
        if abs(y2 - y1) < 10:
            continue

        # extract the region and mark it on the frame
        region = frame[y1:y2, x1:x2].copy()
        cv.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 1)
        # cv.imshow('ROI', region)

        # measure the processing time
        t0 = time.time()
        # binarize the image with an automatic threshold (optional)
        #_, region = cv.threshold(region[:,:,1], 160, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)
        # call the OCR
        tesseract.SetImage(Image.fromarray(region))
        ocr_result = tesseract.GetUTF8Text()
        t1 = time.time()
        print(ocr_result)

        # show the result in the window together with the ROI size and the computing time
        h, w = region.shape[:2]
        putText(frame, f'{ocr_result[:-1]} ({w}x{h}, {1000*(t1-t0):.0f}ms)', orig=(x1 + 5, y1 - 8))

    cv.imshow('OCR', frame)
    if len(points) == 2:
        # overwrite the frame with a green line from the first point to the second
        cv.line(rec, points[0], points[1], (0, 255, 0))

        # column-wise mean of the two points, i.e. their midpoint
        c = np.mean(points, axis=0).astype(int)  # astype(int) is for putText

        # distance between the points, in pixels
        distance = np.linalg.norm(np.array(points[1]) - points[0])
        print("Distance: " + str(distance) + " pixels.")

        # convert to centimetres using the pixels-per-millimetre calibration
        centimeters = (distance / pixelsXMil) / 10  # mm / 10
        print("Distance: " + str(centimeters) + " cm.")

        # reset the point list so that two new points can be selected
        points = deque(maxlen=2)

        # write the measurement at the midpoint c
        putText(rec, f'{centimeters:.1f} cm.', c)

    cv.imshow("Imagen", rec)

cv.destroyAllWindows()
sift = cv.SIFT_create(nfeatures=500)
matcher = cv.BFMatcher()
x0 = None

for key, frame in autoStream():
    if key == ord('x'):
        x0 = None

    t0 = time.time()
    keypoints, descriptors = sift.detectAndCompute(frame, mask=None)
    t1 = time.time()
    putText(frame, f'{len(keypoints)} pts {1000*(t1-t0):.0f} ms')

    if key == ord('c'):
        k0, d0, x0 = keypoints, descriptors, frame

    if x0 is None:
        flag = cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
        cv.drawKeypoints(frame, keypoints, frame, color=(100, 150, 255), flags=flag)
        cv.imshow('SIFT', frame)
    else:
        t2 = time.time()
        # request the two best matches of each point, not just the best one
        matches = matcher.knnMatch(descriptors, d0, k=2)
        t3 = time.time()
        # ratio test
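# Once the ratio test has filtered the matches into a list `good`, a common
# next step (a sketch under the assumption that k0/d0 come from the captured
# model frame) is to fit a homography with RANSAC and reject the outliers:
import numpy as np

def estimate_homography(good, model_kps, frame_kps):
    # knnMatch(descriptors, d0) makes the frame the query and the model the train set
    src = np.float32([model_kps[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([frame_kps[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    return cv.findHomography(src, dst, cv.RANSAC, 3.0)  # (H, inlier mask)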
    return r

frame = None
goon = True

def fun():
    global frame, goon, key
    for key, frame in autoStream():
        cv.imshow('cam', frame)
    goon = False

t = Thread(target=fun, args=())
t.start()

while frame is None:
    pass

while goon:
    t0 = time.time()
    result = work(frame)
    t1 = time.time()
    putText(result, '{:.0f}ms'.format(1000 * (t1 - t0)))
    cv.imshow('work', result)
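# One caveat about this producer/consumer pattern: the GUI thread rebinds
# `frame` on every capture while the worker uses it. In CPython the rebinding
# itself is atomic, so grabbing a local reference keeps one iteration
# consistent (a defensive sketch, not part of the original exercise):
while goon:
    current = frame        # local reference; the GUI may rebind `frame` meanwhile
    t0 = time.time()
    result = work(current)
    t1 = time.time()
    putText(result, '{:.0f}ms'.format(1000 * (t1 - t0)))
    cv.imshow('work', result)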
for key, image in autoStream():
    if key == ord('m'):
        MULTISCALE = not MULTISCALE

    t0 = time.time()
    if MULTISCALE:
        (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.1)
    else:
        (rects, weights) = hog.detect(image, winStride=(4, 4), padding=(8, 8))
    t1 = time.time()

    if len(rects) > 0:
        for rect, p in zip(rects, weights.flatten()):
            if MULTISCALE:
                x, y, w, h = rect
            else:
                x, y = rect
                w, h = 64, 128
            cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
            putText(image, '{:.1f}'.format(p), (x + 2, y - 7), (0, 128, 255))

    putText(image, '{:.0f} ms'.format((t1 - t0) * 1000), (7, 18), (0, 255, 128))
    cv.imshow('pedestrian', image)

cv.destroyAllWindows()
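# In multiscale mode the detector usually fires several overlapping boxes per
# person. A minimal non-maximum-suppression sketch (an addition, not part of
# the original script) that keeps the highest-weight box of each cluster:
import numpy as np

def nms(rects, scores, overlap=0.3):
    if len(rects) == 0:
        return []
    b = np.array([(x, y, x + w, y + h) for x, y, w, h in rects], dtype=float)
    areas = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    order = np.argsort(scores)[::-1]     # best score first
    keep = []
    while len(order) > 0:
        i = order[0]
        keep.append(i)
        # intersection of the best box with all remaining boxes
        xx1 = np.maximum(b[i, 0], b[order[1:], 0])
        yy1 = np.maximum(b[i, 1], b[order[1:], 1])
        xx2 = np.minimum(b[i, 2], b[order[1:], 2])
        yy2 = np.minimum(b[i, 3], b[order[1:], 3])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= overlap]
    return keep

# usage: for i in nms(rects, weights.flatten()): draw rects[i]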
        # show all the models in a new window
        models.append(reg)
        mHistRed.append(histRed)
        mHistGreen.append(histGreen)
        mHistBlue.append(histBlue)
        cv.imshow("Modelos", concatenateImages(models, len(models) - 1))

    # difference between the RGB histograms
    if len(models) > 0:
        results = []
        texto = ""
        for i in range(0, len(models)):
            dR = np.sum(cv.absdiff(histRed, mHistRed[i]))
            dG = np.sum(cv.absdiff(histGreen, mHistGreen[i]))
            dB = np.sum(cv.absdiff(histBlue, mHistBlue[i]))
            results.append(max(dR, dG, dB) / 1000)
            texto = texto + str(results[-1]) + " "
        putText(frame, texto)

        bestResult = min(results)
        if bestResult < 1.2:
            i = results.index(bestResult)
            cv.imshow("Detectada", models[i])
        else:
            cv.destroyWindow("Detectada")

    # show the webcam
    cv.imshow("Camara", frame)

cv.destroyAllWindows()
for key, frame in autoStream():
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY).astype(np.float32) / 255

    if region.roi:
        [x1, y1, x2, y2] = region.roi
        if key == ord('c'):
            model = gray[y1:y2 + 1, x1:x2 + 1]
            cv.imshow("model", model)
            region.roi = []
        cv.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2)
        putText(frame, f'{x2-x1+1}x{y2-y1+1}', orig=(x1, y1 - 8))

    if model is not None:
        cc = cv.matchTemplate(gray, model, cv.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(cc)
        #mr, mc = divmod(cc.argmax(), cc.shape[1])
        #cv.imshow('CC', cc)
        putText(cc, f'max correlation {max_val:.2f}')
        cv.imshow('CC', (cc - min_val) / (max_val - min_val))

        x1, y1 = max_loc
        h, w = model.shape[:2]
        x2 = x1 + w
        y2 = y1 + h
        cv.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2)
    for t in tracks:
        if len(t) > 1:
            xIn, yIn = np.int32(t[-2])
            xFin, yFin = np.int32(t[-1])
            dx += xFin - xIn
            dy += yFin - yIn

    dx /= len(tracks)
    dy /= len(tracks)
    dxTotal += dx
    dyTotal += dy

    putText(frame,
            f'{round(math.degrees(2*np.arctan((dxTotal/2)/focalLenght)), 2)} degrees accumulated in X',
            orig=(5, 72), color=(200, 255, 200))
    putText(frame,
            f'{round(math.degrees(2*np.arctan((dyTotal/2)/focalLenght)), 2)} degrees accumulated in Y',
            orig=(5, 108), color=(200, 255, 200))

    new = []
    dx = 0
    dy = 0
    for t in tracks:
        xIn, yIn = np.int32(t[0])
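# The formula used above converts an accumulated pixel shift into a rotation
# angle with the pinhole model: theta = 2*atan((d/2)/f), with both the shift d
# and the focal length f in pixels. A worked example (the 800 px focal length
# is made up for illustration):
import math

def shift_to_angle(shift_px, focal_px):
    return math.degrees(2 * math.atan((shift_px / 2) / focal_px))

print(shift_to_angle(100, 800))   # a 100 px pan with f = 800 px is about 7.15 degrees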
import cv2 as cv
import time
from umucv.stream import autoStream
from umucv.util import putText

# initialize the detector with the desired working parameters;
# check their meaning in the documentation and try different values
# https://docs.opencv.org/3.4/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html
sift = cv.xfeatures2d.SIFT_create(nfeatures=0, contrastThreshold=0.1, edgeThreshold=8)
# sift = cv.AKAZE_create()

for key, frame in autoStream():
    t0 = time.time()
    # run the detector (for now we do not use the descriptors)
    keypoints, _ = sift.detectAndCompute(frame, mask=None)
    t1 = time.time()
    putText(frame, '{} keypoints {:.0f} ms'.format(len(keypoints), 1000*(t1-t0)))

    # draw the detected points, with a circle showing their size and a radius
    # showing their orientation; when the camera moves, the size and orientation
    # should stay consistent with the image
    flag = cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    cv.drawKeypoints(frame, keypoints, frame, color=(100, 150, 255), flags=flag)
    cv.imshow('SIFT', frame)
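# Note: since OpenCV 4.4 the SIFT patent has expired and the detector lives in
# the main module, so the contrib-free equivalent of the call above is:
sift = cv.SIFT_create(nfeatures=0, contrastThreshold=0.1, edgeThreshold=8)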
                vr = h[j:j + h2, k:k + w2].flatten()
                v = MODEL.flatten()
                detected.append((dist(vr, v), j, k))

        d, j, k = min(detected)
        x1 = k * PPC
        y1 = j * PPC
        x2 = x1 + (w2 + CPB - 1) * PPC
        y2 = y1 + (h2 + CPB - 1) * PPC
        if d < 0.04:
            cv.rectangle(x, (x1, y1), (x2, y2), color=(255, 255, 0), thickness=2)
        putText(x, '{:.3f}'.format(d), (6, 18), (0, 128, 255))

    if roi.roi:
        [x1, y1, x2, y2] = roi.roi
        reg = x[y1:y2, x1:x2].copy()
        if key == ord('c'):
            if SHOW:
                MODEL, sh = feat(reg)
                cv.imshow('model', sh)
            else:
                MODEL = feat(reg)
            roi.roi = []
        cv.rectangle(x, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2)
    loc = [c[0] for c in cosas]

    t0 = time.time()
    if nor:
        clas, prob = classify(nor)
    else:
        clas, prob = [], []
    t1 = time.time()

    for (x, y), label, pr in zip(loc, clas, prob):
        col = (0, 255, 255)
        if pr < 0.5:
            label = '?'
        if pr < 0.9:
            col = (0, 160, 160)
        putText(frame, str(label), (int(x), int(y)), color=col, div=1, scale=2, thickness=2)

    caben = frame.shape[1] // 28
    for k, x in enumerate(nor[:caben]):
        frame[-28:, 28 * k:28 * (k + 1), :] = x.reshape(28, 28, 1) * 255

    cv.imshow('digits', frame)

cv.destroyAllWindows()
if key==ord("p"): POINTS = not POINTS with mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5) as face_mesh: image = cv.cvtColor(frame, cv.COLOR_BGR2RGB) results = face_mesh.process(image) if results.multi_face_landmarks: for face_landmarks in results.multi_face_landmarks: if POINTS: for p, lan in enumerate(face_landmarks.landmark): x=int(lan.x*w) y=int(lan.y*h) putText(frame,str(p), orig=(x,y), div=1, scale=0.5) else: mp_drawing.draw_landmarks( image=frame, landmark_list=face_landmarks, connections=mp_face_mesh.FACEMESH_CONTOURS, landmark_drawing_spec=drawing_spec, connection_drawing_spec=drawing_spec) cv.imshow('MediaPipe FaceMesh', frame) cv.destroyAllWindows()