import cv2
from win10toast import ToastNotifier

import modules.HolisticModule as hm
from modules.fps import fps_present

import numpy as np

# video input (CAP_DSHOW selects the DirectShow backend on Windows)
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# blank canvas matching the capture resolution (cap.get(3) = frame width, cap.get(4) = frame height)
canvas = np.zeros((int(cap.get(4)), int(cap.get(3)), 3), np.uint8)

# create the Holistic detector object
detector = hm.HolisticDetector()

img_counter = 1

while True:
    # default BGR frame from the webcam
    success, img = cap.read()
    if not success:
        continue
    # run the frame through MediaPipe Holistic -> img
    img = detector.findHolistic(img, draw=False)
    # output -> list of (id, x, y, z) entries for the 33 pose landmarks (ids 0-32), e.g. (11, x, y, z)
    pose_lmList = detector.findPoseLandmark(img, draw=False)
    # list of the 468 face-mesh landmarks
    face_lmList = detector.findFaceLandmark(img, draw=False)
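
    # Illustrative (assumption, not from the original script): each entry is
    # (id, x, y, z) with pixel coordinates, so the pose nose landmark (id 0)
    # could be read as:
    #   nose_x, nose_y = pose_lmList[0][1], pose_lmList[0][2]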

    # groups of face-mesh landmark indices to draw
    draw_list = [
        [98, 97, 2, 326, 327],  # nose
        [168, 4],  # nose center
    ]
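
    # Sketch (assumption, not the original drawing code): mark each listed
    # landmark group on the canvas, assuming entries are (id, x, y, ...).
    if face_lmList:
        for group in draw_list:
            for idx in group:
                x, y = int(face_lmList[idx][1]), int(face_lmList[idx][2])
                cv2.circle(canvas, (x, y), 2, (255, 255, 255), cv2.FILLED)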

# gesture volume control (pycaw)
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume

##############################################
wCam, hCam = 640, 480  # webcam capture resolution
##############################################

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, wCam)  # 3 = cv2.CAP_PROP_FRAME_WIDTH
cap.set(4, hCam)  # 4 = cv2.CAP_PROP_FRAME_HEIGHT

# timestamps for FPS calculation
cTime = 0
pTime = 0

# detector = htm.handDetector(detectionCon=0.7)
detector = hm.HolisticDetector(min_detection_confidence=0.7,
                               min_tracking_confidence=0.5)

# default speaker endpoint via pycaw
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
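# 'volume' now exposes IAudioEndpointVolume methods such as GetVolumeRange()
# and SetMasterVolumeLevel().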
# volume.GetMute()
# volume.GetMasterVolumeLevel()
volRange = volume.GetVolumeRange()  # (min_dB, max_dB, increment_dB)
# volume.SetMasterVolumeLevel(0.0, None)
minVol = volRange[0]  # lowest supported volume in dB
maxVol = volRange[1]  # highest supported volume in dB (0.0)
vol = 0
volBar = 400  # initial on-screen volume-bar position in pixels
volPer = 0    # volume as a percentage
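
# Illustrative sketch (assumption, not shown in this excerpt): inside the loop a
# gesture distance in pixels (hypothetical 'length') would typically be mapped
# onto the device's dB range and applied, e.g.:
#   vol = np.interp(length, [50, 300], [minVol, maxVol])
#   volume.SetMasterVolumeLevel(vol, None)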

while True: