Example #1
    def __init__(self, datasets_path, path, cond_path, overlap_len, q_levels,
                 ulaw, seq_len, batch_size, cond_dim, cond_len, norm_ind,
                 static_spk, look_ahead, partition):
        super().__init__()

        # Define class variables from initialization parameters
        self.overlap_len = overlap_len
        self.q_levels = q_levels
        self.ulaw = ulaw
        if self.ulaw:
            self.quantize = utils.uquantize
        else:
            self.quantize = utils.linear_quantize
        self.seq_len = seq_len
        self.batch_size = batch_size

        # Define sets of data, conditioners and speaker IDs
        self.data = []
        self.global_spk = []
        self.audio = []
        self.cond_dim = cond_dim
        self.cond_len = cond_len
        self.cond = np.empty(shape=[0, self.cond_dim])

        # Define npy training dataset file names
        npy_name_data = 'npy_datasets/' + partition + '/data' + static_spk * '_static' + '.npy'
        npy_name_spk = 'npy_datasets/' + partition + '/speakers' + static_spk * '_static' + '.npy'

        npy_name_audio_id = 'npy_datasets/' + partition + '/audio_id' + static_spk * '_static' + '.npy'

        # Define npy file names with maximum and minimum values of de-normalized conditioners
        npy_name_min_max_cond = 'npy_datasets/min_max' + norm_ind*'_ind' + (not norm_ind)*'_joint' \
                                + static_spk*'_static' + '.npy'
        npy_name_cond = 'npy_datasets/' + partition + '/conditioners' + norm_ind*'_ind' + (not norm_ind)*'_joint'\
                        + static_spk*'_static' + '.npy'

        # Define npy file name with array of unique speakers in dataset
        npy_name_spk_id = 'npy_datasets/spk_id' + static_spk * '_static' + '.npy'

        # Check if dataset has to be created
        files = [
            npy_name_data, npy_name_cond, npy_name_spk, npy_name_min_max_cond
        ]
        create_dataset = len(files) != len(
            [f for f in files if os.path.isfile(f)])

        # Align audio length to whole conditioner frames (pad or trim) instead
        # of truncating to the conditioner count
        nosync = True

        if create_dataset:
            print('Create ' + partition + ' dataset', '-' * 60, '\n')
            print('Extracting wav from: ', path)
            print('Extracting conditioning from: ', cond_path)
            print('List of files is: wav_' + partition +
                  static_spk * '_static' + '.list')

            # Get file names from the partition's list file
            with open(datasets_path + 'wav_' + partition + static_spk*'_static' + '.list', 'r') as f:
                file_names = f.read().splitlines()

            if not os.path.isfile(npy_name_spk_id):
                # Search for unique speakers in list and sort them
                spk_list = list()
                for file in file_names:
                    current_spk = file[0:2]
                    if current_spk not in spk_list:
                        spk_list.append(current_spk)
                spk_list.sort()
                spk = np.asarray(spk_list)
                np.save(npy_name_spk_id, spk)
            else:
                spk = np.load(npy_name_spk_id)

            # Load each of the files from the list. Note that extension has to be added
            for counter, file in enumerate(file_names):
                # Load WAV
                print(file + '.wav')
                (d, _) = load(path + file + '.wav', sr=None, mono=True)
                num_samples = d.shape[0]

                # Load CC conditioner
                c = np.loadtxt(cond_path + file + '.cc')
                c = c.reshape(-1, c.shape[1])
                (num_ceps, _) = c.shape

                # Load LF0 conditioner
                f0file = np.loadtxt(cond_path + file + '.lf0')
                f0, _ = interpolation(f0file, -10000000000)
                f0 = f0.reshape(f0.shape[0], 1)

                # Load FV conditioner
                fvfile = np.loadtxt(cond_path + file + '.gv')
                fv, uv = interpolation(fvfile, 1e3)
                num_fv = fv.shape[0]
                uv = uv.reshape(num_fv, 1)
                fv = fv.reshape(num_fv, 1)

                # Load speaker conditioner (index where the ID is located)
                speaker = np.where(spk == file[0:2])[0][0]
                speaker = np.repeat(speaker, num_fv)

                # Array of audio IDs to track the rearranging
                audio = np.repeat(counter, num_fv)

                if nosync:
                    oversize = num_samples % 80
                    print('oversize', oversize)
                    if oversize >= 60:
                        # Pad with zeros up to the next multiple of 80 samples
                        zeros = 80 - oversize
                        d = np.append(d, np.zeros(zeros))
                    elif oversize != 0:
                        # Trim the audio and drop the last conditioner frame
                        d = d[:-oversize]
                        c = c[:-1, :]
                        f0 = f0[:-1]
                        fv = fv[:-1]
                        uv = uv[:-1]
                else:
                    truncate = num_ceps * 80
                    d = d[:truncate]

                if not ulaw:
                    d = self.quantize(torch.from_numpy(d),
                                      self.q_levels).numpy()
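                # Note: when ulaw is enabled, self.quantize points to
                # utils.uquantize but is presumably applied later in the
                # pipeline; only the linear case is quantized here.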

                # Concatenate all speech conditioners
                cond = np.concatenate((c, f0), axis=1)
                cond = np.concatenate((cond, fv), axis=1)
                cond = np.concatenate((cond, uv), axis=1)

                # Append/Concatenate current audio file, speech conditioners and speaker ID
                self.data = np.append(self.data, d)
                self.cond = np.concatenate((self.cond, cond), axis=0)
                self.global_spk = np.append(self.global_spk, speaker)
                self.audio = np.append(self.audio, audio)

            total_samples = self.data.shape[0]
            dim_cond = self.cond.shape[1]
            print('Total samples: ', total_samples)

            lon_seq = self.seq_len + self.overlap_len
            self.num_samples = self.batch_size * (
                total_samples // (self.batch_size * lon_seq * self.cond_len))

            print('Number of samples (1 audio file): ', self.num_samples)
            self.total_samples = self.num_samples * (
                self.seq_len + self.overlap_len) * self.cond_len
            total_conditioning = self.total_samples // self.cond_len
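            # Hypothetical worked example: with batch_size=2, seq_len=13,
            # overlap_len=3 and cond_len=80, lon_seq is 16, so each batch lane
            # consumes 16 * 80 = 1280 audio samples per chunk; 1,000,000 raw
            # samples give num_samples = 2 * (1000000 // 2560) = 780 chunks,
            # total_samples = 780 * 16 * 80 = 998400 kept samples and
            # total_conditioning = 998400 // 80 = 12480 conditioner frames.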
            self.data = self.data[:self.total_samples].reshape(
                self.batch_size, -1)

            self.length = self.total_samples // self.seq_len

            self.cond = self.cond[:total_conditioning].reshape(
                self.batch_size, -1, dim_cond)

            self.global_spk = self.global_spk[:total_conditioning].reshape(
                self.batch_size, -1)

            self.audio = self.audio[:total_conditioning].reshape(
                self.batch_size, -1)

            # Save maximum and minimum of de-normalized conditioners for the train partition
            if partition == 'train' and not os.path.isfile(
                    npy_name_min_max_cond):
                # Compute maximum and minimum of de-normalized conditioners of train partition
                if norm_ind:
                    print(
                        'Computing maximum and minimum values for each speaker of training dataset.'
                    )
                    num_spk = len(spk)
                    self.max_cond = np.empty(shape=(num_spk, cond_dim))
                    self.min_cond = np.empty(shape=(num_spk, cond_dim))
                    for i in range(num_spk):
                        print('Computing speaker', i, 'of', num_spk,
                              'with ID:', spk[i])
                        self.max_cond[i] = np.amax(
                            self.cond[self.global_spk == i], axis=0)
                        self.min_cond[i] = np.amin(
                            self.cond[self.global_spk == i], axis=0)
                else:
                    print(
                        'Computing joint maximum and minimum values across all speakers of training dataset.'
                    )
                    self.max_cond = np.amax(np.amax(self.cond, axis=1), axis=0)
                    self.min_cond = np.amin(np.amin(self.cond, axis=1), axis=0)
                np.save(npy_name_min_max_cond,
                        np.array([self.min_cond, self.max_cond]))

            # Load maximum and minimum of de-normalized conditioners
            else:
                self.min_cond = np.load(npy_name_min_max_cond)[0]
                self.max_cond = np.load(npy_name_min_max_cond)[1]

            # Normalize conditioners with the maximum and minimum of the training partition
            if norm_ind:
                # Normalize independently per speaker
                print(
                    'Normalizing conditioners for each speaker of training dataset.'
                )
                for i in range(len(spk)):
                    self.cond[self.global_spk == i] = (self.cond[self.global_spk == i] - self.min_cond[i]) / \
                                                      (self.max_cond[i] - self.min_cond[i])
            else:
                # Normalize jointly across all speakers
                print(
                    'Normalizing conditioners jointly for all speakers of training dataset.'
                )
                self.cond = (self.cond - self.min_cond) / (self.max_cond -
                                                           self.min_cond)
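            # Min-max normalization maps each conditioner to [0, 1]; a
            # conditioner that is constant over the training data would make the
            # denominator zero, so the feature extraction is assumed to avoid that.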

            # Save partition's dataset
            np.save(npy_name_data, self.data)
            np.save(npy_name_cond, self.cond)
            np.save(npy_name_spk, self.global_spk)
            np.save(npy_name_audio_id, self.audio)

            print('Dataset created for ' + partition + ' partition', '-' * 60,
                  '\n')

        else:
            # Load previously created dataset
            self.data = np.load(npy_name_data)
            self.global_spk = np.load(npy_name_spk)

            if look_ahead:
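                # Look-ahead doubles the conditioner dimension by appending a
                # copy shifted one frame earlier, so each step also sees the next
                # frame's features (matching cond_dim * (1 + look_ahead) in the model).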
                if os.path.isfile(npy_name_cond.replace('.npy', '_ahead.npy')):
                    self.cond = np.load(
                        npy_name_cond.replace('.npy', '_ahead.npy'))
                else:
                    self.cond = np.load(npy_name_cond)
                    delayed = np.copy(self.cond)
                    delayed[:, :-1, :] = delayed[:, 1:, :]
                    self.cond = np.concatenate((self.cond, delayed), axis=2)
                    np.save(npy_name_cond.replace('.npy', '_ahead.npy'),
                            self.cond)
            else:
                self.cond = np.load(npy_name_cond)

            # Load maximum and minimum of de-normalized conditioners
            self.min_cond = np.load(npy_name_min_max_cond)[0]
            self.max_cond = np.load(npy_name_min_max_cond)[1]

            # Compute length for current partition
            self.length = np.prod(self.data.shape) // self.seq_len

            print('Data shape:', self.data.shape)
            print('Conditioners shape:', self.cond.shape)
            print('Global speaker shape:', self.global_spk.shape)

            print('Dataset loaded for ' + partition + ' partition', '-' * 60,
                  '\n')
Example #2
def main(frame_sizes, **params):

    use_cuda = torch.cuda.is_available()

    params = dict(default_params, frame_sizes=frame_sizes, **params)

    # Redefine parameters listed in the experiment directory and separated with '~'
    for i in params['model'].split('/')[1].split('~'):
        param = i.split(':')
        if param[0] in params:
            params[param[0]] = as_type(param[1], type(params[param[0]]))
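    # Hypothetical example: a model path like
    # 'results/seq_len:1040~batch_size:64/best-checkpoint' would make
    # split('/')[1] yield 'seq_len:1040~batch_size:64', overriding
    # params['seq_len'] and params['batch_size'] above.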
    # Define npy file names with maximum and minimum values of de-normalized conditioners
    npy_name_min_max_cond = 'npy_datasets/min_max' + params['norm_ind'] * '_ind' + (not params['norm_ind']) * '_joint' \
                            + params['static_spk'] * '_static' + '.npy'

    # Define npy file name with array of unique speakers in dataset
    npy_name_spk_id = 'npy_datasets/spk_id.npy'

    # Get file names from partition's list
    with open(str(params['datasets_path']) + 'generate_cond_gina.list', 'r') as f:
        file_names = f.read().splitlines()

    with open(str(params['datasets_path']) + 'generate_spk_gina.list', 'r') as f:
        spk_names = f.read().splitlines()

    datasets_path = os.path.join(params['datasets_path'], params['cond_set'])

    spk = np.load(npy_name_spk_id)

    if len(spk_names) != len(file_names):
        print(
            'Length of the speaker file does not match the length of the conditioner file.')
        quit()

    print('Generating', len(file_names), 'audio files')

    for i in range(len(file_names)):
        print('Generating Audio', i)
        print('Generating...', file_names[i])

        # Load CC conditioner
        c = np.loadtxt(datasets_path + file_names[i] + '.cc')

        # Load LF0 conditioner
        f0file = np.loadtxt(datasets_path + file_names[i] + '.lf0')
        f0, _ = interpolation(f0file, -10000000000)
        f0 = f0.reshape(f0.shape[0], 1)

        # Load FV conditioner
        fvfile = np.loadtxt(datasets_path + file_names[i] + '.gv')
        fv, uv = interpolation(fvfile, 1e3)
        num_fv = fv.shape[0]
        uv = uv.reshape(num_fv, 1)
        fv = fv.reshape(num_fv, 1)

        # Load speaker conditioner
        speaker = np.where(spk == spk_names[i])[0][0]

        cond = np.concatenate((c, f0), axis=1)
        cond = np.concatenate((cond, fv), axis=1)
        cond = np.concatenate((cond, uv), axis=1)

        # Load maximum and minimum of de-normalized conditioners
        min_cond = np.load(npy_name_min_max_cond)[0]
        max_cond = np.load(npy_name_min_max_cond)[1]

        # Normalize conditioners with absolute maximum and minimum for each speaker of training partition
        if params['norm_ind']:
            print(
                'Normalizing conditioners for each speaker of training dataset'
            )
            cond = (cond - min_cond[speaker]) / (max_cond[speaker] -
                                                 min_cond[speaker])
        else:
            print('Normalizing conditioners jointly')
            cond = (cond - min_cond) / (max_cond - min_cond)

        print('Shape cond', cond.shape)
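        # Mirror the training-time look-ahead: append a one-frame-shifted copy
        # of the conditioners so the generator input has cond_dim * 2 features.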
        if params['look_ahead']:
            delayed = np.copy(cond)
            delayed[:-1, :] = delayed[1:, :]
            cond = np.concatenate((cond, delayed), axis=1)
            print('Shape cond after look ahead', cond.shape)

        print(cond.shape)
        seed = params.get('seed')
        init_random_seed(seed, use_cuda)

        spk_dim = len([
            i for i in os.listdir(datasets_path)
            if os.path.islink(os.path.join(datasets_path, i))
        ])
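        # spk_dim is inferred from the number of symlinked entries in the
        # conditioning set directory, presumably one link per speaker.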

        print('Starting SampleRNN generation')
        model = SampleRNN(frame_sizes=params['frame_sizes'],
                          n_rnn=params['n_rnn'],
                          dim=params['dim'],
                          learn_h0=params['learn_h0'],
                          q_levels=params['q_levels'],
                          ulaw=params['ulaw'],
                          weight_norm=params['weight_norm'],
                          cond_dim=params['cond_dim'] *
                          (1 + params['look_ahead']),
                          spk_dim=spk_dim,
                          qrnn=params['qrnn'])
        print(model)

        if use_cuda:
            model = model.cuda()
            predictor = Predictor(model).cuda()
        else:
            predictor = Predictor(model)

        f_name = params['model']
        model_data = load_model(f_name)

        if model_data is None:
            sys.exit('ERROR: Model not found in ' + str(f_name))
        (state_dict, epoch_index, iteration) = model_data
        print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
        print(state_dict)
        predictor.load_state_dict(state_dict)

        original_name = file_names[i].split('/')[1]
        if original_name == "..":
            original_name = file_names[i].split('/')[3]

        generator = RunGenerator(model=model,
                                 sample_rate=params['sample_rate'],
                                 cuda=use_cuda,
                                 epoch=epoch_index,
                                 cond=cond,
                                 spk_list=spk,
                                 speaker=speaker,
                                 checkpoints_path=f_name,
                                 original_name=original_name)

        generator(params['n_samples'], params['sample_length'], cond, speaker)
Example #3
def main():
    print("Set threshold for left and right eye.")
    print("Press v to show calibrating vector.")

# ---------------------------------- Making empty arrays for future use -------------------------------------------- #
    mask_for_eyetracking_bgr = empty_mask_for_eyetracking(size_of_output_screen)
    mask_for_eyetracking_target = empty_mask_for_eyetracking(size_of_output_screen)
    mask_for_eyetracking_target_save = empty_mask_for_eyetracking(size_of_output_screen)
    mask_bgr_reshaped_nearest = mask_for_eyetracking_bgr
    mask_reshape_dimension = dimension(mask_for_eyetracking_bgr,
                                       int((screensize[1] * 100) / size_of_output_screen[0]))
# ---------------------------------- Opening camera device 0 for video capture -------------------------------------- #
    cap = cv2.VideoCapture(0)

# ---------------------------------- Starting the video writers ----------------------------------------------------- #
    fourcc_detection = cv2.VideoWriter_fourcc(*'XVID')
    out_detection = cv2.VideoWriter('detection.mkv', fourcc_detection, 20.0, (int(cap.get(3)), int(cap.get(4))))
    fourcc_mask = cv2.VideoWriter_fourcc(*'XVID')
    out_mask = cv2.VideoWriter('mask.mkv', fourcc_mask, 20.0, mask_reshape_dimension)

# ---------------------------------- Creating window for result and trackbars in it --------------------------------- #
    cv2.namedWindow('Dlib Landmarks')
    cv2.createTrackbar('Right', 'Dlib Landmarks', 0, 255, nothing)  # threshold track bar
    cv2.createTrackbar('Left', 'Dlib Landmarks', 0, 255, nothing)  # threshold track bar

# ---------------------------------- Initiation part ---------------------------------------------------------------- #
    left_center_pupil_in_eye_frame = [0, 0]
    right_center_pupil_in_eye_frame = [0, 0]
    output_vector_in_eye_frame = [0, 0, 0, 0]
    left_eye_crop = [0, 0]
    right_eye_crop = [0, 0]
    min_left = [0, 0]
    min_right = [0, 0]
    send_calibration_data_state = True
    upper_left_corner = [0, 0, 0, 0]
    middle_right_corner = [0, 0, 0, 0]
    upper_right_corner = [0, 0, 0, 0]
    middle_left_corner = [0, 0, 0, 0]
    middle = [0, 0, 0, 0]
    middle_up_corner = [0, 0, 0, 0]
    lower_left_corner = [0, 0, 0, 0]
    middle_bottom_corner = [0, 0, 0, 0]
    lower_right_corner = [0, 0, 0, 0]
    u_interp = np.zeros(size_of_output_screen, np.uint8)
    v_interp = np.zeros(size_of_output_screen, np.uint8)
    press_v = False
    press_c = True
    press_1 = True
    press_2 = True
    press_3 = True
    press_4 = True
    press_5 = True
    press_6 = True
    press_7 = True
    press_8 = True
    press_9 = True
    press_delete = True
    press_s = True
    press_e = False
    k = 0
    press_n = False
    target_m = False
    calibrating_vector_in_frame_left = [0, 0, 0, 0]
    calibrating_vector_in_frame_right = [0, 0, 0, 0]
    coordinates_of_center_dot = (int((20 * size_of_output_screen[0] - 1) / 100),
                                 int((20 * size_of_output_screen[1] - 1) / 100))
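    # Places the initial target at roughly 20% of the output grid; e.g. for a
    # hypothetical 100x100 output screen this evaluates to (19, 19).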
    draw_point_after_next_target = False
    hit_target = False
    hit_target_value = []
    value_of_point = 0
    result_eyetracker_coordinate_x = []
    result_eyetracker_coordinate_y = []
    result_eyetracker_found_u_normalized = []
    result_eyetracker_found_v_normalized = []
    result_eyetracker_found_u = []
    result_eyetracker_found_v = []
    target_coordinate_x = []
    target_coordinate_y = []
    measured_vector_true_u_normalized = []
    measured_vector_true_v_normalized = []
    measured_vector_true_u = []
    measured_vector_true_v = []
    part = 1
    acceptation_of_change = []
    change_accepted = False
    method = "n"

# ---------------------------------- Get the video frame and prepare it for detection ------------------------------- #
    while cap.isOpened():  # loop while the video capture is open
        _, frame = cap.read()  # read a frame from the capture
        frame = cv2.flip(frame, 1)  # flip the video so it is not mirrored
        frame = frame[::4, ::4]  # downsample for Raspberry Pi performance
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert from BGR to grayscale

# ---------------------------------- Dlib Landmark face detection --------------------------------------------------- #
        faces = detector_dlib(gray)
        for face in faces:
            # view_face_frame(face, frame)  # view face frame
            landmarks = predictor_dlib(gray, face)  # detect face structures using landmarks

            # crop eyes from the video
            left_landmarks_array = landmarks_array(36, 37, 38, 39, 40, 41, landmarks, gray, lines=0)
            right_landmarks_array = landmarks_array(42, 43, 44, 45, 46, 47, landmarks, gray, lines=0)

            eye_fill = fill_frame(gray, left_landmarks_array, right_landmarks_array)  # black mask with just eyes

            # crop eyes from black rectangle
            left_eye_crop, min_left = crop_eyes(eye_fill, left_landmarks_array)
            right_eye_crop, min_right = crop_eyes(eye_fill, right_landmarks_array)

            # draw points into eyes
            for i in range(36, 48):
                draw_point(i, landmarks, frame)

            # draw points into face
            # for i in range(0, 67):
            #   draw_point(i, landmarks, frame)

# ---------------------------------- Left eye ---------------------------------------------------------------------- #
            threshold_left = cv2.getTrackbarPos('Right', 'Dlib Landmarks')  # getting position of the trackbar
            no_reflex_left = delete_corneal_reflection(left_eye_crop, threshold_left)  # remove corneal reflection
            gama_corrected_left = gama_correction(no_reflex_left, 1.2)  # gamma correction
            hsv_img_left = converting_gray_to_hsv(gama_corrected_left)  # convert the frame to HSV
            filtrated_img_left = filtration(hsv_img_left)  # apply filtering
            eye_preprocessed_left = preprocessing(filtrated_img_left, threshold_left)  # morphological operations
            cv2.imshow("eye_processed_left", eye_preprocessed_left)
            contours_left = contours_of_shape(eye_preprocessed_left, threshold_left)  # get contours
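            # For each contour below, the blob centroid is taken from the image
            # moments, cx = M10 / M00 and cy = M01 / M00, which approximates the
            # pupil center inside the cropped eye.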
            if contours_left is not None:
                for c_left in contours_left:
                    if c_left is not None:
                        m_left = cv2.moments(c_left)
                        if m_left["m00"] > 1:
                            cx_left = int(m_left["m10"] / m_left["m00"])  # x coordinate for middle of blob
                            cy_left = int(m_left["m01"] / m_left["m00"])  # y coordinate for middle of blob
                            # position of left pupil in whole frame
                            left_center_pupil = [cx_left + min_left[0], cy_left + min_left[1]]
                            # position of left pupil in eye frame
                            left_center_pupil_in_eye_frame = [cx_left, cy_left]
                            # show pupil
                            cv2.circle(frame, (left_center_pupil[0], left_center_pupil[1]), 1, (255, 0, 0), 2)

# ---------------------------------- Right eye ---------------------------------------------------------------------- #
            threshold_right = cv2.getTrackbarPos('Left', 'Dlib Landmarks')  # getting position of the trackbar
            no_reflex_right = delete_corneal_reflection(right_eye_crop, threshold_right)  # remove corneal reflection
            gama_corrected_right = gama_correction(no_reflex_right, 1.2)  # gamma correction
            hsv_img_right = converting_gray_to_hsv(gama_corrected_right)  # convert the frame to HSV
            filtrated_img_right = filtration(hsv_img_right)  # apply filtering
            eye_preprocessed_right = preprocessing(filtrated_img_right, threshold_right)  # morphological operations
            cv2.imshow("eye_processed_right", eye_preprocessed_right)
            contours_right = contours_of_shape(eye_preprocessed_right, threshold_right)  # get contours
            if contours_right is not None:
                for c_right in contours_right:
                    if c_right is not None:
                        m_right = cv2.moments(c_right)
                        if m_right["m00"] > 1:
                            cx_right = int(m_right["m10"] / m_right["m00"])  # x coordinate for middle of blob
                            cy_right = int(m_right["m01"] / m_right["m00"])  # y coordinate for middle of blob
                            # position of right pupil in whole frame
                            right_center_pupil = [cx_right + min_right[0], cy_right + min_right[1]]
                            # position of right pupil in eye frame
                            right_center_pupil_in_eye_frame = [cx_right, cy_right]
                            # show pupil
                            cv2.circle(frame, (right_center_pupil[0], right_center_pupil[1]), 1, (255, 0, 0), 2)

# ---------------------------------- Show vector after pressing v --------------------------------------------------- #
        if k == ord('v') and not press_v:
            press_v = True  # for active vector drawing
            print("Vector mode activated.")
            print('For starting calibration mode press c.')

            # finding calibration vector
            calibrating_vector_in_frame_left = calibrate_vector_eye_center(left_center_pupil_in_eye_frame)
            calibrating_vector_in_frame_right = calibrate_vector_eye_center(right_center_pupil_in_eye_frame)

        if press_v:
            press_c = False
            # get vector coordinates in frame
            left_vector_center = vector_start_center(calibrating_vector_in_frame_left, [min_left[0], min_left[1]])
            right_vector_center = vector_start_center(calibrating_vector_in_frame_right, [min_right[0], min_right[1]])

            # finding vector
            output_vector_in_eye_frame = find_vector(left_center_pupil_in_eye_frame,
                                                     (calibrating_vector_in_frame_left[0],
                                                      calibrating_vector_in_frame_left[1]),
                                                     right_center_pupil_in_eye_frame,
                                                     (calibrating_vector_in_frame_right[0],
                                                      calibrating_vector_in_frame_right[1]))

            # start of vector
            start_left = (left_vector_center[0], left_vector_center[1])
            start_right = (right_vector_center[0], right_vector_center[1])

            # end of vector
            end_left = (int(output_vector_in_eye_frame[0] * 10) + left_vector_center[0],
                        int(output_vector_in_eye_frame[1] * 10) + left_vector_center[1])
            end_right = (int(output_vector_in_eye_frame[0] * 10) + right_vector_center[0],
                         int(output_vector_in_eye_frame[1] * 10) + right_vector_center[1])

            # show vector in frame
            if end_left == (0, 0) or end_right == (0, 0):
                print("Pupil not detected. Try to adjust the threshold and press v again.")

            if end_left is not None and end_right is not None:
                if output_vector_in_eye_frame[2] > 0:
                    cv2.arrowedLine(frame, start_left, end_left, color=(0, 255, 0), thickness=1)
                    cv2.arrowedLine(frame, start_right, end_right, color=(0, 255, 0), thickness=1)

# ---------------------------------- Get nine calibration points ------------------------------------------------ #
        if k == ord('c') and not press_c:
            prepare_mask_for_calibration(screensize, 1)
            press_1 = False
            press_c = True
            print('Look into lower left corner and press 1.')

        if k == ord('1') and not press_1:
            prepare_mask_for_calibration(screensize, 2)
            press_1 = True
            press_2 = False
            press_delete = False
            lower_left_corner = lower_left(output_vector_in_eye_frame)
            print("Lower left corner saved.")
            print('Look into middle left and press 2.')

        if k == ord('2') and not press_2:
            prepare_mask_for_calibration(screensize, 3)
            press_2 = True
            press_3 = False
            middle_left_corner = middle_left(output_vector_in_eye_frame)
            print("Middle left saved.")
            print('Look into upper left corner and press 3.')

        if k == ord('3') and not press_3:
            prepare_mask_for_calibration(screensize, 4)
            press_3 = True
            press_4 = False
            upper_left_corner = upper_left(output_vector_in_eye_frame)
            print("Upper left corner saved.")
            print('Look into middle bottom and press 4.')

        if k == ord('4') and not press_4:
            prepare_mask_for_calibration(screensize, 5)
            press_4 = True
            press_5 = False
            middle_bottom_corner = middle_bottom(output_vector_in_eye_frame)
            print("Middle bottom saved.")
            print('Look into middle of the screen and press 5.')

        if k == ord('5') and not press_5:
            prepare_mask_for_calibration(screensize, 6)
            press_5 = True
            press_6 = False
            middle = middle_screen(output_vector_in_eye_frame)
            print("Middle saved.")
            print('Look into middle top and press 6.')

        if k == ord('6') and not press_6:
            prepare_mask_for_calibration(screensize, 7)
            press_6 = True
            press_7 = False
            middle_up_corner = middle_up(output_vector_in_eye_frame)
            print("Middle top saved.")
            print('Look into lower right corner and press 7.')

        if k == ord('7') and not press_7:
            prepare_mask_for_calibration(screensize, 8)
            press_7 = True
            press_8 = False
            lower_right_corner = lower_right(output_vector_in_eye_frame)
            print("Lower right corner saved.")
            print('Look into middle right corner and press 8.')

        if k == ord('8') and not press_8:
            prepare_mask_for_calibration(screensize, 9)
            press_8 = True
            press_9 = False
            middle_right_corner = middle_right(output_vector_in_eye_frame)
            print("Middle right saved.")
            print('Look into upper right corner and press 9.')

        if k == ord('9') and not press_9:
            press_9 = True
            send_calibration_data_state = True
            press_e = False
            upper_right_corner = upper_right(output_vector_in_eye_frame)
            print("Upper right corner saved.")
            print("Pres enter for saving measured data or d for deleting measured data")
            prepare_mask_for_calibration(screensize, 0)

# ---------------------------------- Delete everything and start over ----------------------------------------------- #
        if k == ord('d') and not press_delete:
            press_delete = True
            press_v = False
            print("Vector mode deactivated.")
            print("Measured data from corners were deleted.")
            print("Ready to start new measurment.")
            print("Press v to show vector")
            press_c = True
            press_1 = True
            press_2 = True
            press_3 = True
            press_4 = True
            press_5 = True
            press_6 = True
            press_7 = True
            press_8 = True
            press_9 = True
            lower_left_corner = [0, 0, 0, 0]
            upper_right_corner = [0, 0, 0, 0]
            upper_left_corner = [0, 0, 0, 0]
            lower_right_corner = [0, 0, 0, 0]
            middle = [0, 0, 0, 0]
            middle_right_corner = [0, 0, 0, 0]
            middle_up_corner = [0, 0, 0, 0]
            middle_bottom_corner = [0, 0, 0, 0]
            middle_left_corner = [0, 0, 0, 0]
            send_calibration_data_state = False
            press_s = True
            press_e = False
            cv2.destroyWindow('calibration')
            mask_for_eyetracking_bgr = empty_mask_for_eyetracking(size_of_output_screen)
            mask_for_eyetracking_target = empty_mask_for_eyetracking(size_of_output_screen)
            mask_for_eyetracking_target_save = empty_mask_for_eyetracking(size_of_output_screen)

# ---------------------------------- Show calibration points and interpolate them ----------------------------------- #
        if upper_left_corner != [0, 0, 0, 0] and upper_right_corner != [0, 0, 0, 0] and \
                lower_left_corner != [0, 0, 0, 0] and lower_right_corner != [0, 0, 0, 0] and middle != [0, 0, 0, 0] and \
                middle_right_corner != [0, 0, 0, 0] and middle_up_corner != [0, 0, 0, 0] and \
                middle_bottom_corner != [0, 0, 0, 0] and middle_left_corner != [0, 0, 0, 0] and \
                send_calibration_data_state and k == 13 and not press_e:
            send_calibration_data_state = False
            cv2.destroyWindow('calibration')

            # print calibrating points vectors
            print("Data for calibration were measured successfully.")
            print("Lower left corner: ", lower_left_corner)
            print("Middle left: ", middle_left_corner)
            print("Upper left corner: ", upper_left_corner)
            print("Middle bottom: ", middle_bottom_corner)
            print("Middle: ", middle)
            print("Middle top: ", middle_up_corner)
            print("Lower right corner: ", lower_right_corner)
            print("Middle right: ", middle_right_corner)
            print("Upper right corner: ", upper_right_corner)
            print("Wait please. Calibration in progress...")

            # interpolate with calibrated points
            u_interp, v_interp = interpolation(lower_left_corner, middle_left_corner, upper_left_corner,
                                               middle_bottom_corner, middle, middle_up_corner,
                                               lower_right_corner, middle_right_corner, upper_right_corner,
                                               size_of_output_screen)
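            # The nine calibration vectors are interpolated over the whole
            # size_of_output_screen grid, giving per-cell expected (u, v)
            # gaze-vector components to match against during tracking.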

            print("Calibration done successfully.")
            print("For starting eyetracker press e. For stopping eyetracker press s.")

# ---------------------------------- Start eyetracking -------------------------------------------------------------- #
        if k == ord('e') and not press_e:
            press_e = True  # activates eyetracking mode
            press_s = False
            print("You can choose between random target and fix target."
                  "For random target press n, for fix target press m.")
            print("Eyetracker starts...")

        if press_e:  # active eyetracking mode
            normalized_u_interp, normalized_u = normalize_array(u_interp, output_vector_in_eye_frame[2])  # normalize u
            normalized_v_interp, normalized_v = normalize_array(v_interp, output_vector_in_eye_frame[3])  # normalize v

            # find best vector in interpolated field
            result_numbers, result_x, \
            result_y, result_diff, nothing_found = find_closest_in_array(normalized_u_interp, normalized_v_interp,
                                                                         (normalized_u, normalized_v), 0.2, 0.2)
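            # find_closest_in_array searches the normalized (u, v) fields for
            # the cell closest to the measured vector within the 0.2 tolerances,
            # presumably flagging nothing_found when no cell qualifies.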

# ---------------------------------- Start moving target after pressing m ------------------------------------------- #
            if k == ord('m'):
                print("Target mode activated.")
                target_m = True
                mask_for_eyetracking_target = empty_mask_for_eyetracking(size_of_output_screen)
                method = "m"
                print("Moving target started")

            if target_m:  # method is moving target
                step = 0

                # delete old target
                draw_line(mask_for_eyetracking_target, coordinates_of_center_dot, step, (255, 255, 255))

                # get new center
                coordinates_of_center_dot_out, part_out, \
                acceptation_of_change, change_accepted = change_coordinates_of_target(coordinates_of_center_dot,
                                                                                      size_of_output_screen, part,
                                                                                      change_accepted,
                                                                                      acceptation_of_change)
                coordinates_of_center_dot = coordinates_of_center_dot_out
                if len(acceptation_of_change) == speed_of_target:  # buffer for target
                    change_accepted = True
                    acceptation_of_change = []
                part = part_out

                if part == 16:
                    draw_line(mask_for_eyetracking_target, coordinates_of_center_dot, step, (255, 255, 255))

                hit_target_value = []
                hit_target = False

                # draw target
                draw_line(mask_for_eyetracking_target, coordinates_of_center_dot, step, (255, 0, 0))
# ---------------------------------- Random target after pressing n -------------------------------------------------- #
            if k == ord('n') and not press_n:  # first press: clear the mask
                mask_for_eyetracking_bgr = empty_mask_for_eyetracking(size_of_output_screen)
                press_n = True
            elif k == ord('n') and press_n:  # later presses: place the next target
                print("Target mode activated.")
                print("Press n for next target.")
                method = "n"
                step = 0

                # delete old target
                draw_line(mask_for_eyetracking_bgr, coordinates_of_center_dot, step, (255, 255, 255))

                # check whether there is an eyetracker value before placing the target
                mask_for_eyetracking_bgr_out, draw_point_after_next_target, \
                value_of_point = check_target_spot_before(draw_point_after_next_target, hit_target,
                                                          mask_for_eyetracking_bgr, coordinates_of_center_dot,
                                                          value_of_point, hit_target_value)
                mask_for_eyetracking_bgr = mask_for_eyetracking_bgr_out

                # get new center
                coordinates_of_center_dot = change_coordinates_of_target_random(size_of_output_screen)

                hit_target_value = []
                hit_target = False

                # check whether the eyetracker hit the target
                mask_for_eyetracking_bgr_out, draw_point_after_next_target, \
                value_of_point = check_target_spot_after(mask_for_eyetracking_bgr, coordinates_of_center_dot)
                mask_for_eyetracking_bgr = mask_for_eyetracking_bgr_out

                # draw target
                draw_line(mask_for_eyetracking_bgr, coordinates_of_center_dot, step, (255, 0, 0))

# ---------------------------------- Draw/show eyetracking ---------------------------------------------------------- #
            if method == "n":
                cv2.namedWindow("Eyetracking", cv2.WINDOW_NORMAL)  # make new window with window name
                cv2.setWindowProperty("Eyetracking", cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)  # set window to full screen
            elif method == "m":
                cv2.namedWindow("Target", cv2.WINDOW_NORMAL)  # make new window with window name
                cv2.setWindowProperty("Target", cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)  # set window to full screen

            # show eyetracking result in frame called 'Eyetracking'
            coor_x, coor_y, \
            mask_for_eyetracking_bgr, hit_target, \
            hit_target_value = show_eyetracking(result_x, result_y, size_of_output_screen,
                                                mask_for_eyetracking_bgr, coordinates_of_center_dot,
                                                hit_target, hit_target_value)

            # show target
            coor_x_target, coor_y_target, \
            mask_for_eyetracking_target_save = show_target(mask_for_eyetracking_target_save, coordinates_of_center_dot)

# ---------------------------------- Saving results ----------------------------------------------------------------- #
            dot_0 = coordinates_of_center_dot[1]
            dot_1 = coordinates_of_center_dot[0]
            u_found_normalized = normalized_u_interp[coor_x, coor_y]
            v_found_normalized = normalized_v_interp[coor_x, coor_y]
            u_found = u_interp[coor_x, coor_y]
            v_found = v_interp[coor_x, coor_y]

            if nothing_found == 1:
                print("Vector can't be found.")
                coor_x = -1
                coor_y = -1
                u_found_normalized = -1
                v_found_normalized = -1
                u_found = -1
                v_found = -1

                dot_0 = -1
                dot_1 = -1
                normalized_u = -1
                normalized_v = -1
                output_vector_in_eye_frame[2] = -1
                output_vector_in_eye_frame[3] = -1

            result_eyetracker_coordinate_x.append(coor_x)
            result_eyetracker_coordinate_y.append(coor_y)
            result_eyetracker_found_u_normalized.append(u_found_normalized)
            result_eyetracker_found_v_normalized.append(v_found_normalized)
            result_eyetracker_found_u.append(u_found)
            result_eyetracker_found_v.append(v_found)

            target_coordinate_x.append(dot_0)
            target_coordinate_y.append(dot_1)
            measured_vector_true_u_normalized.append(normalized_u)
            measured_vector_true_v_normalized.append(normalized_v)
            measured_vector_true_u.append(output_vector_in_eye_frame[2])
            measured_vector_true_v.append(output_vector_in_eye_frame[3])

# ---------------------------------- Write video and show image ----------------------------------------------------- #
            mask_bgr_reshaped_nearest = cv2.resize(mask_for_eyetracking_bgr, mask_reshape_dimension,
                                                   interpolation=cv2.INTER_NEAREST)
            mask_bgr_target_reshaped_nearest = cv2.resize(mask_for_eyetracking_target, mask_reshape_dimension,
                                                          interpolation=cv2.INTER_NEAREST)
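            # INTER_NEAREST upscaling keeps each low-resolution tracking cell as
            # a crisp block instead of blurring it across the full-screen mask.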

            out_detection.write(frame)
            out_mask.write(mask_bgr_reshaped_nearest)

            if method == "n":  # for random target
                cv2.imshow("Eyetracking", mask_bgr_reshaped_nearest)
            elif method == "m":  # for animated target
                cv2.imshow("Target", mask_bgr_target_reshaped_nearest)

# ---------------------------------- Stop eyetracking --------------------------------------------------------------- #
        if k == ord('s') and not press_s:
            press_s = True
            press_e = False
            cv2.waitKey(1)
            cv2.destroyWindow("Eyetracking")
            cv2.destroyWindow("Target")
            mask_for_eyetracking_bgr = empty_mask_for_eyetracking(size_of_output_screen)
            mask_for_eyetracking_target = empty_mask_for_eyetracking(size_of_output_screen)
            mask_for_eyetracking_target_save = empty_mask_for_eyetracking(size_of_output_screen)
            print("Eyetracker stops...")

# ---------------------------------- Show result and keyboard check ------------------------------------------------- #
        cv2.imshow('Dlib Landmarks', frame)  # visualization of detection
        k = cv2.waitKey(1) & 0xFF  # get key that is pressed on keyboard

# ---------------------------------- Quit program after pressing q -------------------------------------------------- #
        if k == ord('q'):
            cap.release()
            out_detection.release()
            out_mask.release()
            cv2.destroyAllWindows()

            # make array from found and measured data
            result_eyetracker_array = make_array_from_vectors(result_eyetracker_coordinate_x,
                                                              result_eyetracker_coordinate_y,
                                                              result_eyetracker_found_u_normalized,
                                                              result_eyetracker_found_v_normalized,
                                                              result_eyetracker_found_u,
                                                              result_eyetracker_found_v)

            target_and_measured_vector_array = make_array_from_vectors(target_coordinate_x,
                                                                       target_coordinate_y,
                                                                       measured_vector_true_u_normalized,
                                                                       measured_vector_true_v_normalized,
                                                                       measured_vector_true_u,
                                                                       measured_vector_true_v)
            heat_map(mask_for_eyetracking_bgr, (screensize[1], screensize[0]), "eyetracking")
            heat_map(mask_for_eyetracking_target_save, (screensize[1], screensize[0]), "target")
            # save measured and found data
            np.save("results/result_eyetracker_array", result_eyetracker_array)
            np.save("results/target_and_measured_vector_array", target_and_measured_vector_array)
            np.save("results/eyetracker_screen_nearest", mask_bgr_reshaped_nearest)
            np.save("results/eyetracker_screen", mask_for_eyetracking_bgr)
            np.save("results/eyetracker_target", mask_for_eyetracking_target_save)
            break
    cap.release()  # release recording and streaming videos
    out_detection.release()
    out_mask.release()
    cv2.destroyAllWindows()  # close all windows