Code Example #1
def main():
    print('Initializing main function')
    file_path = '/home/mauricio/Videos/mjpeg/11-00-00.mjpeg'

    print('Initializing instance')
    open_pose = ClassOpenPose()

    images = ClassMjpegReader.process_video(file_path)
    print('Size list')
    print(len(images))
    print('Done')

    print('Reading list using opencv')
    cv2.namedWindow('test')

    for elem in images:
        image = elem[0]
        ticks = elem[1]

        image_np = np.frombuffer(image, dtype=np.uint8)
        print(ticks)
        print(len(image))
        image_cv = cv2.imdecode(image_np, cv2.IMREAD_ANYCOLOR)
        image_recognize = open_pose.recognize_image_draw(image_cv)

        cv2.imshow('test', image_recognize)
        cv2.waitKey(10)

    cv2.destroyAllWindows()
    print('Done!')
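For reference, the decode step that the loop above relies on can be exercised in isolation; below is a minimal sketch using plain OpenCV and NumPy that also guards against cv2.imdecode returning None for a corrupt buffer (the sample JPEG path is hypothetical):

import cv2
import numpy as np


def decode_jpeg_frame(jpeg_bytes):
    # Wrap the raw JPEG bytes in a NumPy buffer and let OpenCV decode them
    buffer_np = np.frombuffer(jpeg_bytes, dtype=np.uint8)
    frame = cv2.imdecode(buffer_np, cv2.IMREAD_ANYCOLOR)

    # cv2.imdecode returns None when the buffer is not a valid image
    if frame is None:
        raise ValueError('Could not decode JPEG buffer')

    return frame


if __name__ == '__main__':
    # Hypothetical sample file holding a single JPEG frame
    with open('/home/mauricio/Videos/mjpeg/sample_frame.jpg', 'rb') as f:
        print(decode_jpeg_frame(f.read()).shape)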
Code Example #2
def main():
    print('Initializing main function')

    # Initializing instances
    instance_pose = ClassOpenPose()

    filename = '/home/mauricio/Pictures/walk.jpg'
    image = cv2.imread(filename)

    arr = instance_pose.recognize_image(image)

    if len(arr) != 1:
        raise Exception(
            'Expected exactly one person in the image, found: {0}'.format(len(arr)))

    per_arr = arr[0]
    print(per_arr)

    ClassDescriptors.draw_pose(image, per_arr, min_score)
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)

    cv2.imshow('main_window', image)
    cv2.waitKey(0)

    cv2.destroyAllWindows()

    print('Done!')
Code Example #3
def pre_process_images(list_folders_scores, recalculate):
    print('Start pre_processing images')

    # Loading instances
    instance_pose = ClassOpenPose()

    for folder, min_score in list_folders_scores:
        for file in os.listdir(folder):
            full_path = os.path.join(folder, file)
            extension = ClassUtils.get_filename_extension(full_path)

            if extension != '.jpg':
                print('Ignoring file {0}'.format(full_path))
                continue

            file_no_ext = ClassUtils.get_filename_no_extension(full_path)
            arr_file_name = os.path.join(folder, '{0}.json'.format(file_no_ext))

            # Skip files that already have a JSON result unless recalculation is requested
            if not recalculate:
                if os.path.isfile(arr_file_name):
                    print('File already processed {0}'.format(full_path))
                    continue

            # Processing file
            print('Processing file {0}'.format(full_path))
            image = cv2.imread(full_path)

            arr, img_draw = instance_pose.recognize_image_tuple(image)

            arr_pass = list()

            # Checking vector integrity for all elements
            # Verify there is at least one arm and one leg
            for elem in arr:
                if ClassUtils.check_vector_integrity_part(elem, min_score):
                    arr_pass.append(elem)

            # Expect exactly one person with vector integrity
            if len(arr_pass) != 1:
                for elem in arr_pass:
                    pt1, pt2 = ClassUtils.get_rectangle_bounds(elem, ClassUtils.MIN_POSE_SCORE)
                    cv2.rectangle(img_draw, pt1, pt2, (0, 0, 255), 3)

                cv2.namedWindow('main_window')
                cv2.imshow('main_window', img_draw)
                print(arr)
                print(arr_pass)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
                raise Exception('Invalid len: {0} file {1}'.format(len(arr_pass), full_path))

            person_arr = arr_pass[0]

            arr_str = json.dumps(person_arr.tolist())

            with open(arr_file_name, 'w') as text_file:
                text_file.write(arr_str)

    print('Done!')
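Each processed image ends up with a sibling .json file that holds a single pose array. A minimal read-back sketch under that assumption (the concrete path is hypothetical):

import json

import numpy as np


def load_pose_json(arr_file_name):
    # Restore the array written by pre_process_images (json.dumps of person_arr.tolist())
    with open(arr_file_name, 'r') as text_file:
        return np.asarray(json.loads(text_file.read()))


if __name__ == '__main__':
    pose = load_pose_json('/home/mauricio/CNN/Classes/Walk/example.json')  # hypothetical path
    print(pose.shape)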
Code Example #4
def main():
    print('Initializing main function')

    # Initializing instances
    instance_pose = ClassOpenPose()

    # Withdrawing Tk window
    Tk().withdraw()

    # Loading folder from element in list
    init_dir = '/home/mauricio/Pictures/Poses'
    options = {'initialdir': init_dir}
    dir_name = filedialog.askdirectory(**options)

    if not dir_name:
        print('Directory not selected')
    else:
        print('Selected dir: {0}'.format(dir_name))

        list_files = os.listdir(dir_name)
        cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)

        index = 0
        print('Press arrows to move, ESC to exit')
        while True:
            file = list_files[index]
            full_path = os.path.join(dir_name, file)

            print('Loading image {0}'.format(full_path))
            image = cv2.imread(full_path)

            arr, pro_img = instance_pose.recognize_image_tuple(image)
            cv2.imshow('main_window', pro_img)

            key = cv2.waitKey(0)

            if key == 52:
                # Left arrow
                index -= 1
                if index < 0:
                    index = 0
            elif key == 54:
                # Right arrow
                index += 1
                if index == len(list_files):
                    index = len(list_files) - 1
            elif key == 27:
                # Esc
                break

        print('Done!')
Code Example #5
def test_full_color_compare():
    print('Processing full color comparison')

    # Loading instances
    instance_pose = ClassOpenPose()

    # Loading images
    image1, image2, pose1, pose2 = ClassDescriptors.load_images_comparision(
        instance_pose, min_score)

    # Performing color comparison
    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            image1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            image2,
                                                            decode_img=False)

    # Performing custom comparison first
    diff_upper, diff_lower, delta = ClassUtils.compare_colors(
        upper1, upper2, lower1, lower2)

    print('Diff upper: {0}'.format(diff_upper))
    print('Diff lower: {0}'.format(diff_lower))
    print('Delta: {0}'.format(delta))

    # Showing images
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', np.hstack((image1, image2)))

    cv2.waitKey(0)

    print('Done!')
Code Example #6
def get_transformed_points():
    min_score = 0.05
    instance_pose = ClassOpenPose()
    res = ClassDescriptors.load_images_comparision_ext(instance_pose, min_score, load_one_img=True)

    person_arr = res['pose1']
    descriptors = ClassDescriptors.get_person_descriptors(person_arr, min_score, cam_number=0,
                                                          image=None, calib_params=None,
                                                          decode_img=False,
                                                          instance_nn_pose=None)
    transformed_points = descriptors['transformedPoints']
    print(transformed_points)

    # Save pose for debugging purposes
    re_scale_factor = 100
    new_points = ClassDescriptors.re_scale_pose_factor(transformed_points,
                                                       re_scale_factor, min_score)
    img_pose = ClassDescriptors.draw_pose_image(new_points, min_score, is_transformed=True)

    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', img_pose)

    cv2.waitKey()
    cv2.destroyAllWindows()

    print('Done!')
Code Example #7
def evaluating_fast_nn():
    print('Initializing evaluating fast nn')

    classes = 8
    hidden_layers = 40
    instance_nn = ClassNN(model_dir=ClassNN.model_dir_pose, classes=classes, hidden_number=hidden_layers)
    instance_pose = ClassOpenPose()

    info = ClassDescriptors.load_images_comparision_ext(instance_pose, min_score=0.05, load_one_img=True)
    pose1 = info['pose1']

    items = ClassDescriptors.get_person_descriptors(pose1, 0.05)

    # Valid pose for detection
    data_to_add = list()
    data_to_add += items['angles']
    data_to_add += ClassUtils.get_flat_list(items['transformedPoints'])

    data_np = np.asanyarray(data_to_add, dtype=np.float)
    result = instance_nn.predict_model_fast(data_np)
    print(result)
    key_pose = result['classes']
    probability = result['probabilities'][key_pose]

    print('Key pose: {0}'.format(key_pose))
    print('Probability: {0}'.format(probability))
    print('Done!')
Code Example #8
def process_btf():
    print('Processing BTF transformation')

    # Loading instances
    instance_pose = ClassOpenPose()
    image1, image2, pose1, pose2 = ClassDescriptors.load_images_comparision(
        instance_pose, min_score)

    hists1 = ClassDescriptors.get_color_histograms(pose1,
                                                   min_score,
                                                   image1,
                                                   decode_img=False)
    hists2 = ClassDescriptors.get_color_histograms(pose2,
                                                   min_score,
                                                   image2,
                                                   decode_img=False)

    # Plotting
    # plot_histograms(hists1)

    # Showing first images without transformation
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)

    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            image1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            image2,
                                                            decode_img=False)

    print('Diff1: {0}'.format(ClassUtils.get_color_diff_rgb(upper1, upper2)))
    print('Diff2: {0}'.format(ClassUtils.get_color_diff_rgb(lower1, lower2)))

    cv2.imshow('main_window', np.hstack((image1, image2)))
    print('Press any key to continue')
    cv2.waitKey(0)

    # Perform image transformation
    image_tr = ClassDescriptors.transform_image(image2, hists2, hists1)
    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            image1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            image_tr,
                                                            decode_img=False)

    print('Diff1: {0}'.format(ClassUtils.get_color_diff_rgb(upper1, upper2)))
    print('Diff2: {0}'.format(ClassUtils.get_color_diff_rgb(lower1, lower2)))

    cv2.imshow('main_window', np.hstack((image1, image_tr)))
    print('Press any key to continue')
    cv2.waitKey(0)

    cv2.destroyAllWindows()
    print('Done!')
Code Example #9
def test_color_compare():
    print('Test color comparison')

    print('Loading image comparison')
    # Loading instances
    instance_pose = ClassOpenPose()

    # Avoid opening two prompts
    obj_img = ClassDescriptors.load_images_comparision_ext(instance_pose,
                                                           min_score,
                                                           load_one_img=True)
    hist_pose1 = obj_img['listPoints1']

    list_process = list()

    # Iterating in examples folder
    for root, _, files in os.walk(EXAMPLES_FOLDER):
        for file in files:
            full_path = os.path.join(root, file)
            extension = ClassUtils.get_filename_extension(full_path)

            if extension == '.jpg':
                list_process.append(full_path)

    # Sorting list
    list_process.sort()

    list_result = list()
    score_max_pt = -1

    for full_path in list_process:
        print('Processing file: {0}'.format(full_path))
        json_path = full_path.replace('.jpg', '.json')

        with open(json_path, 'r') as f:
            obj_json = json.loads(f.read())

        hist_pose2 = obj_json['histPose']
        diff = ClassDescriptors.get_kmeans_diff(hist_pose1, hist_pose2)
        print('Diff {0}'.format(diff))

        # Consider it a match when the k-means color difference is small enough
        res = diff <= 15

        list_result.append({
            'filename': ClassUtils.get_filename_no_extension(full_path),
            'score': res
        })

    # list_result.sort(key=lambda x: x['score'])
    print('Printing list result')
    print(json.dumps(list_result, indent=2))
    print('score_max_pt: {0}'.format(score_max_pt))

    print('Done!')
Code Example #10
def get_poses_seq(folder: str,
                  instance_nn: ClassNN,
                  instance_pose: ClassOpenPose,
                  only_json=False):
    # List all files in the folder
    list_files = []
    for file in os.listdir(folder):
        list_files.append(os.path.join(folder, file))

    # Sorting elements
    list_files.sort()

    # Get elements
    list_desc = list()
    for path in list_files:
        ext = ClassUtils.get_filename_extension(path)
        if only_json:
            if ext != '.json':
                print('Ignoring file: {0}'.format(path))
                continue

            with open(path, 'r') as file:
                person_arr_str = file.read()
                person_arr = json.loads(person_arr_str)
        else:
            if ext != '.jpg':
                print('Ignoring file: {0}'.format(path))
                continue

            print('Processing file {0}'.format(path))
            image = cv2.imread(path)

            arr = instance_pose.recognize_image(image)

            arr_pass = []
            for person_arr in arr:
                if ClassUtils.check_vector_integrity_part(
                        person_arr, min_score):
                    arr_pass.append(person_arr)

            if len(arr_pass) != 1:
                print('Ignoring file {0} - Len arr_pass: {1}'.format(
                    path, len(arr_pass)))
                continue

            person_arr = arr_pass[0]

        result_desc = ClassDescriptors.get_person_descriptors(
            person_arr, min_score)
        list_desc.append(result_desc['fullDesc'])

    list_desc_np = np.asarray(list_desc, np.float)
    results = instance_nn.predict_model_array(list_desc_np)

    list_classes = []
    for result in results:
        list_classes.append(result['classes'])

    return list_classes
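A possible call site for get_poses_seq, reusing paths that appear elsewhere in these examples; whether that model directory and folder actually match is an assumption:

# Hedged usage sketch: assumes the folder already contains per-frame .json pose files
# and that a trained model was saved under the given directory
instance_pose = ClassOpenPose()
instance_nn = ClassNN.load_from_params('/home/mauricio/models/nn_classifier')

list_classes = get_poses_seq('/home/mauricio/Pictures/Poses/Walk_Front',
                             instance_nn,
                             instance_pose,
                             only_json=True)
print(list_classes)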
Code Example #11
def process_single():
    print('Initializing process_single')

    # Loading instances
    instance_pose = ClassOpenPose()

    # Opening filename
    init_dir = '/home/mauricio/Pictures'
    options = {'initialdir': init_dir}
    filename = askopenfilename(**options)

    if not filename:
        filename = '/home/mauricio/Pictures/Poses/left_walk/636550788328999936_420.jpg'

    # Checking the file extension
    extension = os.path.splitext(filename)[1]

    if extension != '.jpg' and extension != '.jpeg':
        raise Exception('Extension is not jpg or jpeg')

    # Opening filename
    image = cv2.imread(filename)

    # Loading image and array
    arr, processed_img = instance_pose.recognize_image_tuple(image)

    # Creating the window used to display results
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)

    arr_pass = list()
    min_score = 0.05

    # Checking vector integrity for all elements
    # Verify there is at least one arm and one leg
    for elem in arr:
        if ClassUtils.check_vector_integrity_pos(elem, min_score):
            arr_pass.append(elem)

    if len(arr_pass) != 1:
        print('Expected exactly one valid person in the image, found {0}'.format(len(arr_pass)))
        cv2.imshow('main_window', processed_img)
        cv2.waitKey(0)
    else:
        person_array = arr_pass[0]
        print('Person array: {0}'.format(person_array))
        generate_descriptors(person_array, image, processed_img, min_score)
Code Example #12
    def convert_video_mjpeg(cls, file_path: str, delete_mjpeg=False):
        extension = os.path.splitext(file_path)[1]

        if extension != '.mjpeg':
            raise Exception('file_path must have extension .mjpeg')
        else:
            output_video = file_path.replace('.mjpeg', '.mjpegx')
            print('output_video: ' + output_video)

            # Converting to a temporary file with suffix _1 first to avoid clobbering the output
            file_path_temp = output_video + '_1'

            print('Initializing instance')
            open_pose = ClassOpenPose()

            images = ClassMjpegReader.process_video(file_path)
            print('Len images: ' + str(len(images)))
            with open(file_path_temp, 'wb') as newFile:
                counter = 0
                for elem in images:
                    image = elem[0]
                    ticks = elem[1]

                    image_np = np.frombuffer(image, dtype=np.uint8)
                    image_cv = cv2.imdecode(image_np, cv2.IMREAD_ANYCOLOR)
                    arr = open_pose.recognize_image(
                        image_cv)  # type: np.ndarray

                    json_dict = {'vectors': arr.tolist()}
                    ClassUtils.write_in_file(newFile, ticks, image, json_dict)

                    counter += 1
                    if counter % 10 == 0:
                        print('Counter: ' + str(counter))

            # Delete the existing mjpegx file, if any
            if os.path.exists(output_video):
                os.remove(output_video)

            # Rename the temporary file to the final output name
            os.rename(file_path_temp, output_video)

            if delete_mjpeg:
                os.remove(file_path)
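The method above is a classmethod, but its enclosing class is not shown in this excerpt; assuming it is called ClassMjpegConverter, an invocation might look like this:

# Hedged usage sketch: ClassMjpegConverter is an assumed name for the enclosing class
file_path = '/home/mauricio/Videos/mjpeg/11-00-00.mjpeg'
ClassMjpegConverter.convert_video_mjpeg(file_path, delete_mjpeg=False)
# Writes /home/mauricio/Videos/mjpeg/11-00-00.mjpegx next to the source file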
Code Example #13
def main():
    print('Initializing main function')

    Tk().withdraw()

    # Loading instances
    instance_pose = ClassOpenPose()

    # Loading filename
    init_dir = '/home/mauricio/Pictures/TestPlumb'
    options = {'initialdir': init_dir}
    filename = askopenfilename(**options)

    if not filename:
        filename = '/home/mauricio/Pictures/419.jpg'

    print('Filename: {0}'.format(filename))
    image = cv2.imread(filename)

    arr = instance_pose.recognize_image(image)
    valid_arr = list()

    for person_arr in arr:
        if ClassUtils.check_vector_integrity_pos(person_arr, min_score):
            valid_arr.append(person_arr)

    if len(valid_arr) != 1:
        raise Exception('Invalid len for arr: {0}'.format(len(valid_arr)))

    person_arr = valid_arr[0]
    ClassDescriptors.draw_pose(image, person_arr, min_score)

    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', image)

    print_distances(person_arr)

    cv2.waitKey()
    cv2.destroyAllWindows()

    print('Done!')
Code Example #14
def main():
    print('Initializing main function')

    # Initializing instances
    instance_pose = ClassOpenPose()

    folder_training_1 = '/home/mauricio/Pictures/Poses/Walk_Front'
    folder_training_2 = '/home/mauricio/Pictures/Poses/Vehicle'
    folder_training_3 = '/home/mauricio/Pictures/Poses/Tires'

    data_1 = get_sets_folder(folder_training_1, 0, instance_pose)
    data_2 = get_sets_folder(folder_training_2, 1, instance_pose)
    data_3 = get_sets_folder(folder_training_3, 1, instance_pose)

    data_training = np.asarray(data_1[0] + data_2[0] + data_3[0],
                               dtype=np.float)
    label_training = np.asarray(data_1[1] + data_2[1] + data_3[1],
                                dtype=np.int)

    data_eval = np.asarray(data_1[2] + data_2[2] + data_3[2], dtype=np.float)
    label_eval = np.asarray(data_1[3] + data_2[3] + data_3[3], dtype=np.int)

    print(data_training)
    print(label_training)

    print('Len data_training: {0}'.format(data_training.shape))
    print('Len data_eval: {0}'.format(data_eval.shape))

    classes = 3
    hidden_neurons = 15
    model_dir = '/tmp/nnposes/'

    instance_nn = ClassNN(model_dir, classes, hidden_neurons)

    option = input(
        'Press 1 to train the model, 2 to eval, otherwise to predict')

    if option == '1':
        print('Training the model')

        # Delete previous folder to avoid conflicts in the training process
        if os.path.isdir(model_dir):
            # Removing directory
            shutil.rmtree(model_dir)

        instance_nn.train_model(data_training, label_training)
    elif option == '2':
        print('Eval the model')
        instance_nn.eval_model(data_eval, label_eval)
    else:
        print('Not implemented!')

    print('Done!')
Code Example #15
def main():
    print('Initializing main function')
    print('Initializing instance')

    instance = ClassOpenPose()
    print('Reading image')

    image = cv2.imread('/home/mauricio/Pictures/2_2.jpg')
    image2 = cv2.imread('/home/mauricio/Pictures/430.jpg')

    print('Recognizing image 1')
    arr, img_open = instance.recognize_image_tuple(image)
    print(arr)

    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', img_open)
    cv2.waitKey()

    print('Recognizing image 2')
    arr = instance.recognize_image(image2)
    print(arr)

    print('Done generating elements!')
Code Example #16
def main():
    print('Generating angle descriptors')

    # Initializing instances
    instance_pose = ClassOpenPose()

    # Reading pose from dir path
    image = '/home/mauricio/Pictures/walk.jpg'

    if not os.path.exists(image):
        print('The path {0} does not exist'.format(image))
    else:
        img_cv = cv2.imread(image)

        # Forwarding image
        arr, output_img = instance_pose.recognize_image_tuple(img_cv)
        person_array = arr[0]

        # Generating other descriptors
        # Assume image is OK
        shoulder_dis = ClassUtils.get_euclidean_distance_pt(person_array[1], person_array[2]) + \
                       ClassUtils.get_euclidean_distance_pt(person_array[1], person_array[5])

        torso_dis = ClassUtils.get_euclidean_distance_pt(person_array[1], person_array[8])

        # In total, we have a vector with 8 angles
        # We need to extract the characteristics of the 8 angles

        relation = shoulder_dis / torso_dis
        print(relation)

        cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('main_window', output_img)
        cv2.waitKey(0)

        cv2.destroyAllWindows()
        print('Done!')
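The relation above leans on ClassUtils.get_euclidean_distance_pt; a plain-Python sketch of what such a helper presumably computes, assuming each keypoint is an (x, y, score) triple as the indexing in the example suggests:

import math


def euclidean_distance_pt(point1, point2):
    # Assumes OpenPose-style keypoints (x, y, score); only x and y matter for distance
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])


# Made-up coordinates, purely for illustration
print(euclidean_distance_pt((320.0, 180.0, 0.9), (290.0, 200.0, 0.8)))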
Code Example #17
def process_lab_adj():
    print('Process lab adjustment')

    # Loading instances
    instance_pose = ClassOpenPose()

    # Loading images
    image1, image2, pose1, pose2 = ClassDescriptors.load_images_comparision(
        instance_pose, min_score)

    # Performing color comparison
    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            image1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            image2,
                                                            decode_img=False)

    # Performing custom comparison first
    diff_upper = ClassUtils.get_color_diff_rgb(upper1, upper2)
    diff_lower = ClassUtils.get_color_diff_rgb(lower1, lower2)

    print('Diff upper: {0}'.format(diff_upper))
    print('Diff lower: {0}'.format(diff_lower))

    # Performing Lab conversion and equalizing L values
    diff_upper_lum = ClassUtils.get_color_diff_rgb_lum(upper1, upper2)
    diff_lower_lum = ClassUtils.get_color_diff_rgb_lum(lower1, lower2)

    print('Diff upper lum: {0}'.format(diff_upper_lum))
    print('Diff lower lum: {0}'.format(diff_lower_lum))

    # Showing images
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', np.hstack((image1, image2)))

    cv2.waitKey(0)

    print('Done!')
Code Example #18
def test_view_histogram():
    print('Test view histogram')

    # Loading instances
    instance_pose = ClassOpenPose()

    # Loading images
    image1, _, pose1, _ = ClassDescriptors.load_images_comparision(
        instance_pose, min_score, load_one_img=True)

    # Drawing poses
    ClassDescriptors.draw_pose(image1, pose1, min_score)

    # Showing image
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', image1)

    print('Press a key to continue')
    cv2.waitKey(0)

    cv2.destroyAllWindows()
    print('Done!')
Code Example #19
def main():
    print('Initializing main function')

    # Withdrawing tkinter

    # Loading model dirs
    list_folder_data = [
        ('/home/mauricio/CNN/Classes/Door', 0.05),
        ('/home/mauricio/CNN/Classes/Tires', 0.05),
        ('/home/mauricio/CNN/Classes/Walk', 0.05),
    ]

    list_hmm = []

    for folder_data in list_folder_data:
        label_name = get_label_from_folder(folder_data[0])
        full_model_dir = os.path.join(hnn_model_folder,
                                      '{0}.pkl'.format(label_name))
        list_hmm.append(ClassHMM(full_model_dir))

    # Initializing instances
    instance_pose = ClassOpenPose()
    instance_nn = ClassNN.load_from_params(nn_model_dir)

    option = input('Select 1 to train, 2 to eval hmm, 3 to preprocess: ')

    if option == '1':
        print('Train hmm selection')
        train_hmm(list_folder_data, list_hmm, instance_nn, instance_pose)
    elif option == '2':
        eval_hmm(list_folder_data, list_hmm, instance_nn, instance_pose)
    elif option == '3':
        recalculate = False
        pre_process_images(instance_pose, list_folder_data, recalculate)
    else:
        print('Invalid argument: {0}'.format(option))
Code Example #20
def main():
    print('Initializing main function')
    print('Folder selection')

    folder_images = '/home/mauricio/PosesProcessed/folder_images'
    folder_images_draw = '/home/mauricio/PosesProcessed/folder_images_draw'

    Tk().withdraw()

    # Getting options
    init_dir = '/home/mauricio/Videos/Oviedo'
    options = {'initialdir': init_dir}
    dir_name = askdirectory(**options)

    if not dir_name:
        raise Exception('Directory not selected')

    # Create the directory if it does not exist
    if not os.path.isdir(folder_images):
        os.makedirs(folder_images)

    # Create the directory if it does not exist
    if not os.path.isdir(folder_images_draw):
        os.makedirs(folder_images_draw)

    # Initializing openpose instance
    instance_pose = ClassOpenPose()

    for root, subdirs, files in os.walk(dir_name):
        for file in files:
            full_path = os.path.join(root, file)
            print('Processing {0}'.format(full_path))

            extension = ClassUtils.get_filename_extension(full_path)

            if extension == '.mjpeg':
                file_info = ClassMjpegReader.process_video(full_path)
            else:
                print('Extension ignored: {0}'.format(extension))
                continue

            # Getting idcam from file
            id_cam = full_path.split('/')[-2]
            print('IdCam: {0}'.format(id_cam))

            for index, info in enumerate(file_info):
                print('Processing {0} of {1} from {2}'.format(index, len(file_info), full_path))
                frame = info[0]
                ticks = info[1]

                image_np = np.frombuffer(frame, dtype=np.uint8)
                image = cv2.imdecode(image_np, cv2.IMREAD_ANYCOLOR)

                arr, image_draw = instance_pose.recognize_image_tuple(image)
                min_score = 0.05

                arr_pass = list()
                for elem in arr:
                    if ClassUtils.check_vector_integrity_pos(elem, min_score):
                        arr_pass.append(elem)

                if len(arr_pass) > 0:
                    # Draw rectangles for all candidates
                    for person_arr in arr_pass:
                        pt1, pt2 = ClassUtils.get_rectangle_bounds(person_arr, min_score)
                        cv2.rectangle(image_draw, pt1, pt2, (0, 0, 255), 5)

                    # Writing the raw frame
                    full_path_images = os.path.join(folder_images, '{0}_{1}.jpg'.format(ticks, id_cam))
                    print('Writing image {0}'.format(full_path_images))
                    cv2.imwrite(full_path_images, image)

                    # Writing the annotated frame
                    full_path_draw = os.path.join(folder_images_draw, '{0}_{1}.jpg'.format(ticks, id_cam))
                    print('Writing image {0}'.format(full_path_draw))
                    cv2.imwrite(full_path_draw, image_draw)

    print('Done!')
Code Example #21
def main():
    Tk().withdraw()

    instance_pose = ClassOpenPose()

    print('Initializing main function')
    list_files1 = os.listdir(base1_folder)
    hists_1 = read_hists(base1_folder, list_files1)

    list_files2 = os.listdir(base2_folder)
    hists_2 = read_hists(base2_folder, list_files2)

    cum_hists_1 = ClassDescriptors.get_cumulative_hists(hists_1)
    cum_hists_2 = ClassDescriptors.get_cumulative_hists(hists_2)

    # Processing img
    filename1 = '/home/mauricio/Pictures/Poses/bend_left/636453039344460032_1.jpg'
    filename2 = '/home/mauricio/Pictures/Poses/left_walk/636550795366450048_420.jpg'

    init_dir = '/home/mauricio/Pictures'
    options = {'initialdir': init_dir}
    filename1 = askopenfilename(**options)

    if not filename1:
        filename1 = '/home/mauricio/Pictures/2_1.jpg'

    ext1 = os.path.splitext(filename1)[1]
    if ext1 != '.jpg' and ext1 != '.jpeg':
        raise Exception('Extension1 is not jpg or jpeg')

    print('Loading filename 2')
    filename2 = askopenfilename(**options)

    if not filename2:
        filename2 = '/home/mauricio/Pictures/2_2.jpg'

    ext2 = os.path.splitext(filename2)[1]
    if ext2 != '.jpg' and ext2 != '.jpeg':
        raise Exception('Extension2 is not jpg or jpeg')

    image1 = cv2.imread(filename1)
    if image1 is None:
        raise Exception('Invalid image in filename {0}'.format(filename1))

    image2 = cv2.imread(filename2)
    if image2 is None:
        raise Exception('Invalid image in filename {0}'.format(filename2))

    is_json = True
    new_file_1 = filename1.replace('.jpeg', '.json')
    new_file_1 = new_file_1.replace('.jpg', '.json')

    new_file_2 = filename2.replace('.jpeg', '.json')
    new_file_2 = new_file_2.replace('.jpg', '.json')

    if not os.path.exists(new_file_1):
        print('File not found: {0}'.format(new_file_1))
        is_json = False

    if not os.path.exists(new_file_2):
        print('File not found: {0}'.format(new_file_2))
        is_json = False

    if not is_json:
        poses1 = instance_pose.recognize_image(image1)
        poses2 = instance_pose.recognize_image(image2)

        if len(poses1) != 1:
            raise Exception('Invalid len for pose1: {0}'.format(len(poses1)))
        if len(poses2) != 1:
            raise Exception('Invalid len for pose2: {0}'.format(len(poses2)))
        if not ClassUtils.check_vector_integrity_pos(poses1[0], min_score):
            raise Exception('Pose 1 not valid')
        if not ClassUtils.check_vector_integrity_pos(poses2[0], min_score):
            raise Exception('Pose 2 not valid')

        pose1 = poses1[0]
        pose2 = poses2[0]
    else:
        with open(new_file_1, 'r') as f:
            obj_json1 = json.loads(f.read())

        with open(new_file_2, 'r') as f:
            obj_json2 = json.loads(f.read())

        pose1 = obj_json1['vector']
        pose2 = obj_json2['vector']

        if not ClassUtils.check_vector_integrity_pos(pose1, min_score):
            raise Exception('Pose 1 not valid')
        if not ClassUtils.check_vector_integrity_pos(pose2, min_score):
            raise Exception('Pose 2 not valid')

    # Plotting
    # plot_histograms(hists1)

    # Showing first images without transformation
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)

    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            image1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            image2,
                                                            decode_img=False)

    print('Upper1 {0} - Lower1 {1}'.format(upper1, lower1))
    print('Upper2 {0} - Lower2 {1}'.format(upper2, lower2))

    print('Diff1: {0}'.format(ClassUtils.get_color_diff_rgb(upper1, upper2)))
    print('Diff2: {0}'.format(ClassUtils.get_color_diff_rgb(lower1, lower2)))

    cv2.imshow('main_window', np.hstack((image1, image2)))
    print('Press any key to continue')
    cv2.waitKey(0)

    # Perform image transformation
    image_tr = ClassDescriptors.transform_image(image2, cum_hists_2,
                                                cum_hists_1)
    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            image1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            image_tr,
                                                            decode_img=False)

    print('Diff1: {0}'.format(ClassUtils.get_color_diff_rgb(upper1, upper2)))
    print('Diff2: {0}'.format(ClassUtils.get_color_diff_rgb(lower1, lower2)))

    cv2.imshow('main_window', np.hstack((image1, image_tr)))
    print('Press any key to continue')
    cv2.waitKey(0)

    cv2.destroyAllWindows()
    print('Done!')
Code Example #22
def process_test_color_eq():
    print('Processing color eq')

    # Loading instances
    instance_pose = ClassOpenPose()

    im1, im2, pose1, pose2 = ClassDescriptors.load_images_comparision(
        instance_pose, min_score)

    upper1, lower1 = ClassDescriptors.process_colors_person(pose1,
                                                            min_score,
                                                            im1,
                                                            decode_img=False)
    upper2, lower2 = ClassDescriptors.process_colors_person(pose2,
                                                            min_score,
                                                            im2,
                                                            decode_img=False)

    img_size = 100
    image1 = np.zeros((img_size, img_size, 3), np.uint8)
    image2 = np.zeros((img_size, img_size, 3), np.uint8)
    image3 = np.zeros((img_size, img_size, 3), np.uint8)
    image4 = np.zeros((img_size, img_size, 3), np.uint8)

    # Showing the source images side by side
    cv2.namedWindow('aux_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('aux_window', np.hstack((im1, im2)))

    # Filling the color swatches (OpenCV uses BGR channel order)
    image1[:, :] = (upper1[2], upper1[1], upper1[0])
    image2[:, :] = (lower1[2], lower1[1], lower1[0])

    image3[:, :] = (upper2[2], upper2[1], upper2[0])
    image4[:, :] = (lower2[2], lower2[1], lower2[0])

    # Performing custom comparison first
    diff_upper = ClassUtils.get_color_diff_rgb(upper1, upper2)
    diff_lower = ClassUtils.get_color_diff_rgb(lower1, lower2)

    print('Diff upper: {0}'.format(diff_upper))
    print('Diff lower: {0}'.format(diff_lower))

    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)

    cv2.imshow(
        'main_window',
        np.hstack((np.vstack((image1, image2)), np.vstack((image3, image4)))))
    cv2.waitKey(0)

    # Performing Lab conversion and equalizing L values
    diff_upper_lum = ClassUtils.get_color_diff_rgb_lum(upper1, upper2)
    diff_lower_lum = ClassUtils.get_color_diff_rgb_lum(lower1, lower2)

    # Equalizing luminance between the color pairs
    upper2_eq = ClassUtils.eq_lum_rgb_colors(upper1, upper2)
    lower2_eq = ClassUtils.eq_lum_rgb_colors(lower1, lower2)

    print('Diff upper lum: {0}'.format(diff_upper_lum))
    print('Diff lower lum: {0}'.format(diff_lower_lum))

    image3[:, :] = (upper2_eq[2], upper2_eq[1], upper2_eq[0])
    image4[:, :] = (lower2_eq[2], lower2_eq[1], lower2_eq[0])

    # Showing again image
    cv2.imshow(
        'main_window',
        np.hstack((np.vstack((image1, image2)), np.vstack((image3, image4)))))
    cv2.waitKey(0)

    # Destroying all windows
    cv2.destroyAllWindows()

    print('Done!')
Code Example #23
def test_color_compare_hist(perform_eq=False):
    print('Test color comparison')

    print('Loading image comparison')
    # Loading instances
    instance_pose = ClassOpenPose()

    ignore_json_color = False
    if perform_eq:
        ignore_json_color = True

    # Avoid opening two prompts
    obj_img = ClassDescriptors.load_images_comparision_ext(
        instance_pose,
        min_score,
        load_one_img=True,
        perform_eq=perform_eq,
        ignore_json_color=ignore_json_color)

    # Extract color comparison data from the image
    hist1 = obj_img['listPoints1']

    # Collecting .jpg files from the examples folder
    list_process = list()
    for root, _, files in os.walk(EXAMPLES_FOLDER):
        for file in files:
            full_path = os.path.join(root, file)
            extension = ClassUtils.get_filename_extension(full_path)

            if extension == '.jpg':
                list_process.append(full_path)

    # Sorting list
    list_process.sort()
    """
    list_result = list()
    score_max_pt = -1
    """

    for full_path in list_process:
        print('Processing file: {0}'.format(full_path))
        json_path = full_path.replace('.jpg', '.json')

        with open(json_path, 'r') as f:
            obj_json = json.loads(f.read())

        image2 = cv2.imread(full_path)

        if perform_eq:
            image2 = ClassUtils.equalize_hist(image2)

        if 'vector' in obj_json:
            pose2 = obj_json['vector']
        elif 'vectors' in obj_json:
            pose2 = obj_json['vectors']
        else:
            raise Exception('JSON object has neither a vector nor a vectors property')

        hist2 = ClassDescriptors.get_points_by_pose(image2, pose2, min_score)

        diff = ClassDescriptors.get_kmeans_diff(hist1, hist2)

        # Getting mean Y (luma) from image 2 - used for discarding purposes
        pt1, pt2 = ClassUtils.get_rectangle_bounds(pose2, min_score)
        image2_crop = image2[pt1[1]:pt2[1], pt1[0]:pt2[0]]
        image2_ycc = cv2.cvtColor(image2_crop, cv2.COLOR_BGR2YCrCb)
        mean_y = np.mean(image2_ycc[:, :, 0])

        print('Diff color: {0} - Mean y: {1}'.format(diff, mean_y))
    """
    list_result.sort(key=lambda x: x['score'])
    print('Printing list result')
    print(list_result)
    print('min_score: {0}'.format(score_max_pt))
    """

    print('Done!')
Code Example #24
def get_sets_folder(base_folder, label, instance_pose: ClassOpenPose):
    list_files = os.listdir(base_folder)

    # Split 70/30 into training and evaluation sets
    total_files = len(list_files)
    training = int(total_files * 70 / 100)

    tr_features = list()
    tr_labels = list()

    eval_features = list()
    eval_labels = list()

    for index, file in enumerate(list_files):
        full_name = os.path.join(base_folder, file)

        image = cv2.imread(full_name)

        if image is None:
            raise Exception('Error reading image: {0}'.format(full_name))

        print('Processing image: {0}'.format(full_name))
        arr = instance_pose.recognize_image(image)

        # Selecting array for list of vectors
        person_array = []
        if len(arr) == 0:
            raise Exception('Invalid len for image {0}: {1}'.format(
                full_name, len(arr)))
        elif len(arr) == 1:
            person_array = arr[0]

            integrity = ClassUtils.check_vector_integrity_part(
                person_array, min_score)

            if not integrity:
                raise Exception(
                    'Invalid integrity for points in image: {0}'.format(
                        full_name))
        else:
            found = False
            for arr_index in arr:
                integrity = ClassUtils.check_vector_integrity_part(
                    arr_index, min_score)

                if integrity:
                    found = True
                    person_array = arr_index
                    break

            if not found:
                raise Exception(
                    'Cant find a valid person array for image {0}'.format(
                        full_name))

        results = ClassDescriptors.get_person_descriptors(
            person_array, min_score)

        if index < training:
            tr_features.append(results['angles'])
            tr_labels.append(label)
        else:
            eval_features.append(results['angles'])
            eval_labels.append(label)

    return tr_features, tr_labels, eval_features, eval_labels
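The 70/30 partition above is driven purely by the position of each file in the list; a standalone sketch of the same arithmetic with made-up file names:

list_files = ['img_{0:02d}.jpg'.format(i) for i in range(10)]  # made-up file names

total_files = len(list_files)
training = int(total_files * 70 / 100)  # index of the first evaluation sample

tr_files = list_files[:training]     # first 70% go to training
eval_files = list_files[training:]   # remaining 30% go to evaluation
print(len(tr_files), len(eval_files))  # 7 3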
Code Example #25
def main():
    print('Initializing main function')

    # Initializing instances
    instance_pose = ClassOpenPose()

    # Withdrawing tkinter
    Tk().withdraw()

    # Loading folder images
    folder_images = '/home/mauricio/PosesProcessed/folder_images'
    folder_images_draw = '/home/mauricio/PosesProcessed/folder_images_draw'
    pose_base_folder = '/home/mauricio/Pictures/PosesNew'

    # Reading all elements in list
    list_files = sorted(os.listdir(folder_images_draw))

    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    index = 0

    while True:
        file = list_files[index]
        full_path_draw = os.path.join(folder_images_draw, file)

        image_draw = cv2.imread(full_path_draw)
        cv2.imshow('main_window', image_draw)

        key = cv2.waitKey(0)
        print('Key pressed: {0}'.format(key))
        if key == 52:
            # Left arrow
            index -= 1
            if index < 0:
                index = 0
        elif key == 54:
            # Right arrow
            index += 1
            if index == len(list_files):
                index = len(list_files) - 1
        elif key == 27:
            # Esc
            # Confirm before exiting
            result = messagebox.askyesno("Exit", "Are you sure to exit?")
            print(result)

            if result:
                break
        elif key == 115:
            # Check JSON file
            image_full_path = os.path.join(folder_images, file)
            json_path = image_full_path.replace(".jpg", ".json")

            if not os.path.exists(json_path):
                # Generating JSON array
                image = cv2.imread(image_full_path)
                arr = instance_pose.recognize_image(image).tolist()
            else:
                with open(json_path, 'r') as json_file:
                    arr_str = json_file.read()
                    arr = json.loads(arr_str)

            arr_pass = []
            min_score = 0.05

            # Drawing candidates
            for elem in arr:
                if ClassUtils.check_vector_integrity_part(elem, min_score):
                    arr_pass.append(elem)

            if len(arr_pass) == 0:
                print('Arr pass size equals to zero')
                continue
            elif len(arr_pass) == 1:
                selection_int = 0
                person_arr = arr_pass[0]
                arr_str = json.dumps(person_arr)
            else:
                # Draw rectangles for all candidates
                for index_elem, person_arr in enumerate(arr_pass):
                    pt1, pt2 = ClassUtils.get_rectangle_bounds(person_arr, min_score)
                    cv2.rectangle(image_draw, pt1, pt2, (0, 0, 255), 5)

                    font = cv2.FONT_HERSHEY_SIMPLEX
                    bottom_left_corner = pt2
                    font_scale = 0.6
                    font_color = (255, 255, 255)
                    line_type = 2

                    cv2.putText(image_draw, '{0}'.format(index_elem),
                                bottom_left_corner,
                                font,
                                font_scale,
                                font_color,
                                line_type)

                while True:
                    print('Select image to put')
                    cv2.imshow('main_window', image_draw)
                    wait_key = cv2.waitKey(0)

                    print('Wait Key: {0}'.format(wait_key))
                    selection_int = ClassUtils.key_to_number(wait_key)

                    print('Selection: {0}'.format(selection_int))
                    if 0 <= selection_int < len(arr_pass):
                        break

                person_arr = arr_pass[selection_int]
                arr_str = json.dumps(person_arr)

            # Getting new image using numpy slicing
            pt1, pt2 = ClassUtils.get_rectangle_bounds(person_arr, min_score)
            image_draw = image_draw[pt1[1]:pt2[1], pt1[0]:pt2[0]]

            # Selecting directory after processing image!
            init_dir = '/home/mauricio/Pictures/PosesNew'
            options = {'initialdir': init_dir}
            dir_name = filedialog.askdirectory(**options)

            if not dir_name:
                print('Directory not selected')
            else:
                new_name = ClassUtils.get_filename_no_extension(file)
                new_name = "{0}_{1}.{2}".format(new_name, selection_int, 'jpg')

                new_image_path = os.path.join(dir_name, new_name)
                new_json_path = os.path.join(dir_name, new_name.replace('.jpg', '.json'))

                cv2.imwrite(new_image_path, image_draw)
                with open(new_json_path, 'w') as file:
                    file.write(arr_str)

                print('File copied from {0} to {1}'.format(full_path_draw, new_image_path))
                index += 1
                if index == len(list_files):
                    index = len(list_files) - 1

    cv2.destroyAllWindows()
    print('Done!')
Code Example #26
def main():
    print('Initializing main function')

    # Initializing instances
    instance_pose = ClassOpenPose()
    instance_net = ClassNN.load_from_params(model_dir)

    # Withdrawing Tk window
    Tk().withdraw()

    # Select directory to process
    init_dir = '/home/mauricio/CNN/Images'
    options = {'initialdir': init_dir}
    dir_name = filedialog.askdirectory(**options)

    if not dir_name:
        print('Directory not selected')
    else:
        # Loading images
        list_files = os.listdir(dir_name)
        list_files.sort()

        desc_list = list()

        for file in list_files:
            full_path = os.path.join(dir_name, file)

            print('Processing image {0}'.format(full_path))
            image = cv2.imread(full_path)
            arr = instance_pose.recognize_image(image)

            arr_pass = list()
            for person_arr in arr:
                if ClassUtils.check_vector_integrity_part(
                        person_arr, min_pose_score):
                    arr_pass.append(person_arr)

            if len(arr_pass) != 1:
                print('Invalid len {0} for image {1}'.format(
                    len(arr_pass), full_path))
                continue
            else:
                result_des = ClassDescriptors.get_person_descriptors(
                    arr_pass[0], min_pose_score)
                descriptor_arr = result_des['fullDesc']

                # Add descriptors to list
                desc_list.append(descriptor_arr)

        # Convert to numpy array
        print('Total poses: {0}'.format(len(desc_list)))

        # Transform list and predict
        desc_list_np = np.asarray(desc_list, dtype=np.float)
        print('ndim pose list: {0}'.format(desc_list_np.ndim))

        list_classes = list()
        predict_results = instance_net.predict_model_array(desc_list_np)
        for result in predict_results:
            list_classes.append(result['classes'])

        print('Predict results: {0}'.format(list_classes))
        print('Classes label: {0}'.format(instance_net.label_names))

        print('Done!')
Code Example #27
def main():
    print('Initializing main function')
    Tk().withdraw()

    # filename1 = '/home/mauricio/Pictures/Poses/temp/636550787632290048_419.jpg'
    # filename2 = '/home/mauricio/Pictures/Poses/walk_front/636550801813440000_424.jpg'
    filename1 = '/home/mauricio/Pictures/Poses/bend_left/636453039344460032_1.jpg'
    filename2 = '/home/mauricio/Pictures/Poses/left_walk/636550795366450048_420.jpg'

    print('Select first file')
    
    init_dir = '/home/mauricio/Pictures/Poses'
    options = {'initialdir': init_dir}
    filename1_tmp = askopenfilename(**options)

    if filename1_tmp:
        filename1 = filename1_tmp
    
    print('File selected: {0}'.format(filename1))
    print('Select second file')
    filename2_tmp = askopenfilename(**options)

    if filename2_tmp:
        filename2 = filename2_tmp
    
    print('File selected: {0}'.format(filename2))
    img1 = cv2.imread(filename1)
    img2 = cv2.imread(filename2)

    instance_pose = ClassOpenPose()
    vectors1, img_draw1 = instance_pose.recognize_image_tuple(img1)
    vectors2, img_draw2 = instance_pose.recognize_image_tuple(img2)

    if len(vectors1) != 1:
        raise Exception('Invalid len for vector 1')

    if len(vectors2) != 1:
        raise Exception('Invalid len for vector 2')

    person_vector1 = vectors1[0]
    person_vector2 = vectors2[0]

    min_percent = 0.05

    if not ClassUtils.check_vector_integrity_pos(person_vector1, min_percent):
        raise Exception('Invalid integrity for vector 1')

    if not ClassUtils.check_vector_integrity_pos(person_vector2, min_percent):
        raise Exception('Invalid integrity for vector 2')

    colors1 = ClassDescriptors.process_colors(vectors1, min_percent, img1, decode_img=False)
    colors2 = ClassDescriptors.process_colors(vectors2, min_percent, img2, decode_img=False)

    upper1 = colors1[0][0]
    lower1 = colors1[1][0]
    color_diff1 = ClassUtils.get_color_diff_rgb(upper1, lower1)

    upper2 = colors2[0][0]
    lower2 = colors2[1][0]
    color_diff2 = ClassUtils.get_color_diff_rgb(upper2, lower2)

    diff_upper = ClassUtils.get_color_diff_rgb(upper1, upper2)
    diff_lower = ClassUtils.get_color_diff_rgb(lower1, lower2)
    diff_diff = math.fabs(color_diff1 - color_diff2)

    print('Diff upper: {0}'.format(diff_upper))
    print('Diff lower: {0}'.format(diff_lower))
    print('Diff diffs: {0}'.format(diff_diff))

    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', np.hstack((img_draw1, img_draw2)))

    cv2.waitKey(0)
    cv2.destroyAllWindows()

    print('Done!')
Code Example #28
def main():
    print('Initializing main function')

    # Prompt for user input
    cam_number_str = input('Insert camera number to process: ')
    cam_number = int(cam_number_str)

    # Open video from opencv
    cap = cv2.VideoCapture(cam_number)

    # Initializing open pose distance
    instance_pose = ClassOpenPose()

    # Initializing variables
    model_dir = '/home/mauricio/models/nn_classifier'
    instance_nn = ClassNN.load_from_params(model_dir)

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            print('Frame not available from camera')
            break

        # Processing frame with openpose
        arr, frame = instance_pose.recognize_image_tuple(frame)

        # Check if there is one frame with vector integrity
        arr_pass = list()

        min_score = 0.05
        # Checking vector integrity for all elements
        # Verify there is at least one arm and one leg
        for elem in arr:
            if ClassUtils.check_vector_integrity_pos(elem, min_score):
                arr_pass.append(elem)

        if len(arr_pass) != 1:
            print('Invalid len for arr_pass: {0}'.format(len(arr_pass)))
        else:
            person_array = arr_pass[0]

            # Getting person descriptors
            results = ClassDescriptors.get_person_descriptors(
                person_array, min_score)

            # Descriptors
            data_to_add = results['angles']
            data_to_add += ClassUtils.get_flat_list(
                results['transformedPoints'])

            data_np = np.asanyarray(data_to_add, dtype=np.float)

            # Getting result predict
            result_predict = instance_nn.predict_model(data_np)
            detected_class = result_predict['classes']

            label_class = get_label_name(instance_nn.label_names,
                                         detected_class)
            print('Detected: {0} - Label: {1}'.format(detected_class,
                                                      label_class))

            # Draw text class into image - Evaluation purposes
            font = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.6
            font_color = (255, 255, 255)
            line_type = 2

            # Note: putText's origin is the text baseline, so (0, 0) would fall off-image
            cv2.putText(frame, '{0}'.format(label_class), (10, 30), font,
                        font_scale, font_color, line_type)

        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()

    print('Done!')