# NOTE: the Class* helpers (ClassOpenPose, ClassNN, ClassHMM, ClassUtils,
# ClassDescriptors) and module-level names such as hnn_model_folder,
# nn_model_dir, model_dir and min_pose_score are assumed to be provided by
# the surrounding pose-classification project.
import os

import cv2
import numpy as np
from tkinter import Tk, filedialog


# Train or evaluate one HMM per activity class, or pre-process the class folders
def main():
    print('Initializing main function')

    # Class data folders, each paired with a threshold value
    list_folder_data = [
        ('/home/mauricio/CNN/Classes/Door', 0.05),
        ('/home/mauricio/CNN/Classes/Tires', 0.05),
        ('/home/mauricio/CNN/Classes/Walk', 0.05),
    ]

    list_hmm = []

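    # Build one ClassHMM per class, each backed by a pickled model file
    # named after the folder's label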
    for folder_data in list_folder_data:
        label_name = get_label_from_folder(folder_data[0])
        full_model_dir = os.path.join(hnn_model_folder,
                                      '{0}.pkl'.format(label_name))
        list_hmm.append(ClassHMM(full_model_dir))

    # Initializing instances
    instance_pose = ClassOpenPose()
    instance_nn = ClassNN.load_from_params(nn_model_dir)

    option = input('Select 1 to train, 2 to eval hmm, 3 to preprocess: ')

    if option == '1':
        print('Train hmm selection')
        train_hmm(list_folder_data, list_hmm, instance_nn, instance_pose)
    elif option == '2':
        eval_hmm(list_folder_data, list_hmm, instance_nn, instance_pose)
    elif option == '3':
        recalculate = False
        pre_process_images(instance_pose, list_folder_data, recalculate)
    else:
        print('Invalid argument: {0}'.format(option))
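

# Classify the pose in every image of a user-selected directory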
def main():
    print('Initializing main function')

    # Initializing instances
    instance_pose = ClassOpenPose()
    instance_net = ClassNN.load_from_params(model_dir)

    # Hide the tkinter root window so only the file dialog is shown
    Tk().withdraw()

    # Select directory to process
    init_dir = '/home/mauricio/CNN/Images'
    options = {'initialdir': init_dir}
    dir_name = filedialog.askdirectory(**options)

    if not dir_name:
        print('Directory not selected')
    else:
        # Loading images
        list_files = os.listdir(dir_name)
        list_files.sort()

        desc_list = list()

        for file in list_files:
            full_path = os.path.join(dir_name, file)

            print('Processing image {0}'.format(full_path))
            image = cv2.imread(full_path)

            # Skip files that OpenCV cannot decode as images
            if image is None:
                print('Could not read image {0}'.format(full_path))
                continue

            arr = instance_pose.recognize_image(image)

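            # Keep only the detections whose keypoint vector passes the
            # integrity check at min_pose_score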
            arr_pass = list()
            for person_arr in arr:
                if ClassUtils.check_vector_integrity_part(
                        person_arr, min_pose_score):
                    arr_pass.append(person_arr)

            if len(arr_pass) != 1:
                print('Invalid len {0} for image {1}'.format(
                    len(arr_pass), full_path))
                continue

            result_des = ClassDescriptors.get_person_descriptors(
                arr_pass[0], min_pose_score)
            descriptor_arr = result_des['fullDesc']

            # Add descriptors to list
            desc_list.append(descriptor_arr)

        print('Total poses: {0}'.format(len(desc_list)))

        # Convert the descriptor list to a numpy array and predict
        desc_list_np = np.asarray(desc_list, dtype=float)
        print('ndim pose list: {0}'.format(desc_list_np.ndim))

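        # Collect the predicted class for every pose descriptor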
        list_classes = list()
        predict_results = instance_net.predict_model_array(desc_list_np)
        for result in predict_results:
            list_classes.append(result['classes'])

        print('Predict results: {0}'.format(list_classes))
        print('Classes label: {0}'.format(instance_net.label_names))

        print('Done!')
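

# Classify poses live from a camera stream, drawing the predicted label on each frame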
def main():
    print('Initializing main function')

    # Prompt for user input
    cam_number_str = input('Insert camera number to process: ')
    cam_number = int(cam_number_str)

    # Open video capture from opencv
    cap = cv2.VideoCapture(cam_number)

    if not cap.isOpened():
        print('Could not open camera {0}'.format(cam_number))
        return

    # Initializing openpose instance
    instance_pose = ClassOpenPose()

    # Initializing variables
    model_dir = '/home/mauricio/models/nn_classifier'
    instance_nn = ClassNN.load_from_params(model_dir)

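    # Capture, classify and display frames until 'q' is pressed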
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # Stop when no frame could be read from the camera
        if not ret:
            print('No frame received from camera - stopping')
            break

        # Processing frame with openpose
        arr, frame = instance_pose.recognize_image_tuple(frame)

        # Check if there is one frame with vector integrity
        arr_pass = list()

        min_score = 0.05
        # Checking vector integrity for all elements
        # Verify there is at least one arm and one leg
        for elem in arr:
            if ClassUtils.check_vector_integrity_pos(elem, min_score):
                arr_pass.append(elem)

        if len(arr_pass) != 1:
            print('Invalid len for arr_pass: {0}'.format(len(arr_pass)))
        else:
            person_array = arr_pass[0]

            # Getting person descriptors
            results = ClassDescriptors.get_person_descriptors(
                person_array, min_score)

            # Build the feature vector: joint angles followed by the
            # flattened transformed key points (copy the list so the
            # results dict is not modified in place)
            data_to_add = list(results['angles'])
            data_to_add += ClassUtils.get_flat_list(
                results['transformedPoints'])

            data_np = np.asarray(data_to_add, dtype=float)

            # Getting result predict
            result_predict = instance_nn.predict_model(data_np)
            detected_class = result_predict['classes']

            label_class = get_label_name(instance_nn.label_names,
                                         detected_class)
            print('Detected: {0} - Label: {1}'.format(detected_class,
                                                      label_class))

            # Draw the detected class label into the image - evaluation purposes
            # putText's origin is the bottom-left corner of the text, so place
            # it just inside the top-left corner of the frame
            font = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.6
            font_color = (255, 255, 255)
            thickness = 2

            cv2.putText(frame, '{0}'.format(label_class), (10, 30), font,
                        font_scale, font_color, thickness)

        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()

    print('Done!')
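

# Entry point (assumed): run main() when the module is executed as a script
if __name__ == '__main__':
    main()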