def code_main(my_detector, multipeople_classifier, multiperson_tracker, image):
    ''' Detect skeletons in one frame, track them across frames, classify each
        person's action, draw the results onto the image, and return
        (human count, predicted label of the person with the smallest id,
        JPEG-encoded image bytes). '''
    try:
        humans = my_detector.detect(image)
        count_human = len(humans)
        skeletons, scale_y = my_detector.humans_to_skelsList(humans)
        skeletons = remove_skeletons_with_few_joints(skeletons)

        # Track skeletons across frames: int id -> np.array() skeleton
        dict_id2skeleton = multiperson_tracker.track(skeletons)
        min_id = min(dict_id2skeleton.keys())  # raises ValueError if nobody is tracked

        # Classify the action of every tracked person
        dict_id2label = multipeople_classifier.classify(dict_id2skeleton)
        label = dict_id2label[min_id]
        print("predicted label is :", label)

        my_detector.draw(image, humans)  # Draw all skeletons

        # Draw an outer box and action label for each person
        for id, person_label in dict_id2label.items():
            skeleton = dict_id2skeleton[id]
            skeleton[1::2] = skeleton[1::2] / scale_y  # scale y back to the original image size
            drawActionResult(image, id, skeleton, person_label)

        draw_resulf(image, label)
        image_disp = image
        # image_disp = add_white_region_to_left_of_image(image_disp)
        # multipeople_classifier.get(id='min').draw_scores_onto_image(image_disp)

        ret, jpeg = cv2.imencode('.jpg', image_disp)
        return count_human, label, jpeg.tobytes()
    except Exception:
        # No human was detected (or another step failed): return the raw frame
        print('no_human')
        ret, jpeg = cv2.imencode('.jpg', image)
        return 'none', 'none', jpeg.tobytes()
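
# A minimal, hypothetical usage sketch for code_main inside a frame-grabbing
# loop. Only code_main's own call/return contract is taken from the function
# above; the detector/classifier/tracker objects are assumed to be constructed
# elsewhere in this project, and the capture loop below is illustrative only.
#
#     cap = cv2.VideoCapture(0)
#     while True:
#         ok, frame = cap.read()
#         if not ok:
#             break
#         n_humans, action_label, jpeg_bytes = code_main(
#             my_detector, multipeople_classifier, multiperson_tracker, frame)
#         # jpeg_bytes is a JPEG-encoded frame, ready to be streamed or saved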
        # Keep only the person with the smallest id and attach the
        # ground-truth action label (img_action_type) to it
        dict_id2skeleton = {min_id: dict_id2skeleton[min_id]}
        dict_id2label = {min_id: img_action_type}
        print("Ground_truth label is :", dict_id2label[min_id])

        # -- Draw
        my_detector.draw(image_disp, humans)  # Draw all skeletons

        if len(dict_id2skeleton):

            # Draw outer box and label for each person
            for id, label in dict_id2label.items():
                skeleton = dict_id2skeleton[id]
                # scale the y data back to the original image size
                skeleton[1::2] = skeleton[1::2] / scale_y
                # print("Drawing skeleton: ", dict_id2skeleton[id], "with label:", label, ".")
                drawActionResult(image_disp, id, skeleton, label)

        # Add blank to the left for displaying prediction scores of each class
        image_disp = add_white_region_to_left_of_image(image_disp)
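        # add_white_region_to_left_of_image is defined elsewhere in this
        # project; the rough idea (a sketch under assumptions, not the actual
        # implementation) is to pad a white strip onto the left, e.g. with numpy:
        #
        #     h, w = image_disp.shape[:2]
        #     white = 255 * np.ones((h, int(0.3 * w), 3), dtype=image_disp.dtype)
        #     image_disp = np.hstack((white, image_disp))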

        # Draw the prediction scores for only one person (no loop needed)
        if DO_INFER_ACTIONS and len(dict_id2skeleton):
            multipeople_classifier.get(
                id='min').draw_scores_onto_image(image_disp)

        # -- Write skeleton.txt and image.png
        if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE:

            ids = sorted(dict_id2skeleton.keys())
            skel_to_save = [
                img_info + dict_id2skeleton[id].tolist() for id in ids]
                # When actions are not inferred, use the ground-truth action
                # type as the label
                prediced_label = action_type
                print("Ground_truth label is :", prediced_label)

                # Draw all skeletons (only once, on the first skeleton)
                if ith_skel == 0:
                    my_detector.draw(image_disp, humans)

                if DO_INFER_ACTIONS:
                    # Draw the per-class prediction scores
                    classifier.draw_scores_onto_image(image_disp)

                    # Draw bounding box and action type
                    drawActionResult(
                        image_disp,
                        SkeletonDetector.get_ith_skeleton(
                            skelsList_no_scale_y, target_idx), prediced_label)
        else:
            # tracker.reset()  # clear the previous tracking state
            classifier.reset()  # clear the classifier's deque of recent skeletons

        # Write result to txt/png
        if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE:
            skel_to_save = []

            for ith_skel in range(len(skelsList)):
                skel_to_save.append(img_info +
                                    SkeletonDetector.get_ith_skeleton(
                                        skelsList, ith_skel).tolist())

            myio.save_skeletons(