Example no. 1
import base64
from io import BytesIO

import cv2
import numpy as np
from flask import request
from PIL import Image

import label_img_scene  # project-specific scene classifier


def getimage():
    try:
        # The request body is a data URL ("data:image/...;base64,<payload>");
        # keep only the base64 payload after the comma.
        data = request.data.decode().split(",")[1]
        body = base64.decodebytes(data.encode("utf-8"))
        img = np.array(Image.open(BytesIO(body)))
        # PIL decodes to RGB; cvtColor with COLOR_BGR2RGB simply swaps the
        # first and third channels, yielding the BGR order OpenCV works with.
        bgr_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Debug preview:
        # cv2.imshow("scene", bgr_img); cv2.waitKey(0); cv2.destroyAllWindows()
        scene_class = label_img_scene.classify(bgr_img)
        # Alternative: wrap the label in a CORS-enabled JSON response
        # instead of returning the raw string:
        # response = jsonify({"scene": scene_class})
        # response.headers.add("Access-Control-Allow-Origin", "http://localhost:8001/")
        # response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization")
        # response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE")
        # response.headers.add("Access-Control-Allow-Credentials", "true")
        return scene_class

    except Exception as e:
        print(e)
        return "error"
Example no. 2
    logger.debug('+image processing+')
    start_time = time.time()
    humans = e.inference(image, upsample_size=4.0)
    logger.debug('+postprocessing+')
    # Overlay the detected skeletons on the original frame for display.
    img = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

    logger.debug('+classification+')
    # Keep only the skeletal structure: redraw the detected humans on a
    # plain white canvas so the pose classifier sees the skeleton alone.
    image = np.zeros(image.shape, dtype=np.uint8)
    image.fill(255)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

    # Classification
    pose_class = label_img.classify(image)
    scene_class = label_img_scene.classify(args.image)
    end_time = time.time()
    logger.debug('+displaying+')
    cv2.putText(img, "Predicted Pose: %s" % (pose_class), (10, 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(img, "Predicted Scene: %s" % (scene_class), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    print('\n Overall Evaluation time (1-image): {:.3f}s\n'.format(end_time - start_time))
    cv2.imwrite('show1.png', img)
    cv2.imshow('tf-human-action-classification result', img)
    cv2.waitKey(0)
    logger.debug('+finished+')
    cv2.destroyAllWindows()
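
The white canvas built with np.zeros followed by fill(255) can be expressed
in one step with np.full; a minimal equivalent sketch:

import numpy as np

# One-step equivalent of np.zeros(...) + fill(255): a white canvas with the
# same shape as the input frame.
canvas = np.full(image.shape, 255, dtype=np.uint8)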

# =============================================================================
Example no. 3
    logger.debug('+image processing+')
    start_time = time.time()
    humans = e.inference(image, upsample_size=4.0)
    logger.debug('+postprocessing+')
    # Overlay the detected skeletons on the original frame for display.
    img = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

    logger.debug('+classification+')
    # Keep only the skeletal structure: redraw the detected humans on a
    # plain white canvas so the pose classifier sees the skeleton alone.
    image = np.zeros(image.shape, dtype=np.uint8)
    image.fill(255)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

    # Classification
    pose_class = label_img.classify(image)
    scene_class = label_img_scene.classify(address + args.image)
    end_time = time.time()
    logger.debug('+displaying+')
    cv2.putText(img, "Predicted Pose: %s" % (pose_class), (10, 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(img, "Predicted Scene: %s" % (scene_class), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    print('\n Overall Evaluation time (1-image): {:.3f}s\n'.format(end_time - start_time))
    cv2.imwrite('show1.png', img)
    cv2.imshow('tf-human-action-classification result', img)
    cv2.waitKey(0)
    logger.debug('+finished+')
    cv2.destroyAllWindows()
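
The only difference from Example no. 2 is that the scene classifier receives
the image path prefixed with address. Plain string concatenation only works
when address already ends with a separator; os.path.join is the safer
spelling, assuming address names a directory:

import os

# Joins correctly whether or not address ends with a path separator.
scene_class = label_img_scene.classify(os.path.join(address, args.image))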

# =============================================================================
Example no. 4
        logger.debug("+image processing+")
        logger.debug("+postprocessing+")
        start_time = time.time()
        humans = e.inference(image, upsample_size=4.0)
        img = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        logger.debug("+classification+")
        # Keep only the skeletal structure: redraw the detected humans on a
        # plain white canvas so the pose classifier sees the skeleton alone.
        image = np.zeros(image.shape, dtype=np.uint8)
        image.fill(255)
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        # Classification
        pose_class = label_img.classify(image)
        scene_class = label_img_scene.classify(address + file)
        end_time = time.time()
        img_count += 1
        total_time += end_time - start_time
        logger.debug("+Completed image: {}+".format(img_count))
        # Note: despite the names, these lists collect the predicted labels.
        true_label_pose.append(pose_class)
        true_label_scene.append(scene_class)
        img_name.append(file)

    outF = open("pose.txt", "w")
    for line in true_label_pose:
        outF.write(line)
        outF.write("\n")
    outF.close()

    outF = open("scene.txt", "w")