Code example #1
0
File: body_seg.py  Project: Tenfleques/h-track
    def __init__(
            self,
            img,
            model_path='./bodypix_resnet50_float_model-stride16/model.json',
            output_stride=16):
        """Load a BodyPix TF graph and cache its I/O tensor handles.

        img: source image to segment, stored as-is (presumably a numpy
             frame -- TODO confirm against the caller).
        model_path: path to a tfjs-converted BodyPix model.json.
        output_stride: stride the model was exported with (16 here);
             stored for later use by other methods.
        """
        print("[INFO] Loading model...")
        # load_graph_model / get_*_tensors are project helpers defined
        # elsewhere (simple_bodypix_python-style utilities).
        self.graph = load_graph_model(model_path)
        print("[INFO] Loaded model...")
        self.output_stride = output_stride

        self.img = img

        # Get input and output tensors
        self.input_tensor_names = get_input_tensors(self.graph)
        print(self.input_tensor_names)
        self.output_tensor_names = get_output_tensors(self.graph)
        print(self.output_tensor_names)
        self.input_tensor = self.graph.get_tensor_by_name(
            self.input_tensor_names[0])
        # NOTE(review): this excerpt appears truncated -- the matching
        # output-tensor lookup is likely missing from the visible snippet.
Code example #2
0
# Keypoint-name pairs resolved to (id, id) index pairs for skeleton edges.
CONNECTED_KEYPOINT_INDICES = [
    (KEYPOINT_IDS[first], KEYPOINT_IDS[second])
    for first, second in CONNECTED_KEYPOINTS_NAMES
]

# The 24 BodyPix part-segmentation channels, in model output order.
PART_CHANNELS = [
    'left_face',
    'right_face',
    'left_upper_arm_front',
    'left_upper_arm_back',
    'right_upper_arm_front',
    'right_upper_arm_back',
    'left_lower_arm_front',
    'left_lower_arm_back',
    'right_lower_arm_front',
    'right_lower_arm_back',
    'left_hand',
    'right_hand',
    'torso_front',
    'torso_back',
    'left_upper_leg_front',
    'left_upper_leg_back',
    'right_upper_leg_front',
    'right_upper_leg_back',
    'left_lower_leg_front',
    'left_lower_leg_back',
    'right_lower_leg_front',
    'right_lower_leg_back',
    'left_feet',
    'right_feet',
]

# Load the tfjs BodyPix graph once at script level.
# NOTE(review): modelPath is not defined in this excerpt -- it is set
# elsewhere in the original file.
print("Loading model...", end="")
graph = load_graph_model(modelPath)  # downloaded from the link above
print("done.\nLoading sample image...", end="")


def getBoundingBox(keypointPositions, offset=(10, 10, 10, 10)):
    """Compute the axis-aligned bounding box of a set of (x, y) keypoints.

    keypointPositions: iterable of (x, y) coordinate pairs.
    offset: padding to apply around the box -- presumably
            (left, top, right, bottom); the code that consumes it is not
            visible in this excerpt, so confirm before relying on it.

    NOTE(review): this snippet is truncated -- the maxY update and the
    return statement are missing from the visible portion.
    """
    # Start from +/- infinity so any real coordinate replaces the seed,
    # and a single keypoint still yields a valid (degenerate) box.
    minX = math.inf
    minY = math.inf
    maxX = -math.inf
    maxY = -math.inf
    for x, y in keypointPositions:
        if (x < minX):
            minX = x
        if (y < minY):
            minY = y
        if (x > maxX):
            maxX = x
Code example #3
0
def pil2cv(image):
    """Convert a PIL-style RGB(A) image into an OpenCV BGR(A) uint8 array.

    Grayscale (2-D) input is returned as uint8 with no channel
    reordering; 3-channel input is converted RGB->BGR and 4-channel
    input RGBA->BGRA. Any other channel count passes through unchanged.
    """
    converted = np.array(image, dtype=np.uint8)  # always copies into uint8
    if converted.ndim == 2:
        return converted  # single channel: nothing to reorder
    channels = converted.shape[2]
    if channels == 3:
        converted = cv2.cvtColor(converted, cv2.COLOR_RGB2BGR)
    elif channels == 4:
        converted = cv2.cvtColor(converted, cv2.COLOR_RGBA2BGRA)
    return converted

# --- BodyPix webcam-inference setup (script level) ---
OutputStride = 16  # must match the stride the model was exported with
modelPath = './bodypix_mobilenet_float_050_model-stride16/model.json'
#modelPath = './bodypix_resnet50_float_model-stride16/model.json'

print("Loading model...", end="")
graph = load_graph_model(modelPath)
print("done.\nLoading sample image...", end="")

#capture = cv2.VideoCapture("out.mp4")
capture = cv2.VideoCapture(0)

# Resolve the graph's input/output tensors once, outside the frame loop.
input_tensor_names = get_input_tensors(graph)
output_tensor_names = get_output_tensors(graph)
input_tensor = graph.get_tensor_by_name(input_tensor_names[0])

# TF1-style session over the imported tfjs graph.
sess = tf.compat.v1.Session(graph=graph)

while(capture.isOpened()):
	# NOTE(review): ret is never checked -- a failed read leaves image as
	# None and image.shape below raises AttributeError.
	ret, image = capture.read()
	InputImageShape = image.shape
	# Snap the width to a multiple of the stride, plus 1 (BodyPix input
	# convention); the rest of the loop body is truncated in this excerpt.
	targetWidth = (InputImageShape[1] // OutputStride) * OutputStride + 1
Code example #4
0
File: convert.py  Project: yath/bodypix_tflite
import tensorflow as tf
import sys
import os
sys.path.insert(0, (os.path.dirname(__file__) or ".") +
                "/deps/simple_bodypix_python")
import utils

# Expect exactly six CLI arguments after the program name:
#   tfjs_model.json graphdef_out width height pu_type output_suffix out_file
# NOTE(review): graphdef_filename and pu_type are unpacked but unused in
# the visible portion of this script.
tfjs_filename, graphdef_filename, width, height, pu_type, output_tensor_suffix, output_filename = \
        sys.argv[1:]

graph = utils.load_graph_model(tfjs_filename)

# The model must have exactly one input node.
input_tensors = utils.get_input_nodes(graph)
assert len(input_tensors) == 1

input_tensor_name = input_tensors[0].name
input_tensor_shape = input_tensors[0].shape
# Pin the dynamic spatial dimensions to concrete sizes before conversion.
# NOTE(review): for NHWC image tensors, shape[1] is height and shape[2]
# is width -- assigning `width` to [1] and `height` to [2] looks swapped;
# confirm the model's layout (or the CLI argument naming) before relying
# on this.
assert input_tensor_shape[1] is None
input_tensor_shape[1] = int(width)
assert input_tensor_shape[2] is None
input_tensor_shape[2] = int(height)

print("Selected input tensor {} with shape {}".format(input_tensor_name,
                                                      input_tensor_shape))

# Select the single output tensor whose name matches the requested suffix.
output_tensors = utils.get_output_nodes(graph)
output_tensor = [
    t for t in output_tensors if t.name.endswith(output_tensor_suffix)
]
assert len(output_tensor) == 1
output_tensor = output_tensor[0]