# Beispiel #1 (Example #1)
# 0
def main():
    """Run the pose camera until nobody has been in frame for
    BACKGROUND_DELAY seconds, then report the background as locked
    through the overlay's return value."""
    locked = False
    empty_since = time.monotonic()

    def run_inference(engine, input_tensor):
        # Thin pass-through: pose_camera supplies the tensor, the engine runs it.
        return engine.run_inference(input_tensor)

    def render_overlay(engine, output, src_size, inference_box):
        nonlocal empty_since, locked
        canvas = svgwrite.Drawing('', size=src_size)
        # NOTE(review): `output` is unused and ParseOutput() takes no argument
        # here, unlike sibling examples that call ParseOutput(output) —
        # presumably a different engine API version; confirm against the engine.
        poses, _inference_time = engine.ParseOutput()
        now = time.monotonic()

        if not locked:
            print('Waiting for everyone to leave the frame...')
            pose_camera.shadow_text(canvas, 10, 20,
                                    'Waiting for everyone to leave the frame...')
            if poses:
                # Someone is still visible: restart the empty-frame countdown.
                empty_since = now
            elif now > empty_since + BACKGROUND_DELAY:
                # Frame has been empty long enough — lock the background.
                locked = True
                print('Background set.')

        for pose in poses:
            pose_camera.draw_pose(canvas, pose, src_size, inference_box)

        return (canvas.tostring(), locked)

    pose_camera.run(run_inference, render_overlay)
def main():
    """Background-substitution demo: once the frame has stayed empty for
    BACKGROUND_DELAY seconds, freeze that frame and display it from then on."""
    frozen_frame = None
    empty_since = time.monotonic()

    def render_overlay(engine, image, svg_canvas):
        nonlocal empty_since, frozen_frame
        poses, _inference_time = engine.DetectPosesInImage(image)
        now = time.monotonic()

        if frozen_frame is not None:
            # Background already captured — always show it instead of live video.
            image = frozen_frame
        else:
            pose_camera.shadow_text(
                svg_canvas, 10, 20,
                'Waiting for everyone to leave the frame...')
            if poses:
                # People still visible: restart the empty-frame countdown.
                print('Waiting for everyone to leave the frame...')
                empty_since = now
            elif now > empty_since + BACKGROUND_DELAY:
                # Frame has been empty long enough — capture it as background.
                frozen_frame = image
                print('Background set.')

        for pose in poses:
            pose_camera.draw_pose(svg_canvas, pose)

        return image

    pose_camera.run(render_overlay, use_appsrc=True)
# Beispiel #3 (Example #3)
# 0
def main():
    """Theremin-style demo: each tracked pose's wrist heights drive a
    FluidSynth voice (left wrist = volume, right wrist = pitch)."""
    tracker = PoseTracker()
    synth = fluidsynth.Synth()

    synth.start('alsa')
    sfid = synth.sfload('/usr/share/sounds/sf2/FluidR3_GM.sf2')
    for chan, patch in enumerate(CHANNELS):
        synth.program_select(chan, sfid, 0, patch)

    held_notes = set()

    def run_inference(engine, input_tensor):
        # Thin pass-through to the inference engine.
        return engine.run_inference(input_tensor)

    def render_overlay(engine, output, src_size, inference_box):
        nonlocal held_notes
        canvas = svgwrite.Drawing('', size=src_size)
        raw_poses, _inference_time = engine.ParseOutput(output)

        # Keep only poses with at least one confident keypoint, then give
        # each one a stable id so identities persist across frames.
        poses = []
        for raw in raw_poses:
            candidate = Pose(raw, 0.2)
            if candidate.keypoints:
                poses.append(candidate)
        tracker.assign_pose_ids(poses)

        sounding = {}
        for pose in poses:
            lw = pose.keypoints.get('left wrist')
            rw = pose.keypoints.get('right wrist')
            if not (lw and rw):
                continue  # both wrists are needed to derive a note

            identity = IDENTITIES[pose.id % len(IDENTITIES)]
            # Normalize wrist height into [0, 1]; higher wrist -> larger value.
            left_h = 1 - lw.yx[0] / engine.image_height
            right_h = 1 - rw.yx[0] / engine.image_height
            velocity = int(left_h * 100)
            step = int(right_h * identity.extent)
            note = (identity.base_note + OCTAVE * (step // len(SCALE)) +
                    SCALE[step % len(SCALE)])
            sounding[(identity.channel, note)] = velocity

        # Release notes no longer sounding, then strike the newly added ones.
        for key in held_notes:
            if key not in sounding:
                synth.noteoff(*key)
        for key, vel in sounding.items():
            if key not in held_notes:
                synth.noteon(*key, vel)
        held_notes = sounding.keys()

        for pose in poses:
            identity = IDENTITIES[pose.id % len(IDENTITIES)]
            pose_camera.draw_pose(canvas,
                                  pose,
                                  src_size,
                                  inference_box,
                                  color=identity.color)

        return (canvas.tostring(), False)

    pose_camera.run(run_inference, render_overlay)
# Beispiel #4 (Example #4)
# 0
def main():
    """Fly a drone with body poses: normalized wrist positions are mapped
    onto RC channel overrides (channels 1-4) sent through DroneKit's
    `vehicle.channels.overrides`, while detected poses are drawn as an SVG
    overlay.

    Cleanups vs. the original: removed dead commented-out scaling code, an
    unused `velocities` dict, and an unused enumerate index.
    """
    pose_tracker = PoseTracker()

    def run_inference(engine, input_tensor):
        # Thin pass-through: pose_camera supplies the tensor, the engine runs it.
        return engine.run_inference(input_tensor)

    def render_overlay(engine, output, src_size, inference_box):
        svg_canvas = svgwrite.Drawing('', size=src_size)
        outputs, _inference_time = engine.ParseOutput(output)

        # Keep only poses with at least one confident keypoint, then give
        # each one a stable id so identities persist across frames.
        poses = [
            pose for pose in (Pose(pose, 0.2) for pose in outputs)
            if pose.keypoints
        ]
        pose_tracker.assign_pose_ids(poses)

        for pose in poses:
            left = pose.keypoints.get('left wrist')
            right = pose.keypoints.get('right wrist')
            if not (left and right):
                continue  # both wrists are needed to compute all four channels

            # Normalize wrist coordinates into [0, 1] (1 - coord/size, so
            # larger values mean higher / further left in the image).
            lefty = 1 - left.yx[0] / engine.image_height
            leftx = 1 - left.yx[1] / engine.image_width
            righty = 1 - right.yx[0] / engine.image_height
            rightx = 1 - right.yx[1] / engine.image_width

            # Scale into RC PWM-style values around 1200. These ranges were
            # deliberately narrowed from earlier tuning to make control easier.
            ch3 = int(lefty * 600) + 1200
            ch4 = int(leftx * 1200) + 1200
            ch2 = int(righty * 600) + 1200
            ch1 = int((rightx - 0.5) * 1200) + 1200

            print(ch1, ch2, ch3, ch4)
            vehicle.channels.overrides = {
                '1': ch1,
                '2': ch2,
                '3': ch3,
                '4': ch4,
                '5': 1000,
                '6': 1000,
                '7': 1000,
                '8': 1000,
            }
            # Throttle the command stream to ~10 Hz so the autopilot's
            # buffers don't overflow and introduce lag.
            time.sleep(0.1)

        for pose in poses:
            identity = IDENTITIES[pose.id % len(IDENTITIES)]
            pose_camera.draw_pose(svg_canvas,
                                  pose,
                                  src_size,
                                  inference_box,
                                  color=identity.color)

        return (svg_canvas.tostring(), False)

    pose_camera.run(run_inference, render_overlay)