Example #1
import os
import time

import Record  # project-local module wrapping the camera hardware (import form assumed)


def main():
    # Generate recording object
    reco_cam = Record.Camera(saving_directory="./")
    video_name = "test_record.mp4"
    reco_time = 10

    # Start recording:
    print("Starting recording for {} sec.".format(reco_time))
    reco_cam.record(video_name)

    time.sleep(reco_time)

    # Stop recording
    reco_cam.stop()
    time.sleep(2)
    print("Recording finished.")

    # Check if video is saved:
    files = os.listdir('.')
    if video_name in files:
        print("{} found!".format(video_name))
    else:
        print("Video not saved: {}".format(files))
Example #2
import argparse
import time

import numpy as np
import pyaudio
import tflite_runtime.interpreter as tflite  # assumed; "from tensorflow import lite as tflite" also works

import Record  # project-local module wrapping the camera and LED hardware (import form assumed)
# find_index_usb_audio, extract_feature and DRONE_STATE are project-local
# helpers assumed to be defined alongside this script.


def main():
    ## Camera and status LED for recording
    reco_cam = Record.Camera()
    reco_led = Record.LED()
    vid_id = 0
    recording = False
    stop_count = 0

    ## Model file
    parser = argparse.ArgumentParser()
    parser.add_argument('-m',
                        '--model',
                        help="Model file (.tflite)",
                        dest='model_file',
                        required=True)
    args = parser.parse_args()
    print("Loading {}.".format(args.model_file))

    interpreter = tflite.Interpreter(model_path=args.model_file)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    input_shape = input_details[0]['shape']
    output_details = interpreter.get_output_details()

    ## Get Audio Stream
    audio_interface = pyaudio.PyAudio()
    usb_audio_i = find_index_usb_audio(audio_interface)
    form_1 = pyaudio.paInt16  # 16-bit resolution
    chans = 1  # 1 channel (mono)
    sample_rate = 22050  # 22.05 kHz sampling rate
    chunk = 1 * sample_rate  # one second of samples per buffer

    print("Audio parameters:")
    print(" {} | {} | {} | {}".format(form_1, chans, sample_rate, chunk))
    audio_stream = audio_interface.open(format=form_1,
                                        rate=sample_rate,
                                        channels=chans,
                                        input_device_index=usb_audio_i,
                                        input=True,
                                        frames_per_buffer=chunk)
    print("\nAudio ready.")

    # Main Loop
    audio_stream.start_stream()

    try:
        while True:
            data = np.frombuffer(audio_stream.read(chunk), dtype=np.int16)

            # Pause the audio stream
            audio_stream.stop_stream()
            print("\nData: {}".format(data.shape))
            start = time.time()

            # Extract feature:
            feature = extract_feature(data)

            ## TODO add feature.shape == input_shape check as test.
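            # Minimal sketch of the check the TODO above describes
            # (assumes extract_feature returns a numpy array):
            if tuple(feature.shape) != tuple(input_shape):
                raise ValueError("feature shape {} != model input shape {}"
                                 .format(feature.shape, input_shape))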

            interpreter.set_tensor(input_details[0]['index'], feature)
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_details[0]['index'])

            elapsed_time = time.time() - start
            print("Feature ~ Inference in {} ms.".format(elapsed_time * 1000))

            ## Post processing:
            current_state = DRONE_STATE[np.argmax(output_data)]
            print("Current STATE: {}".format(current_state))

            ## Simple recording logic: start on ARMED, stop after repeated DISARMED.
            if current_state == "ARMED" and not recording:
                vid_name = "vid_test_" + str(vid_id) + ".mp4"
                reco_cam.record(vid_name)
                reco_led.blink()
                recording = True
                stop_count = 0

            if current_state == "DISARMED" and recording:
                if stop_count == 2:
                    reco_cam.stop()
                    reco_led.stop()
                    recording = False
                    vid_id += 1
                else:
                    stop_count += 1

            # Resume the audio stream
            audio_stream.start_stream()

    except KeyboardInterrupt:
        print('Termination requested.')

    finally:
        audio_stream.stop_stream()
        audio_stream.close()
        audio_interface.terminate()
        print('Terminated')
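
# Script entry point (not part of the original snippet).
if __name__ == "__main__":
    main()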