Example #1
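These snippets assume the octopus-sensing package plus the standard-library modules they use are already imported. A minimal sketch of the imports Example #1 needs (the import paths follow the library's documented quick start and should be checked against your installed version):

import time

# Assumed import paths; verify against your octopus-sensing version.
from octopus_sensing.device_coordinator import DeviceCoordinator
from octopus_sensing.devices import CameraStreaming
from octopus_sensing.common.message_creators import start_message, stop_message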
def add_sensors():
    # Creating an instance of the camera sensor

    my_camera = CameraStreaming(camera_no=0,
                                name="camera",
                                output_path="./output")

    # Creating an instance of device coordinator
    device_coordinator = DeviceCoordinator()

    # Adding sensor to device coordinator
    device_coordinator.add_devices([my_camera])

    experiment_id = "p01"
    stimuli_id = "S00"

    input("Press a button to start data recording")

    # Starts data recording
    device_coordinator.dispatch(start_message(experiment_id, stimuli_id))
    time.sleep(5)

    # Stops data recording
    device_coordinator.dispatch(stop_message(experiment_id, stimuli_id))
    time.sleep(0.5)
    # Terminate. This step is necessary to close the connection with the added devices
    device_coordinator.terminate()
def server():
    '''
    Starts a server, sends several messages, and terminates
    '''
    device_coordinator = DeviceCoordinator()
    socket_device = SocketNetworkDevice("0.0.0.0", 5002)
    device_coordinator.add_devices([socket_device])

    time.sleep(2)
    input("Press enter to start sending marker")
    message = start_message("test", "00")
    device_coordinator.dispatch(message)

    time.sleep(2)
    message = stop_message("test", "00")
    device_coordinator.dispatch(message)
    time.sleep(2)
    message = start_message("test", "01")
    device_coordinator.dispatch(message)
    time.sleep(2)
    message = stop_message("test", "01")
    device_coordinator.dispatch(message)
    time.sleep(3)

    device_coordinator.terminate()
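server() publishes the start/stop markers over TCP through SocketNetworkDevice, so any client that connects to port 5002 can consume them. A minimal, hypothetical client sketch (the exact wire format is defined by SocketNetworkDevice, so this only prints the raw bytes it receives):

import socket

def marker_client(host="localhost", port=5002):
    # Hypothetical helper: connect to the marker server above and print
    # whatever bytes arrive until the server closes the connection.
    with socket.create_connection((host, port)) as conn:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            print("received:", data)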
Example #3
def simple_scenario(stimuli_path):
    # Reading image stimuli and assigning an ID to them based on their alphabetical order
    stimuli_list = os.listdir(stimuli_path)
    stimuli_list.sort()
    stimuli = {i: name for i, name in enumerate(stimuli_list)}

    print("initializing")
    # Creating an instance of Shimmer3
    my_shimmer = Shimmer3Streaming(name="Shimmer3_sensor",
                                   output_path="./output")

    # Creating an instance of camera. By uncommenting the line below and adding my_camera
    # to the device_coordinator, you can record video data as well
    # my_camera = CameraStreaming(camera_no=0, name="camera", output_path="./output")

    # Creating an instance of device coordinator
    device_coordinator = DeviceCoordinator()

    # Adding sensor to device coordinator
    device_coordinator.add_devices([my_shimmer])

    experiment_id = "p01"

    # A delay to make sure device initialization has finished
    time.sleep(3)

    input("\nPress a key to run the scenario")

    for stimuli_id, stimulus_name in stimuli.items():
        # Starts data recording before displaying the image
        device_coordinator.dispatch(start_message(experiment_id, stimuli_id))

        # Displaying the image may start a few milliseconds after data recording begins
        # because of GTK initialization in show_image_standalone. If this delay matters to you,
        # use another tool for displaying image stimuli.
        # Since the image is displayed in another thread, we manually create the same delay in the
        # current thread so that data is recorded for the duration of the stimulus.

        timeout = 5
        stimulus = ImageStimulus(stimuli_id,
                                 os.path.join(stimuli_path, stimulus_name), timeout)
        stimulus.show_standalone()
        time.sleep(timeout)

        # If the stimulus is a video, display it as follows instead:
        # stimulus = VideoStimulus(stimuli_id, os.path.join(stimuli_path, stimulus_name))
        # stimulus.show()

        # Stops data recording
        device_coordinator.dispatch(stop_message(experiment_id, stimuli_id))
        input("\nPress a key to continue")

    # Terminate. This step is necessary to close the connection with the added devices
    device_coordinator.terminate()
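A minimal way to invoke this scenario, assuming the image stimuli live in a local ./stimuli directory (the path is only an assumption for illustration):

if __name__ == "__main__":
    # Hypothetical entry point; point this at your own stimuli directory.
    simple_scenario("./stimuli")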
Example #4
def test_http_network_device_happy_path():
    coordinator = DeviceCoordinator()
    device = HttpNetworkDevice(["http://localhost:5003/"],
                               name="test-http-network-device",
                               timeout=15)
    coordinator.add_devices([device])

    server = http.server.ThreadingHTTPServer(("localhost", 5003), Handler)
    threading.Thread(target=server.serve_forever, daemon=True).start()

    # Give the device and the server time to start
    time.sleep(1)

    coordinator.dispatch(start_message("exp1", "stim1"))
    coordinator.dispatch(stop_message("exp1", "stim2"))
    # Should send TERMINATE message
    coordinator.terminate()
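The test above references a Handler class that is not shown here. A minimal sketch, assuming HttpNetworkDevice delivers each marker as an HTTP POST and the happy path only needs a 200 response:

import http.server

class Handler(http.server.BaseHTTPRequestHandler):
    # Hypothetical handler: accept any POST and reply 200 so the
    # HttpNetworkDevice sees a successful delivery.
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        self.rfile.read(length)  # drain the request body
        self.send_response(200)
        self.end_headers()

    def log_message(self, format, *args):
        # Keep test output quiet.
        pass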
def __server():
    '''
    Starts a server, sends several messages, and terminates
    '''
    device_coordinator = DeviceCoordinator()
    socket_device = SocketNetworkDevice("localhost", 5002)
    device_coordinator.add_devices([socket_device])

    time.sleep(5)
    message = start_message("test", "00")
    device_coordinator.dispatch(message)

    time.sleep(2)
    message = stop_message("test", "00")
    device_coordinator.dispatch(message)
    time.sleep(2)
    message = start_message("test", "01")
    device_coordinator.dispatch(message)
    time.sleep(2)
    message = stop_message("test", "01")
    device_coordinator.dispatch(message)
    time.sleep(3)

    device_coordinator.terminate()
def main():
    '''
    The scenario:
    1- Initialization and displaying a gray screen (put the cursor on each monitor on which
       you want the scenario to be displayed)
    2- Displaying a fixation cross for 3 seconds
    3- Displaying a video stimulus and starting data recording
    4- Displaying a questionnaire
    5- After the questionnaire is answered and closed, the same process repeats from step 2,
       once for each video stimulus in the stimuli directory.

    Run the code like `pipenv run python watching_video_scenario.py -s 1`.
    This command uses `1` as the subject number and records data in the `output/p01` directory.
    '''

    subject_id = get_input_parameters()
    experiment_id = str(subject_id).zfill(2)
    output_path = "output/p{0}".format(experiment_id)
    os.makedirs(output_path, exist_ok=True)

    # Create an instance of the video (camera) device
    camera = \
        CameraStreaming(name="webcam",
                        output_path=output_path,
                        camera_no=0,
                        image_width=640,
                        image_height=360)

    # Create an instance of OpenBCI device
    openbci = BrainFlowOpenBCIStreaming(name="eeg",
                                        output_path=output_path,
                                        serial_port="/dev/ttyUSB2",
                                        channels_order=[
                                            "Fp1", "Fp2", "F7", "F3", "F4",
                                            "F8", "T3", "C3", "C4", "T4", "T5",
                                            "P3", "P4", "T6", "O1", "O2"
                                        ])

    # Create a network device and add it to the device_coordinator if you want to send triggers to other software
    #socket_device = SocketNetworkDevice("localhost", 5006)

    # Add to device coordinator for synchronous data recording
    device_coordinator = DeviceCoordinator()
    device_coordinator.add_devices([openbci, camera])

    stimuli_list = load_stimuli(stimuli_path)
    # Delay to let all processes finish initializing
    print("Initializing")
    time.sleep(5)

    # Creating the main Gtk window
    main_window = \
        MainWindow(experiment_id,
                   stimuli_list,
                   device_coordinator,
                   output_path=output_path)

    # After running this code, run `pipenv run octopus-sensing-monitoring` in another terminal
    # and then monitor the data in a browser.
    # (Make sure octopus-sensing-monitoring is installed if you want to monitor data in real time.)
    monitoring_endpoint = MonitoringEndpoint(device_coordinator)
    monitoring_endpoint.start()

    try:
        # Start the scenario
        main_window.show()
        monitoring_endpoint.stop()

        # After running this code, the data will be available in the preprocessed_output path.
        # By configuring octopus_sensing_visualizer_config.conf in your project directory and
        # running `pipenv run octopus-sensing-visualizer`, you can visualize the data in a browser
        # after finishing the data collection. (Make sure octopus-sensing-visualizer is installed
        # if you want to visualize data.)
        preprocess_devices(device_coordinator,
                           "preprocessed_output",
                           openbci_sampling_rate=125,
                           signal_preprocess=True)
    finally:
        device_coordinator.terminate()
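main() also depends on helpers that are not shown here, including get_input_parameters() for the -s flag mentioned in the docstring. A hedged sketch of that helper using argparse (the flag names and return type are assumptions based on how main() uses the value):

import argparse

def get_input_parameters():
    # Hypothetical implementation: parse the subject number passed with -s,
    # e.g. `pipenv run python watching_video_scenario.py -s 1`.
    parser = argparse.ArgumentParser(description="Watching-video scenario")
    parser.add_argument("-s", "--subject", type=int, required=True,
                        help="Subject number, e.g. 1 records into output/p01")
    args = parser.parse_args()
    return args.subject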