Ejemplo n.º 1
0
    def start_recording_btn_ressed(self):
        """Start a new recording session.

        Refuses to start unless at least one LSL stream or camera capture
        is open; otherwise allocates a fresh save file, resets the staging
        buffer, flips the Start/Stop buttons and starts the eviction timer.
        """
        # Require at least one data source before recording can begin.
        have_lsl_source = len(self.parent.LSL_data_buffer_dicts.keys()) >= 1
        have_cam_source = len(self.parent.cam_workers) >= 1
        if not have_lsl_source and not have_cam_source:
            dialog_popup('You need at least one LSL Stream or Capture opened to start recording!')
            return

        # Fresh save target and an empty staging buffer for this session.
        self.save_path = self.generate_save_path()
        self.save_stream = RNStream(self.save_path)
        self.recording_buffer = {}

        self.is_recording = True
        self.StartRecordingBtn.setEnabled(False)
        self.StopRecordingBtn.setEnabled(True)
        self.recording_byte_count = 0

        # Periodic timer flushes the buffer to disk while recording.
        self.timer.start()
Ejemplo n.º 2
0
def create_test_stream(test_stream_length, test_stream_mean,
                       test_stream_variance):
    """Create an RNStream backed by a temp directory plus synthetic data.

    :param test_stream_length: number of Gaussian samples to draw.
    :param test_stream_mean: mean of the Gaussian distribution.
    :param test_stream_variance: variance of the Gaussian distribution.
    :return: (RNStream instance, temp directory path, list of samples).
    """
    tmp_dir = tempfile.mkdtemp()
    rn_stream = RNStream(os.path.join(tmp_dir, 'test.dats'))
    # gauss() takes a standard deviation, so convert from variance once.
    sigma = math.sqrt(test_stream_variance)
    samples = [gauss(test_stream_mean, sigma)
               for _ in range(test_stream_length)]
    return rn_stream, tmp_dir, samples
Ejemplo n.º 3
0
class RecordingsTab(QtWidgets.QWidget):
    """Tab widget that records LSL streams and camera captures to a .dats file.

    While recording, incoming frames/timestamps are staged per stream in
    ``self.recording_buffer`` and periodically flushed to disk through an
    RNStream by a QTimer (see ``evict_buffer``), keeping memory use bounded.
    """

    def __init__(self, parent):
        """
        :param parent: owning window; must expose ``LSL_data_buffer_dicts``,
            ``cam_workers`` and ``recordingFileSizeLabel``.
        """
        super().__init__()
        self.ui = uic.loadUi("ui/RecordingsTab.ui", self)

        # Staging area: stream name -> [data array, timestamps, ...].
        self.recording_buffer = {}

        self.is_recording = False

        self.StartRecordingBtn.clicked.connect(self.start_recording_btn_pressed)
        self.StopRecordingBtn.clicked.connect(self.stop_recording_btn_pressed)
        self.SelectDataDirBtn.clicked.connect(self.select_data_dir_btn_pressed)

        self.StopRecordingBtn.setEnabled(False)
        self.parent = parent

        self.data_dir = config.DEFAULT_DATA_DIR
        self.save_path = ''
        self.save_stream = None  # RNStream; created when a recording starts

        self.saveRootTextEdit.setText(self.data_dir + '/')

        # Periodically flushes the in-memory buffer to disk while recording.
        self.timer = QTimer()
        self.timer.setInterval(config.EVICTION_INTERVAL)
        self.timer.timeout.connect(self.evict_buffer)

        self.recording_byte_count = 0  # bytes written to the current file

    def select_data_dir_btn_pressed(self):
        """Let the user pick the root directory recordings are saved under."""
        selected_data_dir = str(QFileDialog.getExistingDirectory(self.widget_3, "Select Directory"))

        # An empty string means the dialog was cancelled; keep the old dir.
        if selected_data_dir != '':
            self.data_dir = selected_data_dir

        print("Selected data dir: ", self.data_dir)
        self.saveRootTextEdit.setText(self.data_dir + '/')

    def start_recording_btn_pressed(self):
        """Start a new recording session.

        Refuses to start unless at least one LSL stream or camera capture is
        open; otherwise creates a fresh save file, clears the staging buffer
        and starts the periodic eviction timer.
        """
        if not (len(self.parent.LSL_data_buffer_dicts.keys()) >= 1 or len(self.parent.cam_workers) >= 1):
            dialog_popup('You need at least one LSL Stream or Capture opened to start recording!')
            return
        self.save_path = self.generate_save_path()  # get a new save path
        self.save_stream = RNStream(self.save_path)
        self.recording_buffer = {}  # clear buffer
        self.is_recording = True
        self.StartRecordingBtn.setEnabled(False)
        self.StopRecordingBtn.setEnabled(True)
        self.recording_byte_count = 0

        self.timer.start()

    # Backward-compatible alias for the historic (typo'd) slot name, in case
    # any external caller still uses the old spelling.
    start_recording_btn_ressed = start_recording_btn_pressed

    def stop_recording_btn_pressed(self):
        """Stop recording, flush remaining buffered data and notify the user."""
        self.is_recording = False
        self.StopRecordingBtn.setEnabled(False)
        self.StartRecordingBtn.setEnabled(True)

        # Flush whatever is still staged before stopping the timer.
        self.evict_buffer()
        self.timer.stop()

        self.recording_byte_count = 0
        self.update_file_size_label()
        dialog_popup('Saved to {0}'.format(self.save_path), title='Info')

    def update_buffers(self, data_dict: dict):
        """Append newly arrived LSL frames/timestamps to the staging buffer.

        :param data_dict: expected to contain 'lsl_data_type' (stream name),
            'frames' (2-D array indexed channels x samples — inferred from the
            concatenation along axis -1; confirm against the producer) and
            'timestamps' (1-D array, one entry per sample).
        """
        if self.is_recording:
            lsl_data_type = data_dict['lsl_data_type']  # get the type of the newly-come data
            if lsl_data_type not in self.recording_buffer.keys():
                # Seed with empty arrays shaped for concatenation along the
                # sample axis: data first, timestamps second.
                self.recording_buffer[lsl_data_type] = [np.empty(shape=(data_dict['frames'].shape[0], 0)),
                                                        np.empty(shape=(0,))]

            buffered_data = self.recording_buffer[lsl_data_type][0]
            buffered_timestamps = self.recording_buffer[lsl_data_type][1]

            self.recording_buffer[lsl_data_type][0] = np.concatenate([buffered_data, data_dict['frames']], axis=-1)
            self.recording_buffer[lsl_data_type][1] = np.concatenate([buffered_timestamps, data_dict['timestamps']])

    def update_camera_screen_buffer(self, cam_id, new_frame, timestamp):
        """Append a single camera/screen frame to the staging buffer.

        :param cam_id: key identifying the capture source.
        :param new_frame: one frame; stored as uint8 (0~255).
        :param timestamp: capture timestamp supplied by the source.
        """
        if self.is_recording:
            if cam_id not in self.recording_buffer.keys():  # note array data type is uint8 0~255
                # Buffer layout: [frames, source timestamps, local receipt times].
                self.recording_buffer[cam_id] = [np.empty(shape=new_frame.shape + (0,), dtype=np.uint8),
                                                 np.empty(shape=(0,)), np.empty(shape=(0,))]

            # Add a trailing axis so frames stack along the last dimension.
            _new_frame = np.expand_dims(new_frame, axis=-1)
            buffered_data = self.recording_buffer[cam_id][0]
            buffered_timestamps = self.recording_buffer[cam_id][1]

            self.recording_buffer[cam_id][0] = np.concatenate([buffered_data, _new_frame.astype(np.uint8)], axis=-1)
            self.recording_buffer[cam_id][1] = np.concatenate([buffered_timestamps, [timestamp]])
            # Third slot records the local wall-clock arrival time.
            self.recording_buffer[cam_id][2] = np.concatenate([self.recording_buffer[cam_id][2], [time.time()]])

    def generate_save_path(self):
        """Build a timestamped .dats path from the save root and the UI tags."""
        os.makedirs(self.saveRootTextEdit.toPlainText(), exist_ok=True)
        # datetime object containing current date and time
        now = datetime.now()
        dt_string = now.strftime("%m_%d_%Y_%H_%M_%S")
        return os.path.join(self.saveRootTextEdit.toPlainText(),
                            '{0}-Exp_{1}-Sbj_{2}-Ssn_{3}.dats'.format(dt_string,
                                                                      self.experimentNameTextEdit.toPlainText(),
                                                                      self.subjectTagTextEdit.toPlainText(),
                                                                      self.sessionTagTextEdit.toPlainText()))

    def evict_buffer(self):
        """Flush the staging buffer to disk, reset it, and update the size label."""
        self.recording_byte_count += self.save_stream.stream_out(self.recording_buffer)
        self.recording_buffer = {}
        self.update_file_size_label()

    def update_file_size_label(self):
        """Show the size of the current recording file in the parent's label."""
        self.parent.recordingFileSizeLabel. \
            setText('    Recording file size: {0} Mb'.format(str(round(self.recording_byte_count / 10 ** 6, 2))))
Ejemplo n.º 4
0
import pickle
import time
from utils.data_utils import RNStream

# recording_buffer = pickle.load(open('C:/Recordings/03_21_2021_22_48_25-Exp_Unity.RealityNavigationHotel.EventMarkers-Sbj_someone-Ssn_0.p', 'rb'))

# rns.stream_out(recording_buffer)

# Open an existing recording (.dats) and report which streams it contains.
rns = RNStream(
    'C:/Users/S-Vec/Dropbox/research/RealityNavigation/Data/Pilot/03_22_2021_17_13_28-Exp_realitynavigation-Sbj_0-Ssn_3.dats'
)
print('String names are {0}'.format(str(rns.get_stream_names())))

# Time a selective reload of just the 'BioSemi' stream.
start_time = time.time()
reloaded_buffer = rns.stream_in(only_stream=('BioSemi', ))
print('reload with only took {0}'.format(time.time() - start_time))
# Alternative timing runs, kept for reference:
# start_time = time.time()
# reloaded_buffer = rns.stream_in(ignore_stream=('monitor1', '0'))
# print('reload with ignore took {0}'.format(time.time() - start_time))
#
#
# start_time = time.time()
# reloaded_buffer = rns.stream_in()
# print('reload all took {0}'.format(time.time() - start_time))

# Render the recorded 'monitor1' screen-capture stream to a video file.
rns.generate_video(video_stream_name='monitor1')
Ejemplo n.º 5
0
import os

import numpy as np
import cv2

import config
from utils.data_utils import RNStream
import matplotlib.pyplot as plt

# Export a recorded screen-capture stream from a .dats recording to an .avi.
data_root = 'C:/Users/S-Vec/Dropbox/research/RealityNavigation/Data/Pilot/'
data_fn = '03_22_2021_17_03_52-Exp_realitynavigation-Sbj_0-Ssn_2.dats'

video_stream_label = 'monitor1'

rns = RNStream(os.path.join(data_root, data_fn))
# BUG FIX: ('0') is just the parenthesized string '0', not a tuple — use
# ('0',) so ignore_stream receives a one-element tuple as intended.
data = rns.stream_in(ignore_stream=('0',))

video_frame_stream = data[video_stream_label][0]
frame_count = video_frame_stream.shape[-1]
# OpenCV's VideoWriter expects the frame size as (width, height).
frame_size = (data[video_stream_label][0].shape[1],
              data[video_stream_label][0].shape[0])
out_path = os.path.join(
    data_root, '{0}_{1}.avi'.format(data_fn.split('.')[0], video_stream_label))
# FPS is the inverse of the configured capture interval.
out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'DIVX'),
                      1 / config.CAMERA_SCREENCAPTURE_REFRESH_INTERVAL,
                      frame_size)
for i in range(frame_count):
    print('Creating video progress {}%'.format(
        str(round(100 * i / frame_count, 2))),
          sep=' ',
import numpy as np

from utils.data_utils import RNStream
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure

# Path to a recorded .dats session and the event-marker stream to analyse.
file_path = 'C:/Users/S-Vec/Dropbox/research/RealityNavigation/Data/Pilot/03_22_2021_16_52_54-Exp_realitynavigation-Sbj_0-Ssn_1.dats'
em_stream_name = 'Unity.RotationWheel.EventMarkers'

rns = RNStream(file_path)
# Skip the bulky screen-capture ('monitor1') and camera ('0') streams.
data = rns.stream_in(ignore_stream=('monitor1', '0'))

data_stream = data[em_stream_name][0]  # 2-D sample array (indexed [channel, sample] below)
timestamps_stream = data[em_stream_name][1]  # per-sample timestamps
# Event labels are carried on the last channel of the marker stream.
event_label_stream = data_stream[-1, :]
item_count = 5
offset = 2  # presumably target labels start at 2 — confirm against the Unity side
trial_count = 24
trial_started_index = 0
for target_label in range(offset, offset + 5):
    target_onset_em = np.logical_and(
        event_label_stream == target_label,
        np.concatenate([np.array([0]),
                        np.diff(event_label_stream)]) != 0)
    started_em = event_label_stream >= 1
    started = False
    target_count = 0
    target_indices = []
    clean_count = 0
    trial_count = 0
    for i in range(1, len(target_onset_em)):
Ejemplo n.º 7
0
from utils.data_utils import RNStream

# Round-trip check for RNStream: load two existing recordings, write both
# into a single results file, then read the combined file back in.
test_rns = RNStream('C:/Recordings/test.dats')
test_reloaded_data = test_rns.stream_in()

another_rns = RNStream(
    'C:/Recordings/03_22_2021_00_00_55-Exp_myexperiment-Sbj_someone-Ssn_0.dats'
)
another_reloaded_data = another_rns.stream_in()

# NOTE(review): this assumes consecutive stream_out calls accumulate into the
# same file rather than overwrite it — confirm against RNStream.stream_out.
result_rns = RNStream('C:/Recordings/results.dats')
result_rns.stream_out(test_reloaded_data)
result_rns.stream_out(another_reloaded_data)

results_reloaded_data = result_rns.stream_in()
Ejemplo n.º 8
0
from utils.data_utils import RNStream

# Load a pilot recording, skipping the bulky screen-capture ('monitor1')
# and camera ('0') streams.
test_rns = RNStream(
    '/Users/Leo/Dropbox/research/RealityNavigation/Data/Pilot/03_22_2021_17_03_52-Exp_realitynavigation-Sbj_0-Ssn_2.dats'
)
reloaded_data = test_rns.stream_in(ignore_stream=('monitor1', '0'))