Example #1
class AIYVoiceInterface:
    def __init__(self, lsl_data_type,
                 num_channels):  # default board_id 2 for Cyton
        self.lsl_data_type = lsl_data_type
        self.lsl_num_channels = num_channels

        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        # TODO: fix this; we re-stream because Unity sometimes fails to pick up the AIY data for an unknown reason
        info = StreamInfo('VoiceBox', 'Voice', num_channels, 0.0, 'string',
                          'voice')
        self.outlet = StreamOutlet(info)

    def start_sensor(self):
        # connect to the sensor
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    self.lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        self.inlet.open_stream()
        print(
            'LSLInletInterface: resolved, created and opened inlet for lsl stream with type '
            + self.lsl_data_type)

        # read the channel names if there are any
        # tell the sensor to start sending frames

    def process_frames(self):
        # return one or more frames of the sensor
        try:
            frames, timestamps = self.inlet.pull_chunk()
            if len(frames) > 0:
                self.outlet.push_sample(frames[0])  # TO-DO: see above
        except LostError:
            frames, timestamps = [], []
            pass  # TODO handle stream lost
        return np.transpose(frames), timestamps

    def stop_sensor(self):
        if self.inlet:
            self.inlet.close_stream()
        print('LSLInletInterface: inlet stream closed.')

    def info(self):
        return self.inlet.info()

    def get_num_chan(self):
        return self.lsl_num_channels

    def get_nominal_srate(self):
        return self.streams[0].nominal_srate()
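
A minimal usage sketch for the interface above. The stream name 'AIYVoice' and the two-channel count are placeholders chosen for illustration, and an LSL stream with that name must already be published or the constructor will raise:

interface = AIYVoiceInterface(lsl_data_type='AIYVoice', num_channels=2)
interface.start_sensor()
for _ in range(100):
    frames, timestamps = interface.process_frames()  # frames is channels x samples
    if frames.size > 0:
        print(frames.shape, timestamps[-1])
interface.stop_sensor()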
Example #2
def testing():
    dummy_streamer = ble2lsl.Dummy(muse2016)  # stream simulated Muse 2016 data over LSL

    streams = resolve_byprop(
        "type", "EEG", timeout=5
    )  #type: EEG, minimum return streams = 1, timeout after 5 seconds

    streamIn = StreamInlet(
        streams[0], max_chunklen=12, recover=True
    )  #Grab first stream from streams, MUSE chunk 12, drop lost stream
    print(streamIn)
    print(streamIn.info().channel_count())
    streamIn.open_stream(
    )  #This actually isn't required: pull_sample() and pull_chunk() implicitly open the stream.
    #But it's good to be explicit because it makes the code clearer
    print("Pull Sample")
    print(streamIn.pull_sample()
          )  #Returns a tuple with the actual values we want.
    #The first element is the list of channel values, the second element is a timestamp. This is a snapshot of our stream
    #at a certain point in time.
    print("Pull Chunk")
    ts = time.time()
    while time.time() - ts < 10:  # run for ~10 s so the clean-up code below is reachable
        x = streamIn.pull_chunk()
        if all(x):
            #if not np.shape(x) == (2, 0):
            print(np.shape(x))
            print(np.shape(x[1]))
            t = [t - ts for t in x[1]]
            print(t)
            print(t[-1] - t[0])

        # for y in x:
        #     for z in y:
        #         print(z)
        #print("\n")

    plt.style.use('ggplot')

    # data first then time stamps, sick

    pprint(streamIn.info().as_xml())  # dump the stream's full metadata as XML
    timeC = streamIn.time_correction()
    print(timeC)

    #Clean up time

    streams.clear()
    streamIn.close_stream()  #calls lsl_close_stream
    streamIn.__del__()  #Not throwing errors
    dummy_streamer.stop()
Example #3
class LSLInletInterface:
    def __init__(self, lsl_data_type):
        self.streams = resolve_byprop('name', lsl_data_type, timeout=0.1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        self.lsl_data_type = lsl_data_type
        self.lsl_num_channels = self.inlet.channel_count

    def start_sensor(self):
        # connect to the sensor
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=0.1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    self.lsl_data_type))
        if not self.inlet:
            self.inlet = StreamInlet(self.streams[0])
        self.inlet.open_stream()
        print(
            'LSLInletInterface: resolved, created and opened inlet for lsl stream with type '
            + self.lsl_data_type)

        # read the channel names if there are any
        # tell the sensor to start sending frames

    def process_frames(self):
        # return one or more frames of the sensor
        try:
            frames, timestamps = self.inlet.pull_chunk()
        except LostError:
            frames, timestamps = [], []
            pass  # TODO handle stream lost
        return np.transpose(frames), timestamps

    def stop_sensor(self):
        if self.inlet:
            self.inlet.close_stream()
        print('LSLInletInterface: inlet stream closed.')

    def info(self):
        return self.inlet.info()

    def get_num_chan(self):
        return self.lsl_num_channels

    def get_nominal_srate(self):
        return self.streams[0].nominal_srate()
Example #4
class SimulationInterface:
    def __init__(self, lsl_data_type, num_channels,
                 sampling_rate):  # default board_id 2 for Cyton
        self.lsl_data_type = lsl_data_type
        self.lsl_num_channels = num_channels
        self.sampling_rate = sampling_rate
        with open('data/s01.dat', "rb") as f:
            deap_data = pickle.load(f, encoding="latin1")
        deap_data = np.array(deap_data['data'])
        # flatten so we have a continuous stream
        self.deap_data = deap_data.reshape(
            deap_data.shape[1], deap_data.shape[0] * deap_data.shape[2])
        self.dreader = None
        self.stream_process = None
        info = StreamInfo('DEAP Simulation', 'EEG', num_channels,
                          self.sampling_rate, 'float32', 'deapcontinuous')
        self.outlet = StreamOutlet(info, 32, 360)
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])

    def start_sensor(self):
        # connect to the sensor
        self.dreader = DEAPReader(self.sampling_rate)
        self.stream_process = threading.Thread(target=self.dreader.run,
                                               args=(self.deap_data,
                                                     self.outlet))

        self.stream_process.start()
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    self.lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        self.inlet.open_stream()
        print(
            'LSLInletInterface: resolved, created and opened inlet for lsl stream with type '
            + self.lsl_data_type)

        # read the channel names if there are any
        # tell the sensor to start sending frames

    def process_frames(self):
        # return one or more frames of the sensor
        try:
            frames, timestamps = self.inlet.pull_chunk()
        except LostError:
            frames, timestamps = [], []
            pass  # TODO handle stream lost
        return np.transpose(frames), timestamps

    def stop_sensor(self):
        self.dreader.terminate()
        if self.inlet:
            self.inlet.close_stream()
        print('LSLInletInterface: inlet stream closed.')

    def info(self):
        return self.inlet.info()

    def get_num_chan(self):
        return self.lsl_num_channels

    def get_nominal_srate(self):
        return self.streams[0].nominal_srate()
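
One caveat worth noting: the reshape above keeps NumPy's C ordering, so if the DEAP arrays are laid out as trials x channels x samples (the usual shape of the .dat files), the rows of the flattened array are trials rather than channels. A hedged alternative that puts the channel axis first before flattening:

# Assuming deap_data is (trials, channels, samples); swap axes so each row is
# one channel's samples concatenated across trials, i.e. a continuous stream.
continuous = np.swapaxes(deap_data, 0, 1).reshape(deap_data.shape[1], -1)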
Example #5
def fft_backend(input_stream, output_stream, window_length=256, pow2=True, window_type=np.hamming):
    
    #################################
    ## Stream Inlet and Outlet Creation
    #################################

    #streams = resolve_byprop("name",input_stream.name(),timeout= 10)
    #input_stream = streams[0]
    #print(input_stream.channel_count())
    #print(input_stream)
    #print(input_stream.name())
    inlet = StreamInlet(input_stream, max_chunklen=12, recover=True)
    inlet.open_stream() # The stream is opened implicitly on the first pull_chunk(), but opening now for clarity

    # Create StreamOutlet to push data to output stream
    outlet = StreamOutlet(output_stream, chunk_size=129)
    ###################################
    ## FFT
    ###################################
    
    buffer = np.empty((0,5))  # NOTE: assumes a 5-channel input stream (e.g. Muse EEG)
    window = window_type(window_length)
    g = True
    while(True):
        input_chunk = inlet.pull_chunk()  # returns a (samples, timestamps) tuple
        #print(np.shape(input_chunk))

        if input_chunk[0] and np.shape(input_chunk)[1] > 0: # Check for available chunk
            #print("output samples")
            buffer = np.append(buffer, input_chunk[0], axis=0)

            if (len(buffer) >= window_length):
                # Take data from buffer
                data = buffer[0:window_length]
                data = np.transpose(data)

                # Get frequency labels/bins
                freq_labels = np.fft.rfftfreq(window_length, 1/input_stream.nominal_srate())

                # Take FFT of data for each channel
                data_windowed = []
                data_fft = []
                psd = []
                for i in range(0, output_stream.channel_count()):
                    # Remove the DC offset, then multiply by the window
                    data_windowed.append(data[i] - np.mean(data[i], axis=0))
                    data_windowed[i] = data_windowed[i] * window

                    # Get FFT
                    data_fft.append(np.fft.rfft(data_windowed[i], n=window_length, axis=0))
                    data_fft[i] = data_fft[i]/window_length

                    # Convert FFT to PSD
                    psd.append(abs(data_fft[i])) # Take absolute value
                    # Assume input signal is real-valued and double power to account for negative frequencies 
                    # DC power (psd[i][0]) only occurs once and does not need to be doubled
                    psd[i][1:] = 2*psd[i][1:]

                # Create Output Data Packet in shape 2 x N (Where N is the # of discrete frequencies)
                # The first dimension of output sample contains the data of shape CHANNELS x N
                # The second dimension contains the N labels for the frequencies in Hz 
                psd = np.transpose(psd)
                psd = psd.tolist()
                if(g==True):
                    #print(psd)
                    g=False

                #print(np.shape(psd))
                #freq_labels = freq_labels.tolist()
                #output_sample = (psd, freq_labels)
                #print(np.shape(output_sample))
                #print(output_sample)

                # Push fft transform for each channel using outlet
                outlet.push_chunk(psd)
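
A rough invocation sketch for fft_backend. The stream names and types below are placeholders; the only constraints taken from the function itself are that the output StreamInfo's channel count drives the per-channel loop and that, as written, the internal buffer assumes a 5-channel input:

from pylsl import StreamInfo, resolve_byprop

raw_streams = resolve_byprop('type', 'EEG', timeout=5)  # hypothetical raw stream
if not raw_streams:
    raise RuntimeError('No EEG stream found')
raw_info = raw_streams[0]

# Same channel count as the input; each 256-sample window yields a 129-bin PSD chunk
psd_info = StreamInfo('PSD', 'FFT', raw_info.channel_count(),
                      raw_info.nominal_srate() / 256, 'float32', 'psd-out')

fft_backend(raw_info, psd_info, window_length=256)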
Example #6
def plotFreqDomain(stream_info, chunkwidth, channels=0, size=(1500, 1500), title=None):
    """Plot Real-Time in the frequency domain using a static x-axis and changing y axis values.

    Accepts a pylsl StreamInlet Object and plots chunks in real-time as they are recieved
    using a pyqtgraph plot. Can plot multiple channels.

    Args:
        stream_info (pylsl StreamInfo Object): The stream info object for the stream to be plotted
        chunkwidth (int): The number of samples in each chunk when pulling chunks from the stream
        fs (int): The sampling frequency of the device. If zero function will attempt to determine 
            sampling frequency automatically
        size (array): Array of type (width, height) of the figure
        title (string): Title of the plot figure
    
    Returns:
        bool: True if window was closed and no errors were encountered. False if an error was encountered within
            the function
    """
    #################################
    ## Stream Inlet Creation
    #################################
    inlet = StreamInlet(stream_info, max_chunklen=chunkwidth, recover=True)
    inlet.open_stream() # The stream is opened implicitly on the first pull_chunk(), but opening now for clarity

    #################################
    ## Variable Initialization
    #################################

    if(channels == 0):
        channels = stream_info.channel_count() # Get number of channels

    ##################################
    ## Figure and Plot Set Up
    ##################################

    ## Initialize QT
    app = QtGui.QApplication([])

    ## Define a top-level widget to hold everything
    fig = QtGui.QWidget()
    fig.resize(size[0], size[1]) # Resize window
    if (title != None): 
        fig.setWindowTitle(title) # Set window title
    layout = QtGui.QGridLayout()
    fig.setLayout(layout)

    # Set up initial plot conditions
    (x_vec, step) = np.linspace(0,chunkwidth,chunkwidth, retstep=True) # vector used to plot y values
    y_vec = np.zeros((channels,len(x_vec))) # Initialize y_values as zero

    # Set Up subplots and lines
    plots = []
    curves = []
    colors = ['c', 'm', 'g', 'r', 'y', 'b'] # Color options for various channels
    for i in range(0, channels):
        # Create plot widget and append to list
        plot = pg.PlotWidget(labels={'left': 'Power (dB)'}, title='Channel ' + (str)(i + 1)) # Create Plot Widget
        plot.plotItem.setMouseEnabled(x=False, y=False) # Disable panning for widget
        plot.plotItem.showGrid(x=True) # Enable vertical gridlines
        plots.append(plot)
        # Plot data and save curve. Append curve to list
        curve = plot.plot(x_vec, y_vec[i], pen=pg.mkPen(colors[i%len(colors)], width=0.5)) # Set thickness and color of lines
        curves.append(curve)
        # Add plot to main widget
        layout.addWidget(plot, np.floor(i/2), i%2)

    # Display figure as a new window
    fig.show()

    ###################################
    # Real-Time Plotting Loop
    ###################################

    firstUpdate = True
    buffer = []
    while(True):
        chunk = inlet.pull_chunk()
        #print(np.shape(chunk[0]))
        #print(chunk[0][0:129])
        #print(np.shape(chunk[0][0:129]))

        if not (np.size(chunk[0]) == 0): # Check for available chunk
            chunkdata = np.transpose(chunk[0]) # Get chunk data and transpose to be CHANNELS x CHUNKLENGTH
            if np.size(buffer) == 0:
                buffer = chunkdata
            else:
                buffer = np.append(buffer, chunkdata, axis=1)
        
        while np.size(buffer) > 0 and np.size(buffer, 1) > 129:  # guard against an empty buffer before indexing axis 1
            data = buffer[:,0:129]
            buffer = buffer[:,129:]
            #if np.size(buffer,1) < 129:
                #data = np.zeros((5,129))
            # Update plotted data
            for i in range(0,channels):
                curves[i].setData(x_vec, data[i]) # Update data
            
            # Update QT Widget to reflect the changes we made
            pg.QtGui.QApplication.processEvents()

        # Check to see if widget if has been closed, if so exit loop
        if not fig.isVisible():
            break
    
    # Close the stream inlet
    inlet.close_stream()
    
    return True
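
The hard-coded 129-sample block corresponds to the number of rfft bins produced by the 256-point FFT backend above (len(np.fft.rfftfreq(256)) == 129). A minimal invocation sketch; the stream name 'PSD' is a placeholder, not something this function requires:

from pylsl import resolve_byprop

streams = resolve_byprop('name', 'PSD', timeout=5)
if streams:
    plotFreqDomain(streams[0], chunkwidth=129, title='Power spectrum')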
Example #7
def plotTimeDomain(stream_info, chunkwidth=0, fs=0, channels=0, timewin=50, tickfactor=5, size=(1500, 800), title=None):
    """Plot Real-Time domain in the time domain using a scrolling plot.

    Accepts a pylsl StreamInfo object and plots chunks in real-time as they are received
    using a scrolling pyqtgraph plot. Can plot multiple channels.

    Args:
        stream_info (pylsl StreamInfo Object): The stream info object for the stream to be plotted
        chunkwidth (int): The number of samples in each chunk when pulling chunks from the stream
        fs (int): The sampling frequency of the device. If zero function will attempt to determine 
            sampling frequency automatically
        channels (int): The number of channels in the stream (e.g. number of EEG electrodes). If
            zero, the function will attempt to determine it automatically
        timewin (int): The number of seconds to show at any given time in the plot. This affects the speed
            with which the plot will scroll across the screen. Can not be a prime number.
        tickfactor (int): The number of seconds between x-axis labels. Must be a factor of timewin
        size (array): Array of type (width, height) of the figure
        title (string): Title of the plot figure
    
    Returns:
        bool: True if window was closed and no errors were encountered. False if an error was encountered within
            the function
    """
    #################################
    ## Stream Inlet Creation
    #################################
    #stream = resolve_byprop("name",stream_info.name(),timeout= 10)
    inlet = StreamInlet(stream_info, max_chunklen=chunkwidth, recover=True)
    inlet.open_stream() # The stream is opened implicitly on the first pull_chunk(), but opening now for clarity

    #################################
    ## Variable Initialization
    #################################

    ## Get/Check Default Params
    if(timewin%tickfactor != 0):
        print('''ERROR: The tickfactor should be a factor of timewin. The default tickfactor
        \n is 5 seconds. If you changed the default timewin, make sure that 5 is a factor, or 
        \n change the tickfactor so that it is a factor of timewin''')
        return False

    if(fs == 0):
        fs = stream_info.nominal_srate() # Get sampling rate

    if(channels == 0):
        channels = stream_info.channel_count() # Get number of channels

    ## Initialize Constants
    XWIN = timewin*fs # Width of X-Axis in samples
    XTICKS = (int)((timewin + 1)/tickfactor) # Number of labels to have on X-Axis
    #CHUNKPERIOD = chunkwidth*(1/fs) # The length of each chunk in seconds

    ##################################
    ## Figure and Plot Set Up
    ##################################

    ## Initialize QT
    app = QtGui.QApplication([])

    ## Define a top-level widget to hold everything
    fig = QtGui.QWidget()
    fig.resize(size[0], size[1]) # Resize window
    if (title != None): 
        fig.setWindowTitle(title) # Set window title
    layout = QtGui.QGridLayout()
    fig.setLayout(layout)

    # Set up initial plot conditions
    (x_vec, step) = np.linspace(0,timewin,XWIN+1, retstep=True) # vector used to plot y values
    xlabels = np.zeros(XTICKS).tolist() # Vector to hold labels of ticks on x-axis
    xticks = [ x * tickfactor for x in list(range(0, XTICKS))] # Initialize locations of x-labels
    y_vec = np.zeros((channels,len(x_vec))) # Initialize y_values as zero

    # Set Up subplots and lines
    plots = []
    curves = []
    colors = ['c', 'm', 'g', 'r', 'y', 'b'] # Color options for various channels
    for i in range(0, channels):
        # Create axis item and set tick locations and labels
        axis = pg.AxisItem(orientation='bottom')
        axis.setTicks([[(xticks[i],str(xlabels[i])) for i in range(len(xticks))]]) # Initialize all labels as zero
        # Create plot widget and append to list
        plot = pg.PlotWidget(axisItems={'bottom': axis}, labels={'left': 'Volts (mV)'}, title='Channel ' + (str)(i + 1)) # Create Plot Widget
        plot.plotItem.setMouseEnabled(x=False, y=False) # Disable panning for widget
        plot.plotItem.showGrid(x=True) # Enable vertical gridlines
        plots.append(plot)
        # Plot data and save curve. Append curve to list
        curve = plot.plot(x_vec, y_vec[i], pen=pg.mkPen(colors[i%len(colors)], width=0.5)) # Set thickness and color of lines
        curves.append(curve)
        # Add plot to main widget
        layout.addWidget(plot, i, 0)

    # Display figure as a new window
    fig.show()

    ###################################
    # Real-Time Plotting Loop
    ###################################

    firstUpdate = True
    while(True):
        chunk = inlet.pull_chunk()

        # (something is weird with dummy chunks: chunks arrive in different sizes and the data comes in too fast)
        if chunk and np.shape(chunk)[1] > 0: # Check for available chunk 
            print(np.shape(chunk))
            chunkdata = np.transpose(chunk[0]) # Get chunk data and transpose to be CHANNELS x CHUNKLENGTH
            chunkperiod = len(chunkdata[0])*(1/fs)
            xticks = [x - chunkperiod for x in xticks] # Update location of x-labels

            # Update x-axis locations and labels
            if(xticks[0] < 0): # Check if a label has crossed to the negative side of the y-axis

                # Delete label on left of x-axis and add a new one on the right side
                xticks.pop(0)
                xticks.append(xticks[-1] + tickfactor)

                # Adjust time labels accordingly
                if (firstUpdate == False): # Check to see if it's the first update, if so skip so that time starts at zero
                    xlabels.append(xlabels[-1] + tickfactor)
                    xlabels.pop(0)
                else:
                    firstUpdate = False
            
            # Update plotted data
            for i in range(0,channels):
                y_vec[i] = np.append(y_vec[i], chunkdata[i], axis=0)[len(chunkdata[i]):] # Append chunk to the end of y_vec and drop the oldest samples
                curves[i].setData(x_vec, y_vec[i]) # Update data

                # Update x-axis labels
                axis = plots[i].getAxis(name='bottom')
                axis.setTicks([[(xticks[i],str(xlabels[i])) for i in range(len(xticks))]])
               
        # Update QT Widget to reflect the changes we made
        pg.QtGui.QApplication.processEvents()

        # Check to see if widget if has been closed, if so exit loop
        if not fig.isVisible():
            break
    
    # Close the stream inlet
    inlet.close_stream()
    
    return True
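
A minimal invocation sketch for plotTimeDomain; the stream type 'EEG' and the 12-sample chunk width are assumptions for illustration (the Muse chunk size used elsewhere in these examples), not values fixed by the function:

from pylsl import resolve_byprop

streams = resolve_byprop('type', 'EEG', timeout=5)
if streams:
    plotTimeDomain(streams[0], chunkwidth=12, timewin=50, tickfactor=5,
                   title='Raw EEG (scrolling)')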
Example #8
# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type', 'EEG')
print("EEG stream found")

conn.send('startstimuli'
          )  # tell linux machine to start displaying the fixation points

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])

time.sleep(2.0)  #wait while fixation points are shown

conn.send('openstream')  # tell linux machine to start flickering stimuli
inlet.open_stream()  # start gathering data from Data Acquisition Software

sample, timestamp = inlet.pull_chunk(
    max_samples=1510,
    timeout=3.1)  # timeout in seconds, retrieve data for 3s (1500 arrays)
print(len(sample), len(sample[0]))
inlet.close_stream()

# send array with all the data one element at a time (voltages of sensors on EEG kit)

for i in range(
        len(sample)
):  # the array is composed of 1500 arrays, each one an array of 13 elements

    sample_i = np.asarray(sample[i])
    sample_i = sample_i.tostring(
Example #9
class InferenceInterface:
    def __init__(self,
                 lsl_data_name=config.INFERENCE_LSL_NAME,
                 lsl_data_type=config.INFERENCE_LSL_TYPE
                 ):  # default board_id 2 for Cyton
        self.lsl_data_type = lsl_data_type
        self.lsl_data_name = lsl_data_name

        # TODO need to change the channel count when adding eeg
        info = StreamInfo(lsl_data_name,
                          lsl_data_type,
                          channel_count=config.EYE_TOTAL_POINTS_PER_INFERENCE,
                          channel_format='float32',
                          source_id='myuid2424')
        info.desc().append_child_value("apocalyvec", "RealityNavigation")

        # chns = info.desc().append_child("eeg_channels")
        # channel_names = ["C3", "C4", "Cz", "FPz", "POz", "CPz", "O1", "O2", '1','2','3','4','5','6','7','8']
        # for label in channel_names:
        #     ch = chns.append_child("channel")
        #     ch.append_child_value("label", label)
        #     ch.append_child_value("unit", "microvolts")
        #     ch.append_child_value("type", "EEG")

        chns = info.desc().append_child("eye")
        channel_names = [
            'left_pupil_diameter_sample', 'right_pupil_diameter_sample'
        ]
        for label in channel_names:
            ch = chns.append_child("channel")
            ch.append_child_value("label", label)
            ch.append_child_value("unit", "mm")
            ch.append_child_value("type", "eye")

        self.outlet = StreamOutlet(info, max_buffered=360)
        self.start_time = local_clock()

        self.inlet = None
        self.connect_inference_result_stream()

    def connect_inference_result_stream(self):
        streams = resolve_byprop('type',
                                 config.INFERENCE_LSL_RESULTS_TYPE,
                                 timeout=1)

        if len(streams) == 0:
            print('No inference stream open.')
        else:  # TODO handle external inference stream lost
            self.inlet = StreamInlet(streams[0])
            self.inlet.open_stream()

    def disconnect_inference_result_stream(self):
        self.inlet.close_stream()

    def send_samples_receive_inference(self, samples_dict):
        """
        receive frames
        :param frames:
        """
        # TODO add EEG
        sample = np.reshape(samples_dict['eye'],
                            newshape=(-1, ))  # flatten out
        sample = sample.tolist()  # have to convert to list for LSL

        # chunk[0][0] = 42.0
        # chunk[0][1] = 24.0

        self.outlet.push_sample(sample)

        if self.inlet:
            inference_results_moving_averaged, timestamps = self.inlet.pull_chunk(
            )
            return inference_results_moving_averaged
        else:
            return sim_inference()
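
A hedged usage sketch for InferenceInterface. The zero-filled eye sample is only a placeholder; its flattened length has to match config.EYE_TOTAL_POINTS_PER_INFERENCE, the same config value the constructor uses as the outlet's channel count:

import numpy as np

interface = InferenceInterface()
samples_dict = {'eye': np.zeros(config.EYE_TOTAL_POINTS_PER_INFERENCE)}
results = interface.send_samples_receive_inference(samples_dict)
print(results)  # moving-averaged inference results, or the simulated fallback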
Example #10
            file_name = f"chunck-{self.get_timestamp()}-{self.counter}.csv"
            np.savetxt(file_name, info_np, delimiter=",", fmt='%f')
            print(f"saved-chunk-{len(self.chunk)}")
            self.chunk = []
            print(len(self.chunk))
            self.counter += 1


print("looking for an EEG stream...")
brain_stream = resolve_stream("name", "AURA_power")
video_stream = resolve_stream("name", "AURA_corsi")
print("found streams")

brain_inlet = StreamInlet(brain_stream[0])
video_inlet = StreamInlet(video_stream[0])
brain_inlet.open_stream()
video_inlet.open_stream()

saver = Saver()
video_info = None
print("While entered")

try:
    timestamp = None
    while True:
        brain_info, timestamp = brain_inlet.pull_sample()
        if video_inlet.samples_available():
            video_info, _ = video_inlet.pull_sample()
            video_info = video_info[0]
        saver.check_reading(video_info)
        saver.save_chunk(video_info)
Example #11
class PubSubInterface:
    def __init__(self, lsl_data_type,
                 num_channels):  # default board_id 2 for Cyton
        self.lsl_data_type = lsl_data_type
        self.lsl_num_channels = num_channels
        self._time_dilation = 1

        self._sfreq = int(1)
        self.subscriber = pubsub_v1.SubscriberClient()
        # The `subscription_path` method creates a fully qualified identifier
        # in the form `projects/{project_id}/subscriptions/{subscription_id}`
        self.subscription_path = self.subscriber.subscription_path(
            'vae-cloud-model', 'test_topic_out-sub')
        self.streaming_pull_future = None

        info = StreamInfo(self.lsl_data_type, 'Pubsub', num_channels, 0.0,
                          'string', 'gcppubsub')

        # next make an outlet
        self.outlet = StreamOutlet(info)
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])

    def callback(self, message):
        self.outlet.push_sample([message.data.decode()])
        print(f"Received {message}.")
        message.ack()

    def start_sensor(self):
        # connect to the sensor
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    self.lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        self.streaming_pull_future = self.subscriber.subscribe(
            self.subscription_path, callback=self.callback)
        self.inlet.open_stream()
        print(
            'LSLInletInterface: resolved, created and opened inlet for lsl stream with type '
            + self.lsl_data_type)

        # read the channel names if there are any
        # tell the sensor to start sending frames

    def process_frames(self):
        # return one or more frames of the sensor
        try:
            frames, timestamps = self.inlet.pull_chunk()
        except LostError:
            frames, timestamps = [], []
            pass  # TODO handle stream lost
        return np.transpose(frames), timestamps

    def stop_sensor(self):
        if self.inlet:
            self.inlet.close_stream()
        print('LSLInletInterface: inlet stream closed.')

    def info(self):
        return self.inlet.info()

    def get_num_chan(self):
        return self.lsl_num_channels

    def get_nominal_srate(self):
        return self.streams[0].nominal_srate()