def cut_traces(xdffile: FileName, annotation: Annotations) -> List[TraceData]:
    """Cut the trace data from an xdf file given Annotations.

    args
    ----
    xdffile: FileName
        the xdf file for cutting the data. must correspond in name to the
        one specified in the annotation
    annotation: Annotations
        the annotations specifying e.g. onsets as well as pre and post
        durations

    returns
    -------
    traces: List[TraceData]
        one trace per annotated event, spanning pre samples before to
        post samples after each event onset, over all channels
    """
    streams = XDFFile(xdffile)
    # the annotation stores which stream to read the traces from
    stream_name = decode(annotation["attrs"]["channel_of_interest"])
    print("Selecting traces for stream", stream_name)
    stream = streams[stream_name]
    attrs = annotation["attrs"]
    pre = decode(attrs["samples_pre_event"])
    post = decode(attrs["samples_post_event"])
    onsets = (decode(tattr["event_sample"]) for tattr in annotation["traces"])
    # slice every channel around each event onset
    return [stream.time_series[onset - pre : onset + post, :] for onset in onsets]
def cut_traces(xdffile: FileName, annotation: Annotations) -> List[TraceData]:
    """Cut the trace data from an xdf file given Annotations.

    args
    ----
    xdffile: FileName
        the xdf file for cutting the data. must correspond in name to the
        one specified in the annotation
    annotation: Annotations
        the annotations specifying e.g. onsets as well as pre and post
        durations

    returns
    -------
    traces: List[TraceData]
        one baseline-corrected single-channel trace per annotated event
    """
    streams = XDFFile(xdffile)
    channel = decode(annotation["attrs"]["channel_of_interest"])
    print("Selecting traces for channel", channel)
    datastream = pick_stream_with_channel(channel, streams)
    cix = datastream.channel_labels.index(channel)
    pre = decode(annotation["attrs"]["samples_pre_event"])
    post = decode(annotation["attrs"]["samples_post_event"])
    traces = []
    for attrs in annotation["traces"]:
        onset = decode(attrs["event_sample"])
        trace = datastream.time_series[onset - pre : onset + post, cix]
        # baseline-correct with the mean of the pre-onset samples.
        # NOTE: time_series is presumably a numpy array, so the slice above is
        # a view; subtract out-of-place (not `trace -= bl`) so the correction
        # does not write back into datastream.time_series and corrupt the
        # loaded data for overlapping or repeated cuts.
        bl = trace[0:pre].mean()
        trace = trace - bl
        traces.append(trace)
    return traces
def concat_multifile(xdffiles: List[FileName]):
    """Load several xdf files for concatenation.

    args
    ----
    xdffiles: List[FileName]
        the xdf files to load; the first file defines origin and filedate

    returns
    -------
    a tuple of (list of loaded XDFFile objects, name of the first file,
    modification time of the first file as a ctime string)
    """
    first = Path(xdffiles[0])
    origin = first.name
    filedate = time.ctime(first.stat().st_mtime)
    files = [XDFFile(fname) for fname in xdffiles]
    return files, origin, filedate
def test_xdffile_class(xdf_file):
    """Smoke-test that XDFFile parses the mock recording correctly."""
    xdfs = XDFFile(xdf_file)
    assert len(xdfs) == 2
    key = "Liesl-Mock-EEG"
    stream = xdfs[key]
    # scalar stream attributes
    expected = {
        "channel_count": 8,
        "type": "EEG",
        "name": key,
        "channel_format": "float32",
        "nominal_srate": 1000.0,
    }
    for attr, value in expected.items():
        assert getattr(stream, attr) == value
    # first-channel metadata
    assert stream.channel_labels[0] == "C001"
    assert stream.channel_types[0] == "MockEEG"
    assert stream.channel_units[0] == "au"
    assert type(stream.created_at) == float
    # data layout: samples x channels, 1-D timestamps
    assert stream.time_series.shape[1] == 8
    assert len(stream.time_stamps.shape) == 1
def test_xdffile_class(xdf_file):
    """Smoke-test that XDFFile parses the mock recording correctly."""
    xdfs = XDFFile(xdf_file)
    # this is due to a bug on pytest fixtures, which creates two streams each
    assert len(xdfs) == 4
    key = "Liesl-Mock-EEG"
    stream = xdfs[key]
    # scalar stream attributes
    expected = {
        "channel_count": 8,
        "type": "EEG",
        "name": key,
        "channel_format": "float32",
        "nominal_srate": 1000.0,
    }
    for attr, value in expected.items():
        assert getattr(stream, attr) == value
    # first-channel metadata
    assert stream.channel_labels[0] == "C001"
    assert stream.channel_types[0] == "MockEEG"
    assert stream.channel_units[0] == "au"
    assert type(stream.created_at) == float
    # data layout: samples x channels, 1-D timestamps
    assert stream.time_series.shape[1] == 8
    assert len(stream.time_stamps.shape) == 1
    # both duplicated streams come from the same host, and the repr is useful
    assert stream.hostname == xdfs["Liesl-Mock-EEG2"].hostname
    assert "Liesl-Mock-EEG" in repr(stream)
def prepare_annotations(
    xdffile: FileName,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    xmlfile: FileName = None,
    event_stream: str = "localite_marker",
    event_name: Union[str, int] = "coil_0_didt",
) -> Annotations:
    """Distill erp-readout Annotations from an xdf recording.

    args
    ----
    xdffile: FileName
        the :code:`.xdf`-file with the recorded streams, e.g. data and markers
    channel: str
        which channel to pick (used here as the name of the stream of
        interest; see the rename below)
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    xmlfile: FileName
        unused in this implementation
    event_stream: str
        name of the stream to read event timestamps from
    event_name: Union[str, int]
        the marker payload identifying a stimulation event

    returns
    -------
    annotation: Annotations
        the annotations for this origin file
    """
    stream_of_interest = channel  # rename to have same function signature
    streams = XDFFile(xdffile)
    # fail early with a clear message if the data stream is missing
    if stream_of_interest in streams:
        datastream = streams[stream_of_interest]
    else:
        raise KeyError(
            f"Stream {stream_of_interest} was not found in the data")
    e_stream = streams[event_stream]
    time_stamps = [ts for ts in yield_timestamps(e_stream, event_name)]
    event_count = len(time_stamps)
    # coordinates and stimulation intensities come from the localite stream;
    # fall back to NaN placeholders when no localite stream was recorded.
    # NOTE(review): the condition also accepts "localite_flow" but then reads
    # "localite_marker" — confirm both are always present together.
    if "localite_flow" in streams or "localite_marker" in streams:
        loc_stream = streams["localite_marker"]
        coords = list(yield_loc_coords(loc_stream, time_stamps))
        stimulation_intensity_didt = list(
            yield_loc_didt(loc_stream, time_stamps))
        stimulation_intensity_mso = list(yield_loc_mso(loc_stream, time_stamps))
    else:
        coords = list_nan_coords(event_count)
        stimulation_intensity_didt = list_nan(event_count)
        stimulation_intensity_mso = list_nan(event_count)
    print(f"Found {event_count} events")
    # per-event comments from the reiz_marker stream, if it was recorded
    if "reiz_marker_sa" in streams:
        comments = [
            c
            for c in yield_comments(
                streams["reiz_marker_sa"],
                time_stamps=time_stamps,
                identifier="stimulus_idx",
                relative="earlier",
            )
        ]
    else:
        comments = ["" for c in time_stamps]
    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="erp", origin=Path(xdffile).name)
    anno.set("filedate", time.ctime(Path(xdffile).stat().st_mtime))
    anno.set("subject", "")  # TODO parse from somewhere
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", datastream.name)
    anno.set("channel_labels", datastream.channel_labels)
    # trace fields
    # map event timestamps onto sample indices of the data stream, and
    # express event times relative to the start of the recording
    event_samples = find_closest_samples(datastream, time_stamps)
    event_times = [
        float(t)
        for t in datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    ]
    # inter-pulse intervals; the first event has no predecessor, hence inf
    time_since_last_pulse = [inf] + [
        a - b for a, b in zip(event_times[1:], event_times[0:-1])
    ]
    for idx in range(event_count):
        tattr = {
            "id": idx,
            "event_name": e_stream.name + "-" + str(event_name),
            "event_sample": event_samples[idx],
            "event_time": event_times[idx],
            "xyz_coords": coords[idx],
            "time_since_last_pulse_in_s": time_since_last_pulse[idx],
            "stimulation_intensity_mso": stimulation_intensity_mso[idx],
            "stimulation_intensity_didt": stimulation_intensity_didt[idx],
            "comment": comments[idx],
        }
        anno.append_trace_attr(tattr)
    return anno.anno
def prepare_annotations(
    xdffile: FileName,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    xmlfile: FileName = None,
    event_name="coil_0_didt",
    event_stream="localite_marker",
    comment_name=None,
) -> Annotations:
    """Distill cmep-readout Annotations from an xdf recording.

    args
    ----
    xdffile: FileName
        the :code:`.xdf`-file with the recorded streams, e.g. data and markers
    channel: str
        which channel to pick
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    xmlfile: FileName
        an optional xml file with information about the target coordinates
        (unused in this implementation)
    event_name:
        the marker payload identifying a stimulation event
    event_stream:
        name of the stream to read event timestamps from
    comment_name:
        if not None, comments are read from the reiz_marker_sa stream

    returns
    -------
    annotation: Annotations
        the annotations for this origin file
    """
    # ------------------
    streams = XDFFile(xdffile)
    datastream = pick_stream_with_channel(channel, streams)
    # bind the looked-up stream to its own name instead of shadowing the
    # event_stream parameter
    marker_stream = streams[event_stream]
    print(f"Reading events from {marker_stream.name} using {event_name}")
    time_stamps = [ts for ts in yield_timestamps(marker_stream, event_name)]
    event_count = len(time_stamps)
    print(f"Found {event_count} events")
    # coordinates and stimulation intensities come from the localite stream;
    # fall back to NaN placeholders when no localite stream was recorded
    if "localite_flow" in streams or "localite_marker" in streams:
        loc_stream = streams["localite_marker"]
        print(f"Reading information from {loc_stream.name}")
        coords = list(yield_loc_coords(loc_stream, time_stamps))
        stimulation_intensity_didt = list(yield_loc_didt(loc_stream, time_stamps))
        stimulation_intensity_mso = list(yield_loc_mso(loc_stream, time_stamps))
    else:
        coords = list_nan_coords(event_count)
        stimulation_intensity_didt = list_nan(event_count)
        stimulation_intensity_mso = list_nan(event_count)
    # per-event comments from the reiz_marker stream, if requested and present
    if "reiz_marker_sa" in streams and comment_name is not None:
        print("Reading comments from reiz_marker_sa")
        comments = [
            c
            for c in yield_comments(
                streams["reiz_marker_sa"],
                time_stamps=time_stamps,
                identifier="stimulus_idx",
                relative="earlier",
            )
        ]
    else:
        comments = ["" for c in time_stamps]
    # if hardware trigger markers were recorded, snap the event timestamps to
    # the closest 'S 2' marker
    if "BrainVision RDA Markers" in streams:
        rda_stamps = list(yield_timestamps(streams["BrainVision RDA Markers"], "S 2"))
        print(f"Found {len(rda_stamps)} 'S 2' for {event_count} events")
        if len(rda_stamps) >= len(time_stamps):
            time_stamps = [find_closest(ts, rda_stamps) for ts in time_stamps]
            print("Corrected event timestamps for RDA 'S 2'")
        else:
            print("Count mismatch between RDA and Localite events")
    # refine event timestamps on the TMS artifact in the raw data, if present
    if "BrainVision RDA" in streams:
        bvr = streams["BrainVision RDA"]
        time_stamps = correct_tkeo(bvr, time_stamps)
        print("Corrected event timestamps for TMS artifact")
    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="cmep", origin=Path(xdffile).name)
    anno.set("filedate", time.ctime(Path(xdffile).stat().st_mtime))
    anno.set("subject", "")  # TODO parse from correctly organized file
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", channel)
    anno.set("channel_labels", [channel])
    # trace fields
    # map event timestamps onto sample indices of the data stream, and
    # express event times relative to the start of the recording
    event_samples = find_closest_samples(datastream, time_stamps)
    event_times = [
        float(t)
        for t in datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    ]
    # inter-pulse intervals; the first event has no predecessor, hence inf
    time_since_last_pulse = [inf] + [
        a - b for a, b in zip(event_times[1:], event_times[0:-1])
    ]
    for idx in range(len(event_samples)):
        tattr = {
            "id": idx,
            "event_name": marker_stream.name + "-" + str(event_name),
            "event_sample": event_samples[idx],
            "event_time": event_times[idx],
            "xyz_coords": coords[idx],
            "time_since_last_pulse_in_s": time_since_last_pulse[idx],
            "stimulation_intensity_mso": stimulation_intensity_mso[idx],
            "stimulation_intensity_didt": stimulation_intensity_didt[idx],
            # bugfix: comments were parsed above but never attached to the
            # trace attrs, unlike the sibling prepare_annotations variants
            "comment": comments[idx],
        }
        anno.append_trace_attr(tattr)
    return anno.anno
returns ------- traces: List[TraceData] """ channel = decode(annotation["attrs"]["channel_of_interest"]) print("Selecting traces for channel", channel) datastream = pick_stream_with_channel(channel, streams) cix = datastream.channel_labels.index(channel) pre = decode(annotation["attrs"]["samples_pre_event"]) post = decode(annotation["attrs"]["samples_post_event"]) traces = [] for attrs in annotation["traces"]: onset = decode(attrs["event_sample"]) trace = datastream.time_series[onset - pre:onset + post, cix] traces.append(trace) return traces if __name__ == "__main__": # folder = "/media/rtgugg/sd/Desktop/test-offspect/betti/nocoords" # fname = "TMS_NMES_AmWo_pre2.xdf" # fname = Path(folder) / fname # xdf = XDFFile(fname) folder = "/media/rtgugg/sd/Desktop/test-offspect/betti/toomanytraces" fname = Path(folder) / "TMS_NMES_MaBa_pre2.xdf" files = [XDFFile(fname)]
def prepare_annotations(
    xdffile: FileName,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    event_name="Spongebob-Trigger",
    event_mark=1,
    event_stream="Spongebob-Data",
    comment_name=None,
) -> Annotations:
    """Distill cmep-readout Annotations from a Spongebob xdf recording.

    args
    ----
    xdffile: FileName
        the :code:`.xdf`-file with the recorded streams, e.g. data and markers
    channel: str
        which channel to pick
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    event_name:
        label used to build the per-trace event_name field
    event_mark:
        the marker value identifying a stimulation event in the event stream
    event_stream:
        name of the stream to read event timestamps from
    comment_name:
        if not None, comments are read from the reiz_marker_sa stream

    returns
    -------
    annotation: Annotations
        the annotations for this origin files
    """
    # ------------------
    streams = XDFFile(xdffile)
    datastream = pick_stream_with_channel(channel, streams)
    # NOTE: rebinds the event_stream parameter (a name) to the stream object
    event_stream = streams[event_stream]
    time_stamps = [ts for ts in yield_timestamps(event_stream, event_mark)]
    event_count = len(time_stamps)
    print(f"Found {event_count} events")
    # per-event comments from the reiz_marker stream, if requested and present
    if "reiz_marker_sa" in streams and comment_name is not None:
        comments = [
            c
            for c in yield_comments(
                streams["reiz_marker_sa"],
                time_stamps=time_stamps,
                identifier="stimulus_idx",
                relative="earlier",
            )
        ]
    else:
        comments = ["" for c in time_stamps]
    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="cmep", origin=Path(xdffile).name)
    anno.set("filedate", time.ctime(Path(xdffile).stat().st_mtime))
    anno.set("subject", "")  # TODO parse from correctly organized file
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", channel)
    anno.set("channel_labels", [channel])
    # trace fields
    # map event timestamps onto sample indices of the data stream, and
    # express event times relative to the start of the recording
    event_samples = find_closest_samples(datastream, time_stamps)
    event_times = [
        float(t)
        for t in datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    ]
    # inter-pulse intervals; the first event has no predecessor, hence inf
    time_since_last_pulse = [inf] + [
        a - b for a, b in zip(event_times[1:], event_times[0:-1])
    ]
    for idx, t in enumerate(event_samples):
        tattr = {
            "id": idx,
            "event_name": event_stream.name + "-" + str(event_name),
            "event_sample": event_samples[idx],
            "event_time": event_times[idx],
            "comment": comments[idx],
            "time_since_last_pulse_in_s": time_since_last_pulse[idx],
        }
        anno.append_trace_attr(tattr)
    return anno.anno