def prepare_annotations(
    xdffile: FileName,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    xmlfile: FileName = None,
    event_stream: str = "localite_marker",
    event_name: Union[str, int] = "coil_0_didt",
) -> Annotations:
    """Distill annotations for an ERP readout from a recorded xdf-file.

    args
    ----
    xdffile: FileName
        the :code:`.xdf`-file with the recorded streams, e.g. data and markers
    channel: str
        which channel to pick
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    xmlfile: FileName
        currently unused; kept for signature compatibility
    event_stream: str
        name of the stream to read event timestamps from
    event_name: Union[str, int]
        which marker within the event stream denotes a stimulation event

    returns
    -------
    annotation: Annotations
        the annotations for this origin files
    """
    stream_of_interest = channel  # rename to have same function signature
    streams = XDFFile(xdffile)
    if stream_of_interest in streams:
        datastream = streams[stream_of_interest]
    else:
        raise KeyError(f"Stream {stream_of_interest} was not found in the data")
    e_stream = streams[event_stream]
    time_stamps = list(yield_timestamps(e_stream, event_name))
    event_count = len(time_stamps)

    # FIX: the original condition tested for "localite_flow" OR
    # "localite_marker" but unconditionally indexed "localite_marker",
    # raising KeyError when only "localite_flow" was recorded. Fall back
    # to NaN coordinates/intensities instead of crashing.
    if "localite_marker" in streams:
        loc_stream = streams["localite_marker"]
        coords = list(yield_loc_coords(loc_stream, time_stamps))
        stimulation_intensity_didt = list(yield_loc_didt(loc_stream, time_stamps))
        stimulation_intensity_mso = list(yield_loc_mso(loc_stream, time_stamps))
    else:
        coords = list_nan_coords(event_count)
        stimulation_intensity_didt = list_nan(event_count)
        stimulation_intensity_mso = list_nan(event_count)
    print(f"Found {event_count} events")

    # per-trace comments stored by the reiz-marker software, if available
    if "reiz_marker_sa" in streams:
        comments = list(
            yield_comments(
                streams["reiz_marker_sa"],
                time_stamps=time_stamps,
                identifier="stimulus_idx",
                relative="earlier",
            )
        )
    else:
        comments = ["" for _ in time_stamps]

    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="erp", origin=Path(xdffile).name)
    anno.set("filedate", time.ctime(Path(xdffile).stat().st_mtime))
    anno.set("subject", "")  # TODO parse from somewhere
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", datastream.name)
    anno.set("channel_labels", datastream.channel_labels)

    # trace fields: event positions relative to the start of the datastream
    event_samples = find_closest_samples(datastream, time_stamps)
    event_times = [
        float(t)
        for t in datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    ]
    # the first pulse has no predecessor, hence inf
    time_since_last_pulse = [inf] + [
        a - b for a, b in zip(event_times[1:], event_times[:-1])
    ]
    for idx in range(event_count):
        tattr = {
            "id": idx,
            "event_name": e_stream.name + "-" + str(event_name),
            "event_sample": event_samples[idx],
            "event_time": event_times[idx],
            "xyz_coords": coords[idx],
            "time_since_last_pulse_in_s": time_since_last_pulse[idx],
            "stimulation_intensity_mso": stimulation_intensity_mso[idx],
            "stimulation_intensity_didt": stimulation_intensity_didt[idx],
            "comment": comments[idx],
        }
        anno.append_trace_attr(tattr)
    return anno.anno
def prepare_annotations(
    streams,
    origin,
    filedate,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    comment_name=None,
) -> Annotations:
    """Distill annotations for a cmep readout from already-loaded streams.

    args
    ----
    streams:
        mapping of stream name to stream, as loaded from an xdf-file
    origin:
        origin (file name) recorded into the annotations
    filedate:
        file date recorded into the annotations
    channel: str
        which channel to pick
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    comment_name:
        currently unused; kept for signature compatibility

    returns
    -------
    annotation: Annotations
        the annotations for this origin files
    """
    # ------------------
    datastream = pick_stream_with_channel(channel, streams)
    # everything up to the later of (a) the last reiz-marker and (b) the last
    # Spongebob trigger (column 11 == 1.0) is considered setup and skipped
    iu1 = streams["reiz_marker_sa"].time_stamps[-1]
    idx = np.where(streams["Spongebob-Data"].time_series[:, 11] == 1.0)[0][-1]
    iu2 = streams["Spongebob-Data"].time_stamps[idx]
    irrelevant_until = max((iu1, iu2))
    time_stamps = [
        event
        for event in streams["BrainVision RDA Markers"].time_stamps
        if event > irrelevant_until
    ]
    event_count = len(time_stamps)
    # no localite stream in this setup -> coordinates / intensities unknown
    coords = list_nan_coords(event_count)
    stimulation_intensity_didt = list_nan(event_count)
    stimulation_intensity_mso = list_nan(event_count)
    # NOTE: removed a `comments` list that was computed but never used
    print(f"Found {event_count} events")
    if event_count == 350:
        grid_layout = "5x7"
        print("This corresponds to a subject with a 5x7 grid")
    elif event_count == 360:
        grid_layout = "6x6"
        print("This corresponds to a subject with a 6x6 grid")
    else:
        grid_layout = "Unknown"
        print("This does not correspond to a known grid layout")

    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="cmep", origin=origin)
    anno.set("filedate", filedate)
    anno.set("subject", "")  # TODO parse from correctly organized file
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", channel)
    anno.set("channel_labels", [channel])
    anno.set("global_comment", f"grid_layout={grid_layout}")

    # trace fields
    event_samples = find_closest_samples(datastream, time_stamps)
    # shift each onset onto the peak of the stimulation artifact, searching a
    # window of +/- 25 samples around the nominal onset of the global mean
    # field power across the first 64 channels
    ephys = streams["BrainVision RDA"]
    gmfp = np.std(ephys.time_series[:, 0:64], 1)
    aptp = []
    tp = []
    for onset in event_samples:
        # FIX: clamp the window start at 0; a negative start silently wraps
        # around in numpy slicing and would corrupt the peak search for
        # events near the very beginning of the recording
        start = max(onset - 25, 0)
        artifact = gmfp[start:onset + 25]
        aptp.append(np.ptp(artifact))
        tp.append(int(np.argmax(artifact) + start))
    event_samples = tp
    event_times = [
        float(t)
        for t in datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    ]
    # the first pulse has no predecessor, hence inf
    time_since_last_pulse = [inf] + [
        a - b for a, b in zip(event_times[1:], event_times[:-1])
    ]
    for idx in range(event_count):
        tattr = {
            "id": idx,
            "comment": f'{{"artifact_amplitude":{aptp[idx]:3.2f}}}',
            "event_name": "BrainVision RDA Markers - 'S 2'",
            "event_sample": event_samples[idx],
            "event_time": event_times[idx],
            "xyz_coords": coords[idx],
            "time_since_last_pulse_in_s": time_since_last_pulse[idx],
            "stimulation_intensity_mso": stimulation_intensity_mso[idx],
            "stimulation_intensity_didt": stimulation_intensity_didt[idx],
        }
        anno.append_trace_attr(tattr)
    return anno.anno
def prepare_annotations(
    xdffile: FileName,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    xmlfile: FileName = None,
    event_name="coil_0_didt",
    event_stream="localite_marker",
    comment_name=None,
) -> Annotations:
    """Distill annotations for a cmep readout from a recorded xdf-file.

    args
    ----
    xdffile: FileName
        the :code:`.xdf`-file with the recorded streams, e.g. data and markers
    channel: str
        which channel to pick
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    xmlfile: FileName
        currently unused; kept for signature compatibility
    event_name:
        which marker within the event stream denotes a stimulation event
    event_stream:
        name of the stream to read event timestamps from
    comment_name:
        if not None and a reiz_marker_sa stream exists, read per-trace
        comments from it

    returns
    -------
    annotation: Annotations
        the annotations for this origin files
    """
    # ------------------
    streams = XDFFile(xdffile)
    datastream = pick_stream_with_channel(channel, streams)
    # do not shadow the `event_stream` parameter with the stream object
    e_stream = streams[event_stream]
    print(f"Reading events from {e_stream.name} using {event_name}")
    time_stamps = list(yield_timestamps(e_stream, event_name))
    event_count = len(time_stamps)
    print(f"Found {event_count} events")

    # FIX: the original condition tested for "localite_flow" OR
    # "localite_marker" but unconditionally indexed "localite_marker",
    # raising KeyError when only "localite_flow" was recorded. Fall back
    # to NaN coordinates/intensities instead of crashing.
    if "localite_marker" in streams:
        loc_stream = streams["localite_marker"]
        print(f"Reading information from {loc_stream.name}")
        coords = list(yield_loc_coords(loc_stream, time_stamps))
        stimulation_intensity_didt = list(yield_loc_didt(loc_stream, time_stamps))
        stimulation_intensity_mso = list(yield_loc_mso(loc_stream, time_stamps))
    else:
        coords = list_nan_coords(event_count)
        stimulation_intensity_didt = list_nan(event_count)
        stimulation_intensity_mso = list_nan(event_count)

    if "reiz_marker_sa" in streams and comment_name is not None:
        print("Reading comments from reiz_marker_sa")
        comments = list(
            yield_comments(
                streams["reiz_marker_sa"],
                time_stamps=time_stamps,
                identifier="stimulus_idx",
                relative="earlier",
            )
        )
    else:
        comments = ["" for _ in time_stamps]

    # snap localite timestamps onto the hardware 'S 2' triggers, if present
    if "BrainVision RDA Markers" in streams:
        rda_stamps = list(yield_timestamps(streams["BrainVision RDA Markers"], "S 2"))
        print(f"Found {len(rda_stamps)} 'S 2' for {event_count} events")
        if len(rda_stamps) >= len(time_stamps):
            time_stamps = [find_closest(ts, rda_stamps) for ts in time_stamps]
            print("Corrected event timestamps for RDA 'S 2'")
        else:
            print("Count mismatch between RDA and Localite events")
    # refine timestamps onto the TMS artifact in the raw ephys signal
    if "BrainVision RDA" in streams:
        bvr = streams["BrainVision RDA"]
        time_stamps = correct_tkeo(bvr, time_stamps)
        print("Corrected event timestamps for TMS artifact")

    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="cmep", origin=Path(xdffile).name)
    anno.set("filedate", time.ctime(Path(xdffile).stat().st_mtime))
    anno.set("subject", "")  # TODO parse from correctly organized file
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", channel)
    anno.set("channel_labels", [channel])

    # trace fields
    event_samples = find_closest_samples(datastream, time_stamps)
    event_times = [
        float(t)
        for t in datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    ]
    # the first pulse has no predecessor, hence inf
    time_since_last_pulse = [inf] + [
        a - b for a, b in zip(event_times[1:], event_times[:-1])
    ]
    for idx in range(event_count):
        tattr = {
            "id": idx,
            "event_name": e_stream.name + "-" + str(event_name),
            "event_sample": event_samples[idx],
            "event_time": event_times[idx],
            "xyz_coords": coords[idx],
            "time_since_last_pulse_in_s": time_since_last_pulse[idx],
            "stimulation_intensity_mso": stimulation_intensity_mso[idx],
            "stimulation_intensity_didt": stimulation_intensity_didt[idx],
            # FIX: `comments` was computed but never attached to the trace;
            # every sibling variant stores it under "comment"
            "comment": comments[idx],
        }
        anno.append_trace_attr(tattr)
    return anno.anno
def prepare_annotations(
    xdffile: FileName,
    channel: str,
    pre_in_ms: float,
    post_in_ms: float,
    event_name="Spongebob-Trigger",
    event_mark=1,
    event_stream="Spongebob-Data",
    comment_name=None,
) -> Annotations:
    """Build cmep annotations from an xdf-file using Spongebob triggers.

    args
    ----
    xdffile: FileName
        the :code:`.xdf`-file with the recorded streams, e.g. data and markers
    channel: str
        which channel to pick
    pre_in_ms: float
        how many ms to cut before the tms
    post_in_ms: float
        how many ms to cut after the tms
    event_name:
        label recorded into each trace's event_name field
    event_mark:
        marker value within the event stream that denotes an event
    event_stream:
        name of the stream to read event timestamps from
    comment_name:
        if not None and a reiz_marker_sa stream exists, read per-trace
        comments from it

    returns
    -------
    annotation: Annotations
        the annotations for this origin files
    """
    # ------------------
    streams = XDFFile(xdffile)
    datastream = pick_stream_with_channel(channel, streams)
    marker_stream = streams[event_stream]
    time_stamps = list(yield_timestamps(marker_stream, event_mark))
    event_count = len(time_stamps)
    print(f"Found {event_count} events")

    if "reiz_marker_sa" in streams and comment_name is not None:
        comments = list(
            yield_comments(
                streams["reiz_marker_sa"],
                time_stamps=time_stamps,
                identifier="stimulus_idx",
                relative="earlier",
            )
        )
    else:
        comments = ["" for _ in time_stamps]

    # global fields
    fs = datastream.nominal_srate
    anno = AnnotationFactory(readin="tms", readout="cmep", origin=Path(xdffile).name)
    anno.set("filedate", time.ctime(Path(xdffile).stat().st_mtime))
    anno.set("subject", "")  # TODO parse from correctly organized file
    anno.set("samplingrate", fs)
    anno.set("samples_pre_event", int(pre_in_ms * fs / 1000))
    anno.set("samples_post_event", int(post_in_ms * fs / 1000))
    anno.set("channel_of_interest", channel)
    anno.set("channel_labels", [channel])

    # trace fields: sample indices and seconds since recording start
    event_samples = find_closest_samples(datastream, time_stamps)
    offsets = datastream.time_stamps[event_samples] - datastream.time_stamps[0]
    event_times = [float(t) for t in offsets]
    # inter-pulse intervals; the first pulse has no predecessor, hence inf
    time_since_last_pulse = [inf]
    for later, earlier in zip(event_times[1:], event_times[:-1]):
        time_since_last_pulse.append(later - earlier)

    for idx in range(len(event_samples)):
        anno.append_trace_attr(
            {
                "id": idx,
                "event_name": marker_stream.name + "-" + str(event_name),
                "event_sample": event_samples[idx],
                "event_time": event_times[idx],
                "comment": comments[idx],
                "time_since_last_pulse_in_s": time_since_last_pulse[idx],
            }
        )
    return anno.anno