Code Example #1
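# Wires a cognigraph Pipeline around a .fif recording: preprocessing, ICA-based
# artifact rejection, an 8-12 Hz band-pass filter, a dSPM inverse model and
# envelope extraction, with a SignalViewer and a 3D BrainViewer as outputs.
# (Assumes `pipeline` and `data_path` are defined earlier in the script.)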
source = sources.FifSource(file_path=data_path)
pipeline.source = source


# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=0)
pipeline.add_processor(preprocessing)

ica_rejection = processors.ICARejection(collect_for_x_seconds=10)
pipeline.add_processor(ica_rejection)

linear_filter = processors.LinearFilter(lower_cutoff=8, upper_cutoff=12)
pipeline.add_processor(linear_filter)

inverse_model = processors.InverseModel(method='dSPM', snr=1.0)
pipeline.add_processor(inverse_model)

envelope_extractor = processors.EnvelopeExtractor(0.99)
pipeline.add_processor(envelope_extractor)


# Outputs
signal_viewer = outputs.SignalViewer()
pipeline.add_output(signal_viewer, parent=linear_filter)

global_mode = outputs.BrainViewer.LIMITS_MODES.GLOBAL
three_dee_brain = outputs.BrainViewer(limits_mode=global_mode,
                                      buffer_length=6)
pipeline.add_output(three_dee_brain)
Code Example #2
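# Builds a similar pipeline around a looping FileSource: an 8-12 Hz filter, an
# MNE inverse model with an explicit forward solution, envelope extraction, and
# a ThreeDeeBrain output rendered with cortical surfaces.
# (Assumes `pipeline`, `file_path`, `fwd_path` and `SURF_DIR` are defined earlier.)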
source = sources.FileSource(file_path=file_path)
# source = sources.FileSource()
source.loop_the_file = True
source.MAX_SAMPLES_IN_CHUNK = 10000
pipeline.source = source

# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
pipeline.add_processor(preprocessing)

linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)

inverse_model = processors.InverseModel(method='MNE',
                                        snr=1.0,
                                        forward_model_path=fwd_path)
pipeline.add_processor(inverse_model)

envelope_extractor = processors.EnvelopeExtractor(0.99)
pipeline.add_processor(envelope_extractor)

# Outputs
global_mode = outputs.ThreeDeeBrain.LIMITS_MODES.GLOBAL
three_dee_brain = outputs.ThreeDeeBrain(limits_mode=global_mode,
                                        buffer_length=6,
                                        surfaces_dir=SURF_DIR)
pipeline.add_output(three_dee_brain)
# pipeline.add_output(outputs.LSLStreamOutput())

signal_viewer = outputs.SignalViewer()
pipeline.add_output(signal_viewer, parent=linear_filter)
Code Example #3
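# Minimal end-to-end script: creates a Qt application and a Pipeline reading a
# BrainVision recording, attaches a default InverseModel and a ThreeDeeBrain,
# initializes all nodes, then pushes a single sample through the chain by hand.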
import sys

from pyqtgraph.Qt import QtGui  # assumed Qt import; any binding that exposes QtGui.QApplication works

from cognigraph.pipeline import Pipeline
from cognigraph.nodes import sources, processors, outputs
from cognigraph import TIME_AXIS
from cognigraph.gui.window import GUIWindow

app = QtGui.QApplication(sys.argv)

pipeline = Pipeline()

file_path = r"/home/evgenii/Downloads/brainvision/Bulavenkova_A_2017-10-24_15-33-18_Rest.vhdr"
source = sources.FileSource(file_path=file_path)
pipeline.source = source

inverse = processors.InverseModel()
pipeline.add_processor(inverse)

three_dee = outputs.ThreeDeeBrain()
pipeline.add_output(three_dee)

pipeline.initialize_all_nodes()

three_dee.widget.show()

source.output = source.data.take(indices=(0,), axis=TIME_AXIS)

inverse.update()
three_dee.update()

# Sample
Code Example #4
def assemble_pipeline(file_path=None,
                      fwd_path=None,
                      subject=None,
                      subjects_dir=None,
                      inverse_method='mne'):
    pipeline = Pipeline()
    source = sources.FileSource(file_path=file_path)
    source.loop_the_file = True
    source.MAX_SAMPLES_IN_CHUNK = 10000
    pipeline.source = source

    # ----------------------------- processors ----------------------------- #
    preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
    pipeline.add_processor(preprocessing)

    linear_filter = processors.LinearFilter(lower_cutoff=8.0,
                                            upper_cutoff=12.0)
    pipeline.add_processor(linear_filter)

    if inverse_method == 'mne':
        inverse_model = processors.InverseModel(method='MNE',
                                                snr=1.0,
                                                forward_model_path=fwd_path)
        # inverse_model = processors.MneGcs(snr=1.0, seed=1000,
        #                                   forward_model_path=fwd_path)
        pipeline.add_processor(inverse_model)
        envelope_extractor = processors.EnvelopeExtractor(0.99)
        pipeline.add_processor(envelope_extractor)
    elif inverse_method == 'beamformer':
        inverse_model = processors.Beamformer(
            forward_model_path=fwd_path,
            is_adaptive=True,
            output_type='activation',
            forgetting_factor_per_second=0.95)
        pipeline.add_processor(inverse_model)
        envelope_extractor = processors.EnvelopeExtractor(0.99)
        pipeline.add_processor(envelope_extractor)
    elif inverse_method == 'mce':
        inverse_model = processors.MCE(forward_model_path=fwd_path, snr=1.0)
        pipeline.add_processor(inverse_model)
        envelope_extractor = processors.EnvelopeExtractor(0.995)
        pipeline.add_processor(envelope_extractor)
    # ---------------------------------------------------------------------- #

    # ------------------------------ outputs ------------------------------ #
    global_mode = outputs.BrainViewer.LIMITS_MODES.GLOBAL

    brain_viewer = outputs.BrainViewer(limits_mode=global_mode,
                                       buffer_length=6,
                                       surfaces_dir=None)
    pipeline.add_output(brain_viewer, parent=envelope_extractor)

    # roi_average = processors.AtlasViewer(SUBJECT, subjects_dir)
    # roi_average.parent = inverse_model
    # pipeline.add_processor(roi_average)

    # aec = processors.AmplitudeEnvelopeCorrelations(
    #     method=None,
    #     seed=1000
    #     # method='temporal_orthogonalization',
    #     # method='geometric_correction',
    #     # seed=0
    # )
    # pipeline.add_processor(aec)
    # aec.parent = inverse_model
    # # coh = processors.Coherence(
    # #     method='coh', seed=0)
    # aec_env = processors.EnvelopeExtractor(0.995)
    # pipeline.add_processor(aec_env)

    # seed_viewer = outputs.BrainViewer(
    #     limits_mode=global_mode, buffer_length=6,
    #     surfaces_dir=op.join(subjects_dir, SUBJECT))

    # pipeline.add_output(seed_viewer, parent=aec_env)

    # pipeline.add_output(outputs.LSLStreamOutput())
    # signal_viewer = outputs.SignalViewer()
    # signal_viewer_src = outputs.SignalViewer()
    # pipeline.add_output(signal_viewer, parent=linear_filter)
    # pipeline.add_output(signal_viewer_src, parent=roi_average)
    # con_viewer = outputs.ConnectivityViewer(
    #     surfaces_dir=op.join(subjects_dir, SUBJECT))
    # pipeline.add_output(con_viewer, parent=aec)
    # --------------------------------------------------------------------- #
    return pipeline
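
# A minimal usage sketch (the file and forward-model paths are hypothetical
# placeholders):
# pipeline = assemble_pipeline(file_path='rest.vhdr',
#                              fwd_path='rest-fwd.fif',
#                              inverse_method='beamformer')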
Code Example #5
File: launch_test.py  Project: kalenkovich/cognigraph
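# Test pipeline around a BrainvisionSource fed in very small chunks
# (MAX_SAMPLES_IN_CHUNK = 30): MNE inverse model at SNR 3.0, a default
# EnvelopeExtractor, a ThreeDeeBrain, an LSLStreamOutput and a SignalViewer
# attached to the linear filter.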
pipeline = Pipeline()

source = sources.BrainvisionSource(file_path=launch_test_filepath)
source.loop_the_file = True
source.MAX_SAMPLES_IN_CHUNK = 30
pipeline.source = source

# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
pipeline.add_processor(preprocessing)

linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)

inverse_model = processors.InverseModel(method='MNE', snr=3.0)
pipeline.add_processor(inverse_model)

envelope_extractor = processors.EnvelopeExtractor()
pipeline.add_processor(envelope_extractor)

# Outputs
global_mode = outputs.ThreeDeeBrain.LIMITS_MODES.GLOBAL
three_dee_brain = outputs.ThreeDeeBrain(limits_mode=global_mode,
                                        buffer_length=6)
pipeline.add_output(three_dee_brain)
pipeline.add_output(outputs.LSLStreamOutput())

signal_viewer = outputs.SignalViewer()
pipeline.add_output(signal_viewer, input_node=linear_filter)