def assemble_pipeline(
    file_path=None,
    fwd_path=None,
    subject=None,
    subjects_dir=None,
    inverse_method="mne",
):
    """Create and wire the processing pipeline.

    Chain: file source -> preprocessing -> 8-12 Hz band-pass ->
    inverse solver -> envelope extractor -> brain viewer.

    Parameters
    ----------
    file_path : str or None
        Path to the raw data file fed into ``FileSource``.
    fwd_path : str or None
        Path to the forward-model file used by the inverse solver.
    subject, subjects_dir :
        Unused here; kept for interface compatibility with callers.
    inverse_method : {"mne", "beamformer", "mce"}
        Which inverse solver node to insert.

    Returns
    -------
    Pipeline
        The fully assembled pipeline.

    Raises
    ------
    ValueError
        If ``inverse_method`` is not one of the supported values.
        (Previously an unknown method fell through every branch and
        crashed later with a NameError on ``envelope_extractor``.)
    """
    pipeline = Pipeline()

    source = sources.FileSource(file_path=file_path)
    source.loop_the_file = True
    source.MAX_SAMPLES_IN_CHUNK = 1000
    pipeline.add_child(source)

    # ----------------------------- processors ----------------------------- #
    preprocessing = processors.Preprocessing(collect_for_x_seconds=10)
    source.add_child(preprocessing)

    linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
    preprocessing.add_child(linear_filter)

    if inverse_method == "mne":
        inverse_model = processors.MNE(
            method="MNE", snr=1.0, forward_model_path=fwd_path
        )
        # inverse_model = processors.MneGcs(snr=1.0, seed=1000,
        #                                   forward_model_path=fwd_path)
        envelope_factor = 0.99
    elif inverse_method == "beamformer":
        inverse_model = processors.Beamformer(
            fwd_path=fwd_path,
            is_adaptive=True,
            output_type="activation",
            forgetting_factor_per_second=0.95,
        )
        envelope_factor = 0.99
    elif inverse_method == "mce":
        inverse_model = processors.MCE(forward_model_path=fwd_path, snr=1.0)
        # MCE used a slower-decaying envelope in the original wiring.
        envelope_factor = 0.995
    else:
        raise ValueError(
            "Unknown inverse_method: %r; expected 'mne', 'beamformer' or 'mce'"
            % (inverse_method,)
        )

    linear_filter.add_child(inverse_model)
    envelope_extractor = processors.EnvelopeExtractor(envelope_factor)
    inverse_model.add_child(envelope_extractor)
    # ---------------------------------------------------------------------- #

    # ------------------------------ outputs ------------------------------ #
    global_mode = outputs.BrainViewer.LIMITS_MODES.GLOBAL
    brain_viewer = outputs.BrainViewer(limits_mode=global_mode, buffer_length=6)
    envelope_extractor.add_child(brain_viewer)

    return pipeline
# NOTE: this is an IPython/Jupyter cell — `%gui qt` is an IPython line magic
# that starts Qt event-loop integration; it is not valid plain-Python syntax.
%gui qt

from PyQt5 import QtCore

from cognigraph.pipeline import Pipeline
from cognigraph.nodes import sources, processors, outputs
from cognigraph.gui.window import GUIWindow

# Minimal demo pipeline: only a file source is attached.
pipeline = Pipeline()

# NOTE(review): this points at a .vmrk (BrainVision marker) file — confirm
# FileSource accepts it rather than the companion .vhdr/.eeg file.
file_path = r"C:\Users\evgenii\Downloads\brainvision\Bulavenkova_A_2017-10-24_15-33-18_Rest.vmrk"
pipeline.source = sources.FileSource(file_path=file_path)

# Remaining processing/output nodes are left disabled for this demo:
# linear_filter = processors.LinearFilter(lower_cutoff=0.1, upper_cutoff=40)
# pipeline.add_processor(linear_filter)
# pipeline.add_processor(processors.MNE(method='MNE'))
# pipeline.add_processor(processors.EnvelopeExtractor())
# pipeline.add_output(outputs.BrainViewer())
# pipeline.add_output(outputs.LSLStreamOutput())
# pipeline.initialize_all_nodes()

# Build and show the GUI, kept above other windows.
window = GUIWindow(pipeline=pipeline)
window.init_ui()
window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
window.show()
# --- locate test data ------------------------------------------------------ #
cur_dir = '/home/dmalt/Code/python/cogni_submodules'
# Build the path with op.join for consistency (the original mixed string
# concatenation here with op.join three lines below).
test_data_path = op.join(cur_dir, 'tests', 'data')
print(test_data_path)

sim_data_fname = 'raw_sim_nobads.fif'
# sim_data_fname = 'Koleno.fif'

# Alternative forward models kept for quick switching:
# fwd_fname = 'dmalt_custom_lr-fwd.fif'
fwd_fname = 'dmalt_custom_mr-fwd.fif'
# fwd_fname = 'sample_1005-eeg-oct-6-fwd.fif'
surf_dir = '/home/dmalt/mne_data/MNE-sample-data/subjects/sample/surf'

fwd_path = op.join(test_data_path, fwd_fname)
sim_data_path = op.join(test_data_path, sim_data_fname)

# --- source ---------------------------------------------------------------- #
source = sources.FileSource(file_path=sim_data_path)
source.loop_the_file = True
source.MAX_SAMPLES_IN_CHUNK = 10000
pipeline.source = source

# --- processors ------------------------------------------------------------ #
preprocessing = processors.Preprocessing(collect_for_x_seconds=30)
pipeline.add_processor(preprocessing)

linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)

inverse_model = processors.MCE(forward_model_path=fwd_path, snr=1.0)
# inverse_model = processors.InverseModel(method='MNE',
#                                         forward_model_path=fwd_path, snr=1.0)
pipeline.add_processor(inverse_model)
# Resolve the forward-model path: from the --forward CLI argument if given,
# otherwise interactively via a file-open dialog.
if not args.forward:
    # Pre-initialize so the mandatory check below cannot hit a NameError
    # if the dialog raises (originally fwd_path stayed undefined here).
    fwd_path = None
    try:
        fwd_tuple = QtGui.QFileDialog.getOpenFileName(
            caption="Select forward model",
            filter="MNE-python forward (*-fwd.fif)")
        fwd_path = fwd_tuple[0]  # getOpenFileName returns (path, filter)
    except Exception:
        # Was a bare `except:` printing "DATA FILE IS MANDATORY!" — both too
        # broad and the wrong message for a forward-model dialog.  The hard
        # failure below still fires if no path was obtained.
        print("FORWARD SOLUTION IS MANDATORY!")
else:
    fwd_path = args.forward.name

if not fwd_path:
    raise Exception("FORWARD SOLUTION IS MANDATORY!")

source = sources.FileSource(file_path=file_path)
# source = sources.FileSource()
source.loop_the_file = True
source.MAX_SAMPLES_IN_CHUNK = 10000
pipeline.source = source

# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
pipeline.add_processor(preprocessing)

linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)

inverse_model = processors.InverseModel(method='MNE', snr=1.0,
                                        forward_model_path=fwd_path)
def assemble_pipeline(file_path=None, fwd_path=None, subject=None,
                      subjects_dir=None, inverse_method='mne'):
    """Create the processing pipeline (add_processor/add_output API).

    Chain: file source -> preprocessing -> 8-12 Hz band-pass ->
    inverse solver -> envelope extractor -> brain viewer.

    Parameters
    ----------
    file_path : str or None
        Raw data file for ``FileSource``.
    fwd_path : str or None
        Forward-model file for the inverse solver.
    subject, subjects_dir :
        Only referenced by the disabled experimental nodes below.
    inverse_method : {'mne', 'beamformer', 'mce'}
        Inverse solver to insert.

    Returns
    -------
    Pipeline

    Raises
    ------
    ValueError
        For an unsupported ``inverse_method`` (previously this fell through
        every branch and crashed later with a NameError on
        ``envelope_extractor``).
    """
    pipeline = Pipeline()

    source = sources.FileSource(file_path=file_path)
    source.loop_the_file = True
    source.MAX_SAMPLES_IN_CHUNK = 10000
    pipeline.source = source

    # ----------------------------- processors ----------------------------- #
    preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
    pipeline.add_processor(preprocessing)

    linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
    pipeline.add_processor(linear_filter)

    if inverse_method == 'mne':
        inverse_model = processors.InverseModel(method='MNE', snr=1.0,
                                                forward_model_path=fwd_path)
        # inverse_model = processors.MneGcs(snr=1.0, seed=1000,
        #                                   forward_model_path=fwd_path)
        envelope_factor = 0.99
    elif inverse_method == 'beamformer':
        inverse_model = processors.Beamformer(
            forward_model_path=fwd_path, is_adaptive=True,
            output_type='activation', forgetting_factor_per_second=0.95)
        envelope_factor = 0.99
    elif inverse_method == 'mce':
        inverse_model = processors.MCE(forward_model_path=fwd_path, snr=1.0)
        # MCE used a slower-decaying envelope in the original wiring.
        envelope_factor = 0.995
    else:
        raise ValueError("Unknown inverse_method: %r; expected 'mne', "
                         "'beamformer' or 'mce'" % (inverse_method,))

    pipeline.add_processor(inverse_model)
    envelope_extractor = processors.EnvelopeExtractor(envelope_factor)
    pipeline.add_processor(envelope_extractor)
    # ---------------------------------------------------------------------- #

    # ------------------------------ outputs ------------------------------ #
    global_mode = outputs.BrainViewer.LIMITS_MODES.GLOBAL
    brain_viewer = outputs.BrainViewer(limits_mode=global_mode,
                                       buffer_length=6, surfaces_dir=None)
    pipeline.add_output(brain_viewer, parent=envelope_extractor)

    # Experimental nodes (ROI averaging, amplitude-envelope correlations,
    # coherence, extra viewers) kept disabled for reference:
    # roi_average = processors.AtlasViewer(SUBJECT, subjects_dir)
    # roi_average.parent = inverse_model
    # pipeline.add_processor(roi_average)
    # aec = processors.AmplitudeEnvelopeCorrelations(
    #     method=None, seed=1000
    #     # method='temporal_orthogonalization',
    #     # method='geometric_correction', seed=0
    # )
    # pipeline.add_processor(aec)
    # aec.parent = inverse_model
    # coh = processors.Coherence(method='coh', seed=0)
    # aec_env = processors.EnvelopeExtractor(0.995)
    # pipeline.add_processor(aec_env)
    # seed_viewer = outputs.BrainViewer(
    #     limits_mode=global_mode, buffer_length=6,
    #     surfaces_dir=op.join(subjects_dir, SUBJECT))
    # pipeline.add_output(seed_viewer, parent=aec_env)
    # pipeline.add_output(outputs.LSLStreamOutput())
    # signal_viewer = outputs.SignalViewer()
    # signal_viewer_src = outputs.SignalViewer()
    # pipeline.add_output(signal_viewer, parent=linear_filter)
    # pipeline.add_output(signal_viewer_src, parent=roi_average)
    # con_viewer = outputs.ConnectivityViewer(
    #     surfaces_dir=op.join(subjects_dir, SUBJECT))
    # pipeline.add_output(con_viewer, parent=aec)
    # ---------------------------------------------------------------------- #
    return pipeline