Code example #1
0
def get_natus(username, password, fname_iEEG = "HUP172_phaseII", annotationLayerName = "Imported Natus ENT annotations"):
    """Fetch an annotation layer from iEEG.org and split out seizure and UEO/EEC events.

    Parameters
    ----------
    username, password : iEEG.org credentials.
    fname_iEEG : dataset (file) name on iEEG.org.
    annotationLayerName : name of the annotation layer to read.

    Returns
    -------
    (annotations, annotationsSeizure, annotationsUEOEEC) : three DataFrames with
    columns [file, annotationLayer, description, start, stop]. The last two are
    the subsets whose description contains "seizure"/"sz" or "ueo"/"eec"
    (case-insensitive); start/stop are offsets in microseconds.

    Raises
    ------
    Exception if the requested layer name does not exist (the available layer
    names are included in the message).
    """
    print("\nFinding seizures")
    print(f"fname_iEEG: {fname_iEEG}")

    s = Session(username, password)
    ds = s.open_dataset(fname_iEEG)

    annotation_layers = np.array(list(ds.get_annotation_layers()))
    # If no layer matches the user input, list the existing layer names.
    if not any(annotationLayerName == annotation_layers):
        raise Exception(f"\n{annotationLayerName} does not match any layer names.\n\nThe existing annotation layer names are:\n\n{annotation_layers}")
    annotationsLayer = ds.get_annotations(annotationLayerName)

    columns = ["file", "annotationLayer", "description", "start", "stop"]
    # Accumulate rows in plain lists and build each DataFrame once at the end:
    # DataFrame.append was deprecated and removed in pandas 2.0, and per-row
    # appends are O(n^2) anyway.
    all_rows, seizure_rows, ueo_eec_rows = [], [], []
    for annotation in annotationsLayer:
        row = {
            "file": fname_iEEG,
            "annotationLayer": annotationLayerName,
            "description": annotation.description,
            "start": annotation.start_time_offset_usec,
            "stop": annotation.end_time_offset_usec,
        }
        all_rows.append(row)
        description_lower = annotation.description.lower()
        if "seizure" in description_lower or "sz" in description_lower:
            seizure_rows.append(row)
        if "ueo" in description_lower or "eec" in description_lower:
            ueo_eec_rows.append(row)

    annotations = pd.DataFrame(all_rows, columns=columns)
    annotationsSeizure = pd.DataFrame(seizure_rows, columns=columns)
    annotationsUEOEEC = pd.DataFrame(ueo_eec_rows, columns=columns)
    return annotations, annotationsSeizure, annotationsUEOEEC
Code example #2
0
def get_fs(username, password, fname_iEEG):
    """Return the sampling rate (Hz) of the first channel of an iEEG.org dataset."""
    print("\nGetting sampling frequency from iEEG.org:")
    print(f"fname_iEEG: {fname_iEEG}")
    session = Session(username, password)
    dataset = session.open_dataset(fname_iEEG)
    # The sample rate is taken from the first channel's time-series details.
    first_channel = dataset.ch_labels[0]
    return dataset.get_time_series_details(first_channel).sample_rate
Code example #3
0
def get_iEEG_data(username, password, iEEG_filename, start_time_usec,
                  stop_time_usec, ignore_electrodes, outputfile):
    """Download [start, stop] usec of all channels from iEEG.org and pickle
    [DataFrame, fs] to `outputfile`.

    The DataFrame is indexed by (rounded) microsecond timestamps and has the
    `ignore_electrodes` columns dropped. Requests longer than ~10 minutes are
    split into chunks because the iEEG.org server blocks large requests.
    """
    print("\nGetting data from iEEG.org:")
    print("iEEG_filename: {0}".format(iEEG_filename))
    print("start_time_usec: {0}".format(start_time_usec))
    print("stop_time_usec: {0}".format(stop_time_usec))
    print("ignore_electrodes: {0}".format(ignore_electrodes))
    start_time_usec = int(start_time_usec)
    stop_time_usec = int(stop_time_usec)
    duration = stop_time_usec - start_time_usec
    s = Session(username, password)
    ds = s.open_dataset(iEEG_filename)
    channels = list(range(len(ds.ch_labels)))
    fs = ds.get_time_series_details(
        ds.ch_labels[0]).sample_rate  #get sample rate

    #if duration is greater than ~10 minutes, then break up the API request to iEEG.org.
    #The server blocks large requests, so the below code breaks up the request and
    #concatenates the data
    server_limit_minutes = 10
    server_limit_usec = server_limit_minutes * 60 * 1e6
    if duration < server_limit_usec:
        data = ds.get_data(start_time_usec, duration, channels)
    else:  # if/else instead of two independent ifs: the limit is evaluated once
        n_breaks = int(np.ceil(duration / server_limit_usec)) + 1
        break_times = np.ceil(
            np.linspace(start_time_usec, stop_time_usec,
                        num=n_breaks, endpoint=True))
        data = np.zeros(shape=(int(np.ceil(duration / 1e6 * fs)),
                               len(channels)))  #initialize
        print(
            "breaking up data request from server because length is too long")
        for i in range(len(break_times) - 1):
            print("{0}/{1}".format(i + 1, len(break_times) - 1))
            # Row window for this chunk, in samples relative to the clip start.
            row_start = int(np.ceil((break_times[i] - break_times[0]) / 1e6 * fs))
            row_stop = int(np.ceil((break_times[i + 1] - break_times[0]) / 1e6 * fs))
            # Slice assignment replaces the original fancy-index `range(...)`
            # assignment — same rows, no intermediate index array.
            data[row_start:row_stop, :] = ds.get_data(
                break_times[i], break_times[i + 1] - break_times[i], channels)

    index = np.round(
        np.linspace(start_time_usec, stop_time_usec, num=len(data)))
    df = pd.DataFrame(data, columns=ds.ch_labels, index=index)
    # Idiomatic instance-method call instead of pd.DataFrame.drop(df, ...).
    df = df.drop(ignore_electrodes, axis=1)
    print("Saving to: {0}".format(outputfile))
    with open(outputfile, 'wb') as f:
        pickle.dump([df, fs], f)
    print("...done\n")
Code example #4
0
def get_iEEG_data(username, password, fname_iEEG, startUsec, stopUsec, ignoreElectrodes, channels = "all"):
    """Download a clip from iEEG.org and return (DataFrame, fs).

    Parameters
    ----------
    username, password : iEEG.org credentials.
    fname_iEEG : dataset name on iEEG.org.
    startUsec, stopUsec : clip boundaries in microseconds.
    ignoreElectrodes : electrode labels to drop (silently skipped if absent).
    channels : "all" (default) or an explicit list of channel labels.

    Returns
    -------
    df : DataFrame indexed by rounded microsecond timestamps, one column per
         kept channel label.
    fs : sampling rate of the first channel.
    """
    print("\nGetting data from iEEG.org:")
    print(f"fname_iEEG: {fname_iEEG}")
    print(f"startUsec: {startUsec}")
    print(f"stopUsec: {stopUsec}")
    print(f"channels: {channels}")
    print(f"ignoreElectrodes: {ignoreElectrodes}")
    startUsec = int(startUsec)
    stopUsec = int(stopUsec)
    duration = stopUsec - startUsec
    s = Session(username, password)
    ds = s.open_dataset(fname_iEEG)
    if channels == "all":
        channels_ind = ds.get_channel_indices(ds.ch_labels)
        channel_labels = ds.ch_labels
    else: #get channel number from channel label
        channels_ind = ds.get_channel_indices(channels)
        channel_labels = channels

    fs = ds.get_time_series_details(ds.ch_labels[0]).sample_rate #get sample rate

    #if duration is greater than ~10 minutes, then break up the API request to iEEG.org.
    #The server blocks large requests, so the below code breaks up the request and
    #concatenates the data
    server_limit_minutes = 10
    server_limit_usec = server_limit_minutes * 60 * 1e6
    if duration < server_limit_usec:
        # Download one channel at a time and stack the columns (as the original
        # did; a single bulk request was left commented out in its history).
        data = None
        for ind in channels_ind:
            chunk = ds.get_data(startUsec, duration, [ind])
            data = chunk if data is None else np.concatenate([data, chunk], axis=1)
    else:  # if/else instead of two independent ifs: the limit is checked once
        n_breaks = int(np.ceil(duration / server_limit_usec)) + 1
        break_times = np.ceil(np.linspace(startUsec, stopUsec, num=n_breaks, endpoint=True))
        data = np.zeros(shape=(int(np.ceil(duration / 1e6 * fs)), len(channels_ind)))  #initialize
        print("breaking up data request from server because length is too long")
        for i in range(len(break_times) - 1):
            print("{0}/{1}".format(i + 1, len(break_times) - 1))
            # Row window for this chunk, in samples relative to the clip start.
            row_start = int(np.ceil((break_times[i] - break_times[0]) / 1e6 * fs))
            row_stop = int(np.ceil((break_times[i + 1] - break_times[0]) / 1e6 * fs))
            data[row_start:row_stop, :] = ds.get_data(
                break_times[i], break_times[i + 1] - break_times[i], channels_ind)

    index = np.round(np.linspace(startUsec, stopUsec, num=len(data))).astype(int)
    df = pd.DataFrame(data, columns=channel_labels, index=index)
    # errors='ignore': only drop electrodes actually present in the columns.
    df = df.drop(ignoreElectrodes, axis=1, errors='ignore')
    # Unreachable statements after the original `return` were removed.
    return df, fs
Code example #5
0
File: get_data.py — Project: lena318/seeg_GMvsWM
def main():
    """
    Prints requested data
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p', '--password',
                        help='password (will be prompted if omitted)')
    parser.add_argument('dataset', help='dataset name')
    parser.add_argument('start', type=int, help='start offset in usec')
    parser.add_argument('duration', type=int, help='number of usec to request')

    args = parser.parse_args()

    # Prompt interactively rather than requiring the password on the CLI.
    if not args.password:
        args.password = getpass.getpass()

    with Session(args.user, args.password) as session:
        dataset = session.open_dataset(args.dataset)
        # Request every channel of the dataset by index.
        all_channels = list(range(len(dataset.ch_labels)))
        raw_data = dataset.get_data(args.start, args.duration, all_channels)
        print('raw', raw_data)
        session.close_dataset(args.dataset)
Code example #6
0
def main():
    """
    Parses the command line and dispatches subcommand.
    """
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars='@',
        epilog="""Arguments can also be placed in a text file, one per line.
                  Pass the file name prefixed by '@': %(prog)s @/path/to/arg_file.txt"""
    )
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p', '--password',
                        help='password (will be prompted if missing)')
    parser.add_argument('datasets', nargs='+', metavar='dataset',
                        help='dataset name')

    args = parser.parse_args()

    if not args.password:
        args.password = getpass.getpass()

    with Session(args.user, args.password) as session:
        # Print "<name>, <earliest start time in UTC>" for each dataset.
        for name in args.datasets:
            dataset = session.open_dataset(name)
            earliest_uutc = min(ts.start_time
                                for ts in dataset.ts_details.values())
            start_time = datetime.datetime.fromtimestamp(
                earliest_uutc / 1000000, datetime.timezone.utc)
            print('{0}, {1}'.format(name, start_time))
            session.close_dataset(name)
Code example #7
0
def main():
    """
    Print all montages for given dataset if no montage is specified.
    Or get small amount of montaged data if a montage is specified.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p',
                        '--password',
                        help='password (will be prompted if omitted)')

    parser.add_argument('dataset', help='dataset name')
    parser.add_argument('montage', nargs='?', help='montage name')

    args = parser.parse_args()

    if not args.password:
        args.password = getpass.getpass()

    with Session(args.user, args.password) as session:
        dataset_name = args.dataset
        dataset = session.open_dataset(dataset_name)
        montages = dataset.montages
        if not args.montage:
            # No montage requested: list every available montage
            # (name, portal id, channel pairs).
            for name, montage_list in montages.items():
                for montage in montage_list:
                    print(name, montage.portal_id, montage.pairs)
        else:
            # A freshly opened dataset is expected to have no montage applied.
            assert dataset.get_current_montage() is None
            # Requesting unrealisticly short durations to verify montage arithmetic.
            raw_data = dataset.get_data(0, 8000,
                                        list(range(len(dataset.ch_labels))))
            print('raw', raw_data)

            dataset.set_current_montage(args.montage)
            montage = dataset.get_current_montage()
            print(montage)
            # Spot-check with the first one or two montage channels.
            montage_channels = [0]
            if len(montage.pairs) > 1:
                montage_channels.append(1)
            montaged_data = dataset.get_data(0, 4000, montage_channels)
            print('montaged 1', montaged_data)
            montaged_data = dataset.get_data(4000, 4000, montage_channels)
            print('montaged 2', montaged_data)

            montage_labels = [montage.pairs[i] for i in montage_channels]
            print(montage_labels)
            # Sliding-window reduction; the numeric args presumably are
            # (start, window, slide, duration) in usec — TODO confirm with
            # the ieegpy processing API.
            window_result = ProcessSlidingWindowPerChannel.execute(
                dataset, montage_labels, 0, 4000, 4000, 8000, numpy.mean)
            print('per channel', window_result)

            def row_mean(matrix):
                # Collapse each window's matrix across the channel axis.
                return numpy.mean(matrix, axis=1)

            window_result = ProcessSlidingWindowAcrossChannels.execute(
                dataset, montage_labels, 0, 4000, 4000, 8000, row_mean)
            print('across channels', window_result)
        session.close_dataset(dataset_name)
Code example #8
0
File: get_iEEG_data.py — Project: lena318/paper004
def get_iEEG_data(username, password, iEEG_filename, start_time_usec,
                  stop_time_usec, ignore_electrodes, outputfile):
    """Fetch one clip of iEEG data, drop ignored electrodes, and pickle
    [DataFrame, sample_rate] to `outputfile`."""
    print("\n\nGetting data from iEEG.org:")
    print("iEEG_filename: {0}".format(iEEG_filename))
    print("start_time_usec: {0}".format(start_time_usec))
    print("stop_time_usec: {0}".format(stop_time_usec))
    print("ignore_electrodes: {0}".format(ignore_electrodes))
    print("Saving to: {0}".format(outputfile))

    clip_start = int(start_time_usec)
    clip_stop = int(stop_time_usec)
    session = Session(username, password)
    dataset = session.open_dataset(iEEG_filename)
    # Request every channel, then drop the unwanted electrode columns.
    all_channel_indices = list(range(len(dataset.ch_labels)))
    raw = dataset.get_data(clip_start, clip_stop - clip_start,
                           all_channel_indices)
    frame = pd.DataFrame(raw, columns=dataset.ch_labels).drop(
        ignore_electrodes, axis=1)
    sample_rate = dataset.get_time_series_details(
        dataset.ch_labels[0]).sample_rate  #get sample rate
    with open(outputfile, 'wb') as f:
        pickle.dump([frame, sample_rate], f)
    print("...done\n")
Code example #9
0
def get_iEEG_annotations(username, password, fname_iEEG, annotationLayerName):
    """Fetch per-electrode annotation rows from one iEEG.org annotation layer.

    Returns a DataFrame with columns [file, electrode, start, stop] — one row
    per (annotation, annotated channel) pair, with start/stop offsets in
    microseconds. If the layer does not exist, prints a message and returns
    None (preserving the original behavior of `return print(...)`).
    """
    print("\nGetting data from iEEG.org:")
    print(f"fname_iEEG: {fname_iEEG}")
    print(f"Annotation Layer: {annotationLayerName}")

    s = Session(username, password)
    ds = s.open_dataset(fname_iEEG)

    if annotationLayerName not in ds.get_annotation_layers():
        print(f"Annotation layer does not exist: {annotationLayerName}")
        return None

    annotationsLayer = ds.get_annotations(annotationLayerName)
    # Collect rows in a list and build the DataFrame once at the end:
    # DataFrame.append was removed in pandas 2.0 and per-row appends are O(n^2).
    rows = []
    for annotation in annotationsLayer:
        start = annotation.start_time_offset_usec
        stop = annotation.end_time_offset_usec
        for annotated in annotation.annotated:
            rows.append({'file': fname_iEEG,
                         'electrode': annotated.channel_label,
                         'start': start,
                         'stop': stop})
    # (An unreachable trailing print after the original returns was removed.)
    return pd.DataFrame(rows, columns=["file", "electrode", "start", "stop"])
Code example #10
0
File: mprov_example.py — Project: Ziaeemehr/ieegpy
def main():
    """
    Parses the command line and dispatches subcommand.
    """

    # create the top-level parser
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p',
                        '--password',
                        help='password (will be prompted if missing)')
    parser.add_argument('--mprov_user', help='MProv username')
    parser.add_argument('--mprov_password',
                        help='MProv password (will be prompted if missing)')

    parser.add_argument('dataset_name',
                        help="""A dataset to which you have write access.
                        If a dataset with this name does not exist a copy
                        of Study 005 is created and used.""")

    args = parser.parse_args()
    dataset_name = args.dataset_name

    if not args.password:
        args.password = getpass.getpass('IEEG Password: ')

    # NOTE(review): the password-prompt / MProv-setup lines were corrupted in
    # the source (credential text was redacted to '******'); reconstructed
    # from context and the sibling examples in this file — confirm against VCS.
    mprov_listener = None
    if args.mprov_user:
        if not args.mprov_password:
            args.mprov_password = getpass.getpass('MProv Password: ')
        mprov_url = 'http://localhost:8088'
        MProvConnection.graph_name = dataset_name
        mprov_connection = MProvConnection(args.mprov_user,
                                           args.mprov_password, mprov_url)
        mprov_listener = MProvListener(mprov_connection)
    with Session(args.user, args.password,
                 mprov_listener=mprov_listener) as session:
        tool_name = parser.prog
        dataset = open_or_create_dataset(session, dataset_name, tool_name)
        layer_name = tool_name + ' layer'
        annotations = create_annotatations(dataset, layer_name, tool_name)
        dataset.add_annotations(annotations)
        print("wrote {} annotations to layer '{}' in dataset '{}'".format(
            len(annotations), layer_name, dataset.name))
        if args.mprov_user:
            print("wrote provenance of annotations to graph '{}'".format(
                MProvConnection.graph_name))
        session.close_dataset(dataset)
Code example #11
0
File: annotations.py — Project: Ziaeemehr/ieegpy
 def pass_dataset(args):
     """Open args.dataset in a Session (optionally with MProv provenance
     recording) and dispatch it to `func` (bound in the enclosing scope).

     NOTE(review): the first lines of this function were corrupted in the
     source (credential prompts redacted to '******'; a scheme selection
     `'http' if args.no_ssl else 'https'` was also fused into the same line).
     The prompts below are reconstructed from context; the no_ssl handling
     could not be reconstructed and is flagged — confirm against VCS.
     """
     if not args.password:
         args.password = getpass.getpass('IEEG Password: ')
     if args.mprov_user and not args.mprov_password:
         args.mprov_password = getpass.getpass('MProv Password: ')
     # TODO(review): restore the args.no_ssl scheme handling lost to corruption.
     mprov_listener = None
     if args.mprov_user:
         mprov_url = 'http://localhost:8088' if args.mprov_url is None else args.mprov_url
         if args.mprov_graph:
             MProvConnection.graph_name = args.mprov_graph
         mprov_connection = MProvConnection(args.mprov_user,
                                            args.mprov_password, mprov_url)
         mprov_listener = MProvListener(mprov_connection)
     with Session(args.user, args.password,
                  mprov_listener=mprov_listener) as session:
         dataset = session.open_dataset(args.dataset)
         func(dataset, args)
         session.close_dataset(dataset)
Code example #12
0
def get_iEEG_data(username, password, iEEG_filename, start_time_usec,
                  stop_time_usec, ignore_electrodes, outputfile_EEG):
    """Download a clip of all channels from iEEG.org, normalize electrode
    names, and save the data plus a small metadata CSV.

    Writes `outputfile_EEG` (the samples, one column per kept channel) and
    `<outputfile_EEG stem>_metadata.csv` (currently just the sampling rate).
    Requests longer than ~10 minutes are split into chunks because the
    iEEG.org server blocks large requests.
    """
    print("\nGetting data from iEEG.org:")
    print("iEEG_filename: {0}".format(iEEG_filename))
    print("start_time_usec: {0}".format(start_time_usec))
    print("stop_time_usec: {0}".format(stop_time_usec))
    print("ignore_electrodes: {0}".format(ignore_electrodes))
    start_time_usec = int(start_time_usec)
    stop_time_usec = int(stop_time_usec)
    duration = stop_time_usec - start_time_usec
    s = Session(username, password)
    ds = s.open_dataset(iEEG_filename)
    channels = list(range(len(ds.ch_labels)))
    fs = ds.get_time_series_details(
        ds.ch_labels[0]).sample_rate  #get sample rate

    #if duration is greater than ~10 minutes, then break up the API request to iEEG.org.
    #The server blocks large requests, so the below code breaks up the request and
    #concatenates the data
    server_limit_minutes = 10
    server_limit_usec = server_limit_minutes * 60 * 1e6
    if duration < server_limit_usec:
        data = ds.get_data(start_time_usec, duration, channels)
    else:  # if/else instead of two independent ifs: the limit is checked once
        n_breaks = int(np.ceil(duration / server_limit_usec)) + 1
        break_times = np.ceil(
            np.linspace(start_time_usec, stop_time_usec,
                        num=n_breaks, endpoint=True))
        data = np.zeros(shape=(int(np.ceil(duration / 1e6 * fs)),
                               len(channels)))  #initialize
        print(
            "breaking up data request from server because length is too long")
        for i in range(len(break_times) - 1):
            print("{0}/{1}".format(i + 1, len(break_times) - 1))
            # Row window for this chunk, in samples relative to the clip start.
            row_start = int(np.ceil((break_times[i] - break_times[0]) / 1e6 * fs))
            row_stop = int(np.ceil((break_times[i + 1] - break_times[0]) / 1e6 * fs))
            data[row_start:row_stop, :] = ds.get_data(
                break_times[i], break_times[i + 1] - break_times[i], channels)

    df = pd.DataFrame(data, columns=ds.ch_labels)
    df = df.drop(ignore_electrodes, axis=1)
    # Rename channels to the standard 4 characters (2 letters, 2 numbers),
    # e.g. "LA1" -> "LA01". Assign a whole new column list instead of mutating
    # df.columns.values in place — writing into the Index's backing array is
    # unsupported by pandas and may silently fail to propagate.
    renamed_columns = []
    for electrode_name in df.columns:
        if len(electrode_name) == 3:
            electrode_name = f"{electrode_name[0:2]}0{electrode_name[2]}"
        renamed_columns.append(electrode_name)
    df.columns = renamed_columns

    print("Saving to: {0}".format(outputfile_EEG))
    df.to_csv(outputfile_EEG, index=False)

    #save metadata like fs
    df_metadata = pd.DataFrame([{'fs': fs}])
    outputfile_EEG_metadata = os.path.splitext(
        outputfile_EEG)[0] + "_metadata.csv"
    df_metadata.to_csv(outputfile_EEG_metadata, index=False)
    print("...done\n")
Code example #13
0
def get_iEEG_data(username,
                  password,
                  iEEG_filename,
                  start_time_usec,
                  stop_time_usec,
                  ignore_electrodes,
                  outputfile,
                  get_all_channels=False,
                  redownload=False):
    """Download a clip of iEEG data from iEEG.org and pickle it to `outputfile`.

    The download is skipped when `outputfile` already exists, unless
    `redownload` is True. Clips longer than ~5 minutes are requested in
    chunks because the iEEG.org server blocks large requests.

    Parameters
    ----------
    username, password : iEEG.org credentials.
    iEEG_filename : dataset name on iEEG.org.
    start_time_usec, stop_time_usec : clip boundaries in microseconds.
    ignore_electrodes : electrode labels to drop; [''] means "none".
    outputfile : path for the pickled [DataFrame, fs] pair.
    get_all_channels : if True, keep every channel (do not drop ignored ones).
    redownload : if True, download even when `outputfile` already exists.

    Returns
    -------
    The ignore-electrode labels as they are named on iEEG.org (resolved via
    get_true_ignore_electrodes), or None when a connection error aborts the
    download.
    """
    print("\nGetting data from iEEG.org:")
    print("iEEG_filename: {0}".format(iEEG_filename))
    print("start_time_usec: {0}".format(start_time_usec))
    print("stop_time_usec: {0}".format(stop_time_usec))
    print("ignore_electrodes: {0}".format(ignore_electrodes))

    if not os.path.isfile(outputfile) or redownload == True:
        # data has not yet been downloaded for this interval
        start_time_usec = int(start_time_usec)
        stop_time_usec = int(stop_time_usec)
        duration = stop_time_usec - start_time_usec
        s = Session(username, password)
        ds = s.open_dataset(iEEG_filename)
        channels = list(range(len(ds.ch_labels)))
        fs = ds.get_time_series_details(
            ds.ch_labels[0]).sample_rate  #get sample rate

        #if duration is greater than ~10 minutes, then break up the API request to iEEG.org.
        #The server blocks large requests, so the below code breaks up the request and
        #concatenates the data
        server_limit_minutes = 5
        try:
            if duration < server_limit_minutes * 60 * 1e6:
                data = ds.get_data(start_time_usec, duration, channels)
            if duration >= server_limit_minutes * 60 * 1e6:
                # Chunk boundaries: evenly spaced, rounded up to whole usec.
                break_times = np.ceil(
                    np.linspace(
                        start_time_usec,
                        stop_time_usec,
                        num=int(
                            np.ceil(duration /
                                    (server_limit_minutes * 60 * 1e6)) + 1),
                        endpoint=True))
                #break_data = np.zeros(shape = (int(np.ceil(duration/1e6*fs)), len(channels)))#initialize
                # Chunks are appended rather than written into a preallocated
                # array (the indexed approach is kept commented out below).
                break_data = np.empty(shape=(0, len(channels)), dtype=float)
                print(
                    "breaking up data request from server because length is too long"
                )
                for i in range(len(break_times) - 1):
                    print("{0}/{1}".format(i + 1, len(break_times) - 1))
                    break_data = np.append(
                        break_data,
                        ds.get_data(break_times[i],
                                    break_times[i + 1] - break_times[i],
                                    channels),
                        axis=0)
                    #try:
                    #    break_data[range(int( np.floor((break_times[i]-break_times[0])/1e6*fs) ), int(  np.floor((break_times[i+1]- break_times[0])/1e6*fs) )  ),:] = ds.get_data(break_times[i], break_times[i+1]-break_times[i], channels)
                    #except ValueError as e:
                    #    print(e)
                    #    print("ValueError encountered in breaking data up for download, arrays are likely mishaped. Skipping...")
                    #    return
                data = break_data
        except ieeg.ieeg_api.IeegConnectionError:
            # Best-effort: a failed download is reported, not raised.
            print("IeegConnectionError encountered, skipping...")
            return
        df = pd.DataFrame(data, columns=ds.ch_labels)
        true_ignore_electrodes = []
        if ignore_electrodes != ['']:
            # Map requested labels to the labels actually used on iEEG.org.
            true_ignore_electrodes = get_true_ignore_electrodes(
                ds.ch_labels, ignore_electrodes)

        if not get_all_channels:
            df = pd.DataFrame.drop(df, true_ignore_electrodes, axis=1)

        print("Saving to: {0}".format(outputfile))
        with open(outputfile, 'wb') as f:
            # protocol=4 keeps the pickle readable by Python >= 3.4.
            pickle.dump([df, fs], f, protocol=4)
        print("...done\n")
    else:
        # Output exists and redownload is False: still resolve the ignore
        # labels so the return value matches the download branch.
        s = Session(username, password)
        ds = s.open_dataset(iEEG_filename)

        true_ignore_electrodes = []
        if ignore_electrodes != ['']:
            true_ignore_electrodes = get_true_ignore_electrodes(
                ds.ch_labels, ignore_electrodes)
        # data has already been downloaded for this interval
        print("{} exists, skipping...".format(outputfile))

    # return ignore_electrodes as they are called on ieeg.org
    return true_ignore_electrodes
Code example #14
0
# Clip window length in microseconds (10 s).
WINDOW_SIZE = int(10e6)
HDF5_NAME = 'training_data.hdf5'
BATCH_SIZE = 100

# Annotation .mat files and login file live under matlab/.
os.chdir('matlab')

### load seizure timestamps
# all_annots rows: (patient id, onset times, offset times) — presumed layout
# of the .mat struct; TODO confirm against the file that produced it.
all_annots = loadmat('other/all_annots_32')['all_annots']
# Map patient id -> list of (start_usec, stop_usec) seizure intervals,
# converting the stored seconds to integer microseconds; RID00* patients only.
A = {row[0].item(): [(int(a*1e6), int(b*1e6)) for a,b in zip(row[1][0], row[2][0])]
    for row in all_annots[0] if row[0].item().startswith('RID00')}
pt_names = sorted(A.keys())

### Connect to IEEG
# 'ieeg_login' holds "username password" on its first line.
with open('ieeg_login', 'r') as f:
    LOGIN = f.readline().strip().split()
with Session(*LOGIN) as s:
    datasets = {p: s.open_dataset(p) for p in pt_names}

def create_file(fname):
    """Create an HDF5 file with empty 'ictal'/'interictal' groups, truncating any existing file."""
    with h5py.File(fname, 'w') as f:
        for category in ('ictal', 'interictal'):
            # The returned group handle was bound to an unused local; dropped.
            f.create_group(category)

def append_dataset(fname, category, data_dict):
    '''
    fname: hdf5 file name
    category: 'ictal' or 'interictal'
    data_dict: {pt_name: [3d array of clips]}
    '''
    with h5py.File(fname, 'a') as f:
Code example #15
0
File: read_sample.py — Project: Ziaemehr/ieegpy
import numpy as np

# NOTE(review): these defaults were redacted to '******' by the site this
# snippet was scraped from; they are placeholders, not real credentials.
default_user = '******'
default_password = '******'

# Usage: read_sample <user> <password> [mprov_user] [mprov_password]
if len(sys.argv) < 3:
    print(
        'To run this sample program, you must supply your user ID and password on the command-line'
    )
    print(
        'Syntax: read_sample [user id (in double-quotes if it has a space)] [password] [Prov userID] [Prov Password]'
    )
    sys.exit(1)

print('Logging into IEEG:', sys.argv[1], '/ ****')
with Session(sys.argv[1], sys.argv[2]) as s:

    # Optional MProv provenance connection when extra credentials are given.
    conn = None
    if len(sys.argv) > 3:
        default_user = sys.argv[3]
        default_password = sys.argv[4]
        print('Logging into local MProv: ', default_user, '/ ****')
        conn = MProvConnection(default_user, default_password, None)
        print("Successfully connected to the MProv server")

    # We pick one dataset...
    ds = s.open_dataset('I004_A0003_D001')

    # Iterate through all of the channels and print their metadata
    for name in ds.get_channel_labels():
        print(ds.get_time_series_details(name))
Code example #16
0
File: stream.py — Project: nathanielnyema/RAMSES
    print('connecting to existing matlab engine')
    eng = matlab.engine.connect_matlab()
    eng.addpath('matlab')
    eng.addpath('matlab/IEEGToolbox')
else:
    print('starting matlab engine')
    eng = matlab.engine.start_matlab('-r "matlab.engine.shareEngine"')
    eng.addpath('matlab')
    eng.addpath('matlab/IEEGToolbox')
    eng.init_classifier(nargout=0)

# get all the datasets
print('connecting to IEEG')
# 'matlab/ieeg_login' holds "username password" on its first line.
with open('matlab/ieeg_login', 'r') as f:
    login = f.readline().strip().split()
# BEDS is a module-level list of bed/dataset names (defined elsewhere).
with Session(*login) as s:
    datasets = {bd: s.open_dataset(bd) for bd in BEDS}


def get_data(bd, start_usec, length_usec):
    """Pull raw data for bed `bd` from IEEG over the configured CHANNELS."""
    dataset = datasets[bd]
    # Resolve each configured channel label to its index in this dataset.
    channel_indices = []
    for channel in CHANNELS:
        channel_indices.append(dataset.ch_labels.index(channel))
    return dataset.get_data(start_usec, length_usec, channel_indices)


# pull data from each dataset for specified beds
# TODO: apply smoothing of predictions by:
#   1) removing isolated seizure predictions
#   2) combining seizure predictions which are close together
# TODO: detect poor performance and retroactively change predictions to unsure (1)
#%% Get iEEG spread annotations

# initialize spread annotations dataframe
# (one row per annotated electrode; start/stop are offsets in microseconds)
spread_annotations = pd.DataFrame(columns=(
    ["RID", "HUP_ID", "3T_ID", "7T_ID", "file", "electrode", "start", "stop"]))
for i in range(len(files)):
    #parsing data DataFrame to get iEEG information

    sub_ID = data[data.file == files[i]].RID.iloc[0]
    HUP_ID = data[data.file == files[i]].HUP_ID.iloc[0]
    ID_3T = data[data.file == files[i]]['3T_ID'].iloc[0]
    ID_7T = data[data.file == files[i]]['7T_ID'].iloc[0]
    iEEG_filename = files[i]

    s = Session(username, password)
    ds = s.open_dataset(iEEG_filename)

    if "seizure_spread" in ds.get_annotation_layers(
    ):  #if annotations exists, get them
        annotations = ds.get_annotations("seizure_spread")

        for j in range(len(annotations)):
            start = annotations[j].start_time_offset_usec
            stop = annotations[j].end_time_offset_usec
            for k in range(len(annotations[j].annotated)):
                channel_label = annotations[j].annotated[k].channel_label
                spread_annotations = spread_annotations.append(
                    {
                        'RID': sub_ID,
                        'HUP_ID': HUP_ID,
Code example #18
0
File: mprov_example.py — Project: lena318/seeg_GMvsWM
def main():
    """
    Parses the command line and dispatches subcommand.
    """

    # create the top-level parser
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p',
                        '--password',
                        help='password (will be prompted if missing)')
    parser.add_argument('--mprov_user', help='MProv username')
    parser.add_argument('--mprov_password',
                        help='MProv password (will be prompted if missing)')

    parser.add_argument('dataset_name',
                        help="""A dataset to which you have write access.
                        If a dataset with this name does not exist a copy
                        of Study 005 is created and used.""")

    args = parser.parse_args()
    dataset_name = args.dataset_name

    if not args.password:
        args.password = getpass.getpass('IEEG Password: ')

    # NOTE(review): the password-prompt / MProv-setup lines were corrupted in
    # the source (credential text redacted to '******'); reconstructed from
    # context and the sibling mprov example — confirm against VCS. The None
    # default is required: mprov_connection is read unconditionally below.
    mprov_connection = None
    if args.mprov_user:
        if not args.mprov_password:
            args.mprov_password = getpass.getpass('MProv Password: ')
        mprov_url = 'http://localhost:8088'
        MProvConnection.graph_name = dataset_name
        mprov_connection = MProvConnection(args.mprov_user,
                                           args.mprov_password, mprov_url)
    with Session(args.user, args.password) as session:
        tool_name = parser.prog
        dataset = open_or_create_dataset(session, dataset_name, tool_name)
        layer_name = 'negative mean layer ' + datetime.datetime.today(
        ).isoformat()
        dataset_duration_usec = dataset.end_time - dataset.start_time
        # Probably working with a copy of Study 005.
        # It has a gap at the beginning, so we'll try to skip it.
        study_005_post_gap_offset = 583000000
        start_time_usec = (study_005_post_gap_offset
                           if dataset_duration_usec > study_005_post_gap_offset
                           else 0)
        window_size_usec = 1000000
        slide_usec = 500000
        duration_usec = 120000000
        input_channel_labels = dataset.ch_labels[:2]
        window_annotator = SlidingWindowAnnotator(
            window_size_usec,
            slide_usec,
            negative_mean_annotator,
            mprov_connection=mprov_connection)
        print(
            "Processing {} usec of dataset '{}' starting at {} usec with a {} usec slide."
            .format(duration_usec, dataset.name, start_time_usec, slide_usec))
        if mprov_connection:
            print("Provenance graph '{}' will be viewable at {}/viz/.".format(
                mprov_connection.get_graph(),
                mprov_connection.configuration.host))
        annotations = window_annotator.annotate_dataset(
            dataset,
            layer_name,
            start_time_usec=start_time_usec,
            duration_usec=duration_usec,
            input_channel_labels=input_channel_labels)
        print("Wrote {} annotations to layer '{}' in dataset '{}'.".format(
            len(annotations), layer_name, dataset.name))
        if mprov_connection:
            print("Wrote provenance of annotations to graph '{}'.".format(
                mprov_connection.get_graph()))
        session.close_dataset(dataset)