Example #1
    def __init__(self, filepath, lookuptab, rewards, file_size,
                 number_positions):
        self.filepath = filepath
        self.lookuptab_file = lookuptab
        self.rewardsfile = rewards
        s_data = mat73.loadmat(self.filepath)
        tab_l = mat73.loadmat(self.lookuptab_file)
        rew = mat73.loadmat(self.rewardsfile)
        self.data_test = s_data['data_hexapods']
        self.lookuptab = tab_l['lookuptab']
        self.rewards_vec = rew['rewards']
        self.number_devices = 5
        self.number_positions = number_positions
        self.done = False
Example #2
    def __init__(self):
        self.data = loadmat('dane.mat')['matrix']
        self.timePoint = 0
        self.STEP = 2000
        self.START = 5000  # time point at which the analysis starts
        self.FREQUENCY = 1000  # signal frequency
        self.T = 1.0 / self.FREQUENCY
Example #3
def main():

    # Read user options
    args = parser.parse_args()
    mat_folder = args.mat_folder

    mat_files_to_convert = glob.glob(os.path.join(mat_folder, '*.mat'))

    save_folder = os.path.join(mat_folder, 'converted-files')
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    for mat_file_path in mat_files_to_convert:

        culture_file_name = os.path.basename(mat_file_path)
        culture_file_name_without_ext = os.path.splitext(culture_file_name)[0]
        culture_name = culture_file_name.split('DIV')[0][:-1]
        div = culture_file_name.split('DIV')[1][0:2]

        data_dict = mat73.loadmat(mat_file_path)
        spike_dict = mat_to_dict(data_dict,
                                 culture_file_name=culture_file_name,
                                 culture_name=culture_name,
                                 div=div)

        culture_dict = {culture_name: spike_dict, 'place_holder': spike_dict}

        save_name = culture_file_name_without_ext + '.pkl'
        with open(os.path.join(save_folder, save_name), 'wb') as handle:
            pkl.dump(culture_dict, handle)
Example #4
    def __init__(self):
        data_set_no = 2
        data_dict = mat73.loadmat('NoStim_Data.mat')
        data = data_dict['NoStim_Data']

        deltaFOverF_bc = data['deltaFOverF_bc'][data_set_no]
        derivatives = data['derivs'][data_set_no]
        NeuronNames = data['NeuronNames'][data_set_no]
        fps = data['fps'][data_set_no]
        States = data['States'][data_set_no]

        # build a single states array in which each number corresponds to a behaviour
        self.states = np.sum(
            [n * States[s] for n, s in enumerate(States)], axis=0
        ).astype(int)
        self.state_names = [*States.keys()]
        self.neuron_traces = np.array(deltaFOverF_bc).T
        self.derivative_traces = derivatives['traces'].T
        self.neuron_names = np.array(NeuronNames, dtype=object)
        self.fps = fps

        with open('readme.txt', 'r') as f:
            self.DESCR = f.read()
Example #5
def mat2np(data_name):
    path = './data/{}.mat'.format(data_name)
    if data_name == 'http':
        data = mat73.loadmat(path)
    else:
        data = loadmat(path)
    x = data['X']
    y = data['y']
    if data_name in ('optdigits', 'mnist', 'arrhythmia'):
        range_x = (np.amax(x, axis=0) - np.amin(x, axis=0))
        range_x[range_x == 0] = 1
        x = (x - np.amin(x, axis=0)) / range_x
    else:
        x = zscore(x)

    if data_name == 'locus':
        lagosid = data['Lagosid']
        np.save("./data/{}.npy".format(data_name), {
            'x': x,
            'y': y,
            'lagosid': lagosid
        })
    else:
        np.save("./data/{}.npy".format(data_name), {'x': x, 'y': y})
    print("{} data has been processed".format(data_name))
Example #6
    def __init__(self, path="../data/raw-data/"):
        print("File conversion started.")
        self.path = path
        file = os.listdir(self.path)[0]
        file_data = loadmat(self.path + file)
        self.data = file_data["data"]
        self.labels = file_data["label"]
        del file_data
        print("Data file loaded")
Example #7
def mat2dataframe(path, shift_idx_fields, td_name=None):
    """
    Load a trial_data .mat file and turn it into a pandas DataFrame

    Parameters
    ----------
    path : str
        path to the .mat file to load;
        can also be an open file-like object
    shift_idx_fields : bool
        whether to shift the idx fields;
        set to True if the data was exported from MATLAB
        using its 1-based indexing
    td_name : str, optional
        name of the variable under which the data was saved

    Returns
    -------
    df : pd.DataFrame
        pandas dataframe replicating the trial_data format
        each row is a trial
    """
    try:
        mat = scipy.io.loadmat(path, simplify_cells=True)
    except NotImplementedError:
        try:
            import mat73
        except ImportError:
            raise ImportError("Must have mat73 installed to load mat73 files.")
        else:
            mat = mat73.loadmat(path)

    real_keys = [
        k for k in mat.keys() if not (k.startswith("__") and k.endswith("__"))
    ]

    if td_name is None:
        if len(real_keys) == 0:
            raise ValueError(
                "Could not find dataset name. Please specify td_name.")
        elif len(real_keys) > 1:
            raise ValueError(
                "More than one datasets found. Please specify td_name.")

        assert len(real_keys) == 1

        td_name = real_keys[0]

    df = pd.DataFrame(mat[td_name])

    df = data_cleaning.clean_0d_array_fields(df)
    df = data_cleaning.clean_integer_fields(df)

    if shift_idx_fields:
        df = data_cleaning.backshift_idx_fields(df)

    return df
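A hypothetical call to the loader above (the file name is an assumption):

df = mat2dataframe('trial_data.mat', shift_idx_fields=True)
print(len(df))  # one row per trial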
Example #8
    def test_file4(self):
        """
        Test a file created by Kubios HRV that created lots of problems before
        """
        d = mat73.loadmat(self.testfile4, use_attrdict=False)
        assert len(d) == 1
        res = d['Res']
        assert res['f_name'] == '219_92051.edf'
        assert res['f_path'] == 'C:\\Users\\sleep\\Desktop\\set2\\ECG_I\\'
        assert res['isPremium'] == True
        assert len(res) == 4
        self.assertEqual(sorted(res.keys()),
                         ['HRV', 'f_name', 'f_path', 'isPremium'])
        hrv = res['HRV']
        exp_key = [
            'Data', 'Frequency', 'NonLinear', 'Param', 'Statistics', 'Summary',
            'TimeVar', 'timevaranalOK'
        ]
        assert sorted(exp_key) == sorted(hrv.keys())
        assert len(hrv) == 8
        assert hrv['timevaranalOK'] == 1.0
        data = hrv['Data']
        assert len(data) == 18
        assert len(data['Artifacts']) == 1
        assert data['RR'].shape == (4564, )
        assert data['RRcorrtimes'].shape == (51, )
        assert data['RRdt'].shape == (4564, )
        assert data['RRdti'].shape == (20778, )
        assert data['RRi'].shape == (20778, )
        assert data['T_RR'].shape == (4565, )
        assert data['T_RRi'].shape == (20778, )
        assert data['T_RRorig'].shape == (4572, )

        islist = [
            'RRs', 'RRsdt', 'RRsdti', 'RRsi', 'T_RRs', 'T_RRsi', 'tmp',
            'Artifacts'
        ]
        for lname in data:
            if lname in islist:
                assert isinstance(data[lname], list)
                assert len(data[lname]) == 1
            else:
                assert not isinstance(data[lname], list)

        assert data['RRs'][0].shape == (276, )
        assert data['RRsdt'][0].shape == (276, )
        assert data['RRsdti'][0].shape == (1190, )
        assert data['RRsi'][0].shape == (1190, )
        assert data['T_RRs'][0].shape == (276, )
        assert data['T_RRsi'][0].shape == (1190, )
        assert len(data['tmp'][0]) == 3
        for arr in data['tmp'][0].values():
            assert isinstance(arr, np.ndarray)

        np.testing.assert_allclose(data['Artifacts'][0], 2.17391304)
Example #9
def load_matlab_file(path: Union[str, PathLike]):
    """Load a layer's activations from a Matlab file"""
    try:
        # this works
        # noinspection PyTypeChecker
        activations = scipy.io.loadmat(str(path))
    except NotImplementedError:
        # scipy can't load matlab v7.3 files, so we use a different library
        activations = mat73.loadmat(str(path))
    return activations
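This try/except pattern recurs throughout these examples: scipy.io.loadmat raises NotImplementedError for MATLAB v7.3 (HDF5-based) files, which mat73 can read. A minimal standalone sketch of the same idea (the function name is illustrative):

import scipy.io
import mat73

def load_any_mat(path):
    # scipy handles .mat files up to v7.2; v7.3 files raise NotImplementedError
    try:
        return scipy.io.loadmat(path)
    except NotImplementedError:
        return mat73.loadmat(path)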
Example #10
    def _load(self, edf_file, mat_file=None):
        if mat_file is None:
            filename = ospath.basename(edf_file)[:-4]
            folder = ospath.dirname(edf_file)
            mat_file = ospath.list_files(folder, patterns=f'{filename}*.mat')
            if len(mat_file)>0: mat_file = mat_file[0]
            if not mat_file or not os.path.exists(mat_file): 
                print('matfile {} not found'.format(mat_file))
                mat_file = misc.choose_file(folder, exts='mat',
                        title='Select the corresponding MAT file by Kubios')
            
        signals, sheader, header = highlevel.read_edf(edf_file, ch_names='ECG I')
        sfreq =  sheader[0]['sample_rate']
        data = signals[0].squeeze()
        stime = header['startdate']
        self.starttime = (stime.hour * 60 + stime.minute) * 60 + stime.second
        self.data = data
        self.sfreq = sfreq
        
        try:
            mat = mat73.loadmat(mat_file, verbose=False)
            rr = mat['Res']['HRV']['Data']['RR']
            trrs = mat['Res']['HRV']['Data']['T_RR'] - self.starttime
            rrorig = mat['Res']['HRV']['Data']['T_RRorig'] - self.starttime
            corr = mat['Res']['HRV']['Data']['RRcorrtimes'] - self.starttime
            art = mat['Res']['HRV']['TimeVar']['Artifacts']
            altered = trrs[np.where(np.diff(trrs)!=rr)[0]]
            
        except Exception as e:
            raise FileNotFoundError('Mat file not found.') from e

        artefacts_file = edf_file[:-4] + '.npy'  
        if os.path.exists(artefacts_file):
            self.artefacts = np.load(artefacts_file)
        else:
            art = np.nan_to_num(art, nan=99)
            self.artefacts = np.repeat(art>self.threshold, repeats=2, axis=0).T.reshape([-1,2])
            self.detect_flatline()
            
        self.kubios_art = np.nan_to_num(art.squeeze())
        self.mat = mat
        self.altered = altered.squeeze()
        self.rrorig = rrorig.squeeze()
        self.trrs = trrs.squeeze()
        self.corr = corr.squeeze()

        self.file = edf_file
        self.mat_file = mat_file
        self.artefacts_file = artefacts_file
        self.max_page = len(data)//sfreq//self.interval//self.gridsize
        
        self.save()
Example #11
def prepare_data():
    matrix = loadmat('dane.mat')['matrix']
    all_probes = matrix[:, 5:21]  # matrix slice containing the signal from all electrodes
    frequencies = matrix[:, 27]  # matrix slice indicating whether the frequencies occurred
    signal1 = matrix[:, 11]  # signal from one occipital electrode
    signal2 = matrix[:, 12]  # signal from one occipital electrode
    freq = matrix[:, 21]    # which frequency is being displayed
    car_signal1 = list(map(car, signal1, all_probes))
    car_signal2 = list(map(car, signal2, all_probes))
    freq_values = fftfreq(STEP, T)

    (minIndex, maxIndex) = frequency_range(8, 20, freq_values)
    s = START
    ambient_freq_pow1 = ambient_freq(car_signal1[START:], frequencies[START:], STEP)
    ambient_freq_pow2 = ambient_freq(car_signal2[START:], frequencies[START:], STEP)
    y_background1 = ambient_freq_pow1[minIndex:maxIndex]
    y_background2 = ambient_freq_pow2[minIndex:maxIndex]
    y_avg_background = (y_background1 + y_background2) / 2
    x_data = list()
    y_data = list()
    while s is not None:

        y1, s = non_zero_freq_test(car_signal1[s:], frequencies[s:], STEP, s)
        y2, _ = non_zero_freq_test(car_signal2[s:], frequencies[s:], STEP, s)

        if y1 is not None and y2 is not None:
            y1 = y1[minIndex:maxIndex]
            y2 = y2[minIndex:maxIndex]
            y_avg = np.add(y1, y2)
            y_avg /= 2
            ysr_avg = np.subtract(y_avg, y_avg_background)
            ysr_avg = normalize(ysr_avg)
            x_data.append(ysr_avg)
            y_data.append(tt(STEP, s, freq[s:s+STEP], frequencies[s:s+STEP]))
    x_data = np.array(x_data)
    y_data = np.array(y_data)

    # Balance the number of samples per class
    counted_y = [sum(y_data == 0),
                 sum(y_data == 1),
                 sum(y_data == 2),
                 sum(y_data == 3)]
    more_than_min_y = [count - min(counted_y) for count in counted_y]

    for i, surplus in enumerate(more_than_min_y):
        for _ in range(surplus):
            idx = np.where(y_data == i)[0][0]
            y_data = np.delete(y_data, idx)
            x_data = np.delete(x_data, idx, axis=0)

    return x_data, y_data
Example #12
def process_data():

    data = np.moveaxis(
        mat73.loadmat(config["images_mat_path"])['images'], -1, 1)
    labels = sio.loadmat(config["labels_mat_path"])['centers_labels']

    # z-score each image individually
    for i in range(data.shape[0]):
        data[i] = (data[i] - data[i].mean()) / data[i].std()

    np.save('data/data.npy', data)
    np.save('data/labels.npy', labels)
Example #13
def get_cube(file):
    try: 
        data = sio.loadmat(file)
        cube = np.array(data['cube']['betterRefl'][0][0])
        # cube = cube[:32][:64]  # model streaming format
        wn = np.array(data['cube']['wn'][0][0][20:280])
    except Exception:  # fall back to mat73 for MATLAB v7.3 files
        data = mat73.loadmat(file)
        cube = np.array(data['cube']['betterRefl'])
        wn = np.array(data['cube']['wn'])
    return cube, wn
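A hypothetical call to the helper above (the file name is an assumption):

cube, wn = get_cube('scan.mat')  # works for both classic and v7.3 .mat files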
Example #14
def prepare_data():
    matrix = loadmat('dane.mat')['matrix']
    all_probes = matrix[:, 5:21]  # matrix slice containing the signal from all electrodes
    frequencies = matrix[:, 27]  # matrix slice indicating whether the frequencies occurred
    signal1 = matrix[:, 11]  # signal from one occipital electrode
    signal2 = matrix[:, 12]  # signal from one occipital electrode
    freq = matrix[:, 21]  # which frequency is being displayed
    car_signal1 = list(map(car, signal1, all_probes))
    car_signal2 = list(map(car, signal2, all_probes))

    freq_values = fft.fftfreq(STEP, T)
    (minIndex, maxIndex) = frequency_range(8, 20, freq_values)
    s = START

    x_data = list()
    y_data = list()

    while s is not None:

        y1, s, _ = non_zero_freq_test(car_signal1[s:], frequencies[s:], STEP,
                                      s)
        y2, _, isFreq = non_zero_freq_test(car_signal2[s:], frequencies[s:],
                                           STEP, s)

        if y1 is not None and y2 is not None:
            if isFreq:
                ambient_freq_pow1 = ambient_freq(car_signal1[s - 4 * STEP:s],
                                                 frequencies[s - 4 * STEP:s],
                                                 STEP)
                ambient_freq_pow2 = ambient_freq(car_signal2[s - 4 * STEP:s],
                                                 frequencies[s - 4 * STEP:s],
                                                 STEP)
                y_background1 = ambient_freq_pow1[minIndex:maxIndex]
                y_background2 = ambient_freq_pow2[minIndex:maxIndex]
                y1 = y1[minIndex:maxIndex] - y_background1
                y2 = y2[minIndex:maxIndex] - y_background2
                y1 = normalize(y1)
                y2 = normalize(y2)
                y_avg = np.append(y1.ravel(), y2.ravel())
                x_data.append(y_avg)
                y_data.append(calculate_output(STEP, freq[s:s + STEP]))
            else:
                y1 = normalize(y1[minIndex:maxIndex])
                y2 = normalize(y2[minIndex:maxIndex])
                y_avg = np.append(y1.ravel(), y2.ravel())
                x_data.append(y_avg)
                y_data.append(3)

    x_data = np.array(x_data)
    y_data = np.array(y_data)

    return x_data, y_data
Example #15
    def __init__(self, config):
        self.config = config

        train_in_file = os.path.join(self.config.data_dir,
                                     self.config.train_input)
        train_out_file = os.path.join(self.config.data_dir,
                                      self.config.train_output)

        print('*** LOADING TRAINING INPUT DATA ***')
        train_in_dict = mat73.loadmat(train_in_file)

        print('*** LOADING TRAINING OUTPUT DATA ***')
        train_out_dict = mat73.loadmat(train_out_file)

        train_in_key = list(train_in_dict.keys())[0]
        train_out_key = list(train_out_dict.keys())[0]

        self.input = np.transpose(train_in_dict[train_in_key])
        self.output = np.transpose(train_out_dict[train_out_key])

        self.len = self.input.shape[0]
Example #16
    def __init__(self, config):
        self.config = config

        inference_in_file = os.path.join(self.config.data_dir,
                                         self.config.inference_input)
        inference_out_file = os.path.join(self.config.data_dir,
                                          self.config.inference_target_output)

        print('*** LOADING INFERENCE INPUT DATA ***')
        inference_in_dict = mat73.loadmat(inference_in_file)

        print('*** LOADING INFERENCE OUTPUT DATA ***')
        inference_out_dict = mat73.loadmat(inference_out_file)

        inference_in_key = list(inference_in_dict.keys())[0]
        inference_out_key = list(inference_out_dict.keys())[0]

        self.input = np.transpose(inference_in_dict[inference_in_key])
        self.output = np.transpose(inference_out_dict[inference_out_key])

        self.len = self.input.shape[0]
Example #17
    def loadMatFile(self, Subject):
        """
        Loads the MEG signal of the specified subject from a .mat file
        :param Subject: Subject ID
        :return: signal (transposed) and sampling frequency
        """
        SubjectFile = os.path.join(self.DataDir, Subject + '_AAL94_norm.mat')
        DataFile = mat73.loadmat(SubjectFile)
        fsample = int(DataFile['AAL94_norm']['fsample'])
        signal = DataFile['AAL94_norm']['trial'][0]  # signal has to be transposed
        return signal.T, fsample
Example #18
def load_dataset_mat(data_path):

    # load data
    try:
        res = sio.loadmat(data_path)
    except Exception:  # likely a MATLAB v7.3 file that scipy cannot read
        import mat73
        res = mat73.loadmat(data_path)
    eeg = res['s'][:6]
    params = {'Fs': 200}

    return eeg, params
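A hypothetical call (the file name is an assumption; the 200 Hz sampling rate is hard-coded above):

eeg, params = load_dataset_mat('recording.mat')
print(eeg.shape, params['Fs'])  # first six rows of 's', Fs == 200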
Example #19
def write_climate2nc(years, file_name, **kwargs):

    # the same .mat file feeds every mtype/etype combination, so load it once
    climate_path = os.path.join(data_dir, file_name)
    data = mat73.loadmat(climate_path)

    for mtype in ['1', '2']:
        for etype in ['sce', 'ctl']:
            prec_ds = write_mat2nc(data, f'PrecipMean_{etype}', years,
                                   **kwargs)
            temp_ds = write_mat2nc(data, f'T2mMean_{etype}', years, **kwargs)
            prec_ds.to_netcdf(
                os.path.join(data_dir, f'prec{years}_{etype}_ds{mtype}.nc'))
            temp_ds.to_netcdf(
                os.path.join(data_dir, f'temp{years}_{etype}_ds{mtype}.nc'))
Example #20
def load_data(filename=[],
              field_LFP=[],
              field_high_pass=[],
              field_label=[],
              field_intervs=[],
              sampling_freq=25000,
              kernel_dies=False):
    # Loads data from a .mat, .pkl, or .csv file
    if len(filename) == 0:
        filename = field_LFP
    _, file_extension = os.path.splitext(filename)
    if file_extension == '.pkl':
        df = pd.read_pickle(filename)
        LFP = (norm_LFP(get_field_pkl(df, field_LFP), sampling_freq))
        high_pass = get_field_pkl(df, field_high_pass)
        Label = (get_field_pkl(df, field_label))
        Intervs = (get_field_pkl(df, field_intervs))
    elif file_extension == '.mat':
        try:
            data = io.loadmat(filename)
        except Exception:  # perhaps the .mat file is in -v7.3 format
            data = mat73.loadmat(filename)
        LFP = get_field_mat(data, field_LFP)

        if not kernel_dies:
            LFP = norm_LFP(LFP, sampling_freq)
        else:
            # normalize the signal in chunks to limit memory usage
            temp = []
            chunk = 5000000
            n = int(np.ceil(len(LFP) / chunk))
            for i in range(n):
                stop = min((i + 1) * chunk, len(LFP))
                temp.append(norm_LFP(LFP[i * chunk:stop], sampling_freq))
            LFP = np.concatenate(temp)
            temp = None
        high_pass = get_field_mat(data, field_high_pass)
        Label = (get_field_mat(data, field_label))
        Intervs = (get_field_mat(data, field_intervs))
    elif file_extension == '.csv':

        LFP = (norm_LFP(np.loadtxt(field_LFP, delimiter=','), sampling_freq))
        high_pass = np.loadtxt(field_high_pass, delimiter=',')
        Label = (np.loadtxt(field_label, delimiter=','))
        Intervs = (np.loadtxt(field_intervs, delimiter=','))
    high_pass = (norm_high_pass(high_pass))
    return (LFP, high_pass, Label, Intervs)
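A sketch of a call for the .mat branch above; the file name and every field name here are assumptions:

LFP, high_pass, Label, Intervs = load_data(filename='session.mat',
                                           field_LFP='lfp',
                                           field_high_pass='hp',
                                           field_label='labels',
                                           field_intervs='intervals')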
Example #21
    def __init__(self):
        dir = "./"
        original = mat73.loadmat(dir + 'data/Shepplogan.mat')['Shepplogan']
        fdk_data = sio.loadmat(dir + 'data/ccbr_fdk_shepplogan.mat')\
        ['ccbr_fdk_shepplogan']
        plt.figure("ori, y=-2.5")
        plt.imshow(original[:, 95, :], vmin=0.98, vmax=1.05, cmap='gray')
        plt.colorbar()
        # plt.savefig("oriy-25.png")

        plt.figure('ccbr, y=-2.5')
        plt.imshow(fdk_data[:, 95, :], vmin=1.25, vmax=1.35, cmap='gray')
        plt.colorbar()
        # plt.savefig("ccbry-25.png")
        plt.show()
Example #22
def loadmat(filename):
    from scipy import io
    import numpy as np
    import mat73
    try:
        data = io.loadmat(filename)
    except Exception:  # most likely a v7.3 file that scipy cannot read
        data = mat73.loadmat(filename)
    key = sorted(data.keys(), reverse=True)[0]
    return np.array(data[key])
Example #23
def load_conn(path='', mtrx_name='matrix', subject_dim=3, modality_dim=2):
    """Load a 4-D connectivity matrix from a MATLAB file"""
    try:
        matfile = sio.loadmat(path)
    except Exception:  # most likely a v7.3 file that scipy cannot read
        try:
            matfile = mat73.loadmat(path)
        except Exception:
            raise Exception('The format needs to be a mat file.')
    mtrx = matfile[mtrx_name]
    mtrx = np.moveaxis(mtrx, [subject_dim, modality_dim], [0, 3])

    return mtrx
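A hypothetical call (the file name is an assumption; with the default arguments, subjects land on axis 0 and modalities on axis 3):

mtrx = load_conn('connectivity_4d.mat')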
Example #24
    def test_file3(self):
        """
        Test larger complex numbers are also loaded
        """
        d = mat73.loadmat(self.testfile3)
        raw1 = d['raw1']
        assert raw1['label'] == [''] * 5
        assert raw1['speakerType'] == ['main'] * 5
        np.testing.assert_array_equal(raw1['channel'], [1, 2, 3, 4, 5])
        np.testing.assert_allclose(
            raw1['measGain'],
            [-1.0160217, -0.70729065, -1.2158508, 0.68839645, 2.464653])
        for i in range(5):
            assert np.isclose(np.sum(raw1['h'][i]), -0.019355850366449)
        for i in range(5):
            assert np.isclose(np.sum(raw1['HSmooth'][i]), -0.019355850366449)
Example #25
def write_climate_diff2nc(file_name,
                          var_name,
                          prcp_units=None,
                          temp_units=None,
                          suffix=''):
    """Write *.mat climate file to *.nc file
    Parameters
    ------
    file_name : str: the name of target file, should be ended with '.mat'

    """

    if suffix:
        suffix = '_' + suffix
    climate_path = os.path.join(data_dir, file_name)
    data = mat73.loadmat(climate_path)
    var_ds = write_mat2nc(data, var_name, 1)
    var_ds.to_netcdf(os.path.join(data_dir, f'{var_name}{suffix}.nc'))
Example #26
def loadmat(mat_file: Union[str, Path]) -> dict:
    """Wrapper of scipy.io loadmat function, works for matv7.3.

    Parameters
    ----------
    mat_file : Union[str, Path]
        file path

    Returns
    -------
    dict
        data
    """
    try:
        data = _loadmat(mat_file)
    except Exception:  # fall back to mat73 for v7.3 files
        data = mat73.loadmat(mat_file)
    return data
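A hypothetical call to the wrapper above (the file name is an assumption):

data = loadmat('recording.mat')  # same call works for pre-v7.3 and v7.3 files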
Example #27
def extract_electrode_labels(conversation_dir):
    """Read the header file electrode labels

    Args:
        conversation_dir (str): conversation folder name/path

    Returns:
        list: electrode labels
    """
    header_files = glob.glob(
        os.path.join(conversation_dir, 'misc', '*_header.mat'))

    if not header_files:
        return

    header = mat73.loadmat(header_files[0])
    labels = header.header.label

    return labels
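A hypothetical call (the conversation folder path is an assumption):

labels = extract_electrode_labels('conversations/sub01_conv01')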
Example #28
def load_header(conversation_dir, subject_id):
    """[summary]

    Args:
        conversation_dir ([type]): [description]
        subject_id (string): Subject ID

    Returns:
        list: labels
    """
    misc_dir = os.path.join(conversation_dir, subject_id, 'misc')
    header_file = os.path.join(misc_dir, subject_id + '_header.mat')
    if not os.path.exists(header_file):
        print(f'[WARN] no header found in {misc_dir}')
        return
    header = mat73.loadmat(header_file)
    labels = header.header.label

    return labels
Example #29
    def test_file2(self):
        """
        Test that complex numbers are loaded correctly
        """
        d = mat73.loadmat(self.testfile2)
        raw1 = d['raw1']
        assert raw1['label'] == [''] * 5
        assert raw1['speakerType'] == ['main'] * 5
        np.testing.assert_array_equal(raw1['channel'], [1, 2, 3, 4, 5])
        np.testing.assert_allclose(
            raw1['measGain'],
            [-1.0160217, -0.70729065, -1.2158508, 0.68839645, 2.464653])
        for i in range(5):
            assert np.isclose(np.sum(raw1['h'][i]), -0.0007341067459898744)

        np.testing.assert_array_almost_equal(raw1['HSmooth'][0][2], [
            0.001139 - 4.233492e-04j, 0.00068 + 8.927040e-06j,
            0.002382 - 7.647651e-04j, -0.012677 + 3.767829e-03j
        ])
Example #30
def write_data_csv(mat_file, csv_file):
    '''
    Creates a CSV spreadsheet for automated breast segmentation, given the segmentation data MATLAB output file.
    If the spreadsheet does not exist at runtime, it will be created; if it does exist, it is appended to.

    NOTE: This function requires the mat73 module, which reads MATLAB structures produced by MATLAB v7.3+.
        See the github page for further details: https://github.com/skjerns/mat7.3

    Arguments:
        mat_file (MATLAB .mat file): Segmentation data MATLAB output file (from DeepSeA)
        csv_file (csv file): Output csv filename (with extension)

    Returns:
        csv_file (csv file): Output csv filename (with extension) with appended data/information
    '''

    # Load and read mat file
    data_dict = mat73.loadmat(mat_file)

    # Store necessary data from dict
    sub_id = data_dict['segdata']['ID']
    vol_R = data_dict['segdata']['VT1']  # right
    vol_L = data_dict['segdata']['VT2']  # left
    vol_T = data_dict['segdata']['VT']  # both

    # Create CSV output dict
    csv_dict = {"Subject ID": [sub_id],
                "Right Breast Volume (cm\u00b3)": [vol_R],
                "Left Breast Volume (cm\u00b3)": [vol_L],
                "Total Breast Volume (cm\u00b3)": [vol_T]}

    # Create dataframe
    df = pd.DataFrame.from_dict(csv_dict, orient='columns')

    # Write to file
    if os.path.exists(csv_file):
        df.to_csv(csv_file, sep=",", header=False, index=False, mode='a')
    else:
        df.to_csv(csv_file, sep=",", header=True, index=False, mode='w')

    return csv_file
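A hypothetical call (both file names are assumptions):

csv_path = write_data_csv('subject01_segdata.mat', 'breast_volumes.csv')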