def load_data(mode='train'):
    """Load the USPS digits ('train', 'test' or 'all') and re-orient each image."""
    if mode not in ('train', 'test', 'all'):
        raise ValueError("mode must be 'train', 'test' or 'all'")
    mat = loadmat('usps.mat')
    x = np.array(mat['usps_{}_input'.format(mode)])
    y = np.array(mat['usps_{}_target'.format(mode)])
    # Flip and rotate each 16x16 digit, then flatten it back to 256 values.
    for i in range(len(x)):
        x[i] = np.rot90(np.flip(x[i].reshape(16, 16), 0), 3).reshape(1, 256)
    return x, y
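# Aside (not part of the original snippet): the flip-then-rotate above is
# equivalent to a transpose, which undoes MATLAB's column-major flattening of
# each 16x16 image. A quick NumPy-only sanity check (assumes `import numpy as np`):
_digit = np.arange(256).reshape(16, 16)
assert np.array_equal(np.rot90(np.flip(_digit, 0), 3), _digit.T)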
def get_imgpath_labels(matfile_name, debug=False, train=True):
    """
    This function fetches the list of image names and labels from the annotation files.
    ARGS:
    matfile_name: The name of the annotation matfile.
    OUTPUTS:
    imgname_list: List containing image names.
    labels: List containing labels adjusted to be zero-based (i.e. starting at 0).
    """
    matpath = os.path.join(DATA_DIR, matfile_name)
    data = loadmat(matpath)
    annos = data['annotations']

    meta_path = os.path.join(DATA_DIR, 'devkit/cars_meta.mat')
    meta = loadmat(meta_path)

    imgname_list = annos['fname']

    if not train:
        return imgname_list, None

    # Class indices in the annotation file are 1-based; shift them to start at 0.
    labels = [l - 1 for l in annos['class']]

    if debug:
        return imgname_list[:500], labels[:500]

    return imgname_list, labels
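# Hypothetical usage of get_imgpath_labels (the file names follow the Stanford
# Cars devkit convention; DATA_DIR is assumed to be defined elsewhere):
# train_imgs, train_labels = get_imgpath_labels('cars_train_annos.mat', train=True)
# test_imgs, _ = get_imgpath_labels('cars_test_annos.mat', train=False)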
Example #3
    def __init__(self,
                 root_dir,
                 image_dir,
                 ann_dir,
                 transform=None,
                 train=True):
        img_dir = os.path.join(root_dir, image_dir)
        att_dir = os.path.join(root_dir, ann_dir,
                               'attributeLabels_continuous.mat')
        imgn_dir = os.path.join(root_dir, ann_dir, 'images.mat')
        attn_dir = os.path.join(root_dir, ann_dir, 'attributes.mat')
        self.attrs = loadmat(att_dir)['labels_cv']
        self.attrnames = loadmat(attn_dir)['attributes']
        self.attrnames = [i[0] for i in self.attrnames]
        self.imgs = loadmat(imgn_dir)['images']
        self.imgs = [os.path.join(img_dir, i[0]) for i in self.imgs]
        self.len = len(self.imgs)
        self.feature_size = len(self.attrnames)
        self.idx = np.arange(self.len)
        np.random.shuffle(self.idx)
        if train:
            self.dataidx = self.idx[:self.len * 4 // 5]
        else:
            self.dataidx = self.idx[self.len * 4 // 5:]
        self.transform = transform

        print('completed loading dataset (Training = {}), length is {}'.format(
            train, self.len),
              flush=True)
Example #4
def prepare_raw_data(data_root=Path('data/input/stanford')):
    devkit = data_root / 'devkit'
    cars_meta = pd.DataFrame(loadmat(devkit / 'cars_meta.mat'))
    cars_meta.to_csv(devkit / 'cars_meta.csv', index=False)

    cars_annos = pd.DataFrame(
        loadmat(devkit / 'cars_annos.mat')['annotations'])
    cars_annos['class'] -= 1
    cars_annos.to_csv(devkit / 'cars_annos.csv', index=False)
Example #5
def load_data():
    all_girths = mp.loadmat(RESULT_DIR + 'girths.mat')
    all_girths = all_girths['girths']
    if print_switch:
        print_list(all_girths)
    all_girths = np.array(all_girths)

    all_heights = mp.loadmat(RESULT_DIR + 'heights.mat')
    all_heights = all_heights['height']
    if print_switch:
        print_list(all_heights)
    all_heights = np.array(all_heights)
 def _loadLabels(self):
     mat = mp.loadmat(osp.join(self._root, "predicate.mat"))
     self._relList = mat["predicate"]
     self._numRelClass = len(self._relList)
     self._relMapping = {}
     for i in range(len(self._relList)):
         self._relMapping[self._relList[i]] = i
     mat = mp.loadmat(osp.join(self._root, "objectListN.mat"))
     self._objList = mat["objectListN"]
     self._numObjClass = len(self._objList) + 1
     self._objMapping = {}
     self._objMapping["__BG"] = 0
     for i in range(len(self._objList)):
         self._objMapping[self._objList[i]] = i + 1
Example #7
def main():
    # Load the boundary.
    bpoly = np.array(loadmat("../targets/bounds.mat")["bpolyh1"])

    # Load the world points.
    Wpts = np.array(loadmat("../targets/world_pts.mat")["world_pts"])

    # Load the example target image.
    I = imread("../targets/example_target.png")

    Ipts = cross_junctions(I, bpoly, Wpts)

    # You can plot the points to check!
    print(Ipts)
Example #8
def save_measure():
    girths_path = RESULT_DIR + 'girths.mat'
    heights_path = RESULT_DIR + 'heights.mat'
    girths = mp.loadmat(girths_path)
    heights = mp.loadmat(heights_path)

    girths = girths['girths']
    heights = heights['height']
    heights = np.array(heights).reshape(np.array(girths).shape[0], 1)

    measures = np.column_stack((girths, heights))
    measures = np.transpose(measures)
    print(measures)
    np.save(open(os.path.join(ROOT_DIR, "measures.npy"), "wb"), measures)
Example #9
def test_load_data():
    '''
    Test that data is loaded correctly
    '''
    # Load data
    raw_behavior_summary = mat4py.loadmat('TB41_behavior_summary.mat')
    raw_encoding_struct = mat4py.loadmat('TB41_encoding_structs.mat')
    assert len(raw_behavior_summary.keys()) == 11
    assert len(raw_encoding_struct.keys()) == 7
    assert len(raw_encoding_struct['left_onsetCells']) == 173
    assert len(raw_encoding_struct['right_onsetCells']) == 173
    assert len(raw_encoding_struct['rewardsCell']) == 173
    assert len(raw_behavior_summary['correct']) == 153
    assert len(raw_behavior_summary['incorrect']) == 20
    assert len(raw_behavior_summary['left']) == 79
Example #10
def put_all_vertexes_together():
    mesh_names_path = RESULT_DIR + 'meshNames.mat'
    mesh_names = mp.loadmat(mesh_names_path)
    mesh_names = mesh_names['meshNames']
    vertex = np.zeros((len(mesh_names), V_NUM, 3), dtype=np.double)
    for i, mesh_name in enumerate(mesh_names):
        print("序号:%s  值:%s" % (i + 1, mesh_name))
        mesh_path = MESHES_DIR + mesh_name
        points = mp.loadmat(mesh_path)
        points = points['points']
        points = np.array(points)
        vertex[i, :, :] = points

    print(vertex.shape[0])
    np.save(open(os.path.join(ROOT_DIR, "all_vertex.npy"), "wb"), vertex)
 def _loadLabels(self):
     mat = mp.loadmat(osp.join(self._root, "mat/predicate.mat"))
     self._relList = mat["predicate"]
     self._relList = ['__background__'] + self._relList
     self._numRelClass = len(self._relList)
     self._relMapping = {}
     for i in range(len(self._relList)):
         self._relMapping[self._relList[i]] = i
     mat = mp.loadmat(osp.join(self._root, "mat/objectListN.mat"))
     self._objList = mat["objectListN"]
     self._objList = ['__background__'] + self._objList
     self._numObjClass = len(self._objList)
     self._objMapping = {}
     for i in range(len(self._objList)):
         self._objMapping[self._objList[i]] = i
Example #12
 def test_loadmat2(self):
     """Test reading mat files using a fileobject"""
     for filename, result in test_data['loadmat'].items():
         with self.subTest(msg=filename):
             with open('data/' + filename, 'rb') as fileobj:
                 data = mat4py.loadmat(fileobj, meta=False)
             self.assertEqual(data, result)
Example #13
def dynamic_load(Pitch1, Pitch2, Turbxdd, Turbydd, Wsp, SeedInfo):
    Seed1, Seed2 = (1, 1) if SeedInfo == 1 else (1, 0)
    CaseNames = [
        'M000P' + str(Pitch1) + '00_T1_S1_Wsp' + str(Wsp) + '_s' + str(Seed1),
        'M000P' + str(Pitch2) + '00_T1_S1_Wsp' + str(Wsp) + '_s' + str(Seed2)
    ]
    CaseOrder = ['Turb1Pitch' + str(Pitch1), 'Turb1Pitch' + str(Pitch2)]
    Dict = {}
    MainDict = {}
    for n, Case in enumerate(CaseNames):
        # mat4py - load the .mat file into a Python dict
        temp = mat4py.loadmat(path + '\\' + Case + '.mat')
        Dict[CaseOrder[n]] = pd.DataFrame.from_dict(temp['sig'])
        if Case[9:11] == 'T0':
            # For T0 (NoTurb) cases there is no 'EqWsp' column.
            Dict[CaseOrder[n]].columns = DfHeaders[:-1]
        else:
            Dict[CaseOrder[n]].columns = DfHeaders
        # Time, Omega and Vo are always needed for the input-V figure and the PSD.
        cols = GetUniqueinList([Turbxdd, Turbydd, 'Time', 'Vo', 'Omega'])  # avoid shadowing the built-in `list`
        Dict[CaseOrder[n]] = Dict[CaseOrder[n]][cols]
    for key in Dict.keys():
        MainDict[key] = Dict[key].to_json(orient='split')

    return json.dumps(MainDict), []
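# Note (not from the original snippet): each serialized entry can be turned back
# into a DataFrame with pd.read_json(MainDict[key], orient='split').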
def main():
    matlab_data = loadmat('SensorSpace/FLISj4.mat')

    data = matlab_data['data']

    num_channels = 273

    x_train = np.array(data['X_train'])
    x_train = np.transpose(x_train, (1, 0))
    x_train_small = x_train[:num_channels]
    x_train_epochs = np.array(np.array_split(x_train_small, 160, axis=1))

    y_train = np.array(data['Y_train'])
    y_train = np.transpose(y_train, (1, 0))
    value, time = np.where(y_train == 1)

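    # MNE events array: one row per epoch, formatted as [sample, 0, event_id].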
    events = np.arange(start=0, stop=8000, step=50)
    events = np.expand_dims(events, axis=1)
    events = np.pad(events, ((0, 0), (0, 2)), mode='constant', constant_values=0)

    for i in range(160):
        sample = i * 50
        ind = np.where(time == sample)
        events[i][2] = value[ind]

    info = mne.create_info(ch_names=num_channels, sfreq=100, ch_types='mag')
    epochs = mne.EpochsArray(x_train_epochs, info=info, events=events)

    mne_wavelet_plot(epochs)
Example #15
    def __getitem__(self, idx):
        filename, label = self.images_info.iloc[idx, [0, 1]]
        try:
            mat = mat4py.loadmat(osp.join(self.images_path, filename))
        except Exception as e:
            print('\n\n\n')
            print(filename)
            raise Exception(e)
        image = np.asarray(mat['img'])
        if image.ndim == 2:
            image = np.repeat(np.expand_dims(image, 2), 3, axis=2)
        if self.scale_image:
            image = scale_img(image)
        image = image.astype(np.float32)

        if self.preprocess:
            image = self.preprocess(image=image)['image']
        if self.augmentation:
            image = self.augmentation(image=image)['image']
        if self.transform:
            image = self.transform(image)

        label = self.class_name_to_id[label]
        if type(image) == str:
            print(f'Something went wrong with {filename} image')
        if type(label) == str:
            print(f'Something went wrong with {filename} label')

        return image, label
Example #16
def get_train_or_test_annotations(is_for_training=True):
    annotations_pickle = dict()
    if is_for_training:
        pickle_path = os.path.join(PATH, 'train_annotations.pickle')
        annotations_path = os.path.join(devkit, 'cars_train_annos.mat')
    else:
        pickle_path = os.path.join(PATH, 'test_annotations.pickle')
        annotations_path = os.path.join(devkit,
                                        'cars_test_annos_withlabels.mat')
    if not os.path.exists(pickle_path):
        annotations = loadmat(annotations_path).get('annotations')
        bbox_x1 = annotations.get('bbox_x1')
        bbox_y1 = annotations.get('bbox_y1')
        bbox_x2 = annotations.get('bbox_x2')
        bbox_y2 = annotations.get('bbox_y2')
        label = annotations.get('class')
        fnames = annotations.get('fname')
        for i, fname in enumerate(fnames):
            data = dict()
            data['bounding_box'] = [
                bbox_x1[i], bbox_y1[i], bbox_x2[i], bbox_y2[i]
            ]
            data['class_index'] = label[i] - 1
            data['class_name'] = labels[data['class_index']]
            annotations_pickle.update({fname: data})
        pickle.dump(annotations_pickle, open(pickle_path, 'wb'))
    else:
        annotations_pickle = pickle.load(open(pickle_path, "rb"))
    return annotations_pickle
def readTrainData(path):

    files_list = os.listdir(path)
    files_list.sort()
    multiframe = []

    for fileindex in range(len(files_list)):
        filename = files_list[fileindex]
        datamat = mat4py.loadmat(os.path.join(path, filename))

        if 0:  # disabled block: these fields are parsed but not used below
            layer_num = datamat["layer_num"]
            transform_matrix_local = np.array(
                datamat["transform_matrix_local"])
            transform_matrix_global = np.array(
                datamat["transform_matrix_global"])

        labels = datamat['label']
        lidar_data = datamat["layer_data"]

        frame = np.empty((0, 6))
        for layerindex in range(VALID_LAYER_NUM):
            data = np.squeeze(np.array(lidar_data[layerindex]))
            label = np.squeeze(np.array(labels[layerindex]))
            label = np.reshape(label, (-1, 1))
            index = layerindex * np.ones(label.shape)
            layer = np.concatenate((data, label, index), axis=1)
            frame = np.concatenate((frame, layer), axis=0)

        multiframe.append(frame)

    return multiframe
    def save_trial_data_to_file(self, trial_data, experiment_data):
        # add the trial number as the first line.
        self.current_saved_file.write('Trial:{trial_number}\n'.format(trial_number=trial_data['Trial#']))

        # loop over all keys and values.
        for (key, value) in trial_data.items():
            if key != 'Trial#':
                self.current_saved_file.write('{key}:{value}\n'.format(key=key, value=value))

        # loop over all experiment_data
        self.current_saved_file.write(experiment_data.to_string())

        loaded_dict = dict()
        if trial_data['Trial#'] > 1:
            loaded_dict = mfp.loadmat(self.directory_path +
                                      self.inner_directory_path +
                                      self.current_saved_file_name + '.mat')

        trial_num_string = 'trial_' + str(trial_data['Trial#'])
        # delete the key with the '#' char because Matlab cannot read it as an attribute in its struct.
        trial_data['TrialNum'] = trial_data['Trial#']
        del trial_data['Trial#']

        loaded_dict[trial_num_string] = {'trial_data': trial_data,
                                         'experiment_data': experiment_data.to_dict()}

        mfp.savemat(self.directory_path +
                    self.inner_directory_path +
                    self.current_saved_file_name + '.mat', loaded_dict)
        pass
Example #19
def main(args):
    data = loadmat(args.filename)
    trainx, testx, trainy, testy = train_test_split(data['X'],
                                                    data['y'],
                                                    test_size=args.train_split,
                                                    random_state=2)
    valx, evalx, valy, evaly = train_test_split(testx, testy, test_size=0.5)
    data_size = len(trainx[0])
    encoder_neurons = [data_size, data_size // 2, data_size // 4]  # layer sizes must be ints
    clf = KNN()
    clf.fit(trainx)
    print("Results Validation KNN")
    print_metrics(valy, clf.predict(valx))
    print("Results Evaluation KNN")
    print_metrics(evaly, clf.predict(evalx))

    clf = PCA(n_components=args.components)
    clf.fit(trainx)
    print("Results Validation PCA")
    print_metrics(valy, clf.predict(valx))
    print("Results Evaluation PCA")
    print_metrics(evaly, clf.predict(evalx))

    clf = VAE(encoder_neurons=encoder_neurons,
              decoder_neurons=encoder_neurons[::-1],
              epochs=args.epochs,
              contamination=args.contamination,
              gamma=args.gamma,
              capacity=args.capacity)
    clf.fit(trainx)
    print("Results Validation VAE")
    print_metrics(valy, clf.predict(valx))
    print("Results Evaluation VAE")
    print_metrics(evaly, clf.predict(evalx))
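# print_metrics is not defined in the snippet above; a minimal sketch of what it
# could look like, assuming scikit-learn and binary outlier labels (0 = inlier, 1 = outlier):
from sklearn.metrics import accuracy_score, precision_score, recall_score

def print_metrics(y_true, y_pred):
    # Report a few standard classification scores for the predicted labels.
    print('accuracy :', accuracy_score(y_true, y_pred))
    print('precision:', precision_score(y_true, y_pred))
    print('recall   :', recall_score(y_true, y_pred))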
def load_reid_data(rap_mat_file):
    """
    For each person in the RAP dataset, one image from one camera on one day was sampled as the query,
    so one person may have several queries from the same camera on different days. To change this setting,
    we refer you to https://github.com/dangweili/RAP/blob/master/person-reid/evaluation/rap2_evaluation_features.m
    """
    RAP_dictionary = mat4py.loadmat(rap_mat_file)
    name_of_imgs = RAP_dictionary["RAP_annotation"]["name"]
    all_person_ids = RAP_dictionary["RAP_annotation"][
        "person_identity"][:41585]
    train_ids_indices = RAP_dictionary["RAP_annotation"]["partition_reid"][
        "train_index"]
    gallery_ids_indices = RAP_dictionary["RAP_annotation"]["partition_reid"][
        "gallery_index"]
    query_ids_indices = RAP_dictionary["RAP_annotation"]["partition_reid"][
        "query_index"]

    name_of_imgs = [subsub for sub in name_of_imgs for subsub in sub]
    all_person_ids = [subsub for sub in all_person_ids for subsub in sub]
    train_ids_indices = [subsub for sub in train_ids_indices for subsub in sub]
    gallery_ids_indices = [
        subsub for sub in gallery_ids_indices for subsub in sub
    ]
    #query_ids_indices = [subsub for sub in query_ids_indices for subsub in sub]

    return name_of_imgs, all_person_ids, train_ids_indices, gallery_ids_indices, query_ids_indices
Example #21
def load_Wb(root_folder, filename):
    # mat4py reads raw bytes, so open the file in binary mode.
    with open(root_folder + filename, 'rb') as fp:
        data = mat4py.loadmat(fp)
    W = np.array(data['W'])
    B = np.array(data['B'])

    return W, B
Example #22
    def __init__(self,
                 cfg: Dict,
                 mode: str):
        '''
        Initializes the data loader.

        Args:
            cfg: a dict with configuration values.
            mode: the part of the dataset to be used (must be 'train' or 'test').
        '''
        
        self.n = 0
        self.mode = mode
        self.cfg = cfg
        self.img_w = cfg['common']['img_width']
        self.img_h = cfg['common']['img_height']
        self.alphabet = cfg['common']['alphabet']
        self.dataset_dir = Path(self.cfg['common']['iiit5k_dir'])

        if self.mode == 'train':
            mat_path = self.dataset_dir.joinpath('traindata.mat')
            key = 'traindata'
            self.batch_size = self.cfg['train']['batch_size']
        elif self.mode == 'test':
            mat_path = self.dataset_dir.joinpath('testdata.mat')
            key = 'testdata'
            self.batch_size = self.cfg['eval']['batch_size']
        else:
            raise ValueError('The mode must be "train" or "test"!')

        self._df = pd.DataFrame(loadmat(str(mat_path))[key])
Example #23
    def __init__(self, root='datasets', **kwargs):
        self.root = root
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.test_dir = osp.join(self.dataset_dir, 'bounding_box_test')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.test_dir,
        ]
        self.check_before_run(required_files)

        duke_attr = mat4py.loadmat(
            osp.join(self.dataset_dir, 'duke_attribute.mat'))['duke_attribute']
        sorted_attrs = sorted(duke_attr['train'].keys())
        sorted_attrs.remove('image_index')
        attr_dict = {i: str(attr) for i, attr in enumerate(sorted_attrs)}

        train = self.process_dir(self.train_dir, duke_attr['train'],
                                 sorted_attrs)
        test = val = self.process_dir(self.test_dir, duke_attr['test'],
                                      sorted_attrs)

        super(DukeMTMCAttr, self).__init__(train,
                                           val,
                                           test,
                                           attr_dict=attr_dict,
                                           **kwargs)
Example #24
def plotSumsonMap():

    m = folium.Map([51.4545, -2.58], zoom_start=13)
    n_bins = 100
    (lat_min, lat_max) = (51.427563, 51.481476)
    (lng_min, lng_max) = (-2.631043, -2.544795)

    lat_range = np.linspace(lat_min, lat_max, n_bins)
    lng_range = np.linspace(lng_min, lng_max, n_bins)

    lat_res = (lat_range[1] - lat_range[0]) / 2
    lng_res = (lng_range[1] - lng_range[0]) / 2

    data = loadmat('Data/latlngthetasum.mat')
    lltss = data['llts']
    #lltss = lltss[:200]
    Sums = [Sum for Lat, Lng, Theta, Sum in lltss]
    Sum_max = max(Sums)

    bar = Bar('Plotting Tiles', max=len(Sums))
    for llts in lltss:
        Lat, Lng, Theta, Sum = llts
        bounds = [[Lat + lat_res, Lng - lng_res],
                  [Lat - lat_res, Lng + lng_res]]
        opacity = Sum / Sum_max
        folium.Rectangle(bounds,
                         color='#0000ff',
                         fill=True,
                         fill_color='#0000ff',
                         opacity=opacity,
                         fill_opacity=opacity,
                         tooltip=str(Sum)).add_to(m)
        bar.next()
    bar.finish()
    m.save('Maps/TotalDepartures.html')
def get_firing_data(folder_to_search_in, session_id, firing_data):
    firing_times_all_cells = []
    session_ids_all = []
    cell_names_all = []
    cluster_id_all = []
    number_of_spikes_all = []
    cell_counter = 1
    for name in glob.glob(folder_to_search_in + '/*' + session_id + '*'):
        if os.path.exists(name) and os.path.isdir(name) is False:
            if 'EEG' not in name and 'EGF' not in name and 'POS' not in name and 'md5' not in name:
                cell_id = name.split('\\')[-1].split('_')[-1].split('.')[0]
                print('I found this cell:' + name)
                firing_times = pd.DataFrame()
                firing_times['times'] = loadmat(name)['cellTS']
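                # .sum() over a column of lists concatenates them (list + list),
                # flattening [[t1], [t2], ...] into a plain list of spike times.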
                firing_times['times'] = firing_times['times'].sum()
                firing_times_all_cells.append(firing_times.times.values)
                cell_names_all.append(cell_id)
                session_ids_all.append(session_id)
                cluster_id_all.append(cell_counter)
                number_of_spikes_all.append(len(firing_times.times))
                cell_counter += 1
    firing_data['session_id'] = session_ids_all
    firing_data['cell_id'] = cell_names_all
    firing_data['cluster_id'] = cluster_id_all
    firing_data['firing_times'] = firing_times_all_cells
    firing_data['number_of_spikes'] = number_of_spikes_all
    return firing_data
Example #26
def get_y_train(sample):
    matlab_data = loadmat('SensorSpace/FLISj{}.mat'.format(sample))
    data = matlab_data['data']
    y_train = np.array(data['Y_train'])

    y_train_samples = [np.where(x == 1)[0][0] for x in y_train]
    return np.array(y_train_samples)
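# Toy illustration (not part of the snippet above) of the one-hot -> index
# conversion used in get_y_train, assuming `import numpy as np`:
_onehot = np.array([[0, 1, 0], [1, 0, 0]])
print([int(np.where(row == 1)[0][0]) for row in _onehot])  # prints [1, 0]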
Example #27
def main():
    #
    # get arguments and invoke the conversion routines
    #

    parser = argparse.ArgumentParser(
        description='Convert Matlab '
        'MAT-files to JSON formatted text files, and the other way around.')

    parser.add_argument('file',
                        nargs='+',
                        help='path to a Matlab MAT-file or a JSON file')
    parser.add_argument('--remove-input',
                        action='store_const',
                        const=True,
                        default=False,
                        help='remove input file after conversion')
    parser.add_argument('-f',
                        '--force',
                        action='store_const',
                        const=True,
                        default=False,
                        help='overwrite existing files when converting')
    args = parser.parse_args()

    for path in args.file:
        spl = os.path.splitext(path)
        ext = spl[1].lower()

        if ext == '.mat':
            dest = spl[0] + '.json'
            try:
                if os.path.exists(dest) and not args.force:
                    raise Exception('File {} already exists.'.format(dest))
                data = loadmat(path)
                with open(dest, 'w') as fp:
                    json.dump(data, fp)
                if args.remove_input:
                    os.remove(path)
            except Exception as e:
                print('Error: {}'.format(e))
                exit(1)

        elif ext == '.json':
            dest = spl[0] + '.mat'
            try:
                if os.path.exists(dest) and not args.force:
                    raise Exception('File {} already exists.'.format(dest))
                with open(path) as fp:
                    data = json.load(fp)
                savemat(dest, data)
                if args.remove_input:
                    os.remove(path)
            except RuntimeError as e:
                print('Error: {}'.format(e))
                exit(1)
        else:
            print('Unsupported file extension on file: {0}'.format(path))
            exit(1)
Example #28
def prepare_data(file, cutoff=5, fs=250.0, order=6):
    raw_data = loadmat(file)
    useful_data = raw_data.copy()
    X = useful_data['y']
    flash = useful_data['trig']
    X_filtered = butter_bandpass_filter(X, cutoff, fs, order)

    return X_filtered, flash
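# butter_bandpass_filter is not defined in the snippet above; a minimal sketch of
# one possible implementation, assuming the single `cutoff` argument is meant as
# a low-pass corner frequency (requires scipy.signal):
from scipy.signal import butter, filtfilt

def butter_bandpass_filter(data, cutoff, fs, order=6):
    nyq = 0.5 * fs  # Nyquist frequency
    b, a = butter(order, cutoff / nyq, btype='low')
    # filtfilt applies the filter forward and backward for zero phase shift.
    return filtfilt(b, a, data, axis=0)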
Example #29
def load_filename_to_text_dict(dataset_path, name):
    filename_to_text = loadmat((dataset_path / f"{name}.mat").as_posix())[name]
    filename_to_text = {
        Path(path).name: text
        for path, text in zip(filename_to_text["ImgName"],
                              filename_to_text["GroundTruth"])
    }
    return dict_to_hash_table(filename_to_text)
Example #30
def getDate():
    data = mat4py.loadmat(
        '/home/shifa-yang/Desktop/work/study/大三上/机器学习/实验/实验4/lms.mat')
    X = data['X']
    y = data['y']
    X = np.array(X)
    y = np.array(y)
    return X, y
Example #31
 def test_save_load_mat(self):
     """Test writing mat files, and reading them again"""
     for filename, result in test_data['loadmat'].items():
         tempname = 'data/{}.temp'.format(filename)
         try:
             mat4py.savemat(tempname, result)
             data = mat4py.loadmat(tempname, meta=False)
         finally:
             os.remove(tempname)
         self.assertEqual(data, result)
Example #32
def main():
    #
    # get arguments and invoke the conversion routines
    #

    parser = argparse.ArgumentParser(
        description='Convert Matlab '
        'MAT-files to JSON formatted text files, and the other way around.')

    parser.add_argument(
        'file', nargs='+',
        help='path to a Matlab MAT-file or a JSON file')
    parser.add_argument(
        '--remove-input', action='store_const', const=True,
        default=False, help='remove input file after conversion')
    parser.add_argument(
        '-f', '--force', action='store_const', const=True,
        default=False, help='overwrite existing files when converting')
    args = parser.parse_args()

    for path in args.file:
        spl = os.path.splitext(path)
        ext = spl[1].lower()

        if ext == '.mat':
            dest = spl[0] + '.json'
            try:
                if os.path.exists(dest) and not args.force:
                    raise Exception('File {} already exists.'.format(dest))
                data = loadmat(path)
                with open(dest, 'w') as fp:
                    json.dump(data, fp)
                if args.remove_input:
                    os.remove(path)
            except Exception as e:
                print('Error: {}'.format(e))
                exit(1)

        elif ext == '.json':
            dest = spl[0] + '.mat'
            try:
                if os.path.exists(dest) and not args.force:
                    raise Exception('File {} already exists.'.format(dest))
                with open(path) as fp:
                    data = json.load(fp)
                savemat(dest, data)
                if args.remove_input:
                    os.remove(path)
            except RuntimeError as e:
                print('Error: {}'.format(e))
                exit(1)
        else:
            print('Unsupported file extension on file: {0}'.format(path))
            exit(1)
Example #33
    def setupData(self):
        '''
        Load .mat file, convert X and Y lists to numpy arrays
        '''
        data = {}

        data = loadmat(self.infile)

        self.X = np.array(data['X'])

        self.y = np.array(data['y'])

        n = self.X.shape[1]

        # Making an assumption about square images
        self.img_w = int(np.sqrt(n))

        self.img_h = self.img_w
	def dicom2JSON(pth,tasknames):
		"""convert relevant DICOM header .mat files in pth to JSON files"""
		# find all the .mat files within the directory pth
		# load each using mat4py
		# grab the useful metadata from that part of the struct
		# save it in an appropriately named JSON file within pth
		fieldNames = [
			'RepetitionTime',
			'TaskName', # this will be filled in from tasknames
			'Manufacturer',
			'ManufacturerModelName',
			'MagneticFieldStrength',
			'HardcopyDeviceSoftwareVersion',
			'ReceiveCoilName',
			'GradientSetType',
			'MRTransmitCoilSequence',
			'ScanningSequence',
			'SequenceVariant',
			'ScanOptions',
			'MRAcquisitionType',
			'SequenceName',
			'EchoTime',
			'SliceTiming',
			'SliceEncodingDirection',
			'FlipAngle',
			'MultibandAccelerationFactor'
		]

		outdict = {}

		for t in tasknames: # grab all .mats associated with this task
			mfiles=[f for f in os.listdir(pth) if fnmatch.fnmatch(f,'*'+t+'*.mat')]
			for runnum in range(len(mfiles)):
				data = mat4py.loadmat(pth+mfiles[runnum])
				for thisfield in fieldNames:
					try: 
						outdict[thisfield]=data[thisfield]
					except KeyError: 
						continue
				outdict['TaskName']=t
				jsonName = mfiles[runnum].replace('.mat','.json',1)
				with open(pth+jsonName,'w') as jsonfile:
					json.dump(outdict,jsonfile)
def clean_behaviorals(*argu):
	"""
	Short script for cleaning behavioral directory in advance of running analyses. Removes .mat files 
	corresponding to any behavioral runs that are missing an spm_inputs field. Before deleting these files,
	script creates a new directory labelled "behavioral_archive" into which all original behavioral files are copied. 


	Arguments:
	[1]: Full path to study directory 

	"""
	argu=argu[0]


	#Move to study directory
	os.chdir(argu[1])

	#rename behavioral folder "behavioral_archive". From here on out, we will not touch this folder, except to copy files out of
	#it and into a new behavioral folder.
	os.rename('behavioral', 'behavioral_archive')

	#create a new folder labelled "behavioral"

	os.mkdir('behavioral')

	#Copy files from behavioral_archive folder, move copied files into new behavioral folder
	for behavfiles in os.listdir(os.path.join(argu[1], 'behavioral_archive')):
		print(behavfiles)
		shutil.copy(os.path.join('behavioral_archive', behavfiles), os.path.join(argu[1], 'behavioral'))
		
	for behavfiles in os.listdir(os.path.join(argu[1],'behavioral')):
		#check if each matfile contains spm_inputs field
		matfile = mat4py.loadmat(os.path.join('behavioral', behavfiles))
		if 'spm_inputs' not in matfile.keys(): 
			print("removing file: " + behavfiles)
			os.remove(os.path.join('behavioral', behavfiles))
import os
import re
import json
import mat4py


TARGET_DIR = "/data/mat/"


# http://stackoverflow.com/a/16974952
for matDir, dirs, files in os.walk(TARGET_DIR):

    for fileName in files:

        jsonDir = re.sub('/mat/', '/json/', matDir)

        fileNameBase, fileNameExtension = os.path.splitext(fileName)
        
        if (fileNameExtension == '.mat'):

            inFilePath  = os.path.join(matDir, fileName)
            outFilePath = os.path.join(jsonDir, fileNameBase + '.json')

            data = mat4py.loadmat(inFilePath)

            if not os.path.exists(jsonDir):
                os.makedirs(jsonDir)

            with open(outFilePath, "w") as outFile:
                json.dump(data, outFile)
import mat4py
import json

hand_written_digits = mat4py.loadmat('ex4data1.mat')
weights = mat4py.loadmat('ex4weights.mat')


with open('digits.json', 'w') as outfile:
    json.dump(hand_written_digits, outfile)

with open('weights.json', 'w') as outfile:
    json.dump(weights, outfile)

# fd = open('data_y.txt', 'w')
# fd.write(jsondata)
# fd.close()
Example #38
 def test_loadmat(self):
     """Test reading mat files"""
     for filename, result in test_data['loadmat'].items():
         data = mat4py.loadmat('data/' + filename, meta=False)
         self.assertEqual(data, result)
	def behav2TSV(fname,outpath,outfile):
		"""convert behavioral .mat files into .tsv files"""
		def argsort(S):
			return sorted(range(len(S)),key=S.__getitem__)

		print("Converting behavioral files into .tsv files...")
		# load the behavioral .mat file
		bdata=mat4py.loadmat(fname)
		# check that all the necessary variables are in there/fill missing with None
		if not 'spm_inputs' in bdata:
			print('ERROR: No spm_inputs variable in behavioral files!')
			return
		
		Namecol=[]
		Onscol=[]
		Durcol=[]
		# elaborate your spm_inputs variable
		for i in range(len(bdata['spm_inputs']['ons'])): #for each condition
			try: 
				namecol=[bdata['spm_inputs']['name'][i]] * (1+len(bdata['spm_inputs']['ons'][i]))
				Namecol = Namecol + namecol
				Onscol = Onscol + bdata['spm_inputs']['ons'][i]
				Durcol = Durcol + bdata['spm_inputs']['dur'][i]
			except TypeError: #if it's an int, meaning it's length 1
				namecol=[bdata['spm_inputs']['name'][i]]
				Namecol = Namecol + namecol
				Onscol = Onscol + [bdata['spm_inputs']['ons'][i]]
				Durcol = Durcol + [bdata['spm_inputs']['dur'][i]]
		inds=argsort(Onscol)

		for k in ['items_run','key','RT']:
			if not k in bdata:
				bdata[k] = [0] * (len(inds)+1)

		# set up your column names
		fieldNames=['onset','duration','condition','item','key','RT']
		if eventVars[0]: fieldNames=fieldNames+eventVars
		print(fieldNames)
		# open up a TSV file for writing
		with open(outpath+'/'+outfile,'w') as tsvfile:
			writer=csv.DictWriter(tsvfile,delimiter='\t',fieldnames=fieldNames)
			writer.writeheader()
			for i in range(len(inds)):
				try:
					newrow={
						'onset': Onscol[inds[i]],
						'duration': Durcol[inds[i]][0],
						'condition': Namecol[inds[i]],
						'item': bdata['items_run'][i],
						'key': bdata['key'][i][0],
						'RT': round(bdata['RT'][i][0],3)}
				except:
					newrow={
						'onset': Onscol[inds[i]],
						'duration': Durcol[inds[i]],
						'condition': Namecol[inds[i]],
						'item': bdata['items_run'][i],
						'key': bdata['key'][i][0],
						'RT': round(bdata['RT'][i][0],3)}
				# IMPORTANT: eventVars must be a sequence, even if only 1 item
				# also, this assumes that whatever your eventVars are,
				# they are the same length as your spm_inputs variable
				# i.e., one entry per stimulus run
				
				if eventVars[0]:
					for j in range(len(eventVars)):
						try:
							newrow[eventVars[j]]=bdata[eventVars[j]][i]
						except KeyError:
							print('Warning: no event variable named %s in file %s' % (eventVars[j],fname))
				writer.writerow(newrow)
		params['params']['contrast']['contrasts'][str(num_cons+1)]=OrderedDict()
		con_name = input('Type name for contrast #{}: '.format(str(num_cons+1)))
		params['params']['contrast']['contrasts'][str(num_cons+1)]['name'] = con_name
		params['params']['contrast']['contrasts'][str(num_cons+1)]['con_type'] = 'T'
		con_vals = input('Type contrast values separated by spaces: ')
		con_vals = [float(x) for x in con_vals.split(' ')]
		if len(con_vals) != len(cond_names):
			print("Incorrect number of contrast values! Try again.")
			ask_for_con()
		params['params']['contrast']['contrasts'][str(num_cons+1)]['cond_names'] = cond_names
		params['params']['contrast']['contrasts'][str(num_cons+1)]['con_vals'] = con_vals

	paramfile = input('Type the full path to your parameter file, minus extension: ')
	with open(paramfile+'.json','r') as jsonfile:
		params = json.load(jsonfile,object_pairs_hook=OrderedDict)
	matfile = mat4py.loadmat(os.path.join(args.spm_mat_path,'SPM_betas.mat'))
	cond_names = [betaname for betaname in matfile['betanames']]
	# print('Condition names of betas in your SPM.mat file:')
	# for x in cond_names: print(x)
	params['params']['contrast']['contrasts']={}
	add_con = 'y'
	while add_con == 'y':
		print('Condition names of betas in your SPM.mat file:')
		for x in cond_names: print(x)
		ask_for_con()
		add_con = input('Add more contrasts? (y/n) ')

	print('Saving your parameter file.')
	with open(paramfile+'.json','w') as jsonfile:
		json.dump(params,jsonfile)