Example #1
    def _run_interface(self, runtime):
        from nltools.data import Brain_Data
        import os
        in_file = self.inputs.in_file
        mask = self.inputs.mask
        low_pass = self.inputs.low_pass_cutoff
        high_pass = self.inputs.high_pass_cutoff
        TR = self.inputs.sampling_rate

        if low_pass == 0:
            low_pass = None
        if high_pass == 0:
            high_pass = None

        dat = Brain_Data(in_file, mask=mask)
        # Handle no filtering
        if low_pass or high_pass:
            dat = dat.filter(sampling_rate=TR,
                             low_pass=low_pass,
                             high_pass=high_pass)

        # Generate output file name
        out_file = os.path.split(in_file)[-1].split(
            '.nii.gz')[0] + '_filtered.nii.gz'
        dat.write(out_file)

        self._out_file = out_file

        runtime.returncode = 0
        return runtime
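For reference, the same filtering step can be run outside a Nipype interface. A minimal sketch, assuming nltools is installed; the file name, TR, and cutoffs are illustrative:

from nltools.data import Brain_Data

dat = Brain_Data('func.nii.gz')
# Pass None for either cutoff to skip that side of the band-pass,
# mirroring the zero-means-disabled convention above.
dat = dat.filter(sampling_rate=2.0, low_pass=0.1, high_pass=0.008)
dat.write('func_filtered.nii.gz')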
Example #2
def test_roc(tmpdir):
    sim = simulator.Simulator()

    r = 10
    sigma = .1
    y = [0, 1]
    n_reps = 10
    #     output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=None)
    dat = Brain_Data(data=sim.data, Y=pd.DataFrame(sim.y))

    algorithm = 'svm'
    # output_dir = str(tmpdir)
    # cv = {'type': 'kfolds', 'n_folds': 5, 'subject_id': sim.rep_id}
    extra = {'kernel': 'linear'}

    output = dat.predict(algorithm='svm', plot=False, **extra)

    # Single-Interval
    roc = analysis.Roc(input_values=output['yfit_all'], binary_outcome=output['Y'] == 1)
    roc.calculate()
    roc.summary()
    assert roc.accuracy == 1

    # Forced Choice
    binary_outcome = output['Y'] == 1
    forced_choice = sorted(list(range(len(binary_outcome) // 2)) * 2)
    roc_fc = analysis.Roc(input_values=output['yfit_all'], binary_outcome=binary_outcome, forced_choice=forced_choice)
    roc_fc.calculate()
    assert roc_fc.accuracy == 1
    assert roc_fc.accuracy == roc_fc.auc == roc_fc.sensitivity == roc_fc.specificity
Example #3
def test_roc(tmpdir, sim):
    r = 10
    sigma = .1
    y = [0, 1]
    n_reps = 10
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=None)
    dat = Brain_Data(data=sim.data, Y=pd.DataFrame(sim.y))

    algorithm = 'svm'
    output_dir = str(tmpdir)
    # cv = {'type': 'kfolds', 'n_folds': 5, 'subject_id': sim.rep_id}
    extra = {'kernel': 'linear'}

    output = dat.predict(algorithm='svm', **extra)
    # predict = analysis.Predict(sim.data, sim.y, algorithm=algorithm,
    #                            output_dir=output_dir,
    #                            cv_dict=cv,
    #                            **extra)

    # predict.predict() # Save_Plot isn't working for SVM analysis, planning on deprecating analysis.Predict at some point, so not a big deal
    # predict.predict(save_plot=False)

    # Single-Interval
    roc = analysis.Roc(input_values=output['yfit_all'],
                       binary_outcome=output['Y'] == 1)
    # roc = analysis.Roc(
    #     input_values=predict.yfit_xval, binary_outcome=np.array(sim.y) == 1)
    roc.plot()
    roc.summary()
    assert roc.accuracy == 1
Example #4
def expand_mask(mask, custom_mask=None):
    """expand a mask with multiple integers into separate binary masks

    Args:
        mask: nibabel or Brain_Data instance
        custom_mask: nibabel instance or string to file path; optional

    Returns:
        out: Brain_Data instance of multiple binary masks

    """

    from nltools.data import Brain_Data

    if isinstance(mask, nib.Nifti1Image):
        mask = Brain_Data(mask, mask=custom_mask)
    if not isinstance(mask, Brain_Data):
        raise ValueError("Make sure mask is a nibabel or Brain_Data instance.")
    mask.data = np.round(mask.data).astype(int)
    tmp = []
    for i in np.unique(mask.data):
        if i == 0:  # skip the background label
            continue
        tmp.append((mask.data == i) * 1)
    out = mask.empty()
    out.data = np.array(tmp)
    return out
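A short usage sketch, assembled from helpers that appear in other examples on this page (create_sphere and roi_to_brain); the coordinates are illustrative:

import pandas as pd
from nltools.data import Brain_Data
from nltools.mask import create_sphere

s1 = create_sphere([15, 10, -8], radius=10)
s2 = create_sphere([-15, 10, -8], radius=10)
# One image whose voxels are labeled 1 and 2, then expanded back
# into one binary mask per label.
labeled = roi_to_brain(pd.Series([1, 2]), Brain_Data([s1, s2]))
binary_masks = expand_mask(labeled)
assert len(binary_masks) == 2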
Example #5
def test_roc(tmpdir):
    sim = simulator.Simulator()

    r = 10
    sigma = .1
    y = [0, 1]
    n_reps = 10
    #     output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=None)
    dat = Brain_Data(data=sim.data, Y=pd.DataFrame(sim.y))

    algorithm = 'svm'
    # output_dir = str(tmpdir)
    # cv = {'type': 'kfolds', 'n_folds': 5, 'subject_id': sim.rep_id}
    extra = {'kernel': 'linear'}

    output = dat.predict(algorithm='svm', plot=False, **extra)

    # Single-Interval
    roc = analysis.Roc(input_values=output['yfit_all'],
                       binary_outcome=output['Y'] == 1)
    # roc = analysis.Roc(
    #     input_values=predict.yfit_xval, binary_outcome=np.array(sim.y) == 1)

    roc.calculate()

    # roc.plot()

    roc.summary()
    assert roc.accuracy == 1
Example #7
def collapse_mask(mask, auto_label=True, custom_mask=None):
    """collapse separate masks into one mask with multiple integers
        overlapping areas are ignored

    Args:
        mask: nibabel or Brain_Data instance
        custom_mask: nibabel instance or string to file path; optional

    Returns:
        out: Brain_Data instance of a mask with different integers indicating
            different masks

    """

    from nltools.data import Brain_Data

    if not isinstance(mask, Brain_Data):
        if isinstance(mask, nib.Nifti1Image):
            mask = Brain_Data(mask, mask=custom_mask)
        else:
            raise ValueError("Make sure mask is a nibabel or Brain_Data "
                             "instance.")

    if len(mask.shape()) > 1:
        if len(mask) > 1:
            out = mask.empty()

            # Create list of masks and find any overlaps
            m_list = []
            for x in range(len(mask)):
                m_list.append(mask[x].to_nifti())
            intersect = intersect_masks(m_list, threshold=1, connected=False)
            intersect = Brain_Data(
                nib.Nifti1Image(np.abs(np.asarray(intersect.dataobj) - 1),
                                intersect.affine),
                mask=custom_mask,
            )

            merge = []
            if auto_label:
                # Combine all masks into sequential order
                # ignoring any areas of overlap
                for i in range(len(m_list)):
                    merge.append(
                        np.multiply(
                            Brain_Data(m_list[i], mask=custom_mask).data,
                            intersect.data) * (i + 1))
                out.data = np.sum(np.array(merge).T, 1).astype(int)
            else:
                # Collapse masks using value as label
                for i in range(len(m_list)):
                    merge.append(
                        np.multiply(
                            Brain_Data(m_list[i], mask=custom_mask).data,
                            intersect.data))
                out.data = np.sum(np.array(merge).T, 1)
            return out
    else:
        warnings.warn("Doesn't need to be collapased")
Example #8
    def create_data(self, levels, sigma, radius=5, center=None, reps=1, output_dir=None):
        """ create simulated data with integers

        Args:
            levels: vector of intensities or class labels
            sigma: amount of noise to add
            radius: vector of radii. Will create multiple spheres if len(radius) > 1
            center: center(s) of sphere(s) of the form [px, py, pz] or [[px1, py1, pz1], ..., [pxn, pyn, pzn]]
            reps: number of data repetitions, useful for trials or subjects
            output_dir: string path of directory to output data. If None, no data will be written

        """

        # Create reps
        nlevels = len(levels)
        y = levels
        rep_id = [1] * len(levels)
        for i in range(reps - 1):
            y = y + levels
            rep_id.extend([i+2] * nlevels)

        # Initialize spheres with options for multiple radii and centers
        # (or just an int and a 3D list)
        A = self.n_spheres(radius, center)

        # scale the sphere pattern by each intensity
        A_list = []
        for i in y:
            A_list.append(np.multiply(A, i))

        # generate a different gaussian noise profile for each mask
        mu = 0  # values centered around 0
        N_list = []
        for i in range(len(y)):
            N_list.append(self.normal_noise(mu, sigma))

        # add noise and signal together, then convert to nifti files
        NF_list = []
        for i in range(len(y)):
            NF_list.append(self.to_nifti(np.add(N_list[i], A_list[i])))
        NF_list = Brain_Data(NF_list)

        # Assign variables to object
        self.data = NF_list
        self.y = pd.DataFrame(data=y)
        self.rep_id = pd.DataFrame(data=rep_id)

        dat = self.data
        dat.Y = self.y

        # Write Data to files if requested
        if output_dir is not None and isinstance(output_dir, six.string_types):
            NF_list.write(os.path.join(output_dir, 'data.nii.gz'))
            self.y.to_csv(os.path.join(output_dir, 'y.csv'), index=None, header=False)
            self.rep_id.to_csv(os.path.join(output_dir, 'rep_id.csv'), index=None, header=False)
        return dat
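A minimal usage sketch matching the test examples above; pass a directory path as output_dir to also write data.nii.gz, y.csv, and rep_id.csv:

from nltools.simulator import Simulator

sim = Simulator()
dat = sim.create_data([0, 1], sigma=1, reps=3, output_dir=None)  # 6 observations
print(dat.shape())  # (n_observations, n_voxels)
print(sim.y)        # one label per observation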
Example #9
def test_load(tmpdir):
    sim = Simulator()
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    dat = sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

    # if MNI_Template["resolution"] == '2mm':
    #     shape_3d = (91, 109, 91)
    #     shape_2d = (6, 238955)
    # elif MNI_Template["resolution"] == '3mm':
    #     shape_3d = (60, 72, 60)
    #     shape_2d = (6, 71020)

    y = pd.read_csv(os.path.join(str(tmpdir.join('y.csv'))), header=None, index_col=None)
    # holdout = pd.read_csv(os.path.join(str(tmpdir.join('rep_id.csv'))), header=None, index_col=None)

    # Test load list of 4D images
    file_list = [str(tmpdir.join('data.nii.gz')), str(tmpdir.join('data.nii.gz'))]
    dat = Brain_Data(file_list)
    dat = Brain_Data([nb.load(x) for x in file_list])

    # Test load list
    dat = Brain_Data(data=str(tmpdir.join('data.nii.gz')), Y=y)

    # Test Write
    dat.write(os.path.join(str(tmpdir.join('test_write.nii'))))
    assert Brain_Data(os.path.join(str(tmpdir.join('test_write.nii'))))
Example #10
def check_brain_data(data, mask=None):
    """Check if data is a Brain_Data Instance."""
    from nltools.data import Brain_Data

    if not isinstance(data, Brain_Data):
        if isinstance(data, nib.Nifti1Image):
            data = Brain_Data(data, mask=mask)
        else:
            raise ValueError("Make sure data is a Brain_Data instance.")
    else:
        if mask is not None:
            data = data.apply_mask(mask)
    return data
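A quick hedged sketch of the coercion and masking behavior, reusing create_sphere from the other examples; the coordinates are illustrative:

from nltools.mask import create_sphere

img = create_sphere([15, 10, -8], radius=10)   # a nibabel image
dat = check_brain_data(img)                    # coerced to Brain_Data
masked = check_brain_data(dat, mask=create_sphere([15, 10, -8], radius=5))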
Example #11
def similarity(image_list,
               weight_map_filename,
               file_path_key='resampled_file'):
    tic = time.time()  # Start Timer

    weight_map = nb.load(weight_map_filename)
    file_path_list = [item[file_path_key] for item in image_list]

    dat = Brain_Data(data=file_path_list)
    r = dat.similarity(weight_map)

    log.info("Elapsed: %.2f seconds", (time.time() - tic))  # Stop timer

    return {'correlation': set_correlation(r, image_list)}
Example #13
def test_roi_to_brain():
    s1 = create_sphere([15, 10, -8], radius=10)
    s2 = create_sphere([-15, 10, -8], radius=10)
    s3 = create_sphere([0, -15, -8], radius=10)
    masks = Brain_Data([s1, s2, s3])

    d = [1, 2, 3]
    m = roi_to_brain(d, masks)
    assert np.all([np.any(m.data == x) for x in d])

    d = pd.Series([1.1, 2.1, 3.1])
    m = roi_to_brain(d, masks)
    assert np.all([np.any(m.data == x) for x in d])

    d = np.array([1, 2, 3])
    m = roi_to_brain(d, masks)
    assert np.all([np.any(m.data == x) for x in d])

    d = pd.DataFrame([np.ones(10) * x for x in [1, 2, 3]])
    m = roi_to_brain(d, masks)
    assert len(m) == d.shape[1]
    assert np.all([np.any(m[0].data == x) for x in d[0]])

    d = np.array([np.ones(10) * x for x in [1, 2, 3]])
    m = roi_to_brain(d, masks)
    assert len(m) == d.shape[1]
    assert np.all([np.any(m[0].data == x) for x in d[0]])
Example #14
    def reload(self):
        self.layout = BIDSLayout(self.root, derivatives=True)
        self.tr = self.layout.get_tr()
        with open(self.layout._get_unique(scope=self.name, suffix="pipeline").path) as file:
            pipeline = json.load(file)
        sys.path.append(os.path.dirname(self.layout._get_unique(
            scope=self.name, suffix="pipeline").path))
        self.masks = dict()
        for mask, mask_path in pipeline["Masks"].items():
            if not os.path.isabs(mask_path):
                mask_path = join(self.root, "derivatives",
                                 self.name, mask_path)
            self.masks[mask] = Brain_Data(mask_path)

        # Set up the process dictionary

        self.processes = dict()
        for process in pipeline["Processes"]:
            if not os.path.isabs(process["Source"]):
                process["Source"] = join(self.root, "derivatives",
                                         self.name, process["Source"])
            head, tail = os.path.split(os.path.abspath(process["Source"]))
            if tail.endswith(".py"):
                tail = tail[:-3]
            else:
                raise TypeError(f"{tail} is not a Python script.")
            sys.path.append(head)
            self.processes[process["Name"]] = Process(
                key=process["Readable"], process=getattr(__import__(tail), process["Name"]))
            sys.path.remove(head)
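For orientation, this is the shape of the pipeline JSON the loader above expects, reconstructed only from the keys it reads; every value here is hypothetical:

# Sketch of the pipeline file consumed by reload()
pipeline = {
    "Masks": {
        # Relative paths are resolved under <root>/derivatives/<name>/
        "vmPFC": "masks/vmpfc.nii.gz",
    },
    "Processes": [
        {
            "Name": "my_step",           # function name imported from Source; also the self.processes key
            "Readable": "My step",       # human-readable label passed to Process(key=...)
            "Source": "code/my_step.py"  # must be a .py file
        },
    ],
}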
Example #15
def fetch_emotion_ratings(data_dir=None, resume=True, verbose=1):
    '''Download and load the emotion rating dataset from NeuroVault

    Args:
        data_dir: (string, optional) Path of the data directory. Used to
            force data storage in a specified location. Default: None
        resume: (bool, optional) Whether to resume a partially completed
            download. Default: True
        verbose: (int, optional) Verbosity level. Default: 1

    Returns:
        out: (Brain_Data) Brain_Data object with downloaded data; X=metadata
    '''

    collection = 1964
    dataset_name = 'chang2015_emotion_ratings'
    data_dir = _get_dataset_dir(dataset_name,
                                data_dir=data_dir,
                                verbose=verbose)
    metadata, files = download_collection(collection=collection,
                                          data_dir=data_dir,
                                          resume=resume,
                                          verbose=verbose)
    return Brain_Data(data=files, X=metadata)
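A hedged usage sketch; the data_dir path is illustrative, and the collection is downloaded from NeuroVault on first use:

dat = fetch_emotion_ratings(data_dir='/tmp/nltools_data')
print(dat.X.head())  # NeuroVault metadata travels with the images as dat.X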
Example #16
def roi_to_brain(data, mask_x):
    '''
    Args:
        data: Pandas series or dataframe of ROI by observation
        mask_x: an expanded binary mask
    Returns:
        Brain_Data instance
    '''
    from nltools.data import Brain_Data

    def series_to_brain(data, mask_x):
        '''Converts a pandas series of ROIs to a Brain_Data instance. Index must correspond to ROI index'''

        if not isinstance(data, pd.Series):
            raise ValueError('Data must be a pandas series')
        if len(mask_x) != len(data):
            raise ValueError(
                'Data must have the same number of rows as mask has ROIs.')
        return Brain_Data([mask_x[x] * data[x] for x in data.keys()]).sum()

    if len(mask_x) != data.shape[0]:
        raise ValueError(
            'Data must have the same number of rows as mask has ROIs.')

    if isinstance(data, pd.Series):
        return series_to_brain(data, mask_x)
    elif isinstance(data, pd.DataFrame):
        return Brain_Data(
            [series_to_brain(data[x], mask_x) for x in data.keys()])
Example #17
def roi_to_brain(data, mask_x):
    ''' Convert an expanded binary mask of ROIs (see expand_mask) into a
    single image based on a vector of values. The dataframe of values must
    correspond to ROI numbers.

    This is useful for populating a parcellation scheme with a vector of values.

    Args:
        data: Pandas series or dataframe of ROI by observation
        mask_x: an expanded binary mask
    Returns:
        out: (Brain_Data) Brain_Data instance where each ROI is now populated
             with a value
    '''
    from nltools.data import Brain_Data

    def series_to_brain(data, mask_x):
        '''Converts a pandas series of ROIs to a Brain_Data instance. Index must correspond to ROI index'''

        if not isinstance(data, pd.Series):
            raise ValueError('Data must be a pandas series')
        if len(mask_x) != len(data):
            raise ValueError(
                'Data must have the same number of rows as mask has ROIs.')
        return Brain_Data([mask_x[x] * data[x] for x in data.keys()]).sum()

    if len(mask_x) != data.shape[0]:
        raise ValueError(
            'Data must have the same number of rows as mask has ROIs.')

    if isinstance(data, pd.Series):
        return series_to_brain(data, mask_x)
    elif isinstance(data, pd.DataFrame):
        return Brain_Data(
            [series_to_brain(data[x], mask_x) for x in data.keys()])
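A short usage sketch following the pattern of the test examples on this page; the coordinates and values are illustrative:

import pandas as pd
from nltools.data import Brain_Data
from nltools.mask import create_sphere

s1 = create_sphere([15, 10, -8], radius=10)
s2 = create_sphere([-15, 10, -8], radius=10)
masks = Brain_Data([s1, s2])                    # an "expanded" mask: one binary image per ROI
m = roi_to_brain(pd.Series([1.5, 2.5]), masks)  # one image with each ROI filled with its value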
Example #18
def test_check_brain_data(sim_brain_data):
    mask = Brain_Data(create_sphere([15, 10, -8], radius=10))
    a = check_brain_data(sim_brain_data)
    assert isinstance(a, Brain_Data)
    b = check_brain_data(sim_brain_data, mask=mask)
    assert isinstance(b, Brain_Data)
    assert b.shape()[1] == np.sum(mask.data == 1)
Example #19
def test_groupby(tmpdir):
    # Simulate Brain Data
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

    s1 = create_sphere([12, 10, -8], radius=r)
    s2 = create_sphere([22, -2, -22], radius=r)
    mask = Brain_Data([s1, s2])

    y = pd.read_csv(os.path.join(str(tmpdir.join('y.csv'))),
                    header=None,
                    index_col=None)
    data = Brain_Data(glob.glob(str(tmpdir.join('data.nii.gz'))), Y=y)
    data.X = pd.DataFrame(
        {
            'Intercept': np.ones(len(data.Y)),
            'X1': np.array(data.Y).flatten()
        },
        index=None)

    dat = Groupby(data, mask)

    # Test length
    assert len(dat) == len(mask)

    # Test Index
    assert isinstance(dat[1], Brain_Data)

    # Test apply
    mn = dat.apply('mean')
    assert len(dat) == len(mn)
    # assert mn[0].mean() > mn[1].mean() #JC edit: it seems this check relies on chance from simulated data
    assert mn[1].shape()[0] == np.sum(mask[1].data == 1)
    reg = dat.apply('regress')
    assert len(dat) == len(reg)
    # r = dict([(x, reg[x]['beta'][1]) for x in reg.keys()])

    # Test combine
    combine_mn = dat.combine(mn)
    assert len(combine_mn.shape()) == 1
Example #20
def test_groupby_aggregate(sim_brain_data):
    s1 = create_sphere([12, 10, -8], radius=10)
    s2 = create_sphere([22, -2, -22], radius=10)
    mask = Brain_Data([s1, s2])
    d = sim_brain_data.groupby(mask)
    assert isinstance(d, Groupby)
    mn = sim_brain_data.aggregate(mask, "mean")
    assert isinstance(mn, Brain_Data)
    assert len(mn.shape()) == 1
Example #21
    def series_to_brain(data, mask_x):
        '''Converts a pandas series of ROIs to a Brain_Data instance. Index must correspond to ROI index'''

        if not isinstance(data, pd.Series):
            raise ValueError('Data must be a pandas series')
        if len(mask_x) != len(data):
            raise ValueError(
                'Data must have the same number of rows as mask has ROIs.')
        return Brain_Data([mask_x[x] * data[x] for x in data.keys()]).sum()
Example #22
def check_brain_data(data):
    '''Check if data is a Brain_Data Instance.'''
    from nltools.data import Brain_Data

    if not isinstance(data, Brain_Data):
        if isinstance(data, nib.Nifti1Image):
            data = Brain_Data(data)
        else:
            raise ValueError("Make sure data is a Brain_Data instance.")
    return data
Example #23
def test_indexing(sim_brain_data):
    index = [0, 3, 1]
    assert len(sim_brain_data[index]) == len(index)
    index = range(4)
    assert len(sim_brain_data[index]) == len(index)
    index = sim_brain_data.Y == 1
    assert len(sim_brain_data[index.values.flatten()]) == index.values.sum()
    assert len(sim_brain_data[index]) == index.values.sum()
    assert len(sim_brain_data[:3]) == 3
    d = sim_brain_data.to_nifti()
    assert d.shape[0:3] == shape_3d
    assert Brain_Data(d)
Example #24
def simulate_data(n_observations, y, p, sigma, mask):
    ''' Simulate Brain Data

        Args:
            n_observations: (int) number of data points
            y: (array) one dimensional array of signal
            p: (float) probability of signal in a voxel
            sigma: (float) amount of gaussian noise to add
            mask: (nibabel or Brain_Data) mask defining the voxel space

        Returns:
            dat: (Brain_Data) simulated data
    '''

    dat = Brain_Data(mask).apply_mask(mask)
    new_data = np.zeros((dat.shape()[0], n_observations))
    for i in np.where(dat.data == 1)[0]:
        if np.random.rand() < p:  # treat p as a probability, per the docstring
            new_data[i, :] = y
    noise = np.random.randn(new_data.shape[0], n_observations) * sigma
    dat.data = (new_data + noise).T
    return dat
Example #25
    def get_trialtype_pain_regressors(self, nifti_data, onset_file):
        print("importing nifti")
        # import the NIfTI, caching a standard-space copy on first use
        if os.path.isfile(nifti_data + "nltoolstandard.nii.gz"):
            msmrl1 = Brain_Data(
                nifti_data + "nltoolstandard.nii.gz")
        else:
            msmrl1 = Brain_Data(
                nifti_data + ".nii.gz")
            msmrl1.write(nifti_data + "nltoolstandard.nii.gz")
        print("importing onsets")
        # import the onsets as a design matrix
        onsets = onsets_to_dm(
            onset_file,
            TR=2,
            runLength=msmrl1.shape()[0]
        )

        # process the onset files
        onsets.sampling_rate = 2

        onsets_convolved = onsets.convolve()

        # drop regressors with no events
        for c in onsets_convolved.columns:
            if sum(onsets_convolved.loc[:, c]) <= 0:
                print('deleting ' + str(c))
                del onsets_convolved[c]

        # add drift and intercept regressors
        onsets_convolved['linearterm'] = list(range(1, 361))
        onsets_convolved['quadraticterm'] = [pow(x, 2) for x in onsets_convolved['linearterm']]
        onsets_convolved['cubicterm'] = [pow(x, 3) for x in onsets_convolved['linearterm']]
        onsets_convolved['ones'] = [1] * 360
        msmrl1.X = onsets_convolved
        print("convolved onsets; regressing...")
        # regress
        regression = msmrl1.regress()
        print("regression done; calculating similarity...")
        msm_predicted_pain = regression['beta'].similarity(self.stats['weight_map'], 'dot_product')
        onset_colnames = onsets_convolved.columns.tolist()
        msm_predicted_pain_dict = {}
        for i, b in enumerate(msm_predicted_pain):
            msm_predicted_pain_dict[onset_colnames[i]] = b
        return msm_predicted_pain_dict
Example #26
def test_predict_multi():
    # Simulate data 100 images worth
    sim = Simulator()
    sigma = 1
    y = [0, 1]
    n_reps = 50
    output_dir = '.'
    dat = sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)
    y = pd.read_csv('y.csv', header=None, index_col=None)
    dat = Brain_Data('data.nii.gz', Y=y)

    # Predict within given ROIs
    # Generate some "rois" (in reality non-contiguous, but also not overlapping)
    roi_1 = dat[0].copy()
    roi_1.data = np.zeros_like(roi_1.data, dtype=bool)
    roi_2 = roi_1.copy()
    roi_3 = roi_1.copy()
    idx = np.random.choice(range(roi_1.shape()[-1]), size=9999, replace=False)
    roi_1.data[idx[:3333]] = 1
    roi_2.data[idx[3333:6666]] = 1
    roi_3.data[idx[6666:]] = 1
    rois = roi_1.append(roi_2).append(roi_3)

    # Load in all 50 rois so we can "insert" signal into the first one
    # rois = expand_mask(Brain_Data(os.path.join(get_resource_path(), 'k50.nii.gz')))
    # roi = rois[0]

    from sklearn.datasets import make_classification
    X, Y = make_classification(n_samples=100, n_features=rois[0].data.sum(), n_informative=500, n_redundant=5, n_classes=2)
    dat.data[:, rois[0].data.astype(bool)] = X
    dat.Y = pd.Series(Y)

    out = dat.predict_multi(algorithm='svm', cv_dict={'type': 'kfolds', 'n_folds': 3}, method='rois', n_jobs=-1, rois=rois[:3], kernel='linear')
    assert len(out) == 3
    assert np.sum([elem['weight_map'].data.shape for elem in out]) == rois.data.sum()

    # Searchlight
    roi_mask = rois[:2].sum()
    out = dat.predict_multi(algorithm='svm', cv_dict={'type': 'kfolds', 'n_folds': 3}, method='searchlight', radius=4, verbose=50, n_jobs=-1, process_mask=roi_mask)
    assert len(np.nonzero(out.data)[0]) == len(np.nonzero(roi_mask.data)[0])
Example #28
def expand_mask(mask):
    """ expand a mask with multiple integers into separate binary masks

        Args:
            mask: nibabel or Brain_Data instance

        Returns:
            out: Brain_Data instance of multiple binary masks

     """

    from nltools.data import Brain_Data
    if isinstance(mask, nib.Nifti1Image):
        mask = Brain_Data(mask)
    if not isinstance(mask, Brain_Data):
        raise ValueError('Make sure mask is a nibabel or Brain_Data instance.')
    mask.data = mask.data.astype(int)
    tmp = []
    for i in np.unique(mask.data):
        if i == 0:  # skip the background label
            continue
        tmp.append((mask.data == i) * 1)
    out = mask.empty()
    out.data = np.array(tmp)
    return out
Example #29
 def get_wager_nps_map(self, nps_map_filepath=None, data_mask=None):
     using_custom_filepath = False
     if nps_map_filepath is not None:
         using_custom_filepath = True
     else:
         nps_map_filepath = self.nps_map_filepath
     if os.path.isfile(nps_map_filepath):
         nps = Brain_Data(nps_map_filepath, mask=data_mask)
         self.decoder = nps
         if using_custom_filepath:
             self.decoder_origin = nps_map_filepath
         else:
             self.decoder_origin = 'nps'
     else:
         raise Exception("error; cannot find NPS map" + nps_map_filepath)
Example #30
def test_data(tmpdir):
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

    shape_3d = (91, 109, 91)
    shape_2d = (6, 238955)
    y = pd.read_csv(os.path.join(str(tmpdir.join('y.csv'))), header=None, index_col=None).T
    flist = glob.glob(str(tmpdir.join('centered*.nii.gz')))
    dat = Brain_Data(data=flist, Y=y)

    # Test shape
    assert dat.shape() == shape_2d

    # Test Mean
    assert dat.mean().shape()[0] == shape_2d[1]

    # Test Std
    assert dat.std().shape()[0] == shape_2d[1]

    # Test to_nifti
    d = dat.to_nifti()
    assert d.shape[0:3] == shape_3d

    # Test T-test
    out = dat.ttest()
    assert out['t'].shape()[0] == shape_2d[1]

    # Test Regress
    dat.X = pd.DataFrame({'Intercept': np.ones(len(dat.Y)),
                          'X1': np.array(dat.Y).flatten()}, index=None)
    out = dat.regress()
    assert out['beta'].shape() == (2, shape_2d[1])

    # Test indexing
    assert out['t'][1].shape()[0] == shape_2d[1]

    # Test threshold
    i = 1
    tt = threshold(out['t'][i], out['p'][i], threshold_dict={'fdr': .05})
    assert tt.shape()[0] == shape_2d[1]
Example #31
def test_extract_roi(sim_brain_data):
    mask = create_sphere([12, 10, -8], radius=10)
    assert len(sim_brain_data.extract_roi(mask, metric="mean")) == shape_2d[0]
    assert len(sim_brain_data.extract_roi(mask,
                                          metric="median")) == shape_2d[0]
    n_components = 2
    assert sim_brain_data.extract_roi(
        mask, metric="pca",
        n_components=n_components).shape == (n_components, shape_2d[0])
    with pytest.raises(NotImplementedError):
        sim_brain_data.extract_roi(mask, metric="p")

    assert isinstance(sim_brain_data[0].extract_roi(mask, metric="mean"),
                      (float, np.floating))
    assert isinstance(sim_brain_data[0].extract_roi(mask, metric="median"),
                      (float, np.floating))
    with pytest.raises(ValueError):
        sim_brain_data[0].extract_roi(mask, metric="pca")
    with pytest.raises(NotImplementedError):
        sim_brain_data[0].extract_roi(mask, metric="p")

    s1 = create_sphere([15, 10, -8], radius=10)
    s2 = create_sphere([-15, 10, -8], radius=10)
    s3 = create_sphere([0, -15, -8], radius=10)
    masks = Brain_Data([s1, s2, s3])
    mask = roi_to_brain([1, 2, 3], masks)
    assert len(sim_brain_data[0].extract_roi(mask,
                                             metric="mean")) == len(masks)
    assert len(sim_brain_data[0].extract_roi(mask,
                                             metric="median")) == len(masks)
    assert sim_brain_data.extract_roi(mask, metric="mean").shape == (
        len(masks),
        shape_2d[0],
    )
    assert sim_brain_data.extract_roi(mask, metric="median").shape == (
        len(masks),
        shape_2d[0],
    )
    assert len(
        sim_brain_data.extract_roi(mask,
                                   metric="pca",
                                   n_components=n_components)) == len(masks)
Example #32
def fetch_emotion_ratings(data_dir=None, resume=True, verbose=1):
    """Download and loads emotion rating dataset from neurovault

    Args:
        data_dir: (string, optional). Path of the data directory. Used to force data storage in a specified location. Default: None

    Returns:
        out: (Brain_Data) Brain_Data object with downloaded data. X=metadata

    """

    collection = 1964
    dataset_name = "chang2015_emotion_ratings"
    data_dir = _get_dataset_dir(dataset_name,
                                data_dir=data_dir,
                                verbose=verbose)
    metadata, files = download_collection(collection=collection,
                                          data_dir=data_dir,
                                          resume=resume,
                                          verbose=verbose)
    return Brain_Data(data=files, X=metadata)
Example #33
def multi_threshold(t_map, p_map, thresh):
    """ Threshold test image by multiple p-value from p image

    Args:
        stat: Brain_Data instance of arbitrary statistic metric
            (e.g., beta, t, etc)
        p: Brain_data instance of p-values
            threshold: list of p-values to threshold stat image

    Returns:
        out: Thresholded Brain_Data instance

    """
    from nltools.data import Brain_Data

    if not isinstance(t_map, Brain_Data):
        raise ValueError('Make sure stat is a Brain_Data instance')

    if not isinstance(p_map, Brain_Data):
        raise ValueError('Make sure p is a Brain_Data instance')

    if not isinstance(thresh, list):
        raise ValueError('Make sure thresh is a list of p-values')

    affine = t_map.to_nifti().affine
    pos_out = np.zeros(t_map.to_nifti().shape)
    neg_out = deepcopy(pos_out)
    for thr in thresh:
        t = threshold(t_map, p_map, thr=thr)
        t_pos = deepcopy(t)
        t_pos.data = np.zeros(len(t_pos.data))
        t_neg = deepcopy(t_pos)
        t_pos.data[t.data > 0] = 1
        t_neg.data[t.data < 0] = 1
        pos_out = pos_out + np.asarray(t_pos.to_nifti().dataobj)
        neg_out = neg_out + np.asarray(t_neg.to_nifti().dataobj)
    pos_out = pos_out + neg_out * -1
    return Brain_Data(nib.Nifti1Image(pos_out, affine))
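A sketch of how this composes with the regression outputs from the test examples above, assuming dat is a Brain_Data instance with dat.X already set:

out = dat.regress()
# Each cutoff a voxel survives adds 1, so voxels passing stricter
# thresholds end up with higher codes; negative effects are sign-flipped.
layers = multi_threshold(out['t'][1], out['p'][1], thresh=[.05, .01, .001])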
Example #34
def train_model(image_list, algorithm, cross_validation, output_dir,
                file_path_key='resampled_file', mask=None):
    """
    :param image_list: A list of dictionaries of the form
        {
            'collection_id': '504',
            'filename': 'Pain_Subject_1_Low.nii.gz',
            'target': '1',
            'resampled_file': 'path/to/the/resampled/file.nii.gz',
            'original_file': 'path/to/the/original/file.nii.gz'
        }
    """
    tic = time.time()  # Start Timer

    try:
        holdout = [int(item['subject_id']) for item in image_list]
    except KeyError:
        holdout = None

    if cross_validation:
        if holdout:
            cross_validation['subject_id'] = holdout
        elif cross_validation['type'] == 'loso':
            raise ValueError(
                "subject_id is required for a LOSO cross validation.")

    extra = {}
    if algorithm in ('svr', 'svm'):
        extra = {'kernel': 'linear'}

    categorical_mapping = None
    if algorithm in CLASSIFICATION_ALGORITHMS:
        classes = {item['target'] for item in image_list}
        assert len(classes) == 2, ('More than two classes. '
                                   'Classification requires binary data.')
        categorical_mapping = {cls: index for index, cls in enumerate(classes)}

        for image in image_list:
            image['target'] = categorical_mapping[image['target']]

    Y = pd.DataFrame([float(item['target']) for item in image_list])
    file_path_list = [item[file_path_key] for item in image_list]

    dat = Brain_Data(data=file_path_list, Y=Y)

    if mask:
        log.info('Applying a mask')
        nifti_mask = nb.load(mask)
        dat = dat.apply_mask(nifti_mask)

    output = dat.predict(algorithm=algorithm,
                         cv_dict=cross_validation,
                         plot=True,
                         **extra)

    weightmap_filename = '%s_weightmap.nii.gz' % algorithm
    output['weight_map'].write(os.path.join(output_dir, weightmap_filename))

    log.info("Elapsed: %.2f seconds", (time.time() - tic))  # Stop timer
    result = {'weightmap': weightmap_filename,
              'intercept': float(output['intercept']),
              'scatterplot': '%s_scatterplot.png' % algorithm,
              'stats': {key: output[key].tolist()
                        for key in ('Y', 'yfit_xval', 'yfit_all')
                        if key in output},
              'categorical_mapping': categorical_mapping,
              'summary': get_summary(output)}

    if 'roc' in output:
        result['roc'] = output['roc']

    return result
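A hedged invocation sketch; the dictionary fields follow the docstring above, and the file names and targets are illustrative:

image_list = [
    {'target': 'high', 'resampled_file': 'sub-01_high.nii.gz', 'subject_id': '1'},
    {'target': 'low', 'resampled_file': 'sub-01_low.nii.gz', 'subject_id': '1'},
]
result = train_model(image_list,
                     algorithm='svm',
                     cross_validation={'type': 'kfolds', 'n_folds': 5},
                     output_dir='.')
print(result['summary'])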