Example 1
    def fit_files_st(self,
                     files_path_st,
                     subjects_id_list_st,
                     confounds_st,
                     files_path,
                     subjects_id_list,
                     confounds,
                     y,
                     n_seeds,
                     extra_var=[]):
        '''
        Use a list of subject IDs, search for them in the given path, and
        grab the results per network.
        Same as fit_files(), except that you can train and test on
        different sets of data.
        '''
        if self.verbose: start = time.time()
        ### train subtypes
        self.st_crm = []
        for ii in range(n_seeds):
            print('Train seed ' + str(ii + 1))
            if self.dynamic:
                [x_dyn, x_ref] = sbp_util.grab_rmap(subjects_id_list_st,
                                                    files_path_st,
                                                    ii,
                                                    dynamic=self.dynamic)
                confounds_dyn = []
                for jj in range(len(x_dyn)):
                    confounds_dyn.append(
                        (confounds_st[jj], ) * x_dyn[jj].shape[0])
                confounds_dyn = np.vstack(confounds_dyn)
                x_dyn = np.vstack(x_dyn)
            else:
                x_ref = sbp_util.grab_rmap(subjects_id_list_st,
                                           files_path_st,
                                           ii,
                                           dynamic=self.dynamic)
                x_dyn = x_ref
                confounds_dyn = confounds_st

            del x_ref
            ## regress confounds
            crm = prediction.ConfoundsRm(confounds_dyn, x_dyn)
            x_res = crm.transform(confounds_dyn, x_dyn)  # confound-removed data
            ## extract subtypes (stage 1)
            st = subtypes.clusteringST()
            st.fit_network(x_res, nSubtypes=self.nSubtypes)
            ## extract subtypes (stage 2)
            st_s2 = subtypes.clusteringST()
            st_s2.fit_network(x_res, nSubtypes=self.nSubtypes_stage2)
            self.st_crm.append([crm, st, st_s2])
            del x_dyn

        if self.verbose:
            print("Subtype extraction, Time elapsed: {}s".format(
                int(time.time() - start)))
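
In the dynamic branch above, `grab_rmap` yields a list of per-subject arrays of dynamic maps (inferred from the loop over `x_dyn`), so each subject's single confound row must be repeated once per map to keep the stacked rows aligned. A minimal numpy sketch of that alignment, with hypothetical shapes (2 subjects, 10 voxels):

    import numpy as np

    # 2 subjects with 3 and 2 dynamic maps each (hypothetical data)
    x_dyn = [np.random.rand(3, 10), np.random.rand(2, 10)]
    confounds_st = np.array([[0.1, 1.0], [0.2, 0.0]])  # one confound row per subject

    # Repeat each subject's confound row once per dynamic map, then stack,
    # mirroring the confounds_dyn loop in the method above.
    confounds_dyn = np.vstack([
        np.tile(confounds_st[jj], (x_dyn[jj].shape[0], 1))
        for jj in range(len(x_dyn))
    ])
    x_stacked = np.vstack(x_dyn)
    assert confounds_dyn.shape == (5, 2) and x_stacked.shape == (5, 10)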
Example 2
    def fit_files_st(self,
                     files_path_st,
                     subjects_id_list_st,
                     confounds_st,
                     files_path,
                     subjects_id_list,
                     confounds,
                     y,
                     n_seeds,
                     extra_var=[]):
        '''
        Use a list of subject IDs, search for them in the given path, and
        grab the results per network.
        Same as fit_files(), except that you can train and test on
        different sets of data.
        '''
        if self.verbose: start = time.time()
        ### train subtypes
        self.st_crm = []
        for ii in range(n_seeds):
            print('Train seed ' + str(ii + 1))
            if self.dynamic:
                [x_dyn, x_ref] = sbp_util.grab_rmap(subjects_id_list_st,
                                                    files_path_st,
                                                    ii,
                                                    dynamic=self.dynamic)
                confounds_dyn = []
                for jj in range(len(x_dyn)):
                    confounds_dyn.append(
                        (confounds_st[jj], ) * x_dyn[jj].shape[0])
                confounds_dyn = np.vstack(confounds_dyn)
                x_dyn = np.vstack(x_dyn)
            else:
                x_ref = sbp_util.grab_rmap(subjects_id_list_st,
                                           files_path_st,
                                           ii,
                                           dynamic=self.dynamic)
                x_dyn = x_ref
                confounds_dyn = confounds_st

            del x_ref
            ## regress confounds
            crm = prediction.ConfoundsRm(confounds_dyn, x_dyn)
            ## extract subtypes
            st = subtypes.clusteringST()
            st.fit_network(crm.transform(confounds_dyn, x_dyn),
                           nSubtypes=self.nSubtypes)
            self.st_crm.append([crm, st])
            del x_dyn

        # compute the subtype weights (W)
        xw = self.get_w_files(files_path, subjects_id_list, confounds)
        if self.verbose:
            print("Subtype extraction, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### Include extra covariates
        if len(extra_var) != 0:
            all_var = np.hstack((xw, extra_var))
        else:
            all_var = xw

        ### prediction model
        if self.verbose: start = time.time()
        self.tlp = TwoLevelsPrediction(self.verbose,
                                       stage1_model_type=self.stage1_model_type,
                                       gamma=self.gamma)
        self.tlp.fit(all_var, all_var, y)
        if self.verbose:
            print("Two Levels prediction, Time elapsed: {}s".format(
                int(time.time() - start)))
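
The enclosing class is not shown in these excerpts, so the following call only sketches the intended pattern: the `_st` arguments train the subtypes, while the second set of paths and subjects feeds the weight extraction and the two-level prediction. All instance, path, and variable names here are hypothetical:

    # `model` stands for an instance of the (unshown) enclosing class.
    train_ids = ['sub01', 'sub02']   # subjects used to train the subtypes
    test_ids = ['sub03', 'sub04']    # subjects used to fit the prediction model

    model.fit_files_st('/data/train',    # files_path_st: subtype-training maps
                       train_ids,
                       train_confounds,  # one confound row per training subject
                       '/data/test',     # files_path: maps for weight extraction
                       test_ids,
                       test_confounds,
                       y=labels,         # targets for TwoLevelsPrediction
                       n_seeds=7)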
Example 3
    def get_w_files(self, files_path, subjects_id_list, confounds):
        ### extract w values
        W = []
        for ii in range(len(self.st_crm)):
            x_ref = sbp_util.grab_rmap(subjects_id_list,
                                       files_path,
                                       ii,
                                       dynamic=False)
            ## compute w values
            # st_crm[ii][0] is the per-seed ConfoundsRm, st_crm[ii][1] the clusteringST
            W.append(self.st_crm[ii][1].compute_weights(
                self.st_crm[ii][0].transform(confounds, x_ref),
                mask_part=self.mask_part))
            del x_ref

        xw = np.hstack(W)
        return subtypes.reshapeW(xw)
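
Given a model fitted with Example 2's `fit_files_st`, weight extraction for a new cohort is a single call. The snippet below is a hypothetical usage; the shape of the returned array depends on `subtypes.reshapeW`, which is not shown in the excerpt:

    # `model`, paths, and variables are hypothetical, as in the sketch above.
    xw = model.get_w_files('/data/test', test_ids, test_confounds)

Note that each entry of `self.st_crm` bundles the per-seed `ConfoundsRm` with its `clusteringST`, so the confound model fitted at training time is reapplied to the new data before the weights are computed.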
Example 4
    def get_w_files(self, files_path, subjects_id_list, confounds):
        ### extract w values
        W = []   # stage-1 subtype weights
        W2 = []  # stage-2 subtype weights
        for ii in range(len(self.st_crm)):
            x_ref = sbp_util.grab_rmap(subjects_id_list,
                                       files_path,
                                       ii,
                                       dynamic=False)
            ## compute w values
            W.append(self.st_crm[ii][1].compute_weights(
                self.st_crm[ii][0].transform(confounds, x_ref),
                mask_part=self.mask_part))

            # stage-2 weights from the third element of the [crm, st, st_s2] triplet
            W2.append(self.st_crm[ii][2].compute_weights(
                self.st_crm[ii][0].transform(confounds, x_ref),
                mask_part=self.mask_part))
            del x_ref

        xw = np.hstack(W)
        xw2 = np.hstack(W2)
        return subtypes.reshapeW(xw), subtypes.reshapeW(xw2)
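
This two-stage variant must be paired with Example 1's `fit_files_st`, which stores `[crm, st, st_s2]` triplets; indexing `self.st_crm[ii][2]` would raise an IndexError on the two-element `[crm, st]` lists produced by Example 2. A hypothetical usage, returning one weight matrix per clustering stage:

    # Names are hypothetical, as in the sketches above.
    xw_stage1, xw_stage2 = model.get_w_files('/data/test', test_ids, test_confounds)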