Example #1
    def splitData(self, testPercentage):
        X_train = []
        Y_train = []
        X_test = []
        Y_test = []

        for i in range(len(self.inputData.result.arrays)):
            X = []
            Y = []
            for j in range(len(self.inputData.result.arrays[i].array)):
                # call encoding as param
                X.append(
                    np.asarray(
                        self.en.get_a(self.inputData.result.arrays[i].array[j],
                                      self.lag)).ravel())
                Y.append(self.inputData.result.arrays[i].name)

            # REMAINS TO BE SEEN
            # shuffle trials
            # X = np.asarray(X)
            # Y = np.asarray(Y)
            # perm = permutation(len(X))
            # X = X[perm]
            # Y = Y[perm]

            df = pd.DataFrame(X)
            y = np.array(Y)
            temp_X_train, temp_X_test, temp_y_train, temp_y_test = train_test_split(
                df, y, test_size=testPercentage, shuffle=False)
            X_train.extend(temp_X_train.to_numpy())
            Y_train.extend(temp_y_train)
            X_test.extend(temp_X_test.to_numpy())
            Y_test.extend(temp_y_test)

        # shuffle output
        X_train = np.asarray(X_train)
        Y_train = np.asarray(Y_train)
        perm = permutation(len(X_train))
        X_train = X_train[perm]
        Y_train = Y_train[perm]

        X_test = np.asarray(X_test)
        Y_test = np.asarray(Y_test)
        perm = permutation(len(X_test))
        X_test = X_test[perm]
        Y_test = Y_test[perm]

        # save train deep light
        # save test deep light
        # save this to folders files

        return X_train, X_test, Y_train, Y_test
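Note: the method above keeps features and labels aligned by indexing both arrays with the same permutation. A minimal, self-contained sketch of that pattern (toy data, purely illustrative):

import numpy as np
from numpy.random import permutation

# toy data: 6 samples with 3 features each and matching labels
X = np.arange(18).reshape(6, 3)
Y = np.array(['a', 'a', 'b', 'b', 'c', 'c'])

perm = permutation(len(X))   # one shared index permutation
X_shuffled = X[perm]         # rows reordered
Y_shuffled = Y[perm]         # labels reordered identically, so pairs stay aligned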
Example #2
 def shuffle(self, X, y):
     
     from numpy.random.mtrand import permutation
     
     if self.dimension == 'labels':
         arg_x = range(X.shape[1])
         arg_y = permutation(range(len(y)))
     else:
         arg_x = permutation(range(X.shape[1]))
         arg_y = range(len(y))
     
     
     #print arg_x, arg_y
     
     return X[:, arg_x], y[arg_y]
Example #3
def holdout_method(features, target):
    N = features.shape[0]
    N_train = int(floor(N * 0.7))
    idx = permutation(N)
    idx_train = idx[:N_train]
    idx_test = idx[N_train:]

    features_train, target_train = features.iloc[idx_train], target[idx_train]
    features_test, target_test = features.iloc[idx_test], target[idx_test]

    return features_train, target_train, features_test, target_test
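The same holdout idea works on plain NumPy arrays without pandas; a small sketch assuming a 70/30 split (illustrative, not from the original source):

import numpy as np
from numpy.random import permutation

def holdout_split(X, y, train_fraction=0.7):
    # shuffle the indices once, then slice them into train and test blocks
    n = len(X)
    n_train = int(n * train_fraction)
    idx = permutation(n)
    return X[idx[:n_train]], y[idx[:n_train]], X[idx[n_train:]], y[idx[n_train:]]

X = np.random.rand(10, 4)
y = np.arange(10)
X_train, y_train, X_test, y_test = holdout_split(X, y)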
Example #4
def permutation_test(ds, labels, analysis, n_permutation=1000):
    
    null_dist = []
    
    for _ in range(n_permutation):
        p_labels = permutation(labels)
        t_, _ = analysis.run(ds, p_labels)
        
        null_dist.append(t_) 
    
    return np.array(null_dist)   
Example #5
def permutation_test(ds, labels, analysis, n_permutation=1000):

    null_dist = []

    for _ in range(n_permutation):
        p_labels = permutation(labels)
        t_, _ = analysis.transform(ds, p_labels)

        null_dist.append(t_)

    return np.array(null_dist)
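Both variants build a null distribution by re-running the analysis on permuted labels. A self-contained sketch of the same technique, using a difference of group means as a stand-in test statistic (the `analysis` object above is not reproduced here):

import numpy as np
from numpy.random import permutation

values = np.random.randn(40)              # toy measurements
labels = np.array([0] * 20 + [1] * 20)    # two hypothetical groups

def statistic(x, lab):
    return x[lab == 0].mean() - x[lab == 1].mean()

observed = statistic(values, labels)
null_dist = np.array([statistic(values, permutation(labels))
                      for _ in range(1000)])

# two-sided p-value: fraction of permuted statistics at least as extreme
p_value = (np.abs(null_dist) >= np.abs(observed)).mean()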
Example #6
    def __shuffleList(self, lst ):
        """
        shuffle order of lst

        @param lst: list to shuffle
        @type  lst: [any]
        
        @return: shuffeled list
        @rtype: [any]
        """
        pos = R.permutation( len( lst ))
        return N0.take( lst, pos )
Example #7
    def test_permutation_subclass(self):
        class N(np.ndarray):
            pass

        random.seed(1)
        orig = np.arange(3).view(N)
        perm = random.permutation(orig)
        assert_array_equal(perm, np.array([0, 2, 1]))
        assert_array_equal(orig, np.arange(3).view(N))

        class M(object):
            a = np.arange(5)

            def __array__(self):
                return self.a

        random.seed(1)
        m = M()
        perm = random.permutation(m)
        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
        assert_array_equal(m.__array__(), np.arange(5))
Example #8
    def _permute(self, ds, axis):

        ind = range(ds.shape[axis])

        ind = permutation(ind)

        if axis == 0:
            ds_ = ds[ind]
        elif axis == 1:
            ds_ = ds[:, ind]
        elif axis == 2:
            ds_ = ds[:, :, ind]

        return ds_
Example #9
 def _permute(self, ds, axis):
             
     ind = range(ds.shape[axis])
     
     ind = permutation(ind)
     
     if axis == 0:
         ds_ = ds[ind]
     elif axis == 1:
         ds_ = ds[:,ind]
     elif axis == 2:
         ds_ = ds[:,:,ind]
     
     return ds_
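The per-axis branches above can be collapsed with `np.take`, which accepts the axis as a parameter; a short sketch of that equivalent formulation (assumed behaviour for 1- to 3-D arrays):

import numpy as np
from numpy.random import permutation

def permute_along_axis(ds, axis):
    # shuffle an array along an arbitrary axis with a single call
    ind = permutation(ds.shape[axis])
    return np.take(ds, ind, axis=axis)

ds = np.arange(24).reshape(2, 3, 4)
shuffled = permute_along_axis(ds, axis=2)   # same result shape as ds[:, :, ind]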
Example #10
 def _get_permutation_indices(self, n_samples):
     
     from numpy.random.mtrand import permutation
     
     if self.permutation == 0:
         return [range(n_samples)]
     
     
     # reset random state
     indices = [range(n_samples)]
     for _ in range(self.permutation):
         idx = permutation(indices[0])
         indices.append(idx)
     
     return indices
Example #11
    def plot_ttvs(self,
                  burn=0,
                  thin=1,
                  axs=None,
                  figsize=None,
                  bwidth=0.8,
                  fmt='h',
                  windows=None,
                  sigma=inf,
                  nsamples=1000):
        assert fmt in ('d', 'h', 'min')
        multiplier = {'d': 1, 'h': 24, 'min': 1440}
        ncol = 1 if windows is None else len(windows)
        fig, axs = (None, axs) if axs is not None else subplots(
            1, ncol, figsize=figsize, sharey=True)
        df = self.posterior_samples(burn, thin, derived_parameters=False)
        tccols = [c for c in df.columns if 'tc' in c]
        df = df[tccols]
        s = df.std()
        m = (s < median(s) + sigma * s.std()).values
        df = df.iloc[:, m]
        epochs = self.epoch[m]

        samples = []
        for tcs in permutation(df.values)[:nsamples]:
            samples.append(tcs - poly1d(polyfit(epochs, tcs, 1))(epochs))
        samples = array(samples)
        p = multiplier[fmt] * percentile(samples, [50, 16, 84, 0.5, 99.5], 0)
        setp(axs,
             ylabel='Transit center - linear prediction [{}]'.format(fmt),
             xlabel='Transit number')
        if windows is None:
            plot_estimates(epochs, p, axs, bwidth)
            if with_seaborn:
                sb.despine(ax=axs, offset=15)
        else:
            setp(axs[1:], ylabel='')
            for ax, w in zip(axs, windows):
                m = (epochs > w[0]) & (epochs < w[1])
                plot_estimates(epochs[m], p[:, m], ax, bwidth)
                setp(ax, xlim=w)
                if with_seaborn:
                    sb.despine(ax=ax, offset=15)
        if fig:
            fig.tight_layout()
        return axs
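The `permutation(df.values)[:nsamples]` idiom used above shuffles the rows of a 2-D array and keeps the first `nsamples` of them, i.e. it draws a subsample without replacement. A tiny sketch of just that step (toy array standing in for the posterior samples):

import numpy as np
from numpy.random import permutation

samples = np.random.rand(5000, 3)          # stand-in for df.values
nsamples = 1000
subset = permutation(samples)[:nsamples]   # 1000 distinct rows in random order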
Example #12
    def shuffle(self, ds, labels):
        # Temporary function
        fp = np.memmap('/media/robbis/DATA/perm.dat',
                       dtype='float32',
                       mode='w+',
                       shape=(self.n_permutation, ds.shape[1], ds.shape[2]))
        #print fp.shape
        for i in range(self.n_permutation):
            p_labels = permutation(labels)
            #print p_labels
            fp[i, :] = self.analysis.transform(p_labels)
            #fp[i,:] = self.analysis.transform(ds_p)

            #null_dist.append(value_)

        #fp = np.array(null_dist)
        self.null_dist = fp

        return fp
Example #13
 def train_CV(self,n_folds=5,num_neuron = 50,learning_rate_input=0.01,decay=0.01,maxEpochs_input=1200,verbose_input=True):
     '''Call the class in model validators and do cross-validation with the passed values.'''
     dataset = self.data_set
     l = dataset.getLength()
     indim = dataset.indim
     outdim = dataset.outdim
     inp = dataset.getField("input")
     out = dataset.getField("target")
     perms = np.array_split(permutation(l), n_folds)
     perf = 0
     for i in range(n_folds):
         train_perms_idxs = list(range(n_folds))
         train_perms_idxs.pop(i)
         temp_list = []
         for train_perms_idx in train_perms_idxs:
             temp_list.append(perms[ train_perms_idx ])
         train_idxs = np.concatenate(temp_list)
         #this is the test set:
         test_idxs = perms[i]
         #train:
         print "Training on part: ", i
         train_ds = SupervisedDataSet(indim,outdim)
         train_ds.setField("input", inp[train_idxs])
         train_ds.setField("target",out[train_idxs])
         net_this = buildNetwork(indim,num_neuron,outdim,bias=True,hiddenclass = SigmoidLayer)
         t_this = BackpropTrainer(net_this,train_ds,learningrate = learning_rate_input,weightdecay=decay,
                                  momentum=0.,verbose=verbose_input)
         #train asked times:
         t_this.trainEpochs(maxEpochs_input)
         #test on testset.
         test_ds = SupervisedDataSet(indim,outdim)
         test_ds.setField("input", inp[test_idxs])
         test_ds.setField("target",out[test_idxs])
         perf_this = self._net_performance(net_this, test_ds)
         perf = perf + perf_this
     perf /= n_folds
     print(perf)
     return perf
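The fold construction in the method above, `np.array_split(permutation(l), n_folds)`, is the heart of the cross-validation loop. A minimal sketch of that indexing logic in isolation (no PyBrain dependency, toy sizes):

import numpy as np
from numpy.random import permutation

n_samples, n_folds = 10, 5
folds = np.array_split(permutation(n_samples), n_folds)   # disjoint index blocks

for i, test_idx in enumerate(folds):
    train_idx = np.concatenate([f for j, f in enumerate(folds) if j != i])
    # train on train_idx, evaluate on test_idx
    print(f"fold {i}: train={sorted(train_idx)}, test={sorted(test_idx)}")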
Example #14
    def ds_permutation(self, ds):

        from datetime import datetime
        start = datetime.now()

        dim = self._axis

        #check if dimension is coherent with ds shape
        new_indexing = []
        for i in range(len(ds.shape)):
            ind = range(ds.shape[i])
            if i == dim:
                ind = list(permutation(ind))

            new_indexing.append(ind)

        ds_ = ds[np.ix_(*new_indexing)]

        finish = datetime.now()
        print(finish - start)

        return ds_
Example #15
 def shuffle(self, ds, labels):
     # Temporary function
     fp = np.memmap('/media/robbis/DATA/perm.dat',
                    dtype='float32', 
                    mode='w+',
                    shape=(self.n_permutation, 
                           ds.shape[1],
                           ds.shape[2])
                    )
     #print fp.shape
     for i in range(self.n_permutation):
         p_labels = permutation(labels)
         #print p_labels
         fp[i,:] = self.analysis.run(p_labels)
         #fp[i,:] = self.analysis.run(ds_p)
     
         #null_dist.append(value_)
     
     #fp = np.array(null_dist)
     self.null_dist = fp
 
     return fp
Example #16
    def ds_permutation(self, ds):
        
        from datetime import datetime
        start = datetime.now()

        dim = self._axis
        
        #check if dimension is coherent with ds shape
        new_indexing = []
        for i in range(len(ds.shape)):
            ind = range(ds.shape[i])
            if i == dim:
                ind = list(permutation(ind))
            
            new_indexing.append(ind)
        
        ds_ = ds[np.ix_(*new_indexing)]
        
        finish = datetime.now()
        print (finish - start)
        
        return ds_
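`np.ix_` builds an open mesh from the index lists, so permuting only the list for the chosen axis shuffles that axis and leaves the others in order; a small demonstration (illustrative only):

import numpy as np
from numpy.random import permutation

ds = np.arange(12).reshape(3, 4)
rows = list(permutation(range(ds.shape[0])))   # shuffled axis-0 indices
cols = list(range(ds.shape[1]))                # axis-1 kept in order
shuffled = ds[np.ix_(rows, cols)]              # rows reordered, columns untouched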
Example #17
 def crossValidation(self, n_folds = 5,verbose_in = True):
     '''
     This is the caller function that call the validation function
     
     '''
     print('Starting Cross Validation....')
     print('Number of Neuron Range From', self.start, 'to', self.end, '....')
     print()
     self.all_perf_tst = []
     self.all_perf_trn = []
     self.all_num_neuron = []
     self.num_data, self.indim = self.tot_descs.shape
     self.perms = np.array_split(permutation(self.num_data), n_folds)
     
     for this_neuron in range(self.start, self.end, self.interval):
         self.all_num_neuron.append(this_neuron)
         print('##########################')
         print('Now Cross-Validation With', this_neuron, '...............')
         this_perf_tst, this_perf_trn = self._train_CV(num_neuron = this_neuron, 
                                    learning_rate_input=self.learning_rate, 
                                    perms = self.perms,
                                    n_folds = n_folds,
                                    decay=self.decay_rate,
                                    maxEpochs_input = self.max_Epoches,
                                    verbose_input=verbose_in)
         
         self.all_perf_tst.append(this_perf_tst)
         self.all_perf_trn.append(this_perf_trn)
     
     print('Cross Validation Finished')
     print('The Best Performance is', min(self.all_perf_tst))
     print('The Best Number of Neurons is:', self.all_num_neuron[self.all_perf_tst.index(min(self.all_perf_tst))])
     print('Plotting the results')
     
     self._plotResults(self.numRange, self.all_perf_tst,self.all_perf_trn)
     
     print('Results Saved To Temp File...')
     np.savetxt('CrossValidation_Results.csv', list(zip(self.all_num_neuron, self.all_perf_tst, self.all_perf_trn)), delimiter=',')
Example #18
    def make_random_split(self,
                          source_value,
                          target_value,
                          absolute_per_class=1,
                          relative_per_class=0):
        # if num_test is None and num_train is None and percentage_train is None:
        #     logging.error("Your need to provide at lease on of the parameters num_train, num_test or percentage_train!")
        #     raise Exception
        # if num_train is not None and num_test is not None:
        #     logging.error("You can only specify either num_train or num_test!")
        #     raise Exception
        if source_value not in self.split_assignments:
            logging.warning(
                "No element with value 'source_value' in self.split_assignments!"
            )
        if absolute_per_class < 1:
            logging.error("Invalid value for parameter absolute_per_class.")
            raise Exception
        if relative_per_class > 1 or relative_per_class < 0:
            logging.error("Invalid value for parameter relative_per_class.")
            raise Exception

        self.split_assignments = array(self.split_assignments)
        # for all classes
        classes = unique(self.labels)
        for c in classes:
            class_elements = where(
                logical_and(
                    transpose(array(self.labels)[newaxis]) == c,
                    transpose(array(
                        self.split_assignments)[newaxis]) == source_value))[0]
            if len(class_elements) <= absolute_per_class:
                self.split_assignments[class_elements] = target_value
            else:
                how_many = max(absolute_per_class,
                               int(relative_per_class * len(class_elements)))
                self.split_assignments[permutation(class_elements)
                                       [:how_many]] = target_value
Example #19
from numpy.random.mtrand import permutation

from sklearn.neighbors import KNeighborsClassifier

import dirty_importer
import import_data

if __name__ == '__main__':
    global visual
    dataset = dirty_importer.get_dataset()
    data = dataset[0]
    label = dataset[1]
    perm = permutation(len(data))
    data = data[perm]
    label = label[perm]
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(data, label)

    testset = dirty_importer.get_dataset('test')
    data = testset[0]
    label = testset[1]
    perm = permutation(len(data))
    data = data[perm]
    label = label[perm]
    predict = []

    bingo = 0
    for i in range(0, len(data)):
        predict.append(clf.predict(data[i].reshape(1, -1)))
        if predict[i] == label[i]:
            bingo += 1
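The manual prediction loop above counts correct labels one sample at a time; scikit-learn can do the same in one vectorised call. A sketch with toy stand-ins for the shuffled train and test arrays (assumed data shapes, not the original `dirty_importer` output):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

X_train, y_train = np.random.rand(50, 4), np.random.randint(0, 2, 50)
X_test, y_test = np.random.rand(20, 4), np.random.randint(0, 2, 20)

clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)   # fraction of correct predictions, like bingo / len(data)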
Example #20
    def plot_rv_vs_phase(self,
                         planet: int,
                         method='de',
                         pv=None,
                         nsamples: int = 200,
                         ntimes: int = 500,
                         axs=None):
        if axs is None:
            fig, axs = subplots(2,
                                1,
                                gridspec_kw={'height_ratios': (3, 1)},
                                sharex='all')
        else:
            fig, axs = None, axs

        if pv is None:
            if method == 'de':
                if self.lpf.de is None:
                    raise ValueError(
                        "The global optimizer hasn't been initialized.")
                pvp = None
                pv = self.lpf.de.minimum_location
            elif method == 'mcmc':
                if self.lpf.sampler is None:
                    raise ValueError("The sampler hasn't been initialized.")
                df = self.lpf.posterior_samples()
                pvp = permutation(df.values)[:nsamples, :]
                pv = median(pvp, 0)
        else:
            if pv.ndim == 1:
                pvp = None
                pv = pv
            else:
                pvp = permutation(pv)[:nsamples, :]
                pv = median(pvp, 0)

        rv_time = linspace(self._timea.min() - 1,
                           self._timea.max() + 1,
                           num=ntimes)

        all_planets = set(range(self.nplanets))
        other_planets = all_planets.difference([planet])

        if pvp is None:
            rv_model = self.rv_model(pv,
                                     rv_time + self._tref, [planet],
                                     add_sv=False)
            rv_others = self.rv_model(pv, planets=other_planets, add_sv=False)
            rv_model_limits = None
        else:
            rv_percentiles = percentile(
                self.rv_model(pvp,
                              rv_time + self._tref, [planet],
                              add_sv=False), [50, 16, 84, 2.5, 97.5], 0)
            rv_model = rv_percentiles[0]
            rv_model_limits = rv_percentiles[1:]
            rv_others = median(
                self.rv_model(pvp, planets=other_planets, add_sv=False), 0)

        period = pv[self.ps.names.index(f'p_{planet + 1}')]
        tc = pv[self.ps.names.index(f'tc_{planet + 1}')] - self._tref

        phase = (fold(self._timea, period, tc, 0.5) - 0.5) * period
        phase_model = (fold(rv_time, period, tc, 0.5) - 0.5) * period
        msids = argsort(phase_model)

        if pvp is not None:
            axs[0].fill_between(phase_model[msids],
                                rv_model_limits[2, msids],
                                rv_model_limits[3, msids],
                                facecolor='blue',
                                alpha=0.15)
            axs[0].fill_between(phase_model[msids],
                                rv_model_limits[0, msids],
                                rv_model_limits[1, msids],
                                facecolor='darkblue',
                                alpha=0.25)

        axs[0].errorbar(phase,
                        self._rva - rv_others - self.rv_shifts(pv),
                        self._rvea,
                        fmt='ok')
        axs[0].plot(phase_model[msids], rv_model[msids], 'k')
        axs[1].errorbar(phase,
                        self._rva - self.rv_model(pv),
                        self._rvea,
                        fmt='ok')

        setp(axs[0], ylabel='RV [m/s]')
        setp(axs[1], xlabel='Phase [d]', ylabel='O-M [m/s]')

        axs[0].autoscale(axis='x', tight=True)
        if fig is not None:
            fig.tight_layout()
        return fig
Example #21
    def plot_rv_vs_time(self,
                        method='de',
                        pv=None,
                        nsamples: int = 200,
                        ntimes: int = 500,
                        axs=None):

        if axs is None:
            fig, axs = subplots(2,
                                1,
                                gridspec_kw={'height_ratios': (3, 1)},
                                sharex='all')
        else:
            fig, axs = None, axs

        if pv is None:
            if method == 'de':
                if self.lpf.de is None:
                    raise ValueError(
                        "The global optimizer hasn't been initialized.")
                pvp = None
                pv = self.lpf.de.minimum_location
            elif method == 'mcmc':
                if self.lpf.sampler is None:
                    raise ValueError("The sampler hasn't been initialized.")
                df = self.lpf.posterior_samples()
                pvp = permutation(df.values)[:nsamples, :]
                pv = median(pvp, 0)
        else:
            if pv.ndim == 1:
                pvp = None
                pv = pv
            else:
                pvp = permutation(pv)[:nsamples, :]
                pv = median(pvp, 0)

        rv_time = linspace(self._timea.min() - 1,
                           self._timea.max() + 1,
                           num=ntimes) + self._tref

        if pvp is None:
            rv_model = self.rv_model(pv, rv_time, add_sv=False)
            rv_model_limits = None
        else:
            rv_percentiles = percentile(
                self.rv_model(pvp, rv_time, add_sv=False),
                [50, 16, 84, 2.5, 97.5], 0)
            rv_model = rv_percentiles[0]
            rv_model_limits = rv_percentiles[1:]

        if rv_model_limits is not None:
            axs[0].fill_between(rv_time,
                                rv_model_limits[2],
                                rv_model_limits[3],
                                facecolor='blue',
                                alpha=0.25)
            axs[0].fill_between(rv_time,
                                rv_model_limits[0],
                                rv_model_limits[1],
                                facecolor='darkblue',
                                alpha=0.5)

        axs[0].plot(rv_time, rv_model, 'k', lw=1)
        axs[0].errorbar(self._timea + self._tref,
                        self._rva + self.rv_shifts(pv),
                        self._rvea,
                        fmt='ok')
        axs[1].errorbar(self._timea + self._tref,
                        self._rva - self.rv_model(pv),
                        self._rvea,
                        fmt='ok')

        if fig is not None:
            fig.tight_layout()
        return fig
Example #22
def permutation(n):
    "permutation(n) = a permutation of indices range(n)"
    return mt.permutation(n)
Example #23
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from numpy.random.mtrand import permutation
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier

if __name__ == "__main__":
    # load data
    X, y = uci_loader.getdataset('heart')
    #data = load_digits(n_class = 2)
    #X,y = data.data, data.target
    y[y!=0] = 1
    
    # shuffle data
    random_idx = permutation(np.arange(len(y)))
    X = X[random_idx]
    y = y[random_idx]
    
    # create model
    model = LogisticRegression(C=1)
    #model = RandomForestClassifier(n_estimators=10)
    
    # plot high-dimensional decision boundary
    db = DBPlot(model)
    db.fit(X, y, training_indices=0.5)
    db.plot(plt, generate_testpoints=True) # set generate_testpoints=False to speed up plotting
    plt.show()
    
    #plot learning curves for comparison
    N = 10
Example #24
    
    
    lassocv.fit(X_, y_)
    enetcv.fit(X_, y_)
    f = pl.figure()
    a = f.add_subplot(211)
    pl.plot(lassocv.coef_, c=color[i], label=labels_group[i])
    a = f.add_subplot(212)
    pl.plot(enetcv.coef_, c=color[i], label=labels_group[i])
    
##################################################

permut_ = []
for i in np.arange(1000):
    
    y_permuted = permutation(y)
    cv=ShuffleSplit(len(y), n_iter=50, test_size=0.25)
    
    mse_ = []
    
    svr_rbf = SVR(kernel='rbf', C=1)
    svr_lin = SVR(kernel='linear', C=1)
    svr_poly = SVR(kernel='poly', C=1, degree=2)
    
    for train_index, test_index in cv:
                 
            #pl.figure()
            X_train = X[train_index]
            y_train = y_permuted[train_index]
            
            X_test = X[test_index]
Example #25
                      cv=ShuffleSplit(len(y_), n_iter=50, test_size=0.25))

    lassocv.fit(X_, y_)
    enetcv.fit(X_, y_)
    f = pl.figure()
    a = f.add_subplot(211)
    pl.plot(lassocv.coef_, c=color[i], label=labels_group[i])
    a = f.add_subplot(212)
    pl.plot(enetcv.coef_, c=color[i], label=labels_group[i])

##################################################

permut_ = []
for i in np.arange(1000):

    y_permuted = permutation(y)
    cv = ShuffleSplit(len(y), n_iter=50, test_size=0.25)

    mse_ = []

    svr_rbf = SVR(kernel='rbf', C=1)
    svr_lin = SVR(kernel='linear', C=1)
    svr_poly = SVR(kernel='poly', C=1, degree=2)

    for train_index, test_index in cv:

        #pl.figure()
        X_train = X[train_index]
        y_train = y_permuted[train_index]

        X_test = X[test_index]
Example #26
    def plot_gb_transits(self,
                         method='de',
                         pv: ndarray = None,
                         remove_baseline: bool = True,
                         figsize: tuple = (14, 2),
                         axes=None,
                         ncol: int = 4,
                         xlim: tuple = None,
                         ylim: tuple = None,
                         nsamples: int = 200):

        if pv is None:
            if method == 'de':
                if self.de is None:
                    raise ValueError(
                        "The global optimizer hasn't been initialized.")
                pvp = None
                pv = self.de.minimum_location
            elif method == 'mcmc':
                if self.sampler is None:
                    raise ValueError("The sampler hasn't been initialized.")
                df = self.posterior_samples(derived_parameters=False)
                pvp = permutation(df.values)[:nsamples, :]
                pv = median(pvp, 0)
        else:
            if pv.ndim == 1:
                pvp = None
                pv = pv
            else:
                pvp = permutation(pv)[:nsamples, :]
                pv = median(pvp, 0)

        if pvp is None:
            if remove_baseline:
                fobs = self.ofluxa / squeeze(self.baseline(pv))
                fmodel = squeeze(self.transit_model(pv))
                fbasel = ones_like(self.ofluxa)
            else:
                fobs = self.ofluxa
                fmodel = squeeze(self.flux_model(pv))
                fbasel = squeeze(self.baseline(pv))
            fmodel_limits = None
        else:
            if remove_baseline:
                fobs = self.ofluxa / squeeze(self.baseline(pv))
                fmodels = percentile(self.transit_model(pvp),
                                     [50, 16, 84, 2.5, 97.5], 0)
                fbasel = ones_like(self.ofluxa)
            else:
                fobs = self.ofluxa
                fmodels = percentile(self.flux_model(pvp),
                                     [50, 16, 84, 2.5, 97.5], 0)
                fbasel = median(self.baseline(pvp), 0)
            fmodel = fmodels[0]
            fmodel_limits = fmodels[1:]

        tcids = [
            self.ps.names.index(f'tc_{i + 1}') for i in range(self.nplanets)
        ]
        prids = [
            self.ps.names.index(f'p_{i + 1}') for i in range(self.nplanets)
        ]

        t0s = pv[tcids]
        prs = pv[prids]

        tcs = array([t.mean() for t in self.times[self._stess:]])
        tds = array([
            abs(fold(tcs, prs[i], t0s[i], 0.5) - 0.5)
            for i in range(self.nplanets)
        ])
        pids = argmin(tds, 0)

        nlc = self.nlc - self._stess
        nrow = int(ceil(nlc / ncol))

        if axes is None:
            fig, axs = subplots(nrow,
                                ncol,
                                figsize=figsize,
                                sharex='all',
                                sharey='all',
                                squeeze=False)
        else:
            fig, axs = None, axes

        [ax.autoscale(enable=True, axis='x', tight=True) for ax in axs.flat]

        etess = self._stess
        for iax, i in enumerate(range(self.nlc - etess)):
            ax = axs.flat[iax]
            sl = self.lcslices[etess + i]
            t = self.times[etess + i]
            e = epoch(t.mean(), t0s[pids[i]], prs[pids[i]])
            tc = t0s[pids[i]] + e * prs[pids[i]]
            tt = 24 * (t - tc)

            if fmodel_limits is not None:
                ax.fill_between(tt,
                                fmodel_limits[2, sl],
                                fmodel_limits[3, sl],
                                facecolor='blue',
                                alpha=0.15)
                ax.fill_between(tt,
                                fmodel_limits[0, sl],
                                fmodel_limits[1, sl],
                                facecolor='darkblue',
                                alpha=0.25)
            ax.plot(tt, fobs[sl], 'k.', alpha=0.2)
            ax.plot(tt, fmodel[sl], 'k')

        setp(axs, xlim=xlim, ylim=ylim)
        setp(axs[-1, :], xlabel='Time - T$_c$ [h]')
        setp(axs[:, 0], ylabel='Normalised flux')
        fig.tight_layout()
        return fig
Example #27
    def plot_folded_planets(self,
                            passband: str,
                            method: str = 'de',
                            bwidth: float = 10,
                            axs=None,
                            pv: ndarray = None,
                            nsamples: int = 100,
                            limp=(2.5, 97.5, 16, 84),
                            limc: str = 'darkblue',
                            lima: float = 0.15,
                            ylines=None):
        from pytransit.lpf.tesslpf import downsample_time

        if axs is None:
            fig, axs = subplots(1, self.nplanets, sharey='all')
        else:
            fig, axs = None, axs

        if pv is None:
            if method == 'de':
                if self.de is None:
                    raise ValueError(
                        "The global optimizer hasn't been initialized.")
                pvp = None
                pv = self.de.minimum_location
            elif method == 'mcmc':
                if self.sampler is None:
                    raise ValueError("The sampler hasn't been initialized.")
                df = self.posterior_samples(derived_parameters=False)
                pvp = permutation(df.values)[:nsamples, :]
                pv = median(pvp, 0)
        else:
            if pv.ndim == 1:
                pvp = None
                pv = pv
            else:
                pvp = permutation(pv)[:nsamples, :]
                pv = median(pvp, 0)

        is_pb = self.pbids == self.passbands.index(passband)
        pbmask = zeros(self.timea.size, 'bool')
        for sl, cpb in zip(self.lcslices, is_pb):
            if cpb:
                pbmask[sl] = 1

        tcids = [
            self.ps.names.index(f'tc_{i + 1}') for i in range(self.nplanets)
        ]
        prids = [
            self.ps.names.index(f'p_{i + 1}') for i in range(self.nplanets)
        ]
        t0s = pv[tcids]
        prs = pv[prids]

        for ipl in range(self.nplanets):
            planets = set(arange(self.nplanets))
            planets.remove(ipl)
            if pvp is None:
                mflux = squeeze(self.transit_model(pv, planets=[ipl]))[pbmask]
                rflux = squeeze(self.transit_model(pv,
                                                   planets=planets))[pbmask]
                mflim = None
                fbline = self.baseline(pv)[pbmask]
            else:
                mfluxes = self.transit_model(pvp, planets=[ipl])[:, pbmask]
                rfluxes = self.transit_model(pvp, planets=planets)[:, pbmask]
                fblines = self.baseline(pvp)[:, pbmask]
                mflux = median(mfluxes, 0)
                mflim = percentile(mfluxes, limp, 0)
                rflux = median(rfluxes, 0)
                fbline = median(fblines, 0)

            oflux = self.ofluxa[pbmask] / rflux / fbline
            phase = (fold(self.timea[pbmask], prs[ipl], t0s[ipl], 0.5) -
                     0.5) * prs[ipl]
            m = abs(phase) < 0.5 * self.bldur
            sids = argsort(phase[m])
            if m.sum() > 0:
                phase, mflux, oflux = phase[m][sids], mflux[m][sids], oflux[m][
                    sids]
                bp, bf, be = downsample_time(phase, oflux, bwidth / 24 / 60)
                if mflim is not None:
                    for il in range(mflim.shape[0] // 2):
                        axs[ipl].fill_between(phase,
                                              mflim[2 * il, m][sids],
                                              mflim[2 * il + 1, m][sids],
                                              fc=limc,
                                              alpha=lima)
                axs[ipl].errorbar(bp, bf, be, fmt='ok')
                axs[ipl].plot(phase, oflux, '.', alpha=0.25)
                axs[ipl].plot(phase, mflux, 'k')

                if ylines is not None:
                    axs[ipl].fill_between(phase, mflux, 1, fc='w', zorder=-99)
                    for yl in ylines:
                        axs[ipl].axhline(yl,
                                         lw=1,
                                         ls='--',
                                         c='k',
                                         alpha=0.5,
                                         zorder=-100)

        setp(axs,
             xlim=(-0.5 * self.bldur, 0.5 * self.bldur),
             ylim=(0.996, 1.004))
        if fig is not None:
            fig.tight_layout()
        return fig
Example #28
    f.savefig(os.path.join(path,r,'vipassana_correlation_expertise_0.7.png'), facecolor='black')
    
    savemat(os.path.join(path,r,'all_analysis.mat'), fields)
    pl.close('all')



full_matrices = np.vstack((samatha[subjects.T[1] == level], 
                           vipassana[subjects.T[1] == level]))
labels = np.array(['S' for i in range(full_matrices.shape[0])])
labels[samatha.shape[0]:]='V'

p_ = []
t_ = []
for i in range(1000):
    labels_ = permutation(labels)
    
    samatha_ = full_matrices[labels_ == 'S']
    vipassana_ = full_matrices[labels_ == 'V']
    
    tp, pp = ttest_ind(samatha_, vipassana_, axis=0)
    
    t_.append(tp)
    p_.append(pp)

t_ = np.array(t_)
p_ = np.array(p_)

t_true, p_true = ttest_ind(samatha, vipassana, axis=0)

#print np.count_nonzero(p_true > p_)
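Given the permuted statistics `t_` and the observed `t_true`, the natural follow-up is an element-wise permutation p-value; a hedged sketch of that step with toy arrays of the same shapes (not part of the original script):

import numpy as np

t_null = np.random.randn(1000, 6)   # stands in for t_: (n_permutations, n_features)
t_obs = np.random.randn(6)          # stands in for t_true: (n_features,)

# two-sided permutation p-value per feature
p_perm = (np.abs(t_null) >= np.abs(t_obs)).mean(axis=0)
significant = p_perm < 0.05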
Example #29
 
 partitioner = SKLCrossValidation(StratifiedKFold(y, n_folds=i))
 
 cvte = CrossValidation(clf,
                        partitioner,
                        enable_ca=['stats', 'probabilities'])
 
 sl = sphere_searchlight(cvte, radius=3, space = 'voxel_indices')
 
 maps = []
 
 for p_ in range(100):
     
     print('-------- ' + str(p_ + 1) + ' of 100 ------------')
     
     y_perm = permutation(range(len(ds.targets)))
     
     ds.targets = ds.targets[y_perm]
     
     sl_map = sl(ds)
     sl_map.samples *= -1
     sl_map.samples +=  1
 
     map_ = map2nifti(sl_map, imghdr=ds.a.imghdr)
     ni.save(map_, os.path.join(path, subj+'_permutation_'+str(p_+1)+'.nii.gz'))
     permut_.append(map_.get_data())
     
     
 permut_ = np.array(permut_).mean(4)
 permut_ = np.rollaxis(permut_, 0, 4)
 perm_map = ni.Nifti1Image(permut_, map_.get_affine())
Example #30
from numpy.random.mtrand import permutation

from sklearn.neighbors import KNeighborsClassifier

import dirty_importer
import import_data

if __name__ == '__main__':
    global visual
    dataset = dirty_importer.get_dataset()
    data = dataset[0]
    label = dataset[1]
    perm = permutation(len(data))
    data = data[perm]
    label = label[perm]
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(data, label)

    testset = dirty_importer.get_dataset('test')
    data = testset[0]
    label = testset[1]
    perm = permutation(len(data))
    data = data[perm]
    label = label[perm]
    predict = []

    bingo = 0
    for i in range(0, len(data)):
        predict.append(clf.predict(data[i].reshape(1, -1)))
        if predict[i] == label[i]:
            bingo += 1
Example #31
 def shuffle(self, y):
     return permutation(y)
Example #32
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from numpy.random.mtrand import permutation
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
import wandb
wandb.init()

# load data
iris = load_iris()
X = iris.data
y = iris.target
y[y != 0] = 1

# shuffle data
random_idx = permutation(np.arange(len(y)))
X = X[random_idx]
y = y[random_idx]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

# create model
model = RandomForestClassifier()
wandb.sklearn.plot_learning_curve(model, X_test, y_test)
wandb.sklearn.plot_class_balance(y_train, y_test)
wandb.sklearn.plot_calibration_curve(X,
                                     y,
                                     RandomForestClassifier(),
                                     name="Random Forest")
wandb.sklearn.plot_decision_boundaries(model, X, y)
'''
# Visualize model performance
Example #33
 def shuffle(self, y):
     return permutation(y)
Example #34
    dispersion_ = []

    for k in np.unique(y):
        disp_ = cluster_dispersion(dist_, y, k)
        dispersion_.append(disp_)

    total_dispersion = cluster_dispersion(dist_, np.zeros_like(y), 0)
    relative_dispersion = np.array(dispersion_) / total_dispersion

    from numpy.random.mtrand import permutation

    permutation_ = np.zeros((np.unique(y).shape[0], 2000))

    for i in range(2000):
        y_perm = permutation(y)

        dispersion_p = []

        for j, k in enumerate(np.unique(y_perm)):
            disp_p = cluster_dispersion(dist_, y_perm, k)
            permutation_[j, i] = disp_p

    print(index_list[0][:-2])
    for i, k in enumerate(np.unique(y)):
        print(str(k) + ': dispersion = ' + str(relative_dispersion[i]) +
              ' p = ' + str(np.count_nonzero(permutation_[i] < dispersion_[i]) / 2000.) +
              ' n: ' + str(np.count_nonzero(y == k)))


def cluster_dispersion(distance, labels, cluster_number):
Example #35
 def test_permutation_longs(self):
     random.seed(1234)
     a = random.permutation(12)
     random.seed(1234)
     b = random.permutation(long(12))
     assert_array_equal(a, b)
Example #36
        dispersion_ = []
        
        for k in np.unique(y):
            disp_ = cluster_dispersion(dist_, y, k)
            dispersion_.append(disp_)
        
        
        total_dispersion = cluster_dispersion(dist_, np.zeros_like(y), 0)
        relative_dispersion = np.array(dispersion_)/total_dispersion
        
        from numpy.random.mtrand import permutation
        
        permutation_ = np.zeros((np.unique(y).shape[0], 2000))
        
        for i in range(2000):
            y_perm = permutation(y)
            
            dispersion_p = []
            
            for j, k in enumerate(np.unique(y_perm)):
                disp_p = cluster_dispersion(dist_, y_perm, k)
                permutation_[j, i] = disp_p
        
        print(index_list[0][:-2])
        for i, k in enumerate(np.unique(y)):
            print(str(k) + ': dispersion = ' + str(relative_dispersion[i]) +
                  ' p = ' + str(np.count_nonzero(permutation_[i] < dispersion_[i]) / 2000.) +
                  ' n: ' + str(np.count_nonzero(y == k)))


Example #37
def permutation(n):
    "permutation(n) = a permutation of indices range(n)"
    return mt.permutation(n)
Example #38
    print(i)
    return i


def updatefig(*args):
    global visual
    mat = np.reshape(visual[get_index()], (28, 28))
    im.set_array(mat)
    return im,


if __name__ == '__main__':
    global visual
    data = import_data.load_dataset()
    data = np.resize(data, (50000, 784))
    perm = permutation(50000)
    data = data[perm]
    rbm = alt_rbm.RBM(784, 200, learning_rate=0.1)
    rbm.train(data[0:20], 2000)
    f = open("matrix{}".format(datetime.datetime.now()), "wb")
    np.save(f, rbm.weights)
    # good ones are 6 data[6] >
    initial = np.random.rand(201)
    im = plt.imshow(np.random.rand(28, 28), cmap=plt.get_cmap('gray'), animated=True)
    visual = rbm.daydream(2, initial)

    ani = animation.FuncAnimation(fig, updatefig, interval=2000, blit=True)
    plt.show()