Example #1
    def _update_datamap(self):
        self._last_region = []
        # Create a new grid of the appropriate size, initialize it with new
        # Cell instances (of type self.celltype), and perform point insertion
        # on the new data.
        if self._data is None:
            self._cellgrid = array([], dtype=object)
            self._cell_lefts = array([])
            self._cell_bottoms = array([])
        else:
            num_x_cells, num_y_cells = self._calc_grid_dimensions()
            self._cellgrid = zeros((num_x_cells, num_y_cells), dtype=object)
            for i in range(num_x_cells):
                for j in range(num_y_cells):
                    self._cellgrid[i,j] = self.celltype(parent=self)
            ll, ur = self._extents
            cell_width = ur[0]/num_x_cells
            cell_height = ur[1]/num_y_cells

            # calculate the left and bottom edges of all the cells and store
            # them in two arrays
            self._cell_lefts = arange(ll[0], ll[0]+ur[0]-cell_width/2, step=cell_width)
            self._cell_bottoms = arange(ll[1], ll[1]+ur[1]-cell_height/2, step=cell_height)

            self._cell_extents = (cell_width, cell_height)

            # insert the data points
            self._basic_insertion(self.celltype)
        return
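The edge computation above is easy to check in isolation. A minimal standalone sketch (assuming NumPy, with a hypothetical 4x4 grid whose extents store the lower-left corner and the overall width/height, as the method treats them):

import numpy as np

ll = (0.0, 0.0)            # lower-left corner
ur = (100.0, 80.0)         # overall width/height
num_x_cells, num_y_cells = 4, 4
cell_width = ur[0] / num_x_cells    # 25.0
cell_height = ur[1] / num_y_cells   # 20.0
# Stopping half a cell short of the end keeps arange from emitting a
# spurious extra edge due to floating-point round-off.
cell_lefts = np.arange(ll[0], ll[0] + ur[0] - cell_width / 2, step=cell_width)
cell_bottoms = np.arange(ll[1], ll[1] + ur[1] - cell_height / 2, step=cell_height)
print(cell_lefts)     # [  0.  25.  50.  75.]
print(cell_bottoms)   # [  0.  20.  40.  60.]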
Example #2
    def test_basic_instantiation(self):
        '''
        Tests the basic instantiation of the SHIFT class
        '''
        # Instantiation with a float
        self.model = Shift(5.0)
        np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                             np.array([5.0]))
        self.assertEqual(self.model.number_magnitudes, 1)
        # Instantiation with a numpy array
        self.model = Shift(np.arange(5., 8., 0.5))
        np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                             np.arange(5., 8., 0.5))
        self.assertEqual(self.model.number_magnitudes, 6)
        # Instantiation with a list
        self.model = Shift([5., 6., 7., 8.])
        np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                             np.array([5., 6., 7., 8.]))
        self.assertEqual(self.model.number_magnitudes, 4)
        # Any other input type should raise an error
        with self.assertRaises(ValueError) as ae:
            self.model = Shift(None)
        self.assertEqual(str(ae.exception),
                         'Minimum magnitudes must be float, list or array')
        # Check regionalisation - assuming defaults
        self.model = Shift(5.0)
        for region in self.model.regionalisation.keys():
            self.assertDictEqual(BIRD_GLOBAL_PARAMETERS[region],
                                 self.model.regionalisation[region])
        np.testing.assert_array_almost_equal(np.log10(self.model.base_rate),
                                             np.array([-20.74610902]))
Example #3
    def compute_dr_wrt(self, wrt):

        if wrt not in (self.v, self.rt, self.t):
            return
        
        if wrt is self.t:
            if not hasattr(self, '_drt') or self._drt.shape[0] != self.v.r.size:                
                IS = np.arange(self.v.r.size)
                JS = IS % 3
                data = np.ones(len(IS))
                self._drt = sp.csc_matrix((data, (IS, JS)))
            return self._drt
        
        if wrt is self.rt:
            rot, rot_dr = cv2.Rodrigues(self.rt.r)
            rot_dr = rot_dr.reshape((3,3,3))
            dr = np.einsum('abc, zc -> zba', rot_dr, self.v.r).reshape((-1,3))
            return dr
        
        if wrt is self.v:
            rot = cv2.Rodrigues(self.rt.r)[0]
            
            IS = np.repeat(np.arange(self.v.r.size), 3)
            JS = np.repeat(np.arange(self.v.r.size).reshape((-1,3)), 3, axis=0)
            data = np.vstack([rot for i in range(self.v.r.size // 3)])
            result = sp.csc_matrix((data.ravel(), (IS.ravel(), JS.ravel())))
            return result
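For wrt self.t, the Jacobian built above is just a stacked 3x3 identity: each flattened vertex coordinate depends on exactly one of the translation components. A standalone sketch of that construction, assuming NumPy and SciPy:

import numpy as np
import scipy.sparse as sp

n = 6                        # two 3-D vertices, flattened
IS = np.arange(n)            # one row per vertex coordinate
JS = IS % 3                  # column: which of (tx, ty, tz) it depends on
drt = sp.csc_matrix((np.ones(n), (IS, JS)))
print(drt.toarray())         # two stacked 3x3 identity blocks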
Example #4
import numpy as np
import matplotlib.pyplot as plt

def scree_plot(pca_obj, fname=None):
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
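A usage sketch on synthetic data (assuming scikit-learn is available; the data and file name are illustrative):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 10)              # 200 samples, 10 features
pca = PCA(n_components=10).fit(X)
scree_plot(pca)                     # show interactively
scree_plot(pca, fname='scree.png')  # or write to file instead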
Example #5
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
Example #6
def __unit_test_onset_function(metric):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # First, test for a warning on empty onsets
        metric(np.array([]), np.arange(10))
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference onsets are empty."
        metric(np.arange(10), np.array([]))
        assert len(w) == 2
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Estimated onsets are empty."
        # And that the metric is 0
        assert np.allclose(metric(np.array([]), np.array([])), 0)

    # Now test validation function - onsets must be 1d ndarray
    onsets = np.array([[1., 2.]])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be in seconds (so not huge)
    onsets = np.array([1e10, 1e11])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be sorted
    onsets = np.array([2., 1.])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)

    # Valid onsets which are the same produce a score of 1 for all metrics
    onsets = np.arange(10, dtype=float)
    assert np.allclose(metric(onsets, onsets), 1)
Example #7
def barplot(grouped_df, _column, statistic, levels=[0]):
    means = grouped_df.groupby(level=levels).mean()
    bar_width = 1.0/(len(means.index))
    error_config = {'ecolor': '0.1'}
    sems = grouped_df.groupby(level=levels).sem().fillna(0)
    fig = plt.figure()
    fig.set_size_inches(10,6)
    ax = fig.add_subplot(1,1,1)
    
    plt.bar(np.arange(0.1,(len(means.index)+0.1),1), 
            means[_column].fillna(0), 
            color= '#AAAAAA',
            yerr=sems[_column].fillna(0),
            error_kw=error_config,
            label=list(means.index))

    if means[_column].values.min() >= 0:
        ax.set_ylim(0,1.1*((means[_column] + sems[_column]).values.max()))
    else:
        ax.set_ylim(1.1*((means[_column] - sems[_column]).values.min()),1.1*((means[_column] + sems[_column]).values.max()))
    
    ax.set_ylabel(statistic + ' ' + u"\u00B1" + ' SEM', fontsize=20)   # +/- sign is u"\u00B1"
    ax.set_xticks(np.arange(0.1+bar_width/2.0,(len(means.index)+0.1+(bar_width/2.0)),1)) 
    ax.set_xticklabels(list(means.index), rotation=90)
    ax.tick_params(axis='y', labelsize=16 )
    ax.set_xlabel('Group', fontsize=20)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    return fig
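A usage sketch with a synthetic grouped DataFrame (assuming pandas and matplotlib; barplot expects a frame indexed by the grouping level):

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
idx = pd.Index(np.repeat(['ctrl', 'treatA', 'treatB'], 10), name='group')
grouped_df = pd.DataFrame({'speed': rng.randn(30)}, index=idx)
fig = barplot(grouped_df, 'speed', 'Mean speed')
fig.savefig('barplot.png')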
Example #8
    def test_sort_index_multicolumn(self):
        import random
        A = np.arange(5).repeat(20)
        B = np.tile(np.arange(5), 20)
        random.shuffle(A)
        random.shuffle(B)
        frame = DataFrame({'A': A, 'B': B,
                           'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'])
        result = frame.sort_values(by=['A', 'B'])
        indexer = np.lexsort((frame['B'], frame['A']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'], ascending=False)
        result = frame.sort_values(by=['A', 'B'], ascending=False)
        indexer = np.lexsort((frame['B'].rank(ascending=False),
                              frame['A'].rank(ascending=False)))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['B', 'A'])
        result = frame.sort_values(by=['B', 'A'])
        indexer = np.lexsort((frame['A'], frame['B']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
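The lexsort-based expectations work because np.lexsort treats its *last* key as the primary one; a minimal check of the equivalence the test relies on:

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [2, 1, 2, 1], 'B': [1, 2, 0, 0]})
indexer = np.lexsort((df['B'], df['A']))      # primary key A, secondary B
assert df.sort_values(by=['A', 'B']).equals(df.take(indexer))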
Example #9
    def test_sort_index_different_sortorder(self):
        A = np.arange(20).repeat(5)
        B = np.tile(np.arange(5), 20)

        indexer = np.random.permutation(100)
        A = A.take(indexer)
        B = B.take(indexer)

        df = DataFrame({'A': A, 'B': B,
                        'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['A', 'B'], ascending=[1, 0])
        result = df.sort_values(by=['A', 'B'], ascending=[1, 0])

        ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
        expected = df.take(ex_indexer)
        assert_frame_equal(result, expected)

        # test with multiindex, too
        idf = df.set_index(['A', 'B'])

        result = idf.sort_index(ascending=[1, 0])
        expected = idf.take(ex_indexer)
        assert_frame_equal(result, expected)

        # also, Series!
        result = idf['C'].sort_index(ascending=[1, 0])
        assert_series_equal(result, expected['C'])
Example #10
    def get_gabors(self, rf):
        lams =  float(rf[0])/self.sfs # lambda = 1./sf  #1./np.array([.1,.25,.4])
        sigma = rf[0]/2./np.pi
        # rf = [100,100]
        gabors = np.zeros((len(self.oris), len(self.phases), len(lams), rf[0], rf[1]))

        i = np.arange(-rf[0]/2+1,rf[0]/2+1)
        #print i
        j = np.arange(-rf[1]/2+1,rf[1]/2+1)
        ii,jj = np.meshgrid(i,j)
        for o, theta in enumerate(self.oris):
            x = ii*np.cos(theta) + jj*np.sin(theta)
            y = -ii*np.sin(theta) + jj*np.cos(theta)

            for p, phase in enumerate(self.phases):
                for s, lam in enumerate(lams):
                    fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
                    fxx -= np.mean(fxx)
                    fxx /= np.linalg.norm(fxx)

                    #if p==0:
                        #plt.subplot(len(oris),len(lams),count+1)
                        #plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
                        #count+=1

                    gabors[o,p,s,:,:] = fxx
        plt.show()
        return gabors
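A single Gabor patch following the same formula, pulled out of the class so it runs standalone (the parameter values are illustrative):

import numpy as np

size, lam, theta, phase = 64, 16.0, np.pi / 4, 0.0
sigma = size / 2.0 / np.pi
i = np.arange(-size // 2 + 1, size // 2 + 1)
ii, jj = np.meshgrid(i, i)
x = ii * np.cos(theta) + jj * np.sin(theta)      # rotated coordinates
y = -ii * np.sin(theta) + jj * np.cos(theta)
gabor = np.cos(2 * np.pi * x / lam + phase) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
gabor -= gabor.mean()                            # zero mean
gabor /= np.linalg.norm(gabor)                   # unit norm
print(gabor.shape)                               # (64, 64)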
Example #11
    def test_groupby_groups_datetimeindex(self):
        # GH#1430
        periods = 1000
        ind = pd.date_range(start='2012/1/1', freq='5min', periods=periods)
        df = DataFrame({'high': np.arange(periods),
                        'low': np.arange(periods)}, index=ind)
        grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))

        # it works!
        groups = grouped.groups
        assert isinstance(list(groups.keys())[0], datetime)

        # GH#11442
        index = pd.date_range('2015/01/01', periods=5, name='date')
        df = pd.DataFrame({'A': [5, 6, 7, 8, 9],
                           'B': [1, 2, 3, 4, 5]}, index=index)
        result = df.groupby(level='date').groups
        dates = ['2015-01-05', '2015-01-04', '2015-01-03',
                 '2015-01-02', '2015-01-01']
        expected = {pd.Timestamp(date): pd.DatetimeIndex([date], name='date')
                    for date in dates}
        tm.assert_dict_equal(result, expected)

        grouped = df.groupby(level='date')
        for date in dates:
            result = grouped.get_group(date)
            data = [[df.loc[date, 'A'], df.loc[date, 'B']]]
            expected_index = pd.DatetimeIndex([date], name='date')
            expected = pd.DataFrame(data,
                                    columns=list('AB'),
                                    index=expected_index)
            tm.assert_frame_equal(result, expected)
Example #12
    def get_new_columns(self):
        if self.value_columns is None:
            return self.removed_level

        stride = len(self.removed_level)
        width = len(self.value_columns)
        propagator = np.repeat(np.arange(width), stride)
        if isinstance(self.value_columns, MultiIndex):
            new_levels = self.value_columns.levels + [self.removed_level]
            new_names = self.value_columns.names + [self.removed_name]

            new_labels = [lab.take(propagator)
                          for lab in self.value_columns.labels]
            new_labels.append(np.tile(np.arange(stride), width))
        else:
            new_levels = [self.value_columns, self.removed_level]
            new_names = [self.value_columns.name, self.removed_name]

            new_labels = []

            new_labels.append(propagator)
            new_labels.append(np.tile(np.arange(stride), width))

        return MultiIndex(levels=new_levels, labels=new_labels,
                          names=new_names)
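The repeat/tile pair above generates the outer and inner label codes of the product index; a minimal NumPy sketch of the pattern:

import numpy as np

width, stride = 3, 2   # e.g. 3 value columns, 2 labels in the removed level
print(np.repeat(np.arange(width), stride))  # [0 0 1 1 2 2]  outer codes
print(np.tile(np.arange(stride), width))    # [0 1 0 1 0 1]  inner codes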
Example #13
def test_stratified_shuffle_split_init():
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))

    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))

    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
Example #14
def test_as_float_array():
    # Test function for as_float_array
    X = np.ones((3, 10), dtype=np.int32)
    X = X + np.arange(10, dtype=np.int32)
    # Checks that the return type is ok
    X2 = as_float_array(X, copy=False)
    np.testing.assert_equal(X2.dtype, np.float32)
    # Another test
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    # Checking that the array wasn't overwritten
    assert_true(as_float_array(X, False) is not X)
    # Checking that the new type is ok
    np.testing.assert_equal(X2.dtype, np.float64)
    # Here, X is of the right type, it shouldn't be modified
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Test that if X is fortran ordered it stays
    X = np.asfortranarray(X)
    assert_true(np.isfortran(as_float_array(X, copy=True)))

    # Test the copy parameter with some matrices
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray()
    ]
    for M in matrices:
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
Example #15
    def test_series(self):

        # GH6407
        # inferring series

        # invalid type of Series
        for s in [ Series(np.arange(10)),
                   Series(np.arange(10.))]:
            self.assertRaises(TypeError, lambda : infer_freq(s))

        # a non-convertible string
        self.assertRaises(ValueError, lambda : infer_freq(Series(['foo','bar'])))

        # cannot infer on PeriodIndex
        for freq in [None, 'L', 'Y']:
            s = Series(period_range('2013',periods=10,freq=freq))
            self.assertRaises(TypeError, lambda : infer_freq(s))

        # DateTimeIndex
        for freq in ['M', 'L', 'S']:
            s = Series(date_range('20130101',periods=10,freq=freq))
            inferred = infer_freq(s)
            self.assertEqual(inferred,freq)

        s = Series(date_range('20130101','20130110'))
        inferred = infer_freq(s)
        self.assertEqual(inferred,'D')
Example #16
File: LVQ.py Project: jayshonzs/ESL
def draw(data, classes, model, resolution=100):
    mycm = mpl.cm.get_cmap('Paired')
    
    one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
    two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
    xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
                           np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(predict(model, inputs[i])[0])
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    
    t = np.zeros(15)
    for i in range(15):
        if i < 5:
            t[i] = 0
        elif i < 10:
            t[i] = 1
        else:
            t[i] = 2
    plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
    
    plt.xlim([0, 10])
    plt.ylim([0, 10])
    
    plt.show()
Example #17
def all_patches(padded_brain, i, predict_patchsize, obs_patchsize, num_channels):

    image = padded_brain[i]
    ishape_h, ishape_w = padded_brain.shape[1:3]
    half_obs_patchsize = obs_patchsize // 2
    half_predict_patchsize = predict_patchsize // 2
    # Pad the image so an observation-sized window fits around every
    # prediction-sized patch, even at the borders.
    margin = half_obs_patchsize - half_predict_patchsize
    extended_image = np.zeros((ishape_h + obs_patchsize - predict_patchsize,
                               ishape_w + obs_patchsize - predict_patchsize,
                               num_channels))
    extended_image[margin:-margin, margin:-margin] = image
    num_patches_rows = ishape_h // predict_patchsize
    num_patches_cols = ishape_w // predict_patchsize

    list_patches = np.zeros((num_patches_cols * num_patches_rows,
                             obs_patchsize, obs_patchsize, num_channels))
    index = 0
    h_range = np.arange(half_obs_patchsize, ishape_h + half_obs_patchsize, predict_patchsize)
    v_range = np.arange(half_obs_patchsize, ishape_w + half_obs_patchsize, predict_patchsize)
    for index_h in h_range:
        for index_w in v_range:
            patch_brain = extended_image[index_h - half_obs_patchsize: index_h + half_obs_patchsize,
                                         index_w - half_obs_patchsize: index_w + half_obs_patchsize, :]
            list_patches[index, :, :, :] = patch_brain
            index += 1
    assert index == num_patches_rows * num_patches_cols
    return list_patches
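A quick shape check on synthetic data (sizes chosen so the patch grid divides evenly, which the function assumes):

import numpy as np

padded = np.zeros((2, 32, 32, 3))   # two padded 32x32 volumes, 3 channels
patches = all_patches(padded, 0, predict_patchsize=8, obs_patchsize=16,
                      num_channels=3)
print(patches.shape)                # (16, 16, 16, 3): a 4x4 grid of 16x16 patches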
Example #18
def main():
    """The main function."""

    # Build data ################

    x = np.arange(0, 10000, 500)
    y = np.arange(0, 1, 0.05)

    xx, yy = np.meshgrid(x, y)
    z = np.power(xx,yy)

    print "xx ="
    print xx
    print "yy ="
    print yy
    print "z ="
    print z

    # Plot data #################

    fig = plt.figure()
    ax = axes3d.Axes3D(fig)

    surf = ax.plot_surface(xx, yy, z, cmap=cm.jet, rstride=1, cstride=1, color='b', shade=True)

    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")

    fig.colorbar(surf, shrink=0.5, aspect=5)

    plt.show()
Example #19
def main():
    y, x = svmutil.svm_read_problem("char_recon_shuffled.db")
    x_train = x[:1800]
    y_train = y[:1800]
    x_val = x[1800:]
    y_val = y[1800:]

    gamma_c_pairs = [GammaCPair(1.0 / (2.0 * (3.0 ** log_sigma) ** 2), 3.0 ** log_C)
                     for log_sigma in [7]
                     for log_C     in [3]
                    ]

    log_log_pairs = [[log_sigma, log_C]
                     for log_sigma in np.arange(6, 10, 0.5)
                     for log_C     in np.arange(0, 5, 0.5)
                    ]

    def cv(gamma_c):
        return get_cross_val(x_train, y_train, x_val, y_val, gamma_c)

    cross_val = []

    for gamma_c in gamma_c_pairs:
        cross_val.append(cv(gamma_c))

    with open("gamma_c", "w") as f:
        for i in range(len(gamma_c_pairs)):
            f.write("{0}   {1}   {2}\n".format(log_log_pairs[i][0], log_log_pairs[i][1], cross_val[i]))
Example #20
	def adwin_ultrafast_M_set_input(self, msmnt_result=[], nr_coeff=1):

		self.max_k = 2**self.N
		discr_steps = 2*self.max_k+1
		self.msmnt_phases = np.zeros((self.reps,self.N))
		self.msmnt_times = np.zeros(self.N)
		self.msmnt_results = np.zeros((self.reps,self.N))

		m = np.zeros (self.N+1)
		t = np.zeros (self.N+1)
		th = np.zeros(self.N+1)

		p_real = np.zeros (discr_steps)
		p_imag = np.zeros (discr_steps)
		p_real [0] = 1./(2*np.pi)
		t[0] = 2**self.N

		for n in np.arange(self.N)+1:
			t[n] = int(2**(self.N-n))
			k_opt = int(2**(self.N-n+1))
			th[n] = -0.5*np.angle(-1j*p_imag[k_opt]+p_real[k_opt])

			nr_ones = msmnt_result [n-1]
			nr_zeros = self.M - nr_ones

			for mmm in np.arange(nr_ones):
				for j in np.arange(nr_coeff) - nr_coeff//2:
					if (k_opt+j*t[n]>=0):
						p_real, p_imag = self.adwin_update (p_real = p_real, p_imag = p_imag, meas_res = 1, phase = th[n], tn = t[n], k=k_opt+j*t[n])
			for mmm in np.arange(nr_zeros):
				for j in np.arange(nr_coeff) - nr_coeff//2:
					if (k_opt+j*t[n]>=0):
						p_real, p_imag = self.adwin_update (p_real = p_real, p_imag = p_imag, meas_res = 0, phase = th[n], tn = t[n], k=k_opt+j*t[n])

		return th[1:]
Example #21
def test_no_interpolation_origin():
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax.imshow(np.arange(100).reshape((2, 50)), origin="lower", interpolation='none')

    ax = fig.add_subplot(212)
    ax.imshow(np.arange(100).reshape((2, 50)), interpolation='none')
Example #22
def plot_figs(fig_num, elev, azim, a):
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    ax.scatter(body_mass, work_level, heat_output, c='k', marker='+')
   
    X = np.arange(55, 85, 0.5)
    Y = np.arange(90, 180, 0.5)
    X, Y = np.meshgrid(X, Y)
    Z = a[0] + a[1]*X + Y/(a[2] + a[3]*X)
    ax.plot_surface(X, Y, Z,alpha=.5, antialiased=True,rstride=200, cstride=100, cmap=plt.cm.coolwarm)
             
    ax.set_xlabel('BODY_MASS', color='b')
    ax.set_ylabel('WORK_LEVEL', color='b')
    ax.set_zlabel('HEAT_OUTPUT', color='b')
    
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    
    ax.zaxis.set_major_locator(plt.LinearLocator(10))  
    ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
    ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
    ax.zaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
Example #23
    def createContext(self, item, plot, onlimits):
        self.origin = item.getOrigin()
        self.scale = item.getScale()

        self.data = item.getData(copy=True)

        if onlimits:
            minX, maxX = plot.getXAxis().getLimits()
            minY, maxY = plot.getYAxis().getLimits()

            XMinBound = int((minX - self.origin[0]) / self.scale[0])
            YMinBound = int((minY - self.origin[1]) / self.scale[1])
            XMaxBound = int((maxX - self.origin[0]) / self.scale[0])
            YMaxBound = int((maxY - self.origin[1]) / self.scale[1])

            XMinBound = max(XMinBound, 0)
            YMinBound = max(YMinBound, 0)

            if XMaxBound <= XMinBound or YMaxBound <= YMinBound:
                self.data = None
            else:
                self.data = self.data[YMinBound:YMaxBound + 1,
                                      XMinBound:XMaxBound + 1]
        if self.data is not None and self.data.size > 0:
            self.min, self.max = min_max(self.data)
        else:
            self.min, self.max = None, None
        self.values = self.data

        if self.values is not None:
            self.axes = (self.origin[1] + self.scale[1] * numpy.arange(self.data.shape[0]),
                         self.origin[0] + self.scale[0] * numpy.arange(self.data.shape[1]))
Example #24
def test_arrayrepr():
    """Check the array representation."""
    # Check resize
    clf = tree.DecisionTreeRegressor(max_depth=None)
    X = np.arange(10000)[:, np.newaxis]
    y = np.arange(10000)
    clf.fit(X, y)
Example #25
def show_plot(X, y, n_neighbors=10, h=0.2):
    # Create color maps
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])

    for weights in ['uniform', 'distance']:
        # we create an instance of Neighbours Classifier and fit the data.
        clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
        clf.fit(X, y)
        clf.n_neighbors = n_neighbors

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (n_neighbors, weights))

    plt.show()
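A usage sketch (assuming scikit-learn; the imports mirror the names the function body expects to find at module level, and the blob data is synthetic):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors
from sklearn.datasets import make_blobs

X, y = make_blobs(n_samples=150, centers=3, random_state=0)
show_plot(X, y, n_neighbors=5, h=0.2)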
Example #26
    def _setup_plot(self):
        self.plot.clear()
        explained_ratio = self._variance_ratio
        explained = self._cumulative
        p = min(len(self._variance_ratio), self.maxp)

        self.plot.plot(numpy.arange(p), explained_ratio[:p],
                       pen=pg.mkPen(QColor(Qt.red), width=2),
                       antialias=True,
                       name="Variance")
        self.plot.plot(numpy.arange(p), explained[:p],
                       pen=pg.mkPen(QColor(Qt.darkYellow), width=2),
                       antialias=True,
                       name="Cumulative Variance")

        cutpos = self._nselected_components() - 1
        self._line = pg.InfiniteLine(
            angle=90, pos=cutpos, movable=True, bounds=(0, p - 1))
        self._line.setCursor(Qt.SizeHorCursor)
        self._line.setPen(pg.mkPen(QColor(Qt.black), width=2))
        self._line.sigPositionChanged.connect(self._on_cut_changed)
        self.plot.addItem(self._line)

        self.plot_horlines = (
            pg.PlotCurveItem(pen=pg.mkPen(QColor(Qt.blue), style=Qt.DashLine)),
            pg.PlotCurveItem(pen=pg.mkPen(QColor(Qt.blue), style=Qt.DashLine)))
        self.plot_horlabels = (
            pg.TextItem(color=QColor(Qt.black), anchor=(1, 0)),
            pg.TextItem(color=QColor(Qt.black), anchor=(1, 1)))
        for item in self.plot_horlabels + self.plot_horlines:
            self.plot.addItem(item)
        self._set_horline_pos()

        self.plot.setRange(xRange=(0.0, p - 1), yRange=(0.0, 1.0))
        self._update_axis()
Example #27
    def unproject_depth_image(self, depth_image, camera_space=False):
        us = np.arange(depth_image.size) % depth_image.shape[1]
        vs = np.arange(depth_image.size) // depth_image.shape[1]
        ds = depth_image.ravel()
        uvd = ch.array(np.vstack((us.ravel(), vs.ravel(), ds.ravel())).T)
        xyz = self.unproject_points(uvd, camera_space=camera_space)
        return xyz.reshape((depth_image.shape[0], depth_image.shape[1], -1))
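The us/vs construction above just enumerates pixel columns and rows in raster order; isolated as a NumPy sketch:

import numpy as np

depth = np.zeros((2, 3))                      # toy 2x3 depth image
us = np.arange(depth.size) % depth.shape[1]   # column index of each pixel
vs = np.arange(depth.size) // depth.shape[1]  # row index of each pixel
print(us)  # [0 1 2 0 1 2]
print(vs)  # [0 0 0 1 1 1]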
Example #28
def svm_loss(x, y):
    """
    Computes the loss and gradient using for multiclass SVM classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
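A usage sketch with random scores (the shapes are illustrative); the returned gradient has the same shape as the input scores:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(6, 4)                 # 6 samples, 4 classes
y = rng.randint(4, size=6)          # integer labels in [0, 4)
loss, dx = svm_loss(x, y)
print(loss, dx.shape)               # scalar loss, gradient of shape (6, 4)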
Example #29
def showOverlapTable(modes_x, modes_y, **kwargs):
    """Show overlap table using :func:`~matplotlib.pyplot.pcolor`.  *modes_x*
    and *modes_y* are sets of normal modes, and correspond to x and y axes of
    the plot.  Note that mode indices are incremented by 1.  List of modes
    is assumed to contain a set of contiguous modes from the same model.

    Default arguments for :func:`~matplotlib.pyplot.pcolor`:

      * ``cmap=plt.cm.jet``
      * ``norm=plt.Normalize(0, 1)``"""

    import matplotlib.pyplot as plt

    overlap = abs(calcOverlap(modes_y, modes_x))
    if overlap.ndim == 0:
        overlap = np.array([[overlap]])
    elif overlap.ndim == 1:
        overlap = overlap.reshape((modes_y.numModes(), modes_x.numModes()))

    cmap = kwargs.pop('cmap', plt.cm.jet)
    norm = kwargs.pop('norm', plt.Normalize(0, 1))
    show = (plt.pcolor(overlap, cmap=cmap, norm=norm, **kwargs),
            plt.colorbar())
    x_range = np.arange(1, modes_x.numModes() + 1)
    plt.xticks(x_range-0.5, x_range)
    plt.xlabel(str(modes_x))
    y_range = np.arange(1, modes_y.numModes() + 1)
    plt.yticks(y_range-0.5, y_range)
    plt.ylabel(str(modes_y))
    plt.axis([0, modes_x.numModes(), 0, modes_y.numModes()])
    if SETTINGS['auto_show']:
        showFigure()
    return show
Example #30
  def testOneShotIteratorInsideContainer(self):
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def within_container():
      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)
      iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                  .map(_map_fn).repeat(14).make_one_shot_iterator())
      return iterator.get_next()

    server = server_lib.Server.create_local_server()

    # Create two iterators within unique containers, and run them to
    # make sure that the resources aren't shared.
    #
    # The test below would fail if cname were the same across both
    # sessions.
    for i in range(2):
      with session.Session(server.target) as sess:
        cname = "iteration%d" % i
        with ops.container(cname):
          get_next = within_container()

        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)