Code Example #1
  def testOneShotIteratorInsideContainer(self):
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def within_container():
      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)
      iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                  .map(_map_fn).repeat(14).make_one_shot_iterator())
      return iterator.get_next()

    server = server_lib.Server.create_local_server()

    # Create two iterators within unique containers, and run them to
    # make sure that the resources aren't shared.
    #
    # The test below would fail if cname were the same across both
    # sessions.
    for i in range(2):
      with session.Session(server.target) as sess:
        cname = "iteration%d" % i
        with ops.container(cname):
          get_next = within_container()

        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
Code Example #2
File: plotting.py  Project: karolamik13/ProDy
def showOverlapTable(modes_x, modes_y, **kwargs):
    """Show overlap table using :func:`~matplotlib.pyplot.pcolor`.  *modes_x*
    and *modes_y* are sets of normal modes, and correspond to x and y axes of
    the plot.  Note that mode indices are incremented by 1.  List of modes
    is assumed to contain a set of contiguous modes from the same model.

    Default arguments for :func:`~matplotlib.pyplot.pcolor`:

      * ``cmap=plt.cm.jet``
      * ``norm=plt.normalize(0, 1)``"""

    import matplotlib.pyplot as plt

    overlap = abs(calcOverlap(modes_y, modes_x))
    if overlap.ndim == 0:
        overlap = np.array([[overlap]])
    elif overlap.ndim == 1:
        overlap = overlap.reshape((modes_y.numModes(), modes_x.numModes()))

    cmap = kwargs.pop('cmap', plt.cm.jet)
    norm = kwargs.pop('norm', plt.normalize(0, 1))
    show = (plt.pcolor(overlap, cmap=cmap, norm=norm, **kwargs),
            plt.colorbar())
    x_range = np.arange(1, modes_x.numModes() + 1)
    plt.xticks(x_range-0.5, x_range)
    plt.xlabel(str(modes_x))
    y_range = np.arange(1, modes_y.numModes() + 1)
    plt.yticks(y_range-0.5, y_range)
    plt.ylabel(str(modes_y))
    plt.axis([0, modes_x.numModes(), 0, modes_y.numModes()])
    if SETTINGS['auto_show']:
        showFigure()
    return show
Code Example #3
File: camera.py  Project: cadik/opendr
 def unproject_depth_image(self, depth_image, camera_space=False):
     us = np.arange(depth_image.size) % depth_image.shape[1]
     vs = np.arange(depth_image.size) // depth_image.shape[1]
     ds = depth_image.ravel()
     uvd = ch.array(np.vstack((us.ravel(), vs.ravel(), ds.ravel())).T)
     xyz = self.unproject_points(uvd, camera_space=camera_space)
     return xyz.reshape((depth_image.shape[0], depth_image.shape[1], -1))
Code Example #4
File: subdivision_mapper.py  Project: 5n1p/chaco
    def _update_datamap(self):
        self._last_region = []
        # Create a new grid of the appropriate size, initialize it with new
        # Cell instance (of type self.celltype), and perform point insertion
        # on the new data.
        if self._data is None:
            self._cellgrid = array([], dtype=object)
            self._cell_lefts = array([])
            self._cell_bottoms = array([])
        else:
            num_x_cells, num_y_cells = self._calc_grid_dimensions()
            self._cellgrid = zeros((num_x_cells, num_y_cells), dtype=object)
            for i in range(num_x_cells):
                for j in range(num_y_cells):
                    self._cellgrid[i,j] = self.celltype(parent=self)
            ll, ur = self._extents
            cell_width = ur[0]/num_x_cells
            cell_height = ur[1]/num_y_cells

            # calculate the left and bottom edges of all the cells and store
            # them in two arrays
            self._cell_lefts = arange(ll[0], ll[0]+ur[0]-cell_width/2, step=cell_width)
            self._cell_bottoms = arange(ll[1], ll[1]+ur[1]-cell_height/2, step=cell_height)

            self._cell_extents = (cell_width, cell_height)

            # insert the data points
            self._basic_insertion(self.celltype)
        return
Code Example #5
File: camera.py  Project: cadik/opendr
    def compute_dr_wrt(self, wrt):

        if wrt not in (self.v, self.rt, self.t):
            return
        
        if wrt is self.t:
            if not hasattr(self, '_drt') or self._drt.shape[0] != self.v.r.size:                
                IS = np.arange(self.v.r.size)
                JS = IS % 3
                data = np.ones(len(IS))
                self._drt = sp.csc_matrix((data, (IS, JS)))
            return self._drt
        
        if wrt is self.rt:
            rot, rot_dr = cv2.Rodrigues(self.rt.r)
            rot_dr = rot_dr.reshape((3,3,3))
            dr = np.einsum('abc, zc -> zba', rot_dr, self.v.r).reshape((-1,3))
            return dr
        
        if wrt is self.v:
            rot = cv2.Rodrigues(self.rt.r)[0]
            
            IS = np.repeat(np.arange(self.v.r.size), 3)
            JS = np.repeat(np.arange(self.v.r.size).reshape((-1,3)), 3, axis=0)
            data = np.vstack([rot for i in range(self.v.r.size // 3)])
            result = sp.csc_matrix((data.ravel(), (IS.ravel(), JS.ravel())))
            return result
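The wrt-is-self.t branch above builds a sparse Jacobian in which every x coordinate depends only on t[0], every y on t[1], and every z on t[2]. The following standalone sketch of that construction uses a hypothetical vertex count and assumes only numpy and scipy.sparse; it is an illustration, not part of the opendr API.

import numpy as np
import scipy.sparse as sp

n_verts = 4                               # hypothetical vertex count
IS = np.arange(n_verts * 3)               # one row per flattened coordinate
JS = IS % 3                               # column 0/1/2 for x/y/z
drt = sp.csc_matrix((np.ones(len(IS)), (IS, JS)))
print(drt.toarray().shape)                # (12, 3): d(vertices)/d(translation)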
Code Example #6
def show_plot(X, y, n_neighbors=10, h=0.2):
    # Create color maps
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])

    for weights in ['uniform', 'distance']:
        # we create an instance of Neighbours Classifier and fit the data.
        clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
        clf.fit(X, y)
        clf.n_neighbors = n_neighbors

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (n_neighbors, weights))

    plt.show()
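show_plot only defines the plotting routine; the sketch below is a hypothetical driver, not part of the original snippet. It assumes the module-level imports the function itself relies on (numpy as np, matplotlib.pyplot as plt, ListedColormap, and sklearn's neighbors) and uses make_blobs purely for illustration.

from sklearn.datasets import make_blobs

# Three Gaussian blobs; the three labels match the "3-Class classification"
# title hard-coded in show_plot.
X, y = make_blobs(n_samples=300, centers=3, random_state=0)
show_plot(X, y, n_neighbors=15, h=0.2)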
Code Example #7
 def test_basic_instantiation(self):
     '''
     Tests the basic instantiation of the SHIFT class
     '''
     # Instantiatiation with float
     self.model = Shift(5.0)
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.array([5.0]))
     self.assertEqual(self.model.number_magnitudes, 1)
     # Instantiation with a numpy array
     self.model = Shift(np.arange(5., 8., 0.5))
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.arange(5., 8., 0.5))
     self.assertEqual(self.model.number_magnitudes, 6)
     # Instantiation with  list
     self.model = Shift([5., 6., 7., 8.])
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.array([5., 6., 7., 8.]))
     self.assertEqual(self.model.number_magnitudes, 4)
     # Otherwise raise an error
     with self.assertRaises(ValueError) as ae:
         self.model = Shift(None)
     self.assertEqual(ae.exception.message,
                      'Minimum magnitudes must be float, list or array')
     # Check regionalisation - assuming defaults
     self.model = Shift(5.0)
     for region in self.model.regionalisation.keys():
         self.assertDictEqual(BIRD_GLOBAL_PARAMETERS[region],
                              self.model.regionalisation[region])
     np.testing.assert_array_almost_equal(np.log10(self.model.base_rate),
                                          np.array([-20.74610902]))
Code Example #8
File: stats.py  Project: dnaudet/silx
    def createContext(self, item, plot, onlimits):
        self.origin = item.getOrigin()
        self.scale = item.getScale()

        self.data = item.getData(copy=True)

        if onlimits:
            minX, maxX = plot.getXAxis().getLimits()
            minY, maxY = plot.getYAxis().getLimits()

            XMinBound = int((minX - self.origin[0]) / self.scale[0])
            YMinBound = int((minY - self.origin[1]) / self.scale[1])
            XMaxBound = int((maxX - self.origin[0]) / self.scale[0])
            YMaxBound = int((maxY - self.origin[1]) / self.scale[1])

            XMinBound = max(XMinBound, 0)
            YMinBound = max(YMinBound, 0)

            if XMaxBound <= XMinBound or YMaxBound <= YMinBound:
                self.data = None
            else:
                self.data = self.data[YMinBound:YMaxBound + 1,
                                      XMinBound:XMaxBound + 1]
        if self.data is not None and self.data.size > 0:
            self.min, self.max = min_max(self.data)
        else:
            self.min, self.max = None, None
        self.values = self.data

        if self.values is not None:
            self.axes = (self.origin[1] + self.scale[1] * numpy.arange(self.data.shape[0]),
                         self.origin[0] + self.scale[0] * numpy.arange(self.data.shape[1]))
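The last two lines above convert the image origin and scale into per-row and per-column axis coordinates. A self-contained illustration of that arithmetic with invented values (not taken from silx):

import numpy

origin, scale, shape = (10.0, 0.0), (2.0, 1.0), (5, 4)   # (x, y) origin/scale; (rows, cols)
axes = (origin[1] + scale[1] * numpy.arange(shape[0]),    # y coordinate of each row
        origin[0] + scale[0] * numpy.arange(shape[1]))    # x coordinate of each column
print(axes)   # (array([0., 1., 2., 3., 4.]), array([10., 12., 14., 16.]))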
Code Example #9
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
Code Example #10
File: test_image.py  Project: 4over7/matplotlib
def test_no_interpolation_origin():
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax.imshow(np.arange(100).reshape((2, 50)), origin="lower", interpolation='none')

    ax = fig.add_subplot(212)
    ax.imshow(np.arange(100).reshape((2, 50)), interpolation='none')
Code Example #11
def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
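One way to exercise scree_plot, assuming scikit-learn is installed and the module already imports numpy as np and matplotlib.pyplot as plt; the data and component count below are arbitrary.

from sklearn.decomposition import PCA

X = np.random.randn(200, 10)
pca_obj = PCA(n_components=10).fit(X)
scree_plot(pca_obj)                       # display interactively
# scree_plot(pca_obj, fname='scree.png')  # or write the figure to disk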
Code Example #12
def main():
    y, x = svmutil.svm_read_problem("char_recon_shuffled.db")
    x_train = x[:1800]
    y_train = y[:1800]
    x_val = x[1800:]
    y_val = y[1800:]

    gamma_c_pairs = [GammaCPair(1.0 / (2.0 * (3.0 ** log_sigma) ** 2), 3.0 ** log_C)
                     for log_sigma in [7]
                     for log_C     in [3]
                    ]

    log_log_pairs = [[log_sigma, log_C]
                     for log_sigma in np.arange(6, 10, 0.5)
                     for log_C     in np.arange(0, 5, 0.5)
                    ]

    def cv(gamma_c):
        return get_cross_val(x_train, y_train, x_val, y_val, gamma_c)

    cross_val = []

    for gamma_c in gamma_c_pairs:
        cross_val.append(cv(gamma_c))

    f = open("gamma_c", "w")
    for i in range(len(gamma_c_pairs)):
        f.write("{0}   {1}   {2}\n".format(log_log_pairs[i][0], log_log_pairs[i][1], cross_val[i]))
    f.close()
Code Example #13
File: utilities.py  Project: dbath/wahnsinn
def barplot(grouped_df, _column, statistic, levels=[0]):
    means = grouped_df.groupby(level=levels).mean()
    bar_width = 1.0/(len(means.index))
    error_config = {'ecolor': '0.1'}
    sems = grouped_df.groupby(level=levels).sem().fillna(0)
    fig = plt.figure()
    fig.set_size_inches(10,6)
    ax = fig.add_subplot(1,1,1)
    
    plt.bar(np.arange(0.1,(len(means.index)+0.1),1), 
            means[_column].fillna(0), 
            color= '#AAAAAA',
            yerr=sems[_column].fillna(0),
            error_kw=error_config,
            label=list(means.index))

    if means[_column].values.min() >= 0:
        ax.set_ylim(0,1.1*((means[_column] + sems[_column]).values.max()))
    else:
        ax.set_ylim(1.1*((means[_column] - sems[_column]).values.min()),1.1*((means[_column] + sems[_column]).values.max()))
    
    ax.set_ylabel(statistic + ' ' + u"\u00B1" + ' SEM', fontsize=20)   # +/- sign is u"\u00B1"
    ax.set_xticks(np.arange(0.1+bar_width/2.0,(len(means.index)+0.1+(bar_width/2.0)),1)) 
    ax.set_xticklabels(list(means.index), rotation=90)
    ax.tick_params(axis='y', labelsize=16 )
    ax.set_xlabel('Group', fontsize=20)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    return fig
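Despite its name, grouped_df is an ungrouped DataFrame that barplot groups internally by index level. A hedged usage sketch (pandas, numpy, and matplotlib assumed available; the data are invented for illustration):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'latency': np.random.randn(40)},
                  index=pd.Index(list('ABCD') * 10, name='group'))
fig = barplot(df, 'latency', statistic='mean', levels=[0])
plt.show()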
Code Example #14
def all_patches(padded_brain,i,predict_patchsize,obs_patchsize,num_channels):
    
    image = padded_brain[i]
    ishape_h , ishape_w = padded_brain.shape[1:3]
    #ipdb.set_trace()
    #ipdb.set_trace()
    half_obs_patchsize = obs_patchsize//2
    half_predict_patchsize = predict_patchsize//2
    extended_image = np.zeros((ishape_h+obs_patchsize-predict_patchsize,ishape_w+obs_patchsize-predict_patchsize,num_channels))
    extended_image[half_obs_patchsize - half_predict_patchsize   : -(half_obs_patchsize - half_predict_patchsize),half_obs_patchsize - half_predict_patchsize  : -(half_obs_patchsize - half_predict_patchsize)]= image
    num_patches_rows = ishape_h // predict_patchsize
    num_patches_cols = ishape_w // predict_patchsize
    
    list_patches = np.zeros((num_patches_cols*num_patches_rows, obs_patchsize, obs_patchsize, num_channels))
    index = 0
    h_range = np.arange(obs_patchsize//2,ishape_h+obs_patchsize//2,predict_patchsize)
    #h_range = h_range[:-1]
    v_range = np.arange(obs_patchsize//2,ishape_w+obs_patchsize//2,predict_patchsize)
    #v_range = v_range[:-1]
    #ipdb.set_trace()
    for index_h in h_range:
        for index_w in v_range:
            patch_brian = extended_image[index_h-obs_patchsize//2: index_h+obs_patchsize//2 ,index_w-obs_patchsize//2: index_w+obs_patchsize//2,:]
            #if patch_brian.shape == (38,29,4):
            #   ipdb.set_trace()
             
            list_patches[index,:,:,:] = patch_brian
            index += 1
    #ipdb.set_trace()
    assert index == num_patches_rows*num_patches_cols
    return list_patches       
Code Example #15
File: test_onset.py  Project: justinsalamon/mir_eval
def __unit_test_onset_function(metric):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # First, test for a warning on empty onsets
        metric(np.array([]), np.arange(10))
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference onsets are empty."
        metric(np.arange(10), np.array([]))
        assert len(w) == 2
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Estimated onsets are empty."
        # And that the metric is 0
        assert np.allclose(metric(np.array([]), np.array([])), 0)

    # Now test validation function - onsets must be 1d ndarray
    onsets = np.array([[1., 2.]])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be in seconds (so not huge)
    onsets = np.array([1e10, 1e11])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be sorted
    onsets = np.array([2., 1.])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)

    # Valid onsets which are the same produce a score of 1 for all metrics
    onsets = np.arange(10, dtype=np.float)
    assert np.allclose(metric(onsets, onsets), 1)
Code Example #16
File: layers.py  Project: Zardinality/cs231n_project
def svm_loss(x, y):
    """
    Computes the loss and gradient using for multiclass SVM classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
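A quick numerical check of svm_loss on a tiny batch; this is not part of the original layers.py and assumes numpy is imported as np in the module.

np.random.seed(0)
scores = np.random.randn(4, 3)      # N=4 samples, C=3 classes
labels = np.array([0, 2, 1, 2])
loss, dx = svm_loss(scores, labels)
print(loss)                         # averaged multiclass hinge loss
print(dx.shape)                     # (4, 3), same shape as the scores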
Code Example #17
File: test_frequencies.py  Project: B-Rich/pandas
    def test_series(self):

        # GH6407
        # inferring series

        # invalid type of Series
        for s in [ Series(np.arange(10)),
                   Series(np.arange(10.))]:
            self.assertRaises(TypeError, lambda : infer_freq(s))

        # a non-convertible string
        self.assertRaises(ValueError, lambda : infer_freq(Series(['foo','bar'])))

        # cannot infer on PeriodIndex
        for freq in [None, 'L', 'Y']:
            s = Series(period_range('2013',periods=10,freq=freq))
            self.assertRaises(TypeError, lambda : infer_freq(s))

        # DateTimeIndex
        for freq in ['M', 'L', 'S']:
            s = Series(date_range('20130101',periods=10,freq=freq))
            inferred = infer_freq(s)
            self.assertEqual(inferred,freq)

        s = Series(date_range('20130101','20130110'))
        inferred = infer_freq(s)
        self.assertEqual(inferred,'D')
Code Example #18
File: test_tree.py  Project: Calvin-O/scikit-learn
def test_arrayrepr():
    """Check the array representation."""
    # Check resize
    clf = tree.DecisionTreeRegressor(max_depth=None)
    X = np.arange(10000)[:, np.newaxis]
    y = np.arange(10000)
    clf.fit(X, y)
Code Example #19
File: owpca.py  Project: 675801717/orange3
    def _setup_plot(self):
        self.plot.clear()
        explained_ratio = self._variance_ratio
        explained = self._cumulative
        p = min(len(self._variance_ratio), self.maxp)

        self.plot.plot(numpy.arange(p), explained_ratio[:p],
                       pen=pg.mkPen(QColor(Qt.red), width=2),
                       antialias=True,
                       name="Variance")
        self.plot.plot(numpy.arange(p), explained[:p],
                       pen=pg.mkPen(QColor(Qt.darkYellow), width=2),
                       antialias=True,
                       name="Cumulative Variance")

        cutpos = self._nselected_components() - 1
        self._line = pg.InfiniteLine(
            angle=90, pos=cutpos, movable=True, bounds=(0, p - 1))
        self._line.setCursor(Qt.SizeHorCursor)
        self._line.setPen(pg.mkPen(QColor(Qt.black), width=2))
        self._line.sigPositionChanged.connect(self._on_cut_changed)
        self.plot.addItem(self._line)

        self.plot_horlines = (
            pg.PlotCurveItem(pen=pg.mkPen(QColor(Qt.blue), style=Qt.DashLine)),
            pg.PlotCurveItem(pen=pg.mkPen(QColor(Qt.blue), style=Qt.DashLine)))
        self.plot_horlabels = (
            pg.TextItem(color=QColor(Qt.black), anchor=(1, 0)),
            pg.TextItem(color=QColor(Qt.black), anchor=(1, 1)))
        for item in self.plot_horlabels + self.plot_horlines:
            self.plot.addItem(item)
        self._set_horline_pos()

        self.plot.setRange(xRange=(0.0, p - 1), yRange=(0.0, 1.0))
        self._update_axis()
Code Example #20
	def adwin_ultrafast_M_set_input (self, msmnt_result = [],  nr_coeff = 1):

		self.max_k = 2**self.N
		discr_steps = 2*self.max_k+1
		self.msmnt_phases = np.zeros((self.reps,self.N))
		self.msmnt_times = np.zeros(self.N)
		self.msmnt_results = np.zeros((self.reps,self.N))

		m = np.zeros (self.N+1)
		t = np.zeros (self.N+1)
		th = np.zeros(self.N+1)

		p_real = np.zeros (discr_steps)
		p_imag = np.zeros (discr_steps)
		p_real [0] = 1./(2*np.pi)
		t[0] = 2**self.N

		for n in np.arange(self.N)+1:
			t[n] = int(2**(self.N-n))
			k_opt = int(2**(self.N-n+1))
			th[n] = -0.5*np.angle(-1j*p_imag[k_opt]+p_real[k_opt])

			nr_ones = msmnt_result [n-1]
			nr_zeros = self.M - nr_ones

			for mmm in np.arange(nr_ones):
				for j in np.arange(nr_coeff)-nr_coeff/2:
					if (k_opt+j*t[n]>=0):
						p_real, p_imag = self.adwin_update (p_real = p_real, p_imag = p_imag, meas_res = 1, phase = th[n], tn = t[n], k=k_opt+j*t[n])
			for mmm in np.arange(nr_zeros):
				for j in np.arange(nr_coeff)-nr_coeff/2:
					if (k_opt+j*t[n]>=0):
						p_real, p_imag = self.adwin_update (p_real = p_real, p_imag = p_imag, meas_res = 0, phase = th[n], tn = t[n], k=k_opt+j*t[n])

		return th[1:]
Code Example #21
def plot_figs(fig_num, elev, azim, a):
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    ax.scatter(body_mass, work_level, heat_output, c='k', marker='+')
   
    X = np.arange(55, 85, 0.5)
    Y = np.arange(90, 180, 0.5)
    X, Y = np.meshgrid(X, Y)
    Z = a[0] + a[1]*X + Y/(a[2] + a[3]*X)
    ax.plot_surface(X, Y, Z,alpha=.5, antialiased=True,rstride=200, cstride=100, cmap=plt.cm.coolwarm)
             
    ax.set_xlabel('BODY_MASS', color='b')
    ax.set_ylabel('WORK_LEVEL', color='b')
    ax.set_zlabel('HEAT_OUTPUT', color='b')
    
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    
    ax.zaxis.set_major_locator(plt.LinearLocator(10))  
    ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
    ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
    ax.zaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
Code Example #22
File: LVQ.py  Project: jayshonzs/ESL
def draw(data, classes, model, resolution=100):
    mycm = mpl.cm.get_cmap('Paired')
    
    one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
    two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
    xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
                     np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(predict(model, inputs[i])[0])
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    
    t = np.zeros(15)
    for i in range(15):
        if i < 5:
            t[i] = 0
        elif i < 10:
            t[i] = 1
        else:
            t[i] = 2
    plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
    
    plt.xlim([0, 10])
    plt.ylim([0, 10])
    
    plt.show()
Code Example #23
File: test_sorting.py  Project: AlexisMignon/pandas
    def test_sort_index_multicolumn(self):
        import random
        A = np.arange(5).repeat(20)
        B = np.tile(np.arange(5), 20)
        random.shuffle(A)
        random.shuffle(B)
        frame = DataFrame({'A': A, 'B': B,
                           'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'])
        result = frame.sort_values(by=['A', 'B'])
        indexer = np.lexsort((frame['B'], frame['A']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'], ascending=False)
        result = frame.sort_values(by=['A', 'B'], ascending=False)
        indexer = np.lexsort((frame['B'].rank(ascending=False),
                              frame['A'].rank(ascending=False)))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['B', 'A'])
        result = frame.sort_values(by=['B', 'A'])
        indexer = np.lexsort((frame['A'], frame['B']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
Code Example #24
File: models.py  Project: Pulvinar/psychopy_ext
    def get_gabors(self, rf):
        lams =  float(rf[0])/self.sfs # lambda = 1./sf  #1./np.array([.1,.25,.4])
        sigma = rf[0]/2./np.pi
        # rf = [100,100]
        gabors = np.zeros(( len(oris),len(phases),len(lams), rf[0], rf[1] ))

        i = np.arange(-rf[0]/2+1,rf[0]/2+1)
        #print i
        j = np.arange(-rf[1]/2+1,rf[1]/2+1)
        ii,jj = np.meshgrid(i,j)
        for o, theta in enumerate(self.oris):
            x = ii*np.cos(theta) + jj*np.sin(theta)
            y = -ii*np.sin(theta) + jj*np.cos(theta)

            for p, phase in enumerate(self.phases):
                for s, lam in enumerate(lams):
                    fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
                    fxx -= np.mean(fxx)
                    fxx /= np.linalg.norm(fxx)

                    #if p==0:
                        #plt.subplot(len(oris),len(lams),count+1)
                        #plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
                        #count+=1

                    gabors[o,p,s,:,:] = fxx
        plt.show()
        return gabors
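The core of the method is the Gabor expression assigned to fxx; the same filter can be built outside the class with explicit, made-up parameters. The following is a standalone sketch, not the psychopy_ext API.

import numpy as np

rf = (64, 64)                          # receptive-field size in pixels
lam, sigma = 16.0, rf[0] / 2. / np.pi  # wavelength and Gaussian envelope width
theta, phase = np.pi / 4, 0.0          # orientation and phase
i = np.arange(-rf[0]/2 + 1, rf[0]/2 + 1)
j = np.arange(-rf[1]/2 + 1, rf[1]/2 + 1)
ii, jj = np.meshgrid(i, j)
x = ii*np.cos(theta) + jj*np.sin(theta)
y = -ii*np.sin(theta) + jj*np.cos(theta)
gabor = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2 + y**2) / (2*sigma**2))
gabor -= gabor.mean()
gabor /= np.linalg.norm(gabor)         # unit-norm filter, as in the method above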
Code Example #25
File: test_timegrouper.py  Project: sinhrks/pandas
    def test_groupby_groups_datetimeindex(self):
        # GH#1430
        periods = 1000
        ind = pd.date_range(start='2012/1/1', freq='5min', periods=periods)
        df = DataFrame({'high': np.arange(periods),
                        'low': np.arange(periods)}, index=ind)
        grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))

        # it works!
        groups = grouped.groups
        assert isinstance(list(groups.keys())[0], datetime)

        # GH#11442
        index = pd.date_range('2015/01/01', periods=5, name='date')
        df = pd.DataFrame({'A': [5, 6, 7, 8, 9],
                           'B': [1, 2, 3, 4, 5]}, index=index)
        result = df.groupby(level='date').groups
        dates = ['2015-01-05', '2015-01-04', '2015-01-03',
                 '2015-01-02', '2015-01-01']
        expected = {pd.Timestamp(date): pd.DatetimeIndex([date], name='date')
                    for date in dates}
        tm.assert_dict_equal(result, expected)

        grouped = df.groupby(level='date')
        for date in dates:
            result = grouped.get_group(date)
            data = [[df.loc[date, 'A'], df.loc[date, 'B']]]
            expected_index = pd.DatetimeIndex([date], name='date')
            expected = pd.DataFrame(data,
                                    columns=list('AB'),
                                    index=expected_index)
            tm.assert_frame_equal(result, expected)
Code Example #26
def test_stratified_shuffle_split_init():
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))

    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))

    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
Code Example #27
File: reshape.py  Project: AjayRamanathan/pandas
    def get_new_columns(self):
        if self.value_columns is None:
            return self.removed_level

        stride = len(self.removed_level)
        width = len(self.value_columns)
        propagator = np.repeat(np.arange(width), stride)
        if isinstance(self.value_columns, MultiIndex):
            new_levels = self.value_columns.levels + [self.removed_level]
            new_names = self.value_columns.names + [self.removed_name]

            new_labels = [lab.take(propagator)
                          for lab in self.value_columns.labels]
            new_labels.append(np.tile(np.arange(stride), width))
        else:
            new_levels = [self.value_columns, self.removed_level]
            new_names = [self.value_columns.name, self.removed_name]

            new_labels = []

            new_labels.append(propagator)
            new_labels.append(np.tile(np.arange(stride), width))

        return MultiIndex(levels=new_levels, labels=new_labels,
                          names=new_names)
Code Example #28
File: test_sorting.py  Project: AlexisMignon/pandas
    def test_sort_index_different_sortorder(self):
        A = np.arange(20).repeat(5)
        B = np.tile(np.arange(5), 20)

        indexer = np.random.permutation(100)
        A = A.take(indexer)
        B = B.take(indexer)

        df = DataFrame({'A': A, 'B': B,
                        'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['A', 'B'], ascending=[1, 0])
        result = df.sort_values(by=['A', 'B'], ascending=[1, 0])

        ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
        expected = df.take(ex_indexer)
        assert_frame_equal(result, expected)

        # test with multiindex, too
        idf = df.set_index(['A', 'B'])

        result = idf.sort_index(ascending=[1, 0])
        expected = idf.take(ex_indexer)
        assert_frame_equal(result, expected)

        # also, Series!
        result = idf['C'].sort_index(ascending=[1, 0])
        assert_series_equal(result, expected['C'])
Code Example #29
File: test_validation.py  Project: Afey/scikit-learn
def test_as_float_array():
    # Test function for as_float_array
    X = np.ones((3, 10), dtype=np.int32)
    X = X + np.arange(10, dtype=np.int32)
    # Checks that the return type is ok
    X2 = as_float_array(X, copy=False)
    np.testing.assert_equal(X2.dtype, np.float32)
    # Another test
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    # Checking that the array wasn't overwritten
    assert_true(as_float_array(X, False) is not X)
    # Checking that the new type is ok
    np.testing.assert_equal(X2.dtype, np.float64)
    # Here, X is of the right type, it shouldn't be modified
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Test that if X is fortran ordered it stays
    X = np.asfortranarray(X)
    assert_true(np.isfortran(as_float_array(X, copy=True)))

    # Test the copy parameter with some matrices
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray()
    ]
    for M in matrices:
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
Code Example #30
File: pw_func.py  Project: gandalfvn/mcts-1
def main():
    """The main function."""

    # Build data ################

    x = np.arange(0, 10000, 500)
    y = np.arange(0, 1, 0.05)

    xx, yy = np.meshgrid(x, y)
    z = np.power(xx,yy)

    print "xx ="
    print xx
    print "yy ="
    print yy
    print "z ="
    print z

    # Plot data #################

    fig = plt.figure()
    ax = axes3d.Axes3D(fig)

    surf = ax.plot_surface(xx, yy, z, cmap=cm.jet, rstride=1, cstride=1, color='b', shade=True)

    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")

    fig.colorbar(surf, shrink=0.5, aspect=5)

    plt.show()
Code Example #31
 def test_arange_to(self):
     """
     Create an ndarray of sequential numbers (stop value only).
     """
     vector = np.arange(5)
     assert_array_equal(vector, np.array([0, 1, 2, 3, 4]))
Code Example #32
File: test_validate.py  Project: xcorail/pyphi
def test_validate_tpm_nonbinary_nodes():
    tpm = np.arange(3*3*2).reshape(3, 3, 2)
    with pytest.raises(ValueError):
        assert validate.tpm(tpm)
Code Example #33
File: test_validate.py  Project: xcorail/pyphi
def test_validate_tpm_wrong_shape():
    tpm = np.arange(3**3).reshape(3, 3, 3)
    with pytest.raises(ValueError):
        assert validate.tpm(tpm)
Code Example #34
from hyperion.io import H5DataWriter
from hyperion.generators.sequence_batch_generator_v1 import SequenceBatchGeneratorV1 as SBG

output_dir = './tests/data_out/generators'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

h5_file = output_dir + '/seqbg.h5'
key_file = output_dir + '/seqbg.scp'

num_seqs = 10
dim = 2
min_seq_length = 100
delta = 10
max_seq_length = min_seq_length + (num_seqs - 1) * delta
seq_lengths = np.arange(100, max_seq_length + 1, delta)


def create_dataset():

    file_path = [str(k) for k in xrange(num_seqs)]
    key = []
    i = 0
    j = 0
    while i < num_seqs:
        key_i = (j + 1) * str(j)
        i += (i + 1)
        j += 1
        key += key_i
    key = key[:num_seqs]
Code Example #35
# file existence check
if os.path.isfile("/home/josemo/python/wavfiles/"+file_input):
    filename ="/home/josemo/python/wavfiles/"+file_input
    #filename ="/home/josemo/"+file_input
    print('file exists')
else:
    print('File does not exist')
    exit()

# read wave file
fs, data = wavfile.read(filename)
print(' fs', fs,' shapes ', data.shape, ' data ', data)

# typical delay between 10 and 50 ms, LFO between 5 and 14 Hz
# chorus parameters
index = np.arange(len(data))
rate1 = 7
rate2 = 10
rate3 = 12
A = 5  # amplitude
lfo1 = A*np.sin(2*np.pi*index*(rate1/fs))
lfo2 = A*np.sin(2*np.pi*index*(rate2/fs))
lfo3 = A*np.sin(2*np.pi*index*(rate3/fs))
# gain
g = 0.20 

delay = int(0.040*fs) # frames

y = np.zeros(len(data))
y[:delay+5] = data[:delay+5]
for i in range (delay +5, len(data)):
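# The original snippet is cut off at the loop header above.  One common way to
# finish a three-voice chorus (an illustrative sketch, not the original code)
# is to mix the dry sample with three taps whose positions are modulated by
# the LFOs, e.g.:
#     d1 = int(i - delay + lfo1[i])
#     d2 = int(i - delay + lfo2[i])
#     d3 = int(i - delay + lfo3[i])
#     y[i] = data[i] + g*(data[d1] + data[d2] + data[d3])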
Code Example #36
 def test_arange_with_step(self):
     """
     Create an ndarray of sequential numbers (start, stop, and step specified).
     """
     vector = np.arange(0, 10, 2)
     assert_array_equal(vector, np.array([0, 2, 4, 6, 8]))
Code Example #37
 def test_arange_from_to(self):
     """
     Create an ndarray of sequential numbers (start and stop specified).
     """
     vector = np.arange(0, 5)
     assert_array_equal(vector, np.array([0, 1, 2, 3, 4]))
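The three tests above cover the stop-only, start/stop, and start/stop/step forms of np.arange. Two related behaviours are worth keeping in mind; the snippet below is a small illustrative sketch assuming only numpy, not part of the test suite.

import numpy as np

print(np.arange(3, 7))            # [3 4 5 6] -- the stop value is excluded
print(np.arange(0.0, 1.0, 0.1))   # 10 values here; with float steps the length
                                  # can vary by one due to rounding
print(np.linspace(0.0, 1.0, 11))  # linspace includes both end points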
Code Example #38
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import random, os, datetime, itertools, glob
import numpy

numpy.random.seed(8765432)
random.seed(8765432)
rootdir = os.path.abspath(
    os.path.join(os.path.dirname(__file__),
                 "../remote_client/examples/solarsimulator_raw_data"))
voltages = numpy.arange(-0.1, 1.001, 0.01)

for filepath in glob.glob(os.path.join(rootdir, "measurement-*.dat")):
    os.remove(filepath)

shapes_inm = {
    "1": ((18, 18.5), (10, 10)),
    "2": ((31.5, 18.5), (10, 10)),
    "3": ((43, 18.5), (10, 10)),
    "4": ((56.5, 18.5), (10, 10)),
    "5": ((68, 18.5), (10, 10)),
    "6": ((81.5, 18.5), (10, 10)),
    "7": ((18, 32.5), (10, 5)),
    "8": ((33, 32.5), (20, 5)),
    "9": ((60, 32.5), (2, 5)),
    "10": ((64, 32.5), (2, 5)),
Code Example #39
def Simons_Observatory_V3_LA_noise(sensitivity_mode,
                                   f_sky,
                                   ell_max,
                                   delta_ell,
                                   N_LF=1.,
                                   N_MF=4.,
                                   N_UHF=2.,
                                   survey_time=5.):
    ## returns noise curves in both temperature and polarization, including the impact of the beam, for the SO large aperture telescope
    # sensitivity_mode:
    #     1: baseline,
    #     2: goal
    # f_sky:  number from 0-1
    # ell_max: the maximum value of ell used in the computation of N(ell)
    # delta_ell: the step size for computing N_ell
    ####################################################################
    ####################################################################
    ###                        Internal variables
    ## LARGE APERTURE
    # configuration
    # ensure valid parameter choices
    assert (sensitivity_mode == 1 or sensitivity_mode == 2)
    assert (f_sky > 0. and f_sky <= 1.)
    assert (ell_max <= 2e4)
    assert (delta_ell >= 1)
    # ensure total is 7
    if (N_LF + N_MF + N_UHF) != 7:
        print("WARNING! You requested:", N_LF + N_MF + N_UHF,
              " optics tubes while SO LAT design is for 7")
    NTubes_LF = N_LF  #default = 1
    NTubes_MF = N_MF  #default = 4.
    NTubes_UHF = N_UHF  #default = 2.
    # sensitivity in uK*sqrt(s)
    # set noise to irrelevantly high value when NTubes=0
    # note that default noise levels are for 1-4-2 tube configuration
    if (NTubes_LF == 0.):
        S_LA_27 = 1.e9 * np.ones(3)
        S_LA_39 = 1.e9 * np.ones(3)
    else:
        S_LA_27 = np.array([1.e9, 48., 35.]) * np.sqrt(
            1. / NTubes_LF)  ## converting these to per tube sensitivities
        S_LA_39 = np.array([1.e9, 24., 18.]) * np.sqrt(1. / NTubes_LF)
    if (NTubes_MF == 0.):
        S_LA_93 = 1.e9 * np.ones(3)
        S_LA_145 = 1.e9 * np.ones(3)
    else:
        S_LA_93 = np.array([1.e9, 5.4, 3.9]) * np.sqrt(4. / NTubes_MF)
        S_LA_145 = np.array([1.e9, 6.7, 4.2]) * np.sqrt(4. / NTubes_MF)
    if (NTubes_UHF == 0.):
        S_LA_225 = 1.e9 * np.ones(3)
        S_LA_280 = 1.e9 * np.ones(3)
    else:
        S_LA_225 = np.array([1.e9, 15., 10.]) * np.sqrt(2. / NTubes_UHF)
        S_LA_280 = np.array([1.e9, 36., 25.]) * np.sqrt(2. / NTubes_UHF)
    # 1/f polarization noise -- see Sec. 2.2 of SO science goals paper
    f_knee_pol_LA_27 = 700.
    f_knee_pol_LA_39 = 700.
    f_knee_pol_LA_93 = 700.
    f_knee_pol_LA_145 = 700.
    f_knee_pol_LA_225 = 700.
    f_knee_pol_LA_280 = 700.
    alpha_pol = -1.4
    # atmospheric 1/f temperature noise -- see Sec. 2.2 of SO science goals paper
    C_27 = 200.
    C_39 = 77.
    C_93 = 1800.
    C_145 = 12000.
    C_225 = 68000.
    C_280 = 124000.
    alpha_temp = -3.5

    ####################################################################
    ## calculate the survey area and time
    #survey_time = 5. #years
    t = survey_time * 365.25 * 24. * 3600.  ## convert years to seconds
    t = t * 0.2  ## retention after observing efficiency and cuts
    t = t * 0.85  ## a kludge for the noise non-uniformity of the map edges
    A_SR = 4. * np.pi * f_sky  ## sky areas in steradians
    A_deg = A_SR * (180 / np.pi)**2  ## sky area in square degrees
    A_arcmin = A_deg * 3600.
    print("sky area: ", A_deg, "degrees^2")

    ####################################################################
    ## make the ell array for the output noise curves
    ell = np.arange(2, ell_max, delta_ell)

    ####################################################################
    ###   CALCULATE N(ell) for Temperature
    ## calculate the experimental weight
    W_T_27 = S_LA_27[sensitivity_mode] / np.sqrt(t)
    W_T_39 = S_LA_39[sensitivity_mode] / np.sqrt(t)
    W_T_93 = S_LA_93[sensitivity_mode] / np.sqrt(t)
    W_T_145 = S_LA_145[sensitivity_mode] / np.sqrt(t)
    W_T_225 = S_LA_225[sensitivity_mode] / np.sqrt(t)
    W_T_280 = S_LA_280[sensitivity_mode] / np.sqrt(t)

    ## calculate the map noise level (white) for the survey in uK_arcmin for temperature
    MN_T_27 = W_T_27 * np.sqrt(A_arcmin)
    MN_T_39 = W_T_39 * np.sqrt(A_arcmin)
    MN_T_93 = W_T_93 * np.sqrt(A_arcmin)
    MN_T_145 = W_T_145 * np.sqrt(A_arcmin)
    MN_T_225 = W_T_225 * np.sqrt(A_arcmin)
    MN_T_280 = W_T_280 * np.sqrt(A_arcmin)
    Map_white_noise_levels = np.array(
        [MN_T_27, MN_T_39, MN_T_93, MN_T_145, MN_T_225, MN_T_280])
    print("white noise levels (T): ", Map_white_noise_levels, "[uK-arcmin]")

    ## calculate the atmospheric contribution for T
    ## see Sec. 2.2 of SO science goals paper
    ell_pivot = 1000.
    # handle cases where there are zero tubes of some kind
    if (NTubes_LF == 0.):
        AN_T_27 = 0.  #irrelevantly large noise already set above
        AN_T_39 = 0.
    else:
        AN_T_27 = C_27 * (ell / ell_pivot)**alpha_temp * A_SR / t / (2. *
                                                                     NTubes_LF)
        AN_T_39 = C_39 * (ell / ell_pivot)**alpha_temp * A_SR / t / (2. *
                                                                     NTubes_LF)
    if (NTubes_MF == 0.):
        AN_T_93 = 0.
        AN_T_145 = 0.
    else:
        AN_T_93 = C_93 * (ell / ell_pivot)**alpha_temp * A_SR / t / (2. *
                                                                     NTubes_MF)
        AN_T_145 = C_145 * (ell / ell_pivot)**alpha_temp * A_SR / t / (
            2. * NTubes_MF)
    if (NTubes_UHF == 0.):
        AN_T_225 = 0.
        AN_T_280 = 0.
    else:
        AN_T_225 = C_225 * (ell / ell_pivot)**alpha_temp * A_SR / t / (
            2. * NTubes_UHF)
        AN_T_280 = C_280 * (ell / ell_pivot)**alpha_temp * A_SR / t / (
            2. * NTubes_UHF)
    # include cross-frequency correlations in the atmosphere
    # use correlation coefficient of r=0.9 within each dichroic pair and 0 otherwise
    r_atm = 0.9
    AN_T_27x39 = r_atm * np.sqrt(AN_T_27 * AN_T_39)
    AN_T_93x145 = r_atm * np.sqrt(AN_T_93 * AN_T_145)
    AN_T_225x280 = r_atm * np.sqrt(AN_T_225 * AN_T_280)

    ## calculate N(ell)
    N_ell_T_27 = (W_T_27**2. * A_SR) + AN_T_27
    N_ell_T_39 = (W_T_39**2. * A_SR) + AN_T_39
    N_ell_T_93 = (W_T_93**2. * A_SR) + AN_T_93
    N_ell_T_145 = (W_T_145**2. * A_SR) + AN_T_145
    N_ell_T_225 = (W_T_225**2. * A_SR) + AN_T_225
    N_ell_T_280 = (W_T_280**2. * A_SR) + AN_T_280
    # include cross-correlations due to atmospheric noise
    N_ell_T_27x39 = AN_T_27x39
    N_ell_T_93x145 = AN_T_93x145
    N_ell_T_225x280 = AN_T_225x280

    ## include the impact of the beam
    LA_beams = Simons_Observatory_V3_LA_beams() / np.sqrt(
        8. * np.log(2)) / 60. * np.pi / 180.
    ## LAT beams as a sigma expressed in radians
    N_ell_T_27 *= np.exp(ell * (ell + 1) * LA_beams[0]**2.)
    N_ell_T_39 *= np.exp(ell * (ell + 1) * LA_beams[1]**2.)
    N_ell_T_93 *= np.exp(ell * (ell + 1) * LA_beams[2]**2.)
    N_ell_T_145 *= np.exp(ell * (ell + 1) * LA_beams[3]**2.)
    N_ell_T_225 *= np.exp(ell * (ell + 1) * LA_beams[4]**2.)
    N_ell_T_280 *= np.exp(ell * (ell + 1) * LA_beams[5]**2.)
    N_ell_T_27x39 *= np.exp(
        (ell * (ell + 1) / 2.) * (LA_beams[0]**2. + LA_beams[1]**2.))
    N_ell_T_93x145 *= np.exp(
        (ell * (ell + 1) / 2.) * (LA_beams[2]**2. + LA_beams[3]**2.))
    N_ell_T_225x280 *= np.exp(
        (ell * (ell + 1) / 2.) * (LA_beams[4]**2. + LA_beams[5]**2.))

    ## make an array of noise curves for T
    # include cross-correlations due to atmospheric noise
    N_ell_T_LA = np.array([
        N_ell_T_27, N_ell_T_39, N_ell_T_93, N_ell_T_145, N_ell_T_225,
        N_ell_T_280, N_ell_T_27x39, N_ell_T_93x145, N_ell_T_225x280
    ])

    ####################################################################
    ###   CALCULATE N(ell) for Polarization
    ## calculate the atmospheric contribution for P
    AN_P_27 = (ell / f_knee_pol_LA_27)**alpha_pol + 1.
    AN_P_39 = (ell / f_knee_pol_LA_39)**alpha_pol + 1.
    AN_P_93 = (ell / f_knee_pol_LA_93)**alpha_pol + 1.
    AN_P_145 = (ell / f_knee_pol_LA_145)**alpha_pol + 1.
    AN_P_225 = (ell / f_knee_pol_LA_225)**alpha_pol + 1.
    AN_P_280 = (ell / f_knee_pol_LA_280)**alpha_pol + 1.

    ## calculate N(ell)
    N_ell_P_27 = (W_T_27 * np.sqrt(2))**2. * A_SR * AN_P_27
    N_ell_P_39 = (W_T_39 * np.sqrt(2))**2. * A_SR * AN_P_39
    N_ell_P_93 = (W_T_93 * np.sqrt(2))**2. * A_SR * AN_P_93
    N_ell_P_145 = (W_T_145 * np.sqrt(2))**2. * A_SR * AN_P_145
    N_ell_P_225 = (W_T_225 * np.sqrt(2))**2. * A_SR * AN_P_225
    N_ell_P_280 = (W_T_280 * np.sqrt(2))**2. * A_SR * AN_P_280
    # include cross-correlations due to atmospheric noise
    # different approach than for T -- need to subtract off the white noise part to get the purely atmospheric part
    # see Sec. 2.2 of the SO science goals paper
    N_ell_P_27_atm = (W_T_27 * np.sqrt(2))**2. * A_SR * (
        ell / f_knee_pol_LA_27)**alpha_pol
    N_ell_P_39_atm = (W_T_39 * np.sqrt(2))**2. * A_SR * (
        ell / f_knee_pol_LA_39)**alpha_pol
    N_ell_P_93_atm = (W_T_93 * np.sqrt(2))**2. * A_SR * (
        ell / f_knee_pol_LA_93)**alpha_pol
    N_ell_P_145_atm = (W_T_145 * np.sqrt(2))**2. * A_SR * (
        ell / f_knee_pol_LA_145)**alpha_pol
    N_ell_P_225_atm = (W_T_225 * np.sqrt(2))**2. * A_SR * (
        ell / f_knee_pol_LA_225)**alpha_pol
    N_ell_P_280_atm = (W_T_280 * np.sqrt(2))**2. * A_SR * (
        ell / f_knee_pol_LA_280)**alpha_pol
    N_ell_P_27x39 = r_atm * np.sqrt(N_ell_P_27_atm * N_ell_P_39_atm)
    N_ell_P_93x145 = r_atm * np.sqrt(N_ell_P_93_atm * N_ell_P_145_atm)
    N_ell_P_225x280 = r_atm * np.sqrt(N_ell_P_225_atm * N_ell_P_280_atm)

    ## include the impact of the beam
    N_ell_P_27 *= np.exp(ell * (ell + 1) * LA_beams[0]**2)
    N_ell_P_39 *= np.exp(ell * (ell + 1) * LA_beams[1]**2)
    N_ell_P_93 *= np.exp(ell * (ell + 1) * LA_beams[2]**2)
    N_ell_P_145 *= np.exp(ell * (ell + 1) * LA_beams[3]**2)
    N_ell_P_225 *= np.exp(ell * (ell + 1) * LA_beams[4]**2)
    N_ell_P_280 *= np.exp(ell * (ell + 1) * LA_beams[5]**2)
    N_ell_P_27x39 *= np.exp(
        (ell * (ell + 1) / 2.) * (LA_beams[0]**2. + LA_beams[1]**2.))
    N_ell_P_93x145 *= np.exp(
        (ell * (ell + 1) / 2.) * (LA_beams[2]**2. + LA_beams[3]**2.))
    N_ell_P_225x280 *= np.exp(
        (ell * (ell + 1) / 2.) * (LA_beams[4]**2. + LA_beams[5]**2.))

    ## make an array of noise curves for P
    N_ell_P_LA = np.array([
        N_ell_P_27, N_ell_P_39, N_ell_P_93, N_ell_P_145, N_ell_P_225,
        N_ell_P_280, N_ell_P_27x39, N_ell_P_93x145, N_ell_P_225x280
    ])

    ####################################################################
    return (ell, N_ell_T_LA, N_ell_P_LA, Map_white_noise_levels)
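A minimal usage sketch for the function above, with illustrative parameter values; it assumes the companion Simons_Observatory_V3_LA_beams helper from the same module is importable, since the returned curves are beam-deconvolved.

import matplotlib.pyplot as plt

ell, N_ell_T, N_ell_P, white_noise = Simons_Observatory_V3_LA_noise(
    sensitivity_mode=1, f_sky=0.4, ell_max=5000, delta_ell=10)
plt.loglog(ell, N_ell_T[3], label='145 GHz, temperature')   # index 3 = 145 GHz
plt.loglog(ell, N_ell_P[3], label='145 GHz, polarization')
plt.xlabel('multipole ell')
plt.ylabel('N(ell)')
plt.legend()
plt.show()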
Code Example #40
def Rejection_Sampling_MixtureNormals(M,c):
    accepted_values = []
    rejected_values = []

    for i in range(M):

        #randomly sampling from Q(x)
        x = np.random.normal(mu_q, sigma_q)

        # use Q(x) and the sampled x to draw u uniformly on [0, c*Q(x)]
        u = np.random.uniform(0, c * Q(x))

        # accept or reject the sample
        if u <= P_star(x):
            accepted_values.append(x)
        else:
            rejected_values.append(x)

    return(np.array(accepted_values))


x = np.arange(-10,150)
c = max(P_star(x) / Q(x))
X_accepted = Rejection_Sampling_MixtureNormals(M = 100000,c = c)


import matplotlib.pyplot as plt
counts, bins, ignored = plt.hist(X_accepted, x, density = True,color = 'purple', label = 'accepted samples')
plt.title("Rejection Sampling for Mixture of Normal Distribution with Unif(0,cQ(x))")
plt.ylabel("Probability")
plt.show()
Code Example #41
File: myEjemplo3.py  Project: iTzAlver/pruebasML
     return signal

if __name__ == '__main__':

    myModel     = TfReg()
    mySession   = tf.Session(graph = myModel.grafo )

    # Num points must be at least greater than vary*0.1 to get an accurate model.
    numpoints = 30
    noiseVary = 0*numpoints

    true_A = +0.87
    true_B = -0.43
    true_C = +1.18

    x = np.arange(numpoints).reshape((numpoints))
    y_0 = true_A*x*x + true_B*x + true_C
    y = addNoise(signal = y_0, numpoints = numpoints, vary = noiseVary)
    ############################################
    plt.scatter(    x,
                    y,
                    label = 'Data')
    plt.xlabel('Points')
    plt.title('Data')
    plt.ylabel('Data')
    plt.show()
    #############################################
    costs = train_reg(  theSession = mySession,
                        theModel   = myModel,
                        x_train    = x,
                        y_train    = y,
Code Example #42
def Simons_Observatory_V3_SA_noise(sensitivity_mode, one_over_f_mode,
                                   SAT_yrs_LF, f_sky, ell_max, delta_ell):
    ## returns noise curves in polarization only, including the impact of the beam, for the SO small aperture telescopes
    ## noise curves are polarization only
    # sensitivity_mode
    #     1: baseline,
    #     2: goal
    # one_over_f_mode
    #     0: pessimistic
    #     1: optimistic
    # SAT_yrs_LF: 0,1,2,3,4,5:  number of years where an LF is deployed on SAT
    # f_sky:  number from 0-1
    # ell_max: the maximum value of ell used in the computation of N(ell)
    # delta_ell: the step size for computing N_ell
    ####################################################################
    ####################################################################
    ###                        Internal variables
    ## SMALL APERTURE
    # ensure valid parameter choices
    assert (sensitivity_mode == 1 or sensitivity_mode == 2)
    assert (one_over_f_mode == 0 or one_over_f_mode == 1)
    assert (SAT_yrs_LF <= 5)  #N.B. SAT_yrs_LF can be negative
    assert (f_sky > 0. and f_sky <= 1.)
    assert (ell_max <= 2e4)
    assert (delta_ell >= 1)
    # configuration
    if (SAT_yrs_LF > 0):
        NTubes_LF = SAT_yrs_LF / 5. + 1e-6  ## regularized in case zero years is called
        NTubes_MF = 2 - SAT_yrs_LF / 5.
    else:
        NTubes_LF = np.fabs(
            SAT_yrs_LF) / 5. + 1e-6  ## regularized in case zero years is called
        NTubes_MF = 2
    NTubes_UHF = 1.

    NTubes_LF = 1.
    NTubes_MF = 1.

    # sensitivity
    # N.B. divide-by-zero will occur if NTubes = 0
    # handle with assert() since it's highly unlikely we want any configurations without >= 1 of each tube type
    assert (NTubes_LF > 0.)
    assert (NTubes_MF > 0.)
    assert (NTubes_UHF > 0.)
    S_SA_27 = np.array([1.e9, 21, 15]) * np.sqrt(1. / NTubes_LF)
    S_SA_39 = np.array([1.e9, 13, 10]) * np.sqrt(1. / NTubes_LF)
    S_SA_93 = np.array([1.e9, 3.4, 2.4]) * np.sqrt(2. / (NTubes_MF))
    S_SA_145 = np.array([1.e9, 4.3, 2.7]) * np.sqrt(2. / (NTubes_MF))
    S_SA_225 = np.array([1.e9, 8.6, 5.7]) * np.sqrt(1. / NTubes_UHF)
    S_SA_280 = np.array([1.e9, 22, 14]) * np.sqrt(1. / NTubes_UHF)
    # 1/f polarization noise
    # see Sec. 2.2 of the SO science goals paper
    f_knee_pol_SA_27 = np.array([30., 15.])
    f_knee_pol_SA_39 = np.array([30., 15.])  ## from QUIET
    f_knee_pol_SA_93 = np.array([50., 25.])
    f_knee_pol_SA_145 = np.array(
        [50., 25.])  ## from ABS, improvement possible by scanning faster
    f_knee_pol_SA_225 = np.array([70., 35.])
    f_knee_pol_SA_280 = np.array([100., 40.])
    alpha_pol = np.array([-2.4, -2.4, -2.5, -3, -3, -3])

    ####################################################################
    ## calculate the survey area and time
    t = 1 * 365. * 24. * 3600  ## one year in seconds (original five-year value commented out below)
    #t = 5 * 365. * 24. * 3600    ## five years in seconds
    t = t * 0.2  ## retention after observing efficiency and cuts
    t = t * 0.85  ## a kludge for the noise non-uniformity of the map edges
    A_SR = 4 * np.pi * f_sky  ## sky area in steradians
    A_deg = A_SR * (180 / np.pi)**2  ## sky area in square degrees
    A_arcmin = A_deg * 3600.
    print("sky area: ", A_deg, "degrees^2")

    ####################################################################
    ## make the ell array for the output noise curves
    ell = np.arange(2, ell_max, delta_ell)

    ####################################################################
    ###   CALCULATE N(ell) for Temperature
    ## calculate the experimental weight
    W_T_27 = S_SA_27[sensitivity_mode] / np.sqrt(t)
    W_T_39 = S_SA_39[sensitivity_mode] / np.sqrt(t)
    W_T_93 = S_SA_93[sensitivity_mode] / np.sqrt(t)
    W_T_145 = S_SA_145[sensitivity_mode] / np.sqrt(t)
    W_T_225 = S_SA_225[sensitivity_mode] / np.sqrt(t)
    W_T_280 = S_SA_280[sensitivity_mode] / np.sqrt(t)

    ## calculate the map noise level (white) for the survey in uK_arcmin for temperature
    MN_T_27 = W_T_27 * np.sqrt(A_arcmin)
    MN_T_39 = W_T_39 * np.sqrt(A_arcmin)
    MN_T_93 = W_T_93 * np.sqrt(A_arcmin)
    MN_T_145 = W_T_145 * np.sqrt(A_arcmin)
    MN_T_225 = W_T_225 * np.sqrt(A_arcmin)
    MN_T_280 = W_T_280 * np.sqrt(A_arcmin)
    Map_white_noise_levels = np.array(
        [MN_T_27, MN_T_39, MN_T_93, MN_T_145, MN_T_225, MN_T_280])
    print("white noise levels (T): ", Map_white_noise_levels, "[uK-arcmin]")

    ####################################################################
    ###   CALCULATE N(ell) for Polarization
    ## calculate the atmospheric contribution for P
    ## see Sec. 2.2 of the SO science goals paper
    AN_P_27 = (ell / f_knee_pol_SA_27[one_over_f_mode])**alpha_pol[0] + 1.
    AN_P_39 = (ell / f_knee_pol_SA_39[one_over_f_mode])**alpha_pol[1] + 1.
    AN_P_93 = (ell / f_knee_pol_SA_93[one_over_f_mode])**alpha_pol[2] + 1.
    AN_P_145 = (ell / f_knee_pol_SA_145[one_over_f_mode])**alpha_pol[3] + 1.
    AN_P_225 = (ell / f_knee_pol_SA_225[one_over_f_mode])**alpha_pol[4] + 1.
    AN_P_280 = (ell / f_knee_pol_SA_280[one_over_f_mode])**alpha_pol[5] + 1.

    ## calculate N(ell)
    N_ell_P_27 = (W_T_27 * np.sqrt(2))**2. * A_SR * AN_P_27
    N_ell_P_39 = (W_T_39 * np.sqrt(2))**2. * A_SR * AN_P_39
    N_ell_P_93 = (W_T_93 * np.sqrt(2))**2. * A_SR * AN_P_93
    N_ell_P_145 = (W_T_145 * np.sqrt(2))**2. * A_SR * AN_P_145
    N_ell_P_225 = (W_T_225 * np.sqrt(2))**2. * A_SR * AN_P_225
    N_ell_P_280 = (W_T_280 * np.sqrt(2))**2. * A_SR * AN_P_280

    ## include the impact of the beam
    SA_beams = Simons_Observatory_V3_SA_beams() / np.sqrt(
        8. * np.log(2)) / 60. * np.pi / 180.
    ## SAT beams as a sigma expressed in radians
    N_ell_P_27 *= np.exp(ell * (ell + 1) * SA_beams[0]**2.)
    N_ell_P_39 *= np.exp(ell * (ell + 1) * SA_beams[1]**2.)
    N_ell_P_93 *= np.exp(ell * (ell + 1) * SA_beams[2]**2.)
    N_ell_P_145 *= np.exp(ell * (ell + 1) * SA_beams[3]**2.)
    N_ell_P_225 *= np.exp(ell * (ell + 1) * SA_beams[4]**2.)
    N_ell_P_280 *= np.exp(ell * (ell + 1) * SA_beams[5]**2.)

    ## make an array of noise curves for P
    N_ell_P_SA = np.array([
        N_ell_P_27, N_ell_P_39, N_ell_P_93, N_ell_P_145, N_ell_P_225,
        N_ell_P_280
    ])

    ####################################################################
    return (ell, N_ell_P_SA, Map_white_noise_levels)
コード例 #43
0
ファイル: conftest.py プロジェクト: Ultronixon/pyGeoPressure
def depth():
    return np.arange(10)
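A minimal usage sketch (not from the original project), assuming the function above is registered as a pytest fixture in conftest.py so that tests can request it by argument name:

import numpy as np
import pytest


@pytest.fixture
def depth():
    return np.arange(10)


def test_depth_is_monotonic(depth):
    # pytest injects the fixture's return value, here np.arange(10)
    assert depth.shape == (10,)
    assert np.all(np.diff(depth) > 0)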
コード例 #44
0
ファイル: miniapp.py プロジェクト: zhangdashuai-shuai/imagepy
                  para,
                  on_handle=None,
                  on_ok=None,
                  on_cancel=None,
                  preview=False,
                  modal=True):
        dialog = ParaDialog(self, title)
        dialog.init_view(view, para, preview, modal=modal, app=self)
        dialog.Bind('cancel', on_cancel)
        dialog.Bind('parameter', on_handle)
        dialog.Bind('commit', on_ok)
        return dialog.show()


if __name__ == '__main__':
    import numpy as np
    import pandas as pd

    app = wx.App(False)
    frame = MiniApp(None)
    frame.Show()
    frame.show_img([np.zeros((512, 512), dtype=np.uint8)], 'zeros')
    #frame.show_img(None)
    frame.show_table(pd.DataFrame(np.arange(100).reshape((10, 10))), 'title')
    '''
    frame.show_md('abcdefg', 'md')
    frame.show_md('ddddddd', 'md')
    frame.show_txt('abcdefg', 'txt')
    frame.show_txt('ddddddd', 'txt')
    '''
    app.MainLoop()
コード例 #45
0
ファイル: utils.py プロジェクト: XinJiang1994/3DCNN_MRI
def visualize(sess, dcgan, config, option):
    if dcgan.dataset_name == 'mnist':
        data_dir = os.path.join("./data", dcgan.dataset_name)
        fd = open(os.path.join(data_dir, 't10k-images.idx3-ubyte'))
        loaded = np.fromfile(file=fd, dtype=np.uint8)
        teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float)

        fd = open(os.path.join(data_dir, 't10k-labels.idx1-ubyte'))
        loaded = np.fromfile(file=fd, dtype=np.uint8)
        teY = loaded[8:].reshape((10000)).astype(np.float)
        teY = np.asarray(teY)

        X = teX
        y = teY.astype(np.int)

        seed = 547
        np.random.seed(seed)
        np.random.shuffle(X)
        np.random.seed(seed)
        np.random.shuffle(y)

        y_vec = np.zeros((len(y), dcgan.y_dim), dtype=np.float)
        for i, label in enumerate(y):
            y_vec[i, y[i]] = 1.0
        X = X / 255.
        y = y_vec

    image_frame_dim = int(math.ceil(config.batch_size**.5))
    if option == 0:

        import numpy
        perm0 = numpy.arange(10000)
        numpy.random.shuffle(perm0)
        X = X[perm0]
        z_sample = X[:config.batch_size]
        samples, AE_loss = sess.run([dcgan.sampler, dcgan.AE_loss],
                                    feed_dict={dcgan.inputs_e: z_sample})
        print("[Test] AE_loss: %.8f" % (AE_loss))
        save_images(
            samples, [image_frame_dim, image_frame_dim],
            './samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
    elif option == 1:
        values = np.arange(0, 1, 1. / config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            if config.dataset == "mnist":
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1

                samples = sess.run(dcgan.sampler,
                                   feed_dict={
                                       dcgan.inputs_e: z_sample,
                                       dcgan.y: y_one_hot
                                   })
            else:
                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.inputs_e: z_sample})

            save_images(samples, [image_frame_dim, image_frame_dim],
                        './samples/test_arange_%s.png' % (idx))
    elif option == 2:
        values = np.arange(0, 1, 1. / config.batch_size)
        for idx in [random.randint(0, 99) for _ in xrange(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (config.batch_size, 1))
            #z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            if config.dataset == "mnist":
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1

                samples = sess.run(dcgan.sampler,
                                   feed_dict={
                                       dcgan.z: z_sample,
                                       dcgan.y: y_one_hot
                                   })
            else:
                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.z: z_sample})

            try:
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
            except:
                save_images(
                    samples, [image_frame_dim, image_frame_dim],
                    './samples/test_%s.png' %
                    strftime("%Y%m%d%H%M%S", gmtime()))
    elif option == 3:
        values = np.arange(0, 1, 1. / config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
    elif option == 4:
        image_set = []
        values = np.arange(0, 1, 1. / config.batch_size)

        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            image_set.append(
                sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

        new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \
            for idx in range(64) + range(63, -1, -1)]
        make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
コード例 #46
0
ファイル: format.py プロジェクト: kalpitdixit/tensorpack
 def get_data(self):
     idxs = np.arange(self.size())
     if self.shuffle:
         self.rng.shuffle(idxs)
     for id in idxs:
         yield [self.X[id, :], self.y[id]]
コード例 #47
0
# Analyze the results
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
for value in range(2, max_result + 1):
    frequency = results.count(value)
    frequencies.append(frequency)

# Visualize the results with pygal
# hist = pygal.Bar()
# hist.title = "Results of rolling two D6 1000 times."
# hist.x_labels = [i for i in range(2, 13)]
# hist.x_title = "Result"
# hist.y_title = "Frequency of Result"

# hist.add('D6 + D6', frequencies)
# hist.render_to_file('die_visual.svg')

# Visualize the results with matplotlib
ind = np.arange(2, 13)

plt.bar(ind, frequencies, width=0.5)
plt.title("Results of rolling two D6 1000 times.")

plt.xlabel("Result")
plt.ylabel("Frequency of Result")

plt.xticks(ind)
plt.yticks(np.arange(0, 200, 20))

plt.savefig('die_visual2.svg', bbox_inches='tight')
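The die_1, die_2 and results names used above are assumed to come from earlier code that rolls two six-sided dice; a hedged sketch of that setup (the Die class and the 1000-roll count are assumptions taken from the plot title):

import random

# imports that the analysis and plotting code above relies on
import numpy as np
import matplotlib.pyplot as plt


class Die:
    """A minimal die with a configurable number of sides (assumed implementation)."""

    def __init__(self, num_sides=6):
        self.num_sides = num_sides

    def roll(self):
        return random.randint(1, self.num_sides)


die_1, die_2 = Die(), Die()
results = [die_1.roll() + die_2.roll() for _ in range(1000)]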
コード例 #48
0
ファイル: plot_img_hack.py プロジェクト: nkern/capo
    if not opts.max is None: max = opts.max
    else: max = d.max()
    if not opts.drng is None: min = max - opts.drng
    else: min = d.min()

    #p.subplot(m2, m1, cnt+1)
    if not opts.nogrid:
        from mpl_toolkits.basemap import Basemap
        xpx,ypx = d.shape
        dx1 = -(xpx/2 + .5) * kwds['d_ra'] * a.img.deg2rad
        dx2 = (xpx/2 - .5) * kwds['d_ra'] * a.img.deg2rad
        dy1 = -(ypx/2 + .5) * kwds['d_dec'] * a.img.deg2rad
        dy2 = (ypx/2 - .5) * kwds['d_dec'] * a.img.deg2rad
        map = Basemap(projection='ortho', lon_0=180, lat_0=kwds['dec'],
            rsphere=1, llcrnrx=dx1, llcrnry=dy1, urcrnrx=dx2,urcrnry=dy2)
        map.drawmeridians(n.arange(kwds['ra']-180,kwds['ra']+180,30))
        map.drawparallels(n.arange(-90,120,30))
        map.drawmapboundary()
        map.imshow(d, vmin=min, vmax=max, cmap=cmaps[cnt%len(cmaps)], interpolation='nearest', alpha=1-.25*cnt)
    else: p.imshow(d, vmin=min, vmax=max, origin='lower', cmap=cmap, interpolation='nearest')
    p.colorbar(shrink=.5, fraction=.05)
    p.title(filename)

    if opts.batch:
        print('Saving to', outfile)
        p.savefig(outfile)
        p.clf()
        

# Add right-click functionality for finding locations/strengths in map.
cnt = 1
コード例 #49
0
ファイル: PA_2 code.py プロジェクト: oneilsh/machinelearning

model = OnlinePerceptron(iters = 15)      
final_weights, train_accuracies, val_accuracies, weight_history = model.train(X_train, Y_train, X_val, Y_val)


model_final = OnlinePerceptron(iters = 6)
trained_weights, trained_accuracies, val_acc, weight_hist = model_final.train(X_train, Y_train, X_val, Y_val)
oplabel = model_final.predict(X_test, trained_weights)
pd.DataFrame(oplabel).to_csv(path_or_buf='oplabel.csv', index= False, header = False)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Training Accuracy vs Validation Accuracy')

ax.plot(np.arange(0,train_accuracies.shape[0]), train_accuracies, label='Train')
ax.plot(np.arange(0,val_accuracies.shape[0]), val_accuracies, label='Dev')
ax.set_xlabel('Iterations')
ax.set_ylabel('Accuracy')
ax.set_ylim([90,100])
ax.legend()
plt.show()
fig.savefig('onlinePerceptronTrainingVal.png')

# Get weights for model's best performance on validation set
idx = np.argmax(val_accuracies)
print("best weights at iteration {}".format(idx))
best_online_weights = weight_history[idx]


コード例 #50
0
ファイル: bacteria.py プロジェクト: marknormanread/GutSim
    """
    steepness = 1.0 / 20.0   # dictates the gradient. Small values give shallower gradients.
    shift = 90			# shifts the curve relative to the x-axis. Positive values move curve right.
    return 1.0 / (1.0 + np.exp( -steepness * (limit - shift)))


death_rate_model = _calculate_death_rate_exp

# Lookup table for relating death rate to limiting resource level. 
# The equation is computationally expensive, hence the use of a lookup table.
limiting_resource_resolution = 0.01  # Resolution of limiting resource for which death rates are calculated.
max_limiting_resource_lookup_value = 500  # Maximum value for which lookup table values should be calculated.
death_rate_lookup_table = []  # The lookup table itself.
divide_rate_lookup_table = []
# Populate lookup tables.
for limit in np.arange(0.0, max_limiting_resource_lookup_value, limiting_resource_resolution):
    death_rate_lookup_table.append(death_rate_model(limit))
    divide_rate_lookup_table.append(_calculate_divide_rate_logistic(limit))


def death_rate_lookup(limit):
    """
    Uses look up table to estimate death rate based on limiting resource.
    """
    if limit > max_limiting_resource_lookup_value:
        print('WARNING: limiting resource value exceeds range of pre-calculated values : ' + str(limit))
        return death_rate_model(limit)
    scaled_limit = int(limit / limiting_resource_resolution)
    return death_rate_lookup_table[scaled_limit]
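A small illustrative check (not part of the original module) comparing the lookup against the direct model at a few resource levels; values on the 0.01 grid should match exactly:

if __name__ == '__main__':
    for sample_limit in (0.0, 45.0, 90.0, 250.0):
        print('limit = {:6.1f}  lookup = {:.6f}  direct = {:.6f}'.format(
            sample_limit, death_rate_lookup(sample_limit),
            death_rate_model(sample_limit)))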

コード例 #51
0
def data():
    '''
    Data providing function:
    - reads all the files contained inside "./np_data/"; those are numpy arrays containing the images previously read using utils/data.py
    - divides them by label, according to the input_labels list
    - divides the whole dataset into training and validation sets, with an 80/20 ratio
    '''

    K.set_image_dim_ordering("tf")
    print("=" * 30)
    print('Loading and preprocessing train data...')
    print('=' * 30)
    data_path = './np_data/'

    trainX = {}

    for label in input_labels:
        trainX[label] = np.load(data_path + label + "_ordered.npy")

    trainY = np.load(data_path + "diagnosis_ordered.npy")
    channels = trainX["images"].shape[3]
    img_cols = trainX["images"].shape[2]
    img_rows = trainX["images"].shape[1]
    nb_train = trainX["images"].shape[0]

    trainX["images"] = trainX["images"].astype('float32')

    print "Dataset has ", nb_train, " training images"

    shuffled_index = np.arange(nb_train)

    np.random.shuffle(shuffled_index)

    mean = np.mean(trainX["images"])  # mean for data centering
    std = np.std(trainX["images"])  # std for data normalization
    trainX["images"] -= mean
    trainX["images"] /= std

    trainnum = trainX["images"].shape[0]
    trainlen = int(trainnum * 0.8)
    valX = {}
    for label in input_labels:
        trainX[label] = trainX[label][shuffled_index]
        valX[label] = trainX[label][trainlen:]
        trainX[label] = trainX[label][0:trainlen]

    trainY = trainY[shuffled_index]

    print("trainlen: ", trainlen)

    print("train Images: ", trainX["images"].shape)
    print("val images: ", valX["images"].shape)
    print("train globules: ", trainX["globules"].shape)

    valY = trainY[trainlen:]
    trainY = trainY[:trainlen]

    print("valY shape : ", valY.shape)
    print("trainY shape: ", trainY.shape)

    return trainX, trainY, valX, valY
コード例 #52
0
ファイル: plotter.py プロジェクト: biostars/biocode
def heatmap(data, colidx=3, labidx=0, fname='heatmap.png'):
    # Based on: https://stackoverflow.com/questions/14391959/heatmap-in-matplotlib-with-pcolor

    plt.rcParams.update({'figure.autolayout': True})

    if data.empty:
        print("Can not plot empty data set.")
        return

    df = pd.DataFrame()

    names = list(data.columns)
    label = names[labidx]
    names = [label] + names[colidx:]

    # A simpler dataframe with only labels and values
    for name in names:
        df[name] = data[name]

    df = df.set_index(label)

    # df = (df - df.mean()) / (df.max() - df.min())

    # Transform the scale to log.
    df = np.log(df + 1)

    # The size of the data frame.
    rnum, cnum = df.shape

    # The size of the plot will grow with the row numbers.
    hsize = 4 + cnum / 3
    vsize = 4 + rnum / 10
    fig, ax = plt.subplots(figsize=(hsize, vsize))

    heatmap = ax.pcolor(df, cmap=plt.cm.Blues, alpha=0.8)

    # put the major ticks at the middle of each cell
    ax.set_yticks(np.arange(rnum) + 0.5, minor=False)
    ax.set_xticks(np.arange(cnum) + 0.5, minor=False)

    # ax.invert_yaxis()
    ax.xaxis.tick_top()

    # Get the vertical labels.
    labels = list(df.columns)

    # Simplify label names.
    #labels = [label.split("_")[0] for label in labels]
    ax.set_xticklabels(labels, minor=False)
    ax.set_yticklabels(df.index, minor=False)

    ax.grid(False)

    # Turn off all the ticks
    ax = plt.gca()

    for t in ax.xaxis.get_major_ticks():
        t.tick1line.set_visible(False)
        t.tick2line.set_visible(False)
    for t in ax.yaxis.get_major_ticks():
        t.tick1line.set_visible(False)
        t.tick2line.set_visible(False)

    plt.xticks(rotation=90)
    plt.savefig(f'{fname}.pdf')

    if SHOW_PLOT:
        # Pop a window in non-offline mode.
        plt.show()
コード例 #53
0
# In[5]:


# import required modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')


# In[6]:


class_names = [0, 1]  # names of the classes
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)

# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="Greens" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
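The cnf_matrix plotted above is assumed to be computed in an earlier cell; a minimal sketch of how it might be produced with scikit-learn (y_test and y_pred are placeholder names for the held-out labels and the classifier's predictions):

from sklearn import metrics

# placeholder names: y_test holds the true labels, y_pred the model's predictions
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)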


# In[7]:

コード例 #54
0
    def build(self, atoms):
        """Build the list. Modified so that the vectors are also stored."""
        self.atoms = atoms
        self.sndict = symbol_number(atoms)

        self.positions = atoms.get_positions()
        self.pbc = atoms.get_pbc()
        self.cell = atoms.get_cell()
        if len(self.cutoffs) > 0:
            rcmax = self.cutoffs.max()
        else:
            rcmax = 0.0

        icell = np.linalg.inv(self.cell)
        scaled = np.dot(self.positions, icell)
        scaled0 = scaled.copy()

        N = []
        for i in range(3):
            if self.pbc[i]:
                scaled0[:, i] %= 1.0
                v = icell[:, i]
                h = 1 / sqrt(np.dot(v, v))
                n = int(2 * rcmax / h) + 1
            else:
                n = 0
            N.append(n)

        offsets = (scaled0 - scaled).round().astype(int)
        positions0 = np.dot(scaled0, self.cell)
        natoms = len(atoms)
        indices = np.arange(natoms)

        self.nneighbors = 0
        self.npbcneighbors = 0
        self.neighbors = [np.empty(0, int) for a in range(natoms)]
        self.displacements = [np.empty((0, 3), int) for a in range(natoms)]
        self.disp_vectors = [np.empty((0, 3), float) for a in range(natoms)]
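        # Only half of the periodic image displacements are enumerated (n1 >= 0, with
        # the redundant n1 == 0 mirror images skipped below), so each neighbour pair is
        # stored once here and symmetrised later in the bothways branch.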
        for n1 in range(0, N[0] + 1):
            for n2 in range(-N[1], N[1] + 1):
                for n3 in range(-N[2], N[2] + 1):
                    if n1 == 0 and (n2 < 0 or n2 == 0 and n3 < 0):
                        continue
                    displacement = np.dot((n1, n2, n3), self.cell)
                    for a in range(natoms):
                        d = positions0 + displacement - positions0[a]
                        i = indices[(d**2).sum(1) <
                                    (self.cutoffs + self.cutoffs[a])**2]
                        if n1 == 0 and n2 == 0 and n3 == 0:
                            if self.self_interaction:
                                i = i[i >= a]
                            else:
                                i = i[i > a]
                        self.nneighbors += len(i)
                        self.neighbors[a] = np.concatenate((self.neighbors[a],
                                                            i))
                        self.disp_vectors[a] = np.concatenate(
                            (self.disp_vectors[a], d[i]))
                        disp = np.empty((len(i), 3), int)
                        disp[:] = (n1, n2, n3)
                        disp += offsets[i] - offsets[a]
                        self.npbcneighbors += disp.any(1).sum()
                        self.displacements[a] = np.concatenate(
                            (self.displacements[a], disp))

        if self.bothways:
            neighbors2 = [[] for a in range(natoms)]
            displacements2 = [[] for a in range(natoms)]
            disp_vectors2 = [[] for a in range(natoms)]
            for a in range(natoms):
                for b, disp, disp_vec in zip(self.neighbors[a],
                                             self.displacements[a],
                                             self.disp_vectors[a]):
                    neighbors2[b].append(a)
                    displacements2[b].append(-disp)
                    disp_vectors2[b].append(-disp_vec)

            for a in range(natoms):
                self.neighbors[a] = np.concatenate(
                    (self.neighbors[a], np.array(neighbors2[a], dtype=int))
                )  ## dtype must be int here, because np.array([]) defaults to float64

                self.displacements[a] = np.array(
                    list(self.displacements[a]) + displacements2[a])
                self.disp_vectors[a] = np.array(
                    list(self.disp_vectors[a]) + disp_vectors2[a])

        if self.sorted:
            for a, i in enumerate(self.neighbors):
                mask = (i < a)
                if mask.any():
                    j = i[mask]
                    offsets = self.displacements[a][mask]
                    for b, offset in zip(j, offsets):
                        self.neighbors[b] = np.concatenate((self.neighbors[b],
                                                            [a]))
                        self.displacements[b] = np.concatenate(
                            (self.displacements[b], [-offset]))
                        self.disp_vectors[b] = np.concatenate(
                            (self.disp_vectors[b], [-offset]))
                    mask = np.logical_not(mask)
                    self.neighbors[a] = self.neighbors[a][mask]
                    self.displacements[a] = self.displacements[a][mask]
                    self.disp_vectors[a] = self.disp_vectors[a][mask]
        self.nupdates += 1
コード例 #55
0
#Loads and appends all folds all at once
trainfolds = []    # Train set
testfolds = []    # Test set (LEARNED)
testfolds_U = []    # Test set (UNLEARNED)

col_select = np.array([])

#This is a hack to test smaller windows
for i in range (spw*nmuscles,200):
    col_select = np.append(col_select,i)
    
for i in range (0,spw*nmuscles,nmuscles):
    for muscle in features_select:
        col_select = np.append(col_select,muscle -1 + i)
    cols=np.arange(0,spw*nmuscles+1)

if exclude_features & (not include_only_features): #delete gonio
    for j in range(fold_offset,fold_offset + nfold):
        print("Loading fold " + str(j))
        traindata = pd.read_table(os.path.join(cwd, prefix_train + str(j)+'.csv'),sep=',',header=None,dtype=np.float32,usecols=[i for i in cols if i not in col_select.astype(int)])
        trainfolds.append(traindata)
        testdata = pd.read_table(os.path.join(cwd, prefix_test + str(j)+'.csv'),sep=',',header=None,dtype=np.float32, usecols=[i for i in cols if i not in col_select.astype(int)])
        testfolds.append(testdata) 
elif include_only_features & (not exclude_features): #only gonio
    for j in range(fold_offset, fold_offset + nfold):
        print("Loading fold " + str(j))
        traindata = pd.read_table(os.path.join(cwd, prefix_train + str(j)+'.csv'),sep=',',header=None,dtype=np.float32,usecols=[i for i in cols if i in col_select.astype(int)])
        testdata = pd.read_table(os.path.join(cwd, prefix_test + str(j)+'.csv'),sep=',',header=None,dtype=np.float32, usecols=[i for i in cols if i in col_select.astype(int)])
        trainfolds.append(traindata)
        testfolds.append(testdata) 
コード例 #56
0
#building the network to understand the generator network in VAE
network_architecture = \
    dict(n_hidden_recog_1=300, # 1st layer encoder neurons

         n_hidden_gener_1=300, # 1st layer decoder neurons
        # n_hidden_gener_2=500, # 2nd layer decoder neurons
         n_input=784, # MNIST data input (img shape: 28*28)
         n_z=15)  # dimensionality of latent space

vae, new_cost = train(network_architecture, training_epochs=10)
x_sample = mnist.test.next_batch(100)[0]
x_reconstruct = vae.reconstruct(x_sample)

training_epochs=10
#plotting the training cost curve
x = np.arange(0,training_epochs,1)
plt.title("Cost Graph")
plt.plot(x, new_cost)
plt.show()



#plotting the images before and after reconstruction
plt.figure(figsize=(8, 12))
for i in range(5):
    plt.subplot(5, 2, 2*i + 1)
    plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Test input")
    plt.colorbar()
    plt.subplot(5, 2, 2*i + 2)
    plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
コード例 #57
0
def legacy_resonator(config_file, channel=None, noise=True):
    """
    Function for loading in legacy matlab resonator data.
    Args:
        config_file: string
            The resonator configuration file name.
        channel: integer
            The resonator channel for the data.
        noise: boolean
            If False, ignore the noise data. The default is True.
    Returns:
        loop_kwargs: list of dictionaries
            A list of keyword arguments to send to Loop.from_file().
    """
    directory = os.path.dirname(config_file)
    config = loadmat(config_file, squeeze_me=True)['curr_config']
    temperatures = np.arange(
        config['starttemp'].astype(float), config['stoptemp'].astype(float) +
        config['steptemp'].astype(float) / 2, config['steptemp'].astype(float))
    attenuations = np.arange(
        config['startatten'].astype(float), config['stopatten'].astype(float) +
        config['stepatten'].astype(float) / 2,
        config['stepatten'].astype(float))
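    # adding half a step to the stop value makes np.arange include the configured end
    # point despite floating-point rounding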

    loop_kwargs = []
    for t_index, temp in enumerate(temperatures):
        for a_index, atten in enumerate(attenuations):
            loop_kwargs.append({
                "loop_file_name": config_file,
                "index": (t_index, a_index),
                "data": legacy_loop,
                "channel": channel
            })
            if config['donoise'] and noise:
                group = channel // 2 + 1
                # on resonance file names
                on_res = glob.glob(
                    os.path.join(
                        directory,
                        "{:g}-{:d}a*-{:g}.ns".format(temp, group, atten)))
                noise_kwargs = []
                for file_name in on_res:
                    # collect the index for the file name
                    base_name = os.path.basename(file_name)
                    index2 = base_name.split("a")[1].split("-")[0]
                    index = (t_index, a_index,
                             int(index2)) if index2 else (t_index, a_index)
                    noise_kwargs.append({
                        "index": index,
                        "on_res": True,
                        "data": legacy_noise,
                        "channel": channel
                    })
                # off resonance file names
                off_res_names = glob.glob(
                    os.path.join(
                        directory,
                        "{:g}-{:d}b*-{:g}.ns".format(temp, group, atten)))
                for file_name in off_res_names:
                    # collect the index for the file name
                    base_name = os.path.basename(file_name)
                    index2 = base_name.split("b")[1].split("-")[0]
                    index = (t_index, a_index,
                             int(index2)) if index2 else (t_index, a_index)
                    noise_kwargs.append({
                        "index": index,
                        "on_res": False,
                        "data": legacy_noise,
                        "channel": channel
                    })
                loop_kwargs[-1].update({
                    "noise_file_names":
                    [config_file] * len(on_res + off_res_names),
                    "noise_kwargs":
                    noise_kwargs
                })
                if not noise_kwargs:
                    log.warning("Could not find noise files for '{}'".format(
                        config_file))
    return loop_kwargs
コード例 #58
0
'''
matplotlib.pyplot plotting tool
'''
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # set the default font (needed for CJK labels)
plt.rcParams['axes.unicode_minus'] = False # keep the minus sign '-' from rendering as a box in saved figures

a = np.arange(1, 10, 0.2)
b = np.sin(a)

plt.figure(figsize=(10, 5)) # create a figure
plt.plot(a, b, 'ro') # plot the data as red dots

plt.figure()
plt.plot(a, b, color='r', linestyle='--', linewidth=3.0, label='sin')
c = np.cos(a)
plt.plot(a, c, 'g-.', label='cos')
plt.legend() # show the legend (labels for the different lines)
plt.xlabel('弧度') # x-axis label ('弧度' = radians)
plt.ylabel('正/余弦值') # y-axis label ('正/余弦值' = sine/cosine value)

plt.figure()
plt.scatter(a,b) 

plt.show() # display the figures
コード例 #59
0
# Total number of subregions for each window size
array_split_size_sub = [1, 4, 16, 64, 256]

for n in length_variable:

    array_split_size = length_variable[n] * 25.6  # value for blocks
    # pick this iteration's subregion count without overwriting the list itself
    n_subregions = array_split_size_sub[n]
    array_split_size_sub_tot = array_split_size // n_subregions
    arr_reshape = blockshaped(arr, array_split_size, array_split_size)

    #Callable Sizing Value
    array_size = np.size(arr_reshape[0])
    #Plane Fitting Process

    #Creating x,y coordinates for blocks of data
    xvalues = np.arange(0, data_length)
    yvalues = np.arange(0, data_length)

    xx, yy = np.meshgrid(xvalues, yvalues)

    xx = blockshaped(xx, array_split_size, array_split_size)
    yy = blockshaped(yy, array_split_size, array_split_size)

    #Importing Plane Fit Script here.
    from plane_fit import plane_fit

    #Making z of zeros for plane fit script
    first_plane_sec = np.zeros((n_subregions, 1, 3))

    for i in range(0, n_subregions):
        first_plane_sec[i] = plane_fit(np.ndarray.flatten(xx[i]),
コード例 #60
0
def main():
    args = get_arguments()
    started_datestring = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
    logdir = os.path.join(args.logdir, 'generate', started_datestring)
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    with open(args.wavenet_params, 'r') as config_file:
        wavenet_params = json.load(config_file)

    sess = tf.Session()

    net = WaveNetModel(
        batch_size=1,
        dilations=wavenet_params['dilations'],
        filter_width=wavenet_params['filter_width'],
        residual_channels=wavenet_params['residual_channels'],
        dilation_channels=wavenet_params['dilation_channels'],
        quantization_channels=wavenet_params['quantization_channels'],
        skip_channels=wavenet_params['skip_channels'],
        use_biases=wavenet_params['use_biases'],
        scalar_input=wavenet_params['scalar_input'],
        initial_filter_width=wavenet_params['initial_filter_width'],
        global_condition_channels=args.gc_channels,
        global_condition_cardinality=args.gc_cardinality)

    samples = tf.placeholder(tf.int32)

    if args.fast_generation:
        next_sample = net.predict_proba_incremental(samples, args.gc_id)
    else:
        next_sample = net.predict_proba(samples, args.gc_id)

    if args.fast_generation:
        sess.run(tf.global_variables_initializer())
        sess.run(net.init_ops)

    variables_to_restore = {
        var.name[:-2]: var for var in tf.global_variables()
        if not ('state_buffer' in var.name or 'pointer' in var.name)}
    saver = tf.train.Saver(variables_to_restore)

    print('Restoring model from {}'.format(args.checkpoint))
    saver.restore(sess, args.checkpoint)

    decode = mu_law_decode(samples, wavenet_params['quantization_channels'])

    quantization_channels = wavenet_params['quantization_channels']
    if args.wav_seed:
        seed = create_seed(args.wav_seed,
                           wavenet_params['sample_rate'],
                           quantization_channels,
                           net.receptive_field)
        waveform = sess.run(seed).tolist()
    else:
        # Silence with a single random sample at the end.
        waveform = [quantization_channels // 2] * (net.receptive_field - 1)
        waveform.append(np.random.randint(quantization_channels))

    if args.fast_generation and args.wav_seed:
        # When using the incremental generation, we need to
        # feed in all priming samples one by one before starting the
        # actual generation.
        # TODO This could be done much more efficiently by passing the waveform
        # to the incremental generator as an optional argument, which would be
        # used to fill the queues initially.
        outputs = [next_sample]
        outputs.extend(net.push_ops)

        print('Priming generation...')
        for i, x in enumerate(waveform[-net.receptive_field: -1]):
            if i % 100 == 0:
                print('Priming sample {}'.format(i))
            sess.run(outputs, feed_dict={samples: x})
        print('Done.')

    last_sample_timestamp = datetime.now()
    for step in range(args.samples):
        if args.fast_generation:
            outputs = [next_sample]
            outputs.extend(net.push_ops)
            window = waveform[-1]
        else:
            if len(waveform) > net.receptive_field:
                window = waveform[-net.receptive_field:]
            else:
                window = waveform
            outputs = [next_sample]

        # Run the WaveNet to predict the next sample.
        prediction = sess.run(outputs, feed_dict={samples: window})[0]

        # Scale prediction distribution using temperature.
        np.seterr(divide='ignore')
        scaled_prediction = np.log(prediction) / args.temperature
        scaled_prediction = (scaled_prediction -
                             np.logaddexp.reduce(scaled_prediction))
        scaled_prediction = np.exp(scaled_prediction)
        np.seterr(divide='warn')
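        # Net effect: p_i**(1/T) / sum_j p_j**(1/T); dividing log p by the temperature
        # and renormalising with a log-sum-exp (np.logaddexp.reduce) keeps the softmax
        # numerically stable before sampling.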

        # Prediction distribution at temperature=1.0 should be unchanged after
        # scaling.
        if args.temperature == 1.0:
            np.testing.assert_allclose(
                    prediction, scaled_prediction, atol=1e-5,
                    err_msg='Prediction scaling at temperature=1.0 '
                            'is not working as intended.')

        sample = np.random.choice(
            np.arange(quantization_channels), p=scaled_prediction)
        waveform.append(sample)

        # Show progress only once per second.
        current_sample_timestamp = datetime.now()
        time_since_print = current_sample_timestamp - last_sample_timestamp
        if time_since_print.total_seconds() > 1.:
            print('Sample {:3<d}/{:3<d}'.format(step + 1, args.samples),
                  end='\r')
            last_sample_timestamp = current_sample_timestamp

        # If we have partial writing, save the result so far.
        if (args.wav_out_path and args.save_every and
                (step + 1) % args.save_every == 0):
            out = sess.run(decode, feed_dict={samples: waveform})
            write_wav(out, wavenet_params['sample_rate'], args.wav_out_path)

    # Introduce a newline to clear the carriage return from the progress.
    print()

    # Save the result as an audio summary.
    datestring = str(datetime.now()).replace(' ', 'T')
    writer = tf.summary.FileWriter(logdir)
    # writer = tf.train.SummaryWriter(logdir)
    tf.summary.audio('generated', decode, wavenet_params['sample_rate'])
    summaries = tf.summary.merge_all()
    summary_out = sess.run(summaries,
                           feed_dict={samples: np.reshape(waveform, [-1, 1])})
    writer.add_summary(summary_out)

    # Save the result as a wav file.
    if args.wav_out_path:
        out = sess.run(decode, feed_dict={samples: waveform})
        write_wav(out, wavenet_params['sample_rate'], args.wav_out_path)

    print('Finished generating. The result can be viewed in TensorBoard.')