def test_process_data(self, data_field_class):
     r"""
     checking if the process data method works
     """
     #
     # creating horizontal channels
     eval_chans = EvalChannels(data_field_class())
     eval_chans.data_map = sp.zeros((eval_chans.nz, eval_chans.nx), dtype=int)
     eval_chans.data_map[2:4, :] = 255
     eval_chans.data_map[6:9, :] = 255
     eval_chans.data_vector = sp.ravel(eval_chans.data_map)
     eval_chans.args = {
         'axis': 'x',
         'thresh': 100
     }
     eval_chans._process_data()
     #
     # creating vertical channels
     eval_chans = EvalChannels(data_field_class())
     eval_chans.data_map = sp.zeros((eval_chans.nz, eval_chans.nx), dtype=int)
     eval_chans.data_map[:, 2:4] = 255
     eval_chans.data_map[:, 6:9] = 255
     eval_chans.data_vector = sp.ravel(eval_chans.data_map)
     eval_chans.args = {
         'axis': 'z',
         'thresh': 100
     }
     eval_chans._process_data()
     #
     eval_chans.args = {
         'axis': 'y',
         'thresh': 100
     }
     eval_chans._process_data()
Example 2
def calcProfilV(self, xy):
    """renvoie les valeurs des vitesses sur une section"""
    vxvy = self.getMfVitesse()
    grd = self.parent.aquifere.getFullGrid()
    x0, y0, dx, dy, nx, ny = grd['x0'], grd['y0'], grd['dx'], grd['dy'], grd[
        'nx'], grd['ny']
    x, y = zip(*xy)
    xl0, xl1 = x[:2]
    yl0, yl1 = y[:2]
    dd = min(dx, dy) * .95
    dxp, dyp = xl1 - xl0, yl1 - yl0
    ld = max(ceil(abs(dxp / dx)), ceil(abs(dyp / dy)))
    ld = int(ld + 1)
    ddx = dxp / ld
    ddy = dyp / ld
    xp2 = xl0 + arange(ld + 1) * ddx
    yp2 = yl0 + arange(ld + 1) * ddy
    ix = floor((xp2 - x0) / dx)
    ix = clip(ix.astype(int), 0, nx - 1)
    iy = floor((yp2 - y0) / dy)
    iy = clip(iy.astype(int), 0, ny - 1)
    vx = take(ravel(vxvy[0]), iy * nx + ix)
    vy = take(ravel(vxvy[1]), iy * nx + ix)
    V = sqrt(vx**2 + vy**2)
    cu = sqrt((xp2 - xp2[0])**2 + (yp2 - yp2[0])**2)
    return [cu, V]
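A minimal sketch of the point-to-cell index mapping used above, with hypothetical grid values:

from numpy import arange, clip, floor

x0, dx, nx = 0.0, 0.5, 10                                  # grid origin, spacing, cell count
xp2 = arange(4) * 0.4                                      # sample points 0.0, 0.4, 0.8, 1.2
ix = clip(floor((xp2 - x0) / dx).astype(int), 0, nx - 1)   # cell indices [0, 0, 1, 2]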
Example 3
    def newEpisode(self):
        if self.learning:
            params = ravel(self.explorationlayer.module.params)
            target = ravel(sum(self.history.getSequence(self.history.getNumSequences()-1)[2]) / 500)
        
            if target != 0.0:
                self.gp.addSample(params, target)
                if len(self.gp.trainx) > 20:
                    self.gp.trainx = self.gp.trainx[-20:, :]
                    self.gp.trainy = self.gp.trainy[-20:]
                    self.gp.noise = self.gp.noise[-20:]
                    
                self.gp._calculate()
                        
                # pick new parameters where the predictive variance (uncertainty) is highest
                max_cov = diag(self.gp.pred_cov).max()
                indices = where(diag(self.gp.pred_cov) == max_cov)[0]
                pick = indices[random.randint(len(indices))]
                new_param = self.gp.testx[pick]
            
                # check if that one exists already in gp training set
                if len(where(self.gp.trainx == new_param)[0]) > 0:
                    # add some normal noise to it
                    new_param += random.normal(0, 1, len(new_param))

                self.explorationlayer.module._setParameters(new_param)

            else:
                self.explorationlayer.drawRandomWeights()
        
        # don't call StateDependentAgent.newEpisode() because it randomizes the params
        LearningAgent.newEpisode(self)
Example 4
def simplicial_grid_2d(n):
    """
    Create an NxN 2d grid in the unit square
    
    The number of vertices along each axis is (N+1) for a total of (N+1)x(N+1) vertices
    
    A tuple (vertices,indices) of arrays is returned
    """
    vertices = zeros(((n + 1)**2, 2))
    vertices[:, 0] = ravel(resize(arange(n + 1), (n + 1, n + 1)))
    vertices[:, 1] = ravel(transpose(resize(arange(n + 1), (n + 1, n + 1))))
    vertices /= n

    indices = zeros((2 * (n**2), 3), scipy.int32)

    t1 = transpose(
        concatenate((matrix(arange(n)), matrix(arange(
            1, n + 1)), matrix(arange(n + 2, 2 * n + 2))),
                    axis=0))
    t2 = transpose(
        concatenate((matrix(arange(n)), matrix(arange(
            n + 2, 2 * n + 2)), matrix(arange(n + 1, 2 * n + 1))),
                    axis=0))
    first_row = concatenate((t1, t2))

    for i in range(n):
        indices[(2 * n * i):(2 * n * (i + 1)), :] = first_row + i * (n + 1)

    return (vertices, indices)
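A quick worked check of the smallest case, n = 1:

vertices, indices = simplicial_grid_2d(1)
# vertices -> [[0., 0.], [1., 0.], [0., 1.], [1., 1.]]  (corners of the unit square)
# indices  -> [[0, 1, 3], [0, 3, 2]]  (the two triangles that split it)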
Example 5
def patch_holes(data_map):
    r"""
    Fills in any areas with a non-finite value by interpolating between the
    nearest finite values, first linearly and then with a nearest-neighbor pass.
    """
    #
    # getting coordinates of all valid data points
    data_vector = sp.ravel(data_map)
    inds = sp.where(sp.isfinite(data_vector))[0]
    points = sp.unravel_index(inds, data_map.shape)
    values = data_vector[inds]
    #
    # linearly interpolating data to fill gaps
    xi = sp.where(~sp.isfinite(data_vector))[0]
    msg = '\tattempting to fill %d values with a linear interpolation'
    logger.debug(msg, xi.size)
    xi = sp.unravel_index(xi, data_map.shape)
    intrp = griddata(points, values, xi, fill_value=sp.nan, method='linear')
    data_map[xi[0], xi[1]] = intrp
    #
    # performing a nearest interpolation on any remaining regions
    data_vector = sp.ravel(data_map)
    xi = sp.where(~sp.isfinite(data_vector))[0]
    msg = '\tfilling %d remaining values with a nearest interpolation'
    logger.debug(msg, xi.size)
    xi = sp.unravel_index(xi, data_map.shape)
    intrp = griddata(points, values, xi, fill_value=0, method='nearest')
    data_map[xi[0], xi[1]] = intrp
    #
    return data_map
Example 6
def log_ptgd_at_maxent(N, phi_M, R, Delta):
    kernel_dim = Delta._kernel_dim
    M = utils.field_to_prob(phi_M)
    M_on_kernel = sp.zeros([kernel_dim, kernel_dim])
    kernel_basis = Delta._kernel_basis
    lambdas = Delta._eigenvalues
    for a in range(int(kernel_dim)):
        for b in range(int(kernel_dim)):
            psi_a = sp.ravel(kernel_basis[:, a])
            psi_b = sp.ravel(kernel_basis[:, b])
            M_on_kernel[a, b] = sp.sum(psi_a * psi_b * M)

    # Compute log occam factor at infinity
    log_Occam_at_infty = -0.5*sp.log(det(M_on_kernel)) \
                         - 0.5*sp.sum(sp.log(lambdas[kernel_dim:]))

    assert np.isreal(log_Occam_at_infty)

    # Compute the log likelihood at infinity
    log_likelihood_at_infty = -N * sp.sum(phi_M * R) - N

    assert np.isreal(log_likelihood_at_infty)

    # Compute the log posterior (not sure this is right)
    log_ptgd_at_maxent = log_likelihood_at_infty + log_Occam_at_infty

    assert np.isreal(log_ptgd_at_maxent)
    return log_ptgd_at_maxent
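Restated as a formula in the code's own notation, the returned quantity is

    log_ptgd = [-N * sum(phi_M * R) - N]                              (log likelihood at infinity)
             + [-1/2 * log det(M_on_kernel)
                - 1/2 * sum_{i >= kernel_dim} log lambda_i]           (log Occam factor)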
Example 8
 def _perform_fit(self):
     """Perform the fit using scipy optimise curve fit.
     We must supply x and y as one argument and zs as anothger. in the form
     xs: 0 1 2 0 1 2 0 
     ys: 0 0 0 1 1 1 2
     zs: 1 5 6 1 9 8 2
     Hence the use of repeat and tile in  positions and unravel for zs
     initially xs,ys is a linspace array and zs is a 2d image array
     """
     if self.xs is None or self.ys is None or self.zs is None:
         logger.warning(
             "attempted to fit data but had no data inside the Fit object. set xs,ys,zs first"
         )
         return ([], [])
     params = self._getParameters()
     if self.fitSubSpace:  #fit only the sub space
         #create xs, ys and zs which are appropriate slices of the arrays
         xs, ys, zs = self._get_subSpaceArrays()
     else:  #fit the whole array of data (slower)
         xs, ys, zs = self.xs, self.ys, self.zs
     positions = scipy.array([
         scipy.tile(xs, len(ys)),
         scipy.repeat(ys, len(xs))
     ])  #for creating data necessary for gauss2D function
     if self.fitTimeLimitBool:
         modelFitResult = self.lmfitModel.fit(scipy.ravel(zs),
                                              positions=positions,
                                              params=params,
                                              iter_cb=self.getFitCallback(
                                                  time.time()))
     else:  #no iter callback
         modelFitResult = self.lmfitModel.fit(scipy.ravel(zs),
                                              positions=positions,
                                              params=params)
     return modelFitResult
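A minimal sketch of the layout the docstring describes, on hypothetical 3x3 data (older scipy re-exports these numpy functions):

import scipy

xs = scipy.array([0, 1, 2])
ys = scipy.array([0, 1, 2])
positions = scipy.array([scipy.tile(xs, len(ys)),     # x cycles fastest: 0 1 2 0 1 2 0 1 2
                         scipy.repeat(ys, len(xs))])  # y repeats per row: 0 0 0 1 1 1 2 2 2
zs = scipy.arange(9).reshape(3, 3)                    # stand-in image, indexed [y, x]
flat_zs = scipy.ravel(zs)                             # lines up with positions column-for-column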
Example 9
def process_maps(aper_map, data_map1, data_map2, args):
    r"""
    Subtracts the data maps and then calculates percentiles of the result
    before outputting a final map to file.
    """
    #
    # creating resultant map from clone of aperture map
    result = aper_map.clone()
    result.data_map = data_map1 - data_map2
    result.data_vector = sp.ravel(result.data_map)
    result.infile = args.out_name
    result.outfile = args.out_name
    #
    print('Percentiles of data_map1 - data_map2')
    output_percentile_set(result, args)
    #
    # checking if data is to be normalized and/or absolute
    if args.post_abs:
        result.data_map = sp.absolute(result.data_map)
        result.data_vector = sp.absolute(result.data_vector)
    #
    if args.post_normalize:
        result.data_map = result.data_map/sp.amax(sp.absolute(result.data_map))
        result.data_vector = sp.ravel(result.data_map)
    #
    return result
Example 10
 def _generate_masked_mesh(self, cell_mask=None):
     r"""
     Generates the mesh based on the cell mask provided
     """
     #
     if cell_mask is None:
         cell_mask = sp.ones(self.data_map.shape, dtype=bool)
     #
     # initializing arrays
     self._edges = sp.ones(0, dtype=str)
     self._merge_patch_pairs = sp.ones(0, dtype=str)
     self._create_blocks(cell_mask)
     #
     # building face arrays
     mapper = sp.ravel(sp.array(cell_mask, dtype=int))
     mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))
     mapper = sp.reshape(mapper, (self.nz, self.nx))
     mapper[~cell_mask] = -sp.iinfo(int).max
     #
     boundary_dict = {
         'bottom':
             {'bottom': mapper[0, :][cell_mask[0, :]]},
         'top':
             {'top': mapper[-1, :][cell_mask[-1, :]]},
         'left':
             {'left': mapper[:, 0][cell_mask[:, 0]]},
         'right':
             {'right': mapper[:, -1][cell_mask[:, -1]]},
         'front':
             {'front': mapper[cell_mask]},
         'back':
             {'back': mapper[cell_mask]},
         'internal':
             {'bottom': [], 'top': [], 'left': [], 'right': []}
     }
     #
     # determining cells linked to a masked cell
     cell_mask = sp.where(~sp.ravel(cell_mask))[0]
     inds = sp.in1d(self._field._cell_interfaces, cell_mask)
     inds = sp.reshape(inds, (len(self._field._cell_interfaces), 2))
     inds = inds[:, 0].astype(int) + inds[:, 1].astype(int)
     inds = (inds == 1)
     links = self._field._cell_interfaces[inds]
     #
     # adjusting order so masked cells are all on links[:, 1]
     swap = sp.in1d(links[:, 0], cell_mask)
     links[swap] = links[swap, ::-1]
     #
     # setting side based on index difference
     sides = sp.ndarray(len(links), dtype='<U6')
     sides[sp.where(links[:, 1] == links[:, 0]-self.nx)[0]] = 'bottom'
     sides[sp.where(links[:, 1] == links[:, 0]+self.nx)[0]] = 'top'
     sides[sp.where(links[:, 1] == links[:, 0]-1)[0]] = 'left'
     sides[sp.where(links[:, 1] == links[:, 0]+1)[0]] = 'right'
     #
     # adding each block to the internal face dictionary
     inds = sp.ravel(mapper)[links[:, 0]]
     for side, block_id in zip(sides, inds):
         boundary_dict['internal'][side].append(block_id)
     self.set_boundary_patches(boundary_dict, reset=True)
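The block-renumbering trick used above, shown in isolation on a tiny mask (older scipy re-exports the numpy functions used here):

import scipy as sp

mask = sp.array([[True, False], [True, True]])
mapper = sp.ravel(sp.array(mask, dtype=int))                # [1, 0, 1, 1]
mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))   # [0, 0, 1, 2]
mapper = sp.reshape(mapper, mask.shape)
mapper[~mask] = -sp.iinfo(int).max                          # sentinel for masked cells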
Example 11
    def __init__(self, U, Y, statedim, reg=None):
        if size(shape(U)) == 1:
            U = reshape(U, (-1,1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1,1))
        if reg is None:
            reg = 0

        yDim = size(Y,1)
        uDim = size(U,1)

        self.output_size = size(Y,1) # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = size(U,0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = dot(Y_f, U_f.T)
        YfUpT = dot(Y_f, U_p.T)
        YfYpT = dot(Y_f, Y_p.T)
        UfUpT = dot(U_f, U_p.T)
        UfYpT = dot(U_f, Y_p.T)
        UpYpT = dot(U_p, Y_p.T)
        F = bmat([[None, YfUfT, YfUpT, YfYpT],
                  [YfUfT.T, None, UfUpT, UfYpT],
                  [YfUpT.T, UfUpT.T, None, UpYpT],
                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
                     [None, pinv(dot(U_f,U_f.T)), None, None],
                     [None, None, pinv(dot(U_p,U_p.T)), None],
                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
        F = F - eye(size(F, 0)) * reg

        # Take smallest eigenvalues
        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
        RRi = pinv(dot(R, R.T))
        RL  = dot(R, L.T)
        Sys = dot(RRi, RL).T
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]
Example 13
    def plotCurves(self, showSamples=False, force2D=True):
        from pylab import clf, hold, plot, fill, title, gcf, pcolor, gray

        if not self.calculated:
            self._calculate()

        if self.indim == 1:
            clf()
            hold(True)
            if showSamples:
                # plot samples (gray)
                for _ in range(5):
                    plot(self.testx,
                         self.pred_mean + random.multivariate_normal(
                             zeros(len(self.testx)), self.pred_cov),
                         color='gray')

            # plot training set
            plot(self.trainx, self.trainy, 'bx')
            # plot mean (blue)
            plot(self.testx, self.pred_mean, 'b', linewidth=1)
            # plot variance (as "polygon" going from left to right for upper half and back for lower half)
            fillx = r_[ravel(self.testx), ravel(self.testx[::-1])]
            filly = r_[self.pred_mean + 2 * diag(self.pred_cov),
                       self.pred_mean[::-1] - 2 * diag(self.pred_cov)[::-1]]
            fill(fillx, filly, facecolor='gray', edgecolor='white', alpha=0.3)
            title('1D Gaussian Process with mean and variance')

        elif self.indim == 2 and not force2D:
            from matplotlib import axes3d as a3

            fig = gcf()
            fig.clear()
            ax = a3.Axes3D(fig)  #@UndefinedVariable

            # plot training set
            ax.plot3D(ravel(self.trainx[:, 0]), ravel(self.trainx[:, 1]),
                      ravel(self.trainy), 'ro')

            # plot mean
            (x, y, z) = [
                m.reshape(int(sqrt(len(m))), int(sqrt(len(m))))
                for m in (self.testx[:, 0], self.testx[:, 1], self.pred_mean)
            ]
            ax.plot_wireframe(x, y, z, colors='gray')
            return ax

        elif self.indim == 2 and force2D:
            # plot mean on pcolor map
            gray()
            # (x, y, z) = map(lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:,0], self.testx[:,1], self.pred_mean))
            m = int(floor(sqrt(len(self.pred_mean))))
            pcolor(self.pred_mean.reshape(m, m)[::-1, :])

        else:
            print("plotting only supported for indim=1 or indim=2.")
Example 14
 def _getSequenceField(self, index, field):
     """Return a sequence of one single field given by `field` and indexed by
     `index`."""
     seq = ravel(self.getField('sequence_index'))
     if len(seq) == index + 1:
         # user wants to access the last sequence, return until end of data
         return self.getField(field)[seq[index]:]
     if len(seq) < index + 1:
         # sequence index beyond number of sequences, raise exception
         raise IndexError('sequence does not exist.')
     return self.getField(field)[seq[index]:seq[index + 1]]
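A short sketch of the start-offset convention assumed above (hypothetical values):

# 'sequence_index' stores the starting row of each sequence, so sequence i
# spans rows seq[i]:seq[i + 1], and the last sequence runs to the end of data
seq = [0, 3, 7]        # three sequences starting at rows 0, 3 and 7
# sequence 1 -> rows 3:7;  sequence 2 (the last) -> rows 7 onward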
Example 16
def mkcurve(chan1, chan2):
    """Calculate channel curve by averaging target values."""
    sums = {}
    for v1, v2 in zip(ravel(chan1), ravel(chan2)):
        sums.setdefault(v1, []).append(v2)
    c = array([(src, mean(vals)) for src, vals in sorted(sums.items())])
    nvals = interp(range(256), c[:, 0], c[:, 1], 0, 255)
    return dict(zip(range(256), nvals))
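A small worked example with hypothetical 8-bit channel values:

chan1 = array([[10, 10], [20, 30]])
chan2 = array([[50, 60], [70, 90]])
curve = mkcurve(chan1, chan2)
# curve[10] == 55.0 (mean of 50 and 60) and curve[20] == 70.0; sources below
# 10 clamp to 0 and those above 30 clamp to 255, per interp's left/right arguments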
Example 18
 def integrateObservation(self, obs):
     if len(obs) == 3:
         if self.useSpecialInfo:
             self.lastobs[-2:] = obs[:2]
         leftindex = max(0, 11 - self.inGridSize // 2)
         rightindex = min(22, 11 + self.inGridSize // 2 + 1)
         middle = obs[2][leftindex:rightindex, leftindex:rightindex]
         #boolmid = logical_not(logical_not(middle))*1.
         if self.useSpecialInfo:
             self.lastobs[:-2] = ravel(middle)
         else:
             self.lastobs[:] = ravel(middle)
Example 19
    def compute(nn_params):
        m = Y.shape[0]

        # Reshape nn_params back into the parameters theta_1 and theta_2
        theta_1 = nn_params[0:(hidden_layer_size*(input_layer_size+1))]. \
                    reshape([hidden_layer_size, input_layer_size+1])
        theta_2 = nn_params[(hidden_layer_size*(input_layer_size+1)):]. \
                    reshape([num_labels, hidden_layer_size+1])

        theta_1_reg = sp.copy(theta_1)
        theta_1_reg[:, 0] = 0
        theta_2_reg = sp.copy(theta_2)
        theta_2_reg[:, 0] = 0

        # Forward propagation
        f = forward_prop(X)(theta_1, theta_2)

        # Initialize variables for back propagation
        a = f['a']

        # Add bias
        a_1 = a[0]
        a_2 = a[1]
        a_3 = a[2]

        z = f['z']
        z_2 = z[0]
        z_3 = z[1]

        # Transform Y
        b = sp.matrix(
            sp.apply_along_axis(
                lambda n: sp.int_(sp.array(range(1, num_labels + 1)) == n), 1,
                Y))

        DEL_1 = sp.matrix(sp.zeros((hidden_layer_size, input_layer_size + 1)))
        DEL_2 = sp.matrix(sp.zeros((num_labels, hidden_layer_size + 1)))

        for i in range(0, m):
            del_3 = a_3[i, :].T - b[i, :].T
            del_2 = sp.multiply(theta_2[:, 1:].T * del_3,
                                sigmoid_gradient(z_2[i, :].T))

            DEL_2 = DEL_2 + del_3 * a_2[i, :]
            DEL_1 = DEL_1 + del_2 * a_1[i, :]

        # Regularize
        theta_1_grad = DEL_1 / m + (_lambda / m) * theta_1_reg
        theta_2_grad = DEL_2 / m + (_lambda / m) * theta_2_reg
        grad = sp.concatenate([sp.ravel(theta_1_grad), sp.ravel(theta_2_grad)])

        return grad
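A minimal sketch of the parameter un-rolling convention assumed above (the sizes are the ones check_nn_gradients uses further down):

import scipy as sp

input_layer_size, hidden_layer_size, num_labels = 3, 5, 3
theta_1 = sp.zeros((hidden_layer_size, input_layer_size + 1))   # 5 x 4
theta_2 = sp.zeros((num_labels, hidden_layer_size + 1))         # 3 x 6
nn_params = sp.concatenate([sp.ravel(theta_1), sp.ravel(theta_2)])
assert nn_params.size == 5 * 4 + 3 * 6                          # 38 parameters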
Example 20
def sphere_features(features, sphere_vectors):
    
    features.shape = features.shape[0], -1

    fmean, fstd = sphere_vectors
    features -= fmean        
    assert((fstd!=0).all())
    features /= fstd

    assert(not sp.isnan(sp.ravel(features)).any())
    assert(not sp.isinf(sp.ravel(features)).any())
    
    return features
Example 21
    def removeSequence(self, index):
        """Remove the `index`'th sequence from the dataset and places the
        marker to the sample following the removed sequence."""
        if index >= self.getNumSequences():
            # sequence doesn't exist, raise exception
            raise IndexError('sequence does not exist.')
        sequences = ravel(self.getField('sequence_index'))
        seqstart = sequences[index]
        if index == self.getNumSequences() - 1:
            # last sequence is going to be removed
            lastSeqDeleted = True
            seqend = self.getLength()
        else:
            lastSeqDeleted = False
            # sequence to remove is not last one (sequence_index exists)
            seqend = sequences[index + 1]

        # cut out data from all fields
        for label in self.link:
            # concatenate rows from start to seqstart and from seqend to end
            self.data[label] = r_[self.data[label][:seqstart, :],
                                  self.data[label][seqend:, :]]
            # update endmarkers of linked fields
            self.endmarker[label] -= seqend - seqstart

        # update sequence indices
        for i, val in enumerate(sequences):
            if val > seqstart:
                self.data['sequence_index'][i, :] -= seqend - seqstart

        # remove sequence index of deleted sequence and reduce its endmarker
        self.data['sequence_index'] = r_[
            self.data['sequence_index'][:index, :],
            self.data['sequence_index'][index + 1:, :]]
        self.endmarker['sequence_index'] -= 1

        if lastSeqDeleted:
            # last sequence was removed
            # move sequence marker to last remaining sequence
            self.currentSeq = index - 1
            # move sample marker to end of dataset
            self.index = self.getLength()
            # if there was only 1 sequence left, re-initialize sequence index
            if self.getLength() == 0:
                self.clear()
        else:
            # removed sequence was not last one (sequence_index exists)
            # move sequence marker to the new sequence at position 'index'
            self.currentSeq = index
            # move sample marker to beginning of sequence at position 'index'
            self.index = ravel(self.getField('sequence_index'))[index]
Example 22
    def plot(self, func, interp=True, plotter='imshow'):
        import matplotlib as mpl
        from matplotlib import pylab as pl
        if interp:
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
        else:
            y, x = sp.mgrid[
                self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
            z = func(x, y)

        z = sp.where(sp.isinf(z), 0.0, z)

        extent = (self.xrange[0], self.xrange[1], self.yrange[0],
                  self.yrange[1])
        pl.ioff()
        pl.clf()
        pl.hot()  # Some like it hot
        if plotter == 'imshow':
            pl.imshow(sp.nan_to_num(z),
                      interpolation='nearest',
                      extent=extent,
                      origin='lower')
        elif plotter == 'contour':
            Y, X = sp.ogrid[
                self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
            pl.contour(sp.ravel(X), sp.ravel(Y), z, 20)
        x = self.x
        y = self.y
        lc = mpl.collections.LineCollection(sp.array([
            ((x[i], y[i]), (x[j], y[j])) for i, j in self.tri.edge_db
        ]),
                                            colors=[(0, 0, 0, 0.2)])
        ax = pl.gca()
        ax.add_collection(lc)

        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            pl.title('%s: %s' % (func.title, title))
        else:
            pl.title(title)

        pl.show()
        pl.ion()
Example 24
def mkcurve(chan1, chan2):
    """Calculate channel curve by averaging target values."""
    sums = {}
    flat1 = ravel(chan1)
    total = len(flat1)
    for z, (v1, v2) in enumerate(zip(flat1, ravel(chan2))):
        progprint(z / total)
        try:
            sums[v1].append(v2)
        except KeyError:
            sums[v1] = [v2]
    c = array([(src, mean(vals)) for src, vals in sorted(sums.items())])
    nvals = interp(range(256), c[:, 0], c[:, 1], 0, 255)
    progclean()
    return dict(zip(range(256), nvals))
Example 25
def output_percentile_set(data_field, args):
    r"""
    Computes four sets of percentiles and stacks them as columns: raw data,
    normalized data, absolute-value data, and normalized absolute-value data.
    """
    data = {}
    #
    # outputting percentiles of initial subtraction to screen
    field = data_field.clone()
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['raw'] = pctle.processed_data
    #
    # normalizing data
    field = data_field.clone()
    field.data_map = field.data_map/sp.amax(sp.absolute(field.data_map))
    field.data_vector = sp.ravel(field.data_map)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['norm'] = pctle.processed_data
    #
    # taking absolute value of data
    field = data_field.clone()
    field.data_map = sp.absolute(field.data_map)
    field.data_vector = sp.absolute(field.data_vector)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['abs'] = pctle.processed_data
    #
    # absolute value + normed
    field.data_map = field.data_map/sp.amax(field.data_map)
    field.data_vector = sp.ravel(field.data_map)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['abs+norm'] = pctle.processed_data
    #
    # outputting stacked percentiles
    fmt = '    {:>6.2f}\t{: 0.6e}\t{: 0.6e}\t{: 0.6e}\t{: 0.6e}\n'
    content = 'Percentile\tRaw Data\tAbsolute\tNormalized\tNorm+abs\n'
    data = zip(args.perc, data['raw'].values(),
               data['abs'].values(),
               data['norm'].values(),
               data['abs+norm'].values())
    #
    for row in data:
        content += fmt.format(*row)
    content += '\n'
    print(content)
Example 26
 def oneSample(self, k):
     """ produce one new sample and update phi correspondingly """
     thesum = 0.0
     for i in range(self.mu):
         thesum += exp(self.basealpha[i])
     for i in range(self.mu):
         self.alpha[i] = exp(self.basealpha[i]) / thesum
     choosem = drawIndex(self.alpha, tolerant=True)
     self.chosenCenter[k] = choosem
     z = mat(
         multivariate_normal(
             array(self.x[choosem]).flatten(), self.sigma[choosem])).T
     self.zs[k] = z
     self.R[k] = self.evaluateAt(z)
     # TODO make for all mu
     if self.importanceSampling:
         self.rellhood[k] = multivariateNormalPdf(z, self.x[0],
                                                  self.sigma[0])
     logderivbasealpha = zeros((self.mu, 1))
     logderivx = zeros((self.mu, self.xdim))
     logderivfactorsigma = zeros((self.mu, self.xdim, self.xdim))
     for m in range(self.mu):
         self.sigma[m] = dot(self.factorSigma[m].T, self.factorSigma[m])
         if self.mu > 1:
             relresponsibility = (self.alpha[m] * multivariateNormalPdf(
                 ravel(z), ravel(self.x[m]), self.sigma[m]) / sum(
                     map(
                         lambda mm: self.alpha[mm] * multivariateNormalPdf(
                             ravel(z), ravel(self.x[mm]), self.sigma[mm]),
                         range(self.mu))))
         else:
             relresponsibility = 1.0
         if self.mu > 1:
             logderivbasealpha[m] = relresponsibility * (1.0 -
                                                         self.alpha[m])
         else:
             logderivbasealpha[m] = 0.0
         logderivx[m] = relresponsibility * (self.sigma[m].I *
                                             (z - self.x[m])).flatten()
         A = 0.5 * self.sigma[m].I * (z - self.x[m]) * (
             z - self.x[m]).T * self.sigma[m].I - 0.5 * self.sigma[m].I
         logderivsigma_m = self.blackmagic * relresponsibility * A  #0.5 * (A + diag(diag(A)))  #* 2.0
         logderivfactorsigma[m] = self.factorSigma[m] * (logderivsigma_m +
                                                         logderivsigma_m.T)
     #print 'logalpha', logderivbasealpha.flatten(), self.alpha, sum(logderivbasealpha)
     tmp = self.combineParams(logderivbasealpha, logderivx,
                              logderivfactorsigma)
     self.phi[k] = tmp
Example 27
 def generate_threshold_mesh(self, min_value=0.0, max_value=1.0e9):
     r"""
     Generates a mesh excluding all blocks below the min_value arg. Regions
     that are isolated by the thresholding are also automatically removed.
     """
     #
     # thresholding the data and then checking for isolated clusters
     self._field.threshold_data(min_value, max_value, repl=0.0)
     self._field.copy_data(self)
     #
     adj_matrix = self._field.create_adjacency_matrix()
     num_cs, cs_ids = csgraph.connected_components(csgraph=adj_matrix,
                                                   directed=False)
     # only saving the largest cluster
     if num_cs > 1:
         cs_count = sp.zeros(num_cs, dtype=int)
         for cs_num in cs_ids:
             cs_count[cs_num] += 1
         self.data_vector[sp.where(cs_ids != sp.argmax(cs_count))[0]] = 0.0
         self.data_map = sp.reshape(self.data_vector, (self.nz, self.nx))
     #
     self._field.data_map = self.data_map
     self._field.data_vector = sp.ravel(self.data_map)
     #
     # generating blocks and vertices
     mask = self.data_map > 0.0
     self._generate_masked_mesh(cell_mask=mask)
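The cluster-pruning step above, shown in isolation on a hypothetical three-cell adjacency (numpy's bincount replaces the manual counting loop):

import numpy as np
from scipy.sparse import csr_matrix, csgraph

# cells 0 and 1 are connected; cell 2 is isolated
adj = csr_matrix(([1, 1], ([0, 1], [1, 0])), shape=(3, 3))
num_cs, cs_ids = csgraph.connected_components(csgraph=adj, directed=False)
largest = np.argmax(np.bincount(cs_ids))   # id of the biggest cluster
keep = (cs_ids == largest)                 # -> [True, True, False]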
Example 28
File: ps3.py (Project: jrnold/psc585)
def make_y0(model):
    """ Make y0 """
    def mu_ij(i, j):
        return -sp.sqrt(uij[j, i] + (model.c / (1 - model.p[j]))
                        - (1 - model.d) * ubar[j]
                        - model.d * v0[j])

    # \bar{u} : status quo payoffs
    ubar = -(model.ideals ** 2).sum(1) + model.K
    # TODO: where did plus 10 come from?
    uij = (-(model.ideals[:, 0] - model.ideals[:, 0][:, sp.newaxis])**2 +
           -(model.ideals[:, 1] - model.ideals[:, 1][:, sp.newaxis])**2 + model.K)
    # v_0
    v0 = (uij * model.p[:, sp.newaxis]).sum(1) + model.c
    # \lambda_0
    lam0 = sp.ones((5, 6)) * -sp.sqrt(model.c)
    # if m_i = i
    lam0[sp.r_[0:5], sp.r_[0:5]] = 1
    lam0 = sp.reshape(lam0, (lam0.size, ))
    # x_0
    x0 = sp.reshape(sp.repeat(model.ideals, 6, axis=0), (60, ))
    # \mu_0
    mu0 = sp.zeros((5, 6, 2))
    # For players
    for i in range(5):
        # For coalitions
        for mi in range(6):
            # for each other player in the coalition
            ii = i * 6 + mi
            mu0[i, mi, 0] = mu_ij(i, model.part1[ii])
            mu0[i, mi, 1] = mu_ij(i, model.part2[ii])
    mu0 = sp.ravel(mu0)
    # y_0
    y0 = sp.concatenate((v0, lam0, x0, mu0))
    return y0
Example 29
def norm(x):
    """2-norm of x
    """

    y = ravel(x)
    p = sqrt(inner(y, y))
    return p
Example 30
 def learn(self):
     """ calls the gradient calculation function and executes a step in direction
         of the gradient, scaled with a small learning rate alpha. """
      assert self.ds is not None
      assert self.module is not None
     
     # get the deltas from the dataset
     deltas = self.ds.getField('deltas')
     
     # initialize matrix D and vector R
     D = ones((self.ds.getNumSequences(), self.module.paramdim + 1))
     R = zeros((self.ds.getNumSequences(), 1))
     
     # calculate the gradient with pseudo inverse
     for seq in range(self.ds.getNumSequences()):
         _state, _action, reward = self.ds.getSequence(seq)
         D[seq,:-1] = deltas[seq,:]
         R[seq,:] = mean(reward)
     
     beta = dot(pinv(D), R)        
     gradient = ravel(beta[:-1])
     
     # update the weights
     self.original = self.gd(gradient)       
     self.module._setParameters(self.original)
        
     self.module.reset()
Example 32
    def calculateGradient(self):
        # normalize rewards
        # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))

        # initialize variables
        R = ones((self.dataset.getNumSequences(), 1), float)
        X = ones((self.dataset.getNumSequences(),
                  self.loglh.getDimension('loglh') + 1), float)

        # collect sufficient statistics
        print(self.dataset.getNumSequences())
        for n in range(self.dataset.getNumSequences()):
            _state, _action, reward = self.dataset.getSequence(n)
            seqidx = ravel(self.dataset['sequence_index'])
            if n == self.dataset.getNumSequences() - 1:
                # last sequence until end of dataset
                loglh = self.loglh['loglh'][seqidx[n]:, :]
            else:
                loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]

            X[n, :-1] = sum(loglh, 0)
            R[n, 0] = sum(reward, 0)

        # linear regression
        beta = dot(pinv(X), R)
        return beta[:-1]
Example 33
def policyIteration(Ts,
                    R,
                    discountFactor,
                    VEvaluator=None,
                    initpolicy=None,
                    maxIters=20):
    """ Given transition matrices (one per action),
    produce the optimal policy, using the policy iteration algorithm.
    
    A custom function that maps policies to value functions can be provided. """
    if initpolicy is None:
        policy, T = randomPolicy(Ts)
    else:
        policy = initpolicy
        T = collapsedTransitions(Ts, policy)

    if VEvaluator is None:
        VEvaluator = lambda T: trueValues(T, R, discountFactor)

    while maxIters > 0:
        V = VEvaluator(T)
        newpolicy, T = greedyPolicy(Ts, R, discountFactor, V)
        # if the probabilities are not changing more than by 0.001, we're done.
        if sum(ravel(abs(newpolicy - policy))) < 1e-3:
            return policy, T
        policy = newpolicy
        maxIters -= 1
    return policy, T
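A hypothetical usage sketch (assuming, as the helper calls suggest, one transition matrix per action and a per-state reward vector):

from scipy import array

# two states, two actions: action 0 tends to stay put, action 1 tends to switch
Ts = [array([[0.9, 0.1], [0.1, 0.9]]),
      array([[0.1, 0.9], [0.9, 0.1]])]
R = array([0.0, 1.0])                  # state 1 is rewarding
policy, T = policyIteration(Ts, R, discountFactor=0.9)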
Example 35
    def add_pore_property_from_template(self, template, prop):
        r"""
        Add pore properties based on value stored at each location in the template array

        Parameters
        ----------
        template : array_like
            The template array containing the pore property values at the desired locations

        prop : string
            The name of the pore property being added

        Notes
        -----
        This method can lead to troubles if not executed in the right order.
        For instance, if throat sizes are assigned during the generation stage
        based on neighboring pores sizes, then rewriting pore sizes with this
        method could invalidate the throat sizes.  Ideally, this method should
        be called by the generate_pore_sizes() step during the generate process
        to avoid the issue.  Alternatively, an update_throat_sizes() method
        could be written and called subsequent to calling this method.

        """
        self._logger.info("add_pore_prop_from_template: Add pore properties")
        pore_prop = sp.ravel(template)[self.get_pore_data(prop='voxel_index')]
        self.set_pore_data(prop=prop, data=pore_prop)
        self._logger.debug("add_pore_prop_from_template: End of method")
Example 36
def insert_sphere(im, c, r):
    r"""
    Inserts a sphere of a specified radius into a given image

    Parameters
    ----------
    im : array_like
        Image into which the sphere should be inserted
    c : array_like
        The [x, y, z] coordinate indicating the center of the sphere
    r : int
        The radius of sphere to insert

    Returns
    -------
    image : ND-array
        The original image with a sphere inserted at the specified location
    """
    c = sp.array(c, dtype=int)
    if c.size != im.ndim:
        raise Exception('Coordinates do not match dimensionality of image')

    bbox = []
    for i in range(im.ndim):                        # lower corner, clipped to the image
        bbox.append(sp.clip(c[i] - r, 0, im.shape[i]))
    for i in range(im.ndim):                        # upper corner
        bbox.append(sp.clip(c[i] + r, 0, im.shape[i]))
    bbox = sp.ravel(bbox)
    s = bbox_to_slices(bbox)
    temp = im[s]
    blank = sp.ones_like(temp)
    blank[tuple(c - bbox[0:im.ndim])] = 0
    blank = spim.distance_transform_edt(blank) < r
    im[s] = blank
    return im
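A brief usage sketch with a hypothetical image and coordinates:

import scipy as sp

im = sp.zeros((20, 20, 20), dtype=bool)
im = insert_sphere(im, c=[10, 10, 10], r=4)   # voxels inside the sphere become True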
Example 37
 def _updateWeights(self, state, action, reward, next_state, learned_policy=None):
     """ Policy is a function that returns a probability vector for all actions, 
     given the current state(-features). """
     if learned_policy is None:
         learned_policy = self._greedyPolicy
     
     self._updateEtraces(state, action)
     
     phi = zeros((self.num_actions, self.num_features))
     phi[action] += state        
     phi_n = outer(learned_policy(next_state), next_state)
     
     self._A += outer(ravel(self._etraces), ravel(phi - self.rewardDiscount * phi_n))
     self._b += reward * ravel(self._etraces)       
     
     self._theta = dot(pinv2(self._A), self._b).reshape(self.num_actions, self.num_features)
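In matrix form, the update above accumulates the standard least-squares TD (LSTD-Q) statistics; restated in the code's own names (e are the eligibility traces, gamma the rewardDiscount):

    A <- A + e * (phi - gamma * phi_next)^T
    b <- b + reward * e
    theta = pinv(A) * b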
Example 39
def lossTraces(fwrap, aclass, dim, maxsteps, storesteps=None, x0=None,
               initNoise=0., minLoss=1e-10, algoparams={}):
    """ Compute a number of loss curves, for the provided settings,
    stored at specific storestep points. """
    if not storesteps:
        storesteps = range(maxsteps + 1)
    
    # initial points, potentially noisy
    if x0 is None:
        x0 = ones(dim) + randn(dim) * initNoise
    
    # tracking progress by callback
    paramtraces = {'index':-1}
    def storer(a):
        lastseen = paramtraces['index']
        for ts in [x for x in storesteps if x > lastseen and x <= a._num_updates]:
            paramtraces[ts] = a.bestParameters.copy()
        paramtraces['index'] = a._num_updates
        
    # initialization    
    algo = aclass(fwrap, x0, callback=storer, **algoparams)
    print(algo, fwrap, dim, maxsteps, end=' ')
    
    # store initial step   
    algo.callback(algo)
    algo.run(maxsteps)

    # process learning curve
    del paramtraces['index']
    paramtraces = array([x for _, x in sorted(paramtraces.items())])
    oloss = mean(fwrap.stochfun.expectedLoss(ones(100) * fwrap.stochfun.optimum))
    ls = abs(fwrap.stochfun.expectedLoss(ravel(paramtraces)) - oloss) + minLoss
    ls = reshape(ls, paramtraces.shape)
    print(median(ls[-1]))
    return ls
Ejemplo n.º 40
0
 def _updateWeights(self, state, action, reward, next_state, learned_policy=None):
     """ Policy is a function that returns a probability vector for all actions, 
     given the current state(-features). """
     if learned_policy is None:
         learned_policy = self._greedyPolicy
     
     self._updateEtraces(state, action)
     
     phi = zeros((self.num_actions, self.num_features))
     phi[action] += state        
     phi_n = outer(learned_policy(next_state), next_state)
     
     self._A += outer(ravel(self._etraces), ravel(phi - self.rewardDiscount * phi_n))
     self._b += reward * ravel(self._etraces)       
     
     self._theta = dot(pinv2(self._A), self._b).reshape(self.num_actions, self.num_features)
Example 41
def solver_diagnostic(A):
    ##
    # Generate B
    B = ones((A.shape[0], 1), dtype=A.dtype)
    BH = B.copy()

    ##
    # Random initial guess, zero right-hand side
    random.seed(0)
    b = zeros((A.shape[0],1))
    x0 = rand(A.shape[0],1)

    ##
    # Create solver
    ml = smoothed_aggregation_solver(A, B=B, BH=BH,
        strength=('symmetric', {'theta': 0.0}),
        smooth=('energy', {'weighting': 'local', 'krylov': 'gmres', 'degree': 1, 'maxiter': 2}),
        improve_candidates=[('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 4}), None],
        aggregate="standard",
        presmoother=('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}),
        postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}),
        max_levels=15,
        max_coarse=300,
        coarse_solver="pinv")

    ##
    # Solve system
    res = []
    x = ml.solve(b, x0=x0, tol=1e-08, residuals=res, accel="gmres", maxiter=300, cycle="V")
    res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))
    normr0 = norm(ravel(b) - ravel(A*x0))
    print " "
    print ml
    print "System size:                " + str(A.shape)
    print "Avg. Resid Reduction:       %1.2f"%res_rate
    print "Iterations:                 %d"%len(res)
    print "Operator Complexity:        %1.2f"%ml.operator_complexity()
    print "Work per DOA:               %1.2f"%(ml.cycle_complexity()/abs(log10(res_rate)))
    print "Relative residual norm:     %1.2e"%(norm(ravel(b) - ravel(A*x))/normr0)

    ##
    # Plot residual history
    pylab.semilogy(array(res)/normr0)
    pylab.title('Residual Histories')
    pylab.xlabel('Iteration')
    pylab.ylabel('Relative Residual Norm')
    pylab.show()
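The average residual reduction printed above is a geometric mean per iteration; a quick arithmetic check:

# dropping from 1.0 to 1e-8 over 20 iterations gives
rate = (1e-8 / 1.0) ** (1.0 / 20)   # ~0.398, i.e. the residual shrinks ~2.5x per cycle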
Example 42
def openSinglefile(filename):
    inFile = open(filename, 'r')
    buffero = scipy.loadtxt(inFile)
    inFile.close()
    x_array = buffero[:, 0]
    spectra = []
    for item in scipy.hsplit(buffero[:, 1:], buffero.shape[1] - 1):
        spectra.append(generic(x_array, scipy.ravel(item)))
    return spectra
Example 43
 def addDataset(self, dataset):
     """ adds the points from the dataset to the training set """
     assert (dataset.getDimension('input') == self.indim)
     assert (dataset.getDimension('target') == 1)
     
     self.trainx = r_[self.trainx, dataset.getField('input')]
     self.trainy = r_[self.trainy, ravel(dataset.getField('target'))]
     self.noise = array([0.001]*len(self.trainx))
     self.calculated = False
Example 44
def check_nn_gradients(_lambda=0):
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5
    theta_1 = debug_initialize_weights(input_layer_size, hidden_layer_size)
    theta_2 = debug_initialize_weights(hidden_layer_size, num_labels)
    X = debug_initialize_weights(input_layer_size - 1, m)
    Y = 1 + sp.matrix(sp.mod(range(0, m), num_labels)).T
    nn_params = sp.concatenate([sp.ravel(theta_1), sp.ravel(theta_2)])

    D = nn_gradients(input_layer_size, hidden_layer_size, num_labels, X, Y,
                     _lambda)(nn_params)
    N = compute_numerical_gradient(
        nn_cost_function(input_layer_size, hidden_layer_size, num_labels, X, Y,
                         _lambda), nn_params)
    diff = sp.linalg.norm(N - D) / sp.linalg.norm(N + D)
    return diff
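A brief usage note (the threshold is a common rule of thumb, not something the code enforces):

diff = check_nn_gradients(_lambda=3)
# for a correct back-propagation implementation, the relative difference
# ||N - D|| / ||N + D|| should be tiny, typically on the order of 1e-9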
Example 45
 def f(self, x):
     N = self.xdim // 3
     coords = x.reshape((N, 3))
     # pairwise distances; adding eye(N) puts ones on the diagonal so the
     # self-distances do not divide by zero below
     distances = sqrt(
         scipy.sum((tile(coords,
                         (N, 1, 1)) - swapaxes(tile(coords,
                                                    (N, 1, 1)), 0, 1))**2,
                   axis=2)) + eye(N)
     return 2 * sum(ravel(distances**-12 - distances**-6))
Example 48
def discrete_data(net, params, pts, interval, vars=None, random=False,
                  uncert_func=typ_val_uncert(0.1, 1e-14)):
    """
    Return a set of data points for the given network generated at the given
    parameters.

    net         Network to generate data for
    params      Parameters for this evaluation of the network
    pts         Number of data points to output
    interval    Integration interval
    vars        Variables to output data for, defaults to all species in net
    random      If False data points are distributed evenly over interval
                If True they are spread randomly and uniformly over each
                variable
    uncert_func Function that takes in a trajectory and a variable id and
                returns what uncertainty should be assumed for that variable,
                either as a scalar or a list the same length as the trajectory.
    """
    # Default for vars
    if vars is None:
        vars = net.species.keys()

    # Assign observed times to each variable
    var_times = {}
    for var in vars:
        if random:
            var_times[var] = scipy.rand(pts) * (interval[1]-interval[0]) + interval[0]
        else:
            var_times[var] = scipy.linspace(interval[0], interval[1], pts)

    # Create a sorted list of the unique times in the var_times dict
    int_times = set(scipy.ravel(list(var_times.values())))
    int_times.add(0)
    int_times = sorted(int_times)

    # Get the trajectory
    traj = Dynamics.integrate(net, int_times, params=params, fill_traj=False)

    # Build up our data dictionary
    data = {}
    for var, times in var_times.items():
        var_data = {}
        data[var] = var_data
        
        # Calculate our uncertainties
        var_uncerts = uncert_func(traj, var)
        for time in times:
            val = traj.get_var_val(var, time)
            if scipy.isscalar(var_uncerts):
                uncert = var_uncerts
            else:
                index = traj._get_time_index(time)
                uncert = var_uncerts[index]
            var_data[time] = (val, uncert)
    return data
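A hedged usage sketch; net and params are SloppyCell objects defined elsewhere in the codebase, and the values here are placeholders:

# 20 evenly spaced observation times over t in [0, 10] for every species
data = discrete_data(net, params, pts=20, interval=(0, 10))
for var, obs in data.items():
    for time, (val, uncert) in sorted(obs.items()):
        print(var, time, val, uncert)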
Ejemplo n.º 50
    def trainOnDataset(self, dataset):
        """ takes a SequentialDataSet with indim input dimension and scalar target """
        assert (dataset.getDimension('input') == self.indim)
        assert (dataset.getDimension('target') == 1)

        self.trainx = dataset.getField('input')
        self.trainy = ravel(dataset.getField('target'))
        self.noise = array([0.001] * len(self.trainx))
        # print(self.trainx, self.trainy)
        self.calculated = False
Ejemplo n.º 51
    def newSequence(self):
        """Marks the beginning of a new sequence. This method does nothing if
        called at the very start of the data set. Otherwise, it starts a new
        sequence. Empty sequences are not allowed, and an EmptySequenceError
        exception will be raised."""
        length = self.getLength()
        if length != 0:
            if ravel(self.getField('sequence_index'))[-1] == length:
                raise EmptySequenceError
            self._appendUnlinked('sequence_index', length)
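A sketch of the intended call pattern, assuming a PyBrain-style SequentialDataSet named ds (illustrative, not from the original source): at least one sample must be appended between calls, otherwise the guard above raises EmptySequenceError.

ds.newSequence()            # begin the first sequence
ds.addSample([0.0], [1.0])  # one (input, target) pair
ds.newSequence()            # fine: the previous sequence is non-empty
ds.newSequence()            # raises EmptySequenceError: nothing was appended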
Ejemplo n.º 54
    def plot(self, func, interp=True, plotter='imshow'):
        import matplotlib as mpl
        from matplotlib import pylab as pl
        if interp:
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
        else:
            y, x = sp.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            z = func(x, y)

        z = sp.where(sp.isinf(z), 0.0, z)

        extent = (self.xrange[0], self.xrange[1],
            self.yrange[0], self.yrange[1])
        pl.ioff()
        pl.clf()
        pl.hot() # Some like it hot
        if plotter == 'imshow':
            pl.imshow(sp.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
        elif plotter == 'contour':
            Y, X = sp.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            pl.contour(sp.ravel(X), sp.ravel(Y), z, 20)
        x = self.x
        y = self.y
        lc = mpl.collections.LineCollection(sp.array([((x[i], y[i]), (x[j], y[j]))
            for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
        ax = pl.gca()
        ax.add_collection(lc)

        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            pl.title('%s: %s' % (func.title, title))
        else:
            pl.title(title)

        pl.show()
        pl.ion()
Ejemplo n.º 55
def cube_grid(dims):
    """
    Return a regular nD-cube mesh with given shape.

    Eg.
      cube_grid((2,2))   -> 2x2   - 2d mesh (x,y)
      cube_grid((4,3,2)) -> 4x3x2 - 3d mesh (x,y,z)

    Eg.

      v,i = cube_grid((2,1))

      v =
      array([[ 0.,  0.],
             [ 1.,  0.],
             [ 2.,  0.],
             [ 0.,  1.],
             [ 1.,  1.],
             [ 2.,  1.]])

      i = 
      array([[[0, 3],
              [1, 4]],

             [[1, 4],
              [2, 5]]])

    """
    dims = tuple(dims)
    
    vert_dims = tuple(x+1 for x in dims)
    N = len(dims)
    
    vertices = zeros((prod(vert_dims),N))
    grid     = mgrid[tuple(slice(0,x,None) for x in reversed(vert_dims))]
    for i in range(N):
        vertices[:,i] = ravel(grid[N-i-1])


    # construct one cube (cell) to be tiled across the grid
    cube = zeros((2,)*N, dtype='i')
    # strides for converting an n-D grid index into a flat vertex index
    cycle = array([1] + list(cumprod(vert_dims)[:-1]), dtype='i')
    for i in ndindex(*((2,)*N)):
        cube[i] = sum(array(i) * cycle)


    #indices of all vertices which are the lower corner of a cube
    interior_indices = arange(prod(vert_dims)).reshape(tuple(reversed(vert_dims))).T
    interior_indices = interior_indices[tuple(slice(0,x,None) for x in dims)]

    indices = tile(cube,(prod(dims),) + (1,)*N) + interior_indices.reshape((prod(dims),) + (1,)*N)
    
    return (vertices,indices)
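The (2, 1) case from the docstring can be reproduced directly; vertices has shape (prod(dims + 1), N) and indices holds one 2**N-corner cube per cell:

v, i = cube_grid((2, 1))
print(v.shape)  # (6, 2): the 3 x 2 grid of vertex coordinates
print(i.shape)  # (2, 2, 2): two cells, each listing its 4 corner indices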
Ejemplo n.º 57
def locate_nonzero_data(data_array):
    r"""
    Generates a vector of non-zero indices for the flattened array
    """
    #
    logger.info('flattening array and locating non-zero voxels...')
    data_vector = sp.ravel(data_array)
    nonzero_locs = sp.where(data_vector)[0]
    logger.debug('    {} non-zero voxels'.format(nonzero_locs.size))
    #
    return nonzero_locs
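A quick check of the row-major flattening convention (the centre voxel of a 3 x 3 array lands at flat index 4); this assumes the module-level logger used above is already configured:

img = sp.zeros((3, 3), dtype=int)
img[1, 1] = 255
print(locate_nonzero_data(img))  # -> [4]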
Ejemplo n.º 58
    def _remove_disconnected_clusters(self):
        bad_pores = sp.array([], dtype=int)
        self._pore_map = self.pores()
        self._throat_map = self.throats()
        health = self.check_network_health()
        if health['disconnected_clusters'] == []:
            self._throat_map = self.throats()
            self._pore_map = self.pores()
        else:
            Np = self.num_pores()
            Nt = self.num_throats()
            cluster_sizes = [sp.shape(x)[0] for x in health['disconnected_clusters']]
            # 50 or less, if it's a really small network
            acceptable_size = min([min([50, Np/2]), max(cluster_sizes)])
            # Step through each cluster of pores; if it's a small cluster,
            # add it to the list
            for cluster in health['disconnected_clusters']:
                if sp.shape(cluster)[0] < acceptable_size:
                    bad_pores = sp.append(bad_pores, sp.ravel(cluster))
            bad_throats = sp.unique(self.find_neighbor_throats(bad_pores))
            # Create map for pores
            if sp.shape(bad_pores)[0] > 0:
                i = 0
                self._pore_map = sp.zeros((Np - sp.shape(bad_pores)[0],), dtype=int)
                for pore in self.pores():
                    if pore not in bad_pores:
                        self._pore_map[i] = pore
                        i += 1
            # Create map for throats
            if sp.shape(bad_throats)[0] > 0:
                i = 0
                self._throat_map = sp.zeros((Nt - sp.shape(bad_throats)[0],),
                                            dtype=int)
                for throat in self.throats():
                    if throat not in bad_throats:
                        self._throat_map[i] = throat
                        i += 1
            # Fix the pore transformer
            try:
                if sp.shape(bad_pores)[0] > 0:
                    i = 0
                    old_transform = self._dictionary['pname_transform']
                    self._dictionary['pname_transform'] = \
                        sp.zeros((Np - sp.shape(bad_pores)[0],), dtype=int)
                    for pore in self.pores():
                        if pore not in bad_pores:
                            self._dictionary['pname_transform'][i] = \
                                old_transform[pore]
                            i += 1
            except KeyError:
                logger.info('Could not update pname_transform; the imported '
                            'network may not have had it.')
            self.trim(pores=bad_pores)